repo_name | path | copies | size | content | license
|---|---|---|---|---|---|
Adai0808/scikit-learn | sklearn/grid_search.py | 32 | 36586 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin, ChangedBehaviorWarning
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
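# Hedged illustration (added; not part of the original module): the mixed-radix
# decoding in ParameterGrid.__getitem__ makes indexing agree with iteration
# order, which is what ParameterSampler relies on below, e.g.::
#
#     grid = ParameterGrid({'a': [1, 2], 'b': [True, False]})
#     assert [grid[i] for i in range(len(grid))] == list(grid)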
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
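# Hedged usage sketch (added; not part of the original module): when every
# value is a plain list (nothing exposes ``rvs``), the sampler draws settings
# without replacement, so the three dicts below are guaranteed distinct::
#
#     sampler = ParameterSampler({'a': [1, 2], 'b': [True, False]},
#                                n_iter=3, random_state=0)
#     settings = list(sampler)   # 3 distinct dicts out of the 4 possible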
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
This estimator will be cloned and then fitted.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
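# Hedged usage sketch (added; not part of the original module): evaluating a
# single (parameters, split) pair with fit_grid_point. ``X_demo``/``y_demo``
# are placeholder arrays, not defined here::
#
#     from sklearn.svm import SVC
#     from sklearn.cross_validation import KFold
#     est = SVC()
#     scorer = check_scoring(est, scoring='accuracy')
#     train, test = next(iter(KFold(len(y_demo), n_folds=3)))
#     score, params, n_test = fit_grid_point(X_demo, y_demo, est, {'C': 1.0},
#                                            train, test, scorer, verbose=0)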
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
        # out is a list of quadruplets: (score, n_test_samples, scoring_time, parameters)
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
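# Worked example of the ``iid`` weighting used in _fit above (illustrative
# numbers only): with fold scores 0.80 and 0.90 on test folds of 10 and 30
# samples, ``iid=True`` gives (0.80 * 10 + 0.90 * 30) / 40 = 0.875, while
# ``iid=False`` gives the plain fold mean (0.80 + 0.90) / 2 = 0.85.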
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
    used to predict are optimized by cross-validation.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each grid point.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default 1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, default=3
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if estimator is a classifier
and the target y is binary or multiclass, or the number
of folds in KFold otherwise.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
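# Hedged usage sketch (added; not part of the original module): after ``fit``,
# the attributes documented in the class docstring are available, e.g.
# continuing the iris example above::
#
#     clf.fit(iris.data, iris.target)
#     clf.best_params_      # e.g. {'kernel': 'rbf', 'C': 1}
#     clf.best_score_       # mean cross-validated score of best_params_
#     sorted(clf.grid_scores_, key=lambda s: s.mean_validation_score)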
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
    used to predict are optimized by cross-validation.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each parameter setting.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, optional
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if estimator is a classifier
and the target y is binary or multiclass, or the number
of folds in KFold otherwise.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
        A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
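# Hedged end-to-end sketch (added; not part of the original module): a minimal
# RandomizedSearchCV run on iris, sampling ``C`` from an exponential
# distribution; the __main__ guard keeps it from running on import.
if __name__ == "__main__":
    from scipy.stats import expon
    from sklearn import datasets, svm
    iris = datasets.load_iris()
    param_dist = {"C": expon(scale=10), "kernel": ["linear", "rbf"]}
    search = RandomizedSearchCV(svm.SVC(), param_dist, n_iter=4,
                                random_state=0)
    search.fit(iris.data, iris.target)
    print(search.best_params_)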
| bsd-3-clause |
DistrictDataLabs/machine-learning | code/utils.py | 1 | 4163 | # utils
# Utility functions for handling data
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Thu Feb 26 17:47:35 2015 -0500
#
# Copyright (C) 2015 District Data Labs
# For license information, see LICENSE.txt
#
# ID: utils.py [] benjamin@bengfort.com $
"""
Utility functions for handling data
"""
##########################################################################
## Imports
##########################################################################
import os
import csv
import time
import json
import numpy as np
from sklearn.datasets.base import Bunch
##########################################################################
## Module Constants
##########################################################################
SKL_DATA = "SCIKIT_LEARN_DATA"
BASE_DIR = os.path.normpath(os.path.join(os.path.dirname(__file__), ".."))
DATA_DIR = os.path.join(BASE_DIR, "data")
CODE_DIR = os.path.join(BASE_DIR, "code")
##########################################################################
## Helper Functions
##########################################################################
def timeit(func):
"""
Returns how long a function took to execute, along with the output
"""
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
return result, time.time() - start
return wrapper
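# Hedged usage sketch (added; not part of the original module): a function
# wrapped with @timeit returns a (result, elapsed_seconds) pair::
#
#     @timeit
#     def slow_sum(n):
#         return sum(range(n))
#
#     result, seconds = slow_sum(10 ** 6)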
##########################################################################
## Dataset Loading
##########################################################################
def get_data_home(data_home=None):
"""
Returns the path of the data directory
"""
if data_home is None:
data_home = os.environ.get(SKL_DATA, DATA_DIR)
data_home = os.path.expanduser(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
return data_home
def load_data(path, descr=None, target_index=-1):
"""
    Returns a scikit-learn dataset Bunch which includes several important
attributes that are used in modeling:
data: array of shape n_samples * n_features
target: array of length n_samples
feature_names: names of the features
target_names: names of the targets
filenames: names of the files that were loaded
DESCR: contents of the readme
This data therefore has the look and feel of the toy datasets.
Pass in a path usually just the name of the location in the data dir.
It will be joined with the result of `get_data_home`. The contents are:
path
- README.md # The file to load into DESCR
- meta.json # A file containing metadata to load
- dataset.txt # The numpy loadtxt file
You can specify another descr, another feature_names, and whether or
not the dataset has a header row. You can also specify the index of the
target, which by default is the last item in the row (-1)
"""
root = os.path.join(get_data_home(), path)
filenames = {
'meta': os.path.join(root, 'meta.json'),
'rdme': os.path.join(root, 'README.md'),
'data': os.path.join(root, 'dataset.txt'),
}
target_names = None
feature_names = None
DESCR = None
with open(filenames['meta'], 'r') as f:
meta = json.load(f)
target_names = meta['target_names']
feature_names = meta['feature_names']
with open(filenames['rdme'], 'r') as f:
DESCR = f.read()
dataset = np.loadtxt(filenames['data'])
data = None
target = None
# Target assumed to be either last or first row
if target_index == -1:
data = dataset[:,0:-1]
target = dataset[:,-1]
elif target_index == 0:
data = dataset[:,1:]
target = dataset[:,0]
else:
raise ValueError("Target index must be either -1 or 0")
return Bunch(data=data,
target=target,
filenames=filenames,
target_names=target_names,
feature_names=feature_names,
DESCR=DESCR)
def load_wheat():
return load_data('wheat')
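# Hedged illustration (added; not part of the original module): a minimal
# meta.json of the shape load_data expects; the field values are hypothetical::
#
#     {
#         "target_names": ["class_a", "class_b"],
#         "feature_names": ["feature_1", "feature_2", "feature_3"]
#     }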
| mit |
mugizico/scikit-learn | examples/neighbors/plot_species_kde.py | 282 | 4059 | """
================================================
Kernel Density Estimate of Species Distributions
================================================
This shows an example of a neighbors-based query (in particular a kernel
density estimate) on geospatial data, using a Ball Tree built upon the
Haversine distance metric -- i.e. distances over points in latitude/longitude.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
This example does not perform any learning over the data
(see :ref:`example_applications_plot_species_distribution_modeling.py` for
an example of classification based on the attributes in this dataset). It
simply shows the kernel density estimate of observed data points in
geospatial coordinates.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn.neighbors import KernelDensity
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
# Get matrices/arrays of species IDs and locations
data = fetch_species_distributions()
species_names = ['Bradypus Variegatus', 'Microryzomys Minutus']
Xtrain = np.vstack([data['train']['dd lat'],
data['train']['dd long']]).T
ytrain = np.array([d.decode('ascii').startswith('micro')
for d in data['train']['species']], dtype='int')
Xtrain *= np.pi / 180. # Convert lat/long to radians
# Set up the data grid for the contour plot
xgrid, ygrid = construct_grids(data)
X, Y = np.meshgrid(xgrid[::5], ygrid[::5][::-1])
land_reference = data.coverages[6][::5, ::5]
land_mask = (land_reference > -9999).ravel()
xy = np.vstack([Y.ravel(), X.ravel()]).T
xy = xy[land_mask]
xy *= np.pi / 180.
# Plot map of South America with distributions of each species
fig = plt.figure()
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05)
for i in range(2):
plt.subplot(1, 2, i + 1)
# construct a kernel density estimate of the distribution
print(" - computing KDE in spherical coordinates")
kde = KernelDensity(bandwidth=0.04, metric='haversine',
kernel='gaussian', algorithm='ball_tree')
kde.fit(Xtrain[ytrain == i])
# evaluate only on the land: -9999 indicates ocean
Z = -9999 + np.zeros(land_mask.shape[0])
Z[land_mask] = np.exp(kde.score_samples(xy))
Z = Z.reshape(X.shape)
# plot contours of the density
levels = np.linspace(0, Z.max(), 25)
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
plt.title(species_names[i])
plt.show()
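# Note (added; not part of the original example): with the haversine metric the
# data and the bandwidth are both angles in radians, which is why the inputs
# are multiplied by pi/180 above. On a sphere of radius ~6371 km,
# bandwidth=0.04 corresponds to roughly 0.04 * 6371 km ~= 255 km on the ground.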
| bsd-3-clause |
JiaMingLin/de-identification | test/test_common.py | 1 | 3809 | import common.constant as c
from common.data_utilities import DataUtils
from common.base import Base
from django.test import TestCase
TESTING_FILE = c.TEST_ORIGIN_DATA_PATH
class DataUtilitiesTests(TestCase):
    # TODO: The data coarsening and generalization steps should be separated, to simulate a more realistic case.
def setUp(self):
self.selected_attrs = dict({
'Age':'C',
'workclass':'D',
'fnlwgt':'C',
'education':'D',
'education_num':'D',
'marital_status':'D',
'occupation':'D',
'relationship':'D',
'race':'D',
'sex':'D',
'capital_gain':'C',
'capital_loss':'C',
'hours_per_week':'C',
'native_country':'D',
'salary_class':'D'
})
self.data = DataUtils(file_path = TESTING_FILE, selected_attrs = self.selected_attrs)
self.data.data_coarsilize()
self.base = Base()
def test_data_preview(self):
data = DataUtils(file_path = TESTING_FILE)
preview = data.data_preview()
self.assertEqual(len(preview.values[0]) > 0, True)
def test_read_data_by_three_selected_column(self):
"""
Test the read data by user specified columns
"""
self.assertEqual(len(self.data.get_nodes_name()) == len(self.selected_attrs), True)
def test_data_domain_keep_original_order(self):
"""
Test the order in domain object is in same order with
original raw data.
"""
df = self.data.get_pandas_df()
domain = self.data.get_domain()
cols = domain.keys()
self.assertEqual(cols == list(df.columns.values), True)
def test_data_coarsilization(self):
print self.data.get_pandas_df()[:5]
def test_data_generalization(self):
self.data.data_generalize()
print self.data.get_pandas_df()[:5]
def test_is_skip_pre_processing_with_create(self):
create_flag = True
request = {
'data_path':'/path/to/dummy/file.csv',
'selected_attrs':{
'names':['A', 'D', 'C', 'B'],
'types':['C', 'C', 'C', 'D']
}
}
instance = {
'data_path':'/path/to/dummy/file.csv',
'selected_attrs':{
'names':['A', 'D', 'C', 'B'],
'types':['C', 'C', 'C', 'D']
}
}
skip_pre_process = self.base.is_pre_process_skip(request, request, create_flag)
self.assertEqual(skip_pre_process == False, True)
def test_is_skip_pre_processing_with_data_path_change(self):
create_flag = False
request = {
'data_path':'/path/to/dummy/file.csv',
'selected_attrs':{
'names':['A', 'D', 'C', 'B'],
'types':['C', 'C', 'C', 'D']
}
}
instance = {
'data_path':'/path/to/dummy/file22222222.csv',
'selected_attrs':{
'names':['A', 'D', 'C', 'B'],
'types':['C', 'C', 'C', 'D']
}
}
skip_pre_process = self.base.is_pre_process_skip(request, instance, create_flag)
print skip_pre_process
self.assertEqual(skip_pre_process == False, True)
def test_is_skip_pre_processing_with_selected_attr_change(self):
create_flag = False
request = {
'data_path':'/path/to/dummy/file.csv',
'selected_attrs':{
'names':['A', 'D', 'C', 'B'],
'types':['C', 'C', 'C', 'D']
}
}
instance = {
'data_path':'/path/to/dummy/file.csv',
'selected_attrs':{
'names':['A', 'D', 'C', 'B'],
'types':['D', 'D', 'D', 'D']
}
}
skip_pre_process = self.base.is_pre_process_skip(request, instance, create_flag)
print skip_pre_process
self.assertEqual(skip_pre_process == False, True)
def test_is_skip_pre_processing_without_change(self):
create_flag = False
request = {
'data_path':'/path/to/dummy/file.csv',
'selected_attrs':{
'names':['A', 'D', 'C', 'B'],
'types':['C', 'C', 'C', 'D']
}
}
instance = {
'data_path':'/path/to/dummy/file.csv',
'selected_attrs':{
'names':['A', 'D', 'C', 'B'],
'types':['C', 'C', 'C', 'D']
}
}
skip_pre_process = self.base.is_pre_process_skip(request, instance, create_flag)
self.assertEqual(skip_pre_process == True, True) | apache-2.0 |
fredhusser/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
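# Hedged usage sketch (added; not part of the original module): a typical
# shrinkage-covariance fit with LedoitWolf on toy data::
#
#     import numpy as np
#     from sklearn.covariance import LedoitWolf
#     X = np.random.RandomState(0).randn(50, 5)
#     lw = LedoitWolf().fit(X)
#     lw.covariance_    # (5, 5) shrunk covariance estimate
#     lw.shrinkage_     # shrinkage coefficient in [0, 1]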
| bsd-3-clause |
brentjm/Impurity-Predictions | server/package/desiccant.py | 1 | 4140 | """
Desiccant class
Brent Maranzano
2016-04-16
"""
import pandas as pd
class Desiccant(object):
"""
Define the desiccant inside the container (e.g. type, amount, water...etc).
Class Attributes
ID : string - unique identification number to lookup parameters
name : string - Desiccant material.
mass : float - mass of desiccant (g)
water_content : float - mass fraction of water contained in the desiccant
(mass water (mg) / mass dry desiccant (g))
GAB_parameters : dictionary - {Wm, C, K} GAB constants from lookup file.
density: float - density of dry desiccant (g/cm^3)
water : float - total mass of water (g)
Class Methods
set_properties : Set the properties of the desiccant
refresh : Refresh the desiccant to a new water content
equilibrate : Equilibrate the deisccant to a provided RH and set the water content.
calc_water_content: Calculate the water content from GAB and passed water activity
"""
def __init__(self, ID, mass, **kwargs):
self.set_properties(ID, mass, **kwargs)
def set_properties(self, ID, mass, density=1.0, **kwargs):
"""
Set the properties of the desiccant.
Parameters
ID : string - Unique identification of the desiccant for the lookup.
mass : float - Mass of the desiccant.
optional kwargs
water_content : float - mass fraction of water in desiccant
(mass water (mg) / mass dry desiccant (g))
density : float - density of dry desiccant (g)
"""
store = pd.HDFStore("simulation_constants.hdf", mode="r")
GAB_constants = store["GAB_constants"]
store.close()
if ID in GAB_constants.index.values:
self.ID = ID
            self.name = GAB_constants.loc[ID]["name"]
self.GAB_parameters = GAB_constants.loc[ID][["C", "K", "Wm"]].to_dict()
else:
raise ValueError("Desiccant type {} is not defined".format(ID))
self.mass = float(mass)
if "water_content" in kwargs:
self.water_content = float(kwargs["water_content"])
elif "initial_water_activity" in kwargs:
self.water_content = \
self.calc_water_content(kwargs["initial_water_activity"]) * 1.e3
else:
self.water_content = 20.
self.density = float(density)
self.water = self.water_content * self.mass * 1.e-3
def refresh(self, water_content=20., initial_activity=None):
"""
Refresh the desiccant (e.g. replace with equivalent desiccant
mass with lower water content). Specify either new water
content (water_content), or initial water activity of the
desiccant (initial_activity).
Parameters
water_content: float - Water content of the fresh desiccant
(mg water / g dry desiccant)
initial_activity: float - Water activity of the fresh desiccant (unitless)
"""
if initial_activity is None:
self.water_content = float(water_content)
else:
self.water_content = \
self.calc_water_content(float(initial_activity))
self.water = self.water_content * self.mass * 1.e-3
def equilibrate(self, aw):
"""
Equilibrate the desiccant to a new water activity.
Parameters
aw: float - Water activity to equilibrate desiccant.
"""
self.water_content = self.calc_water_content(aw)
self.water = self.water_content * self.mass * 1.e-3
def calc_water_content(self, aw):
"""
Calculate the water content from the GAB parameters at the provided
water activity.
Parameters
aw : float - Water activity
return: water_content (mg water / g desiccant)
"""
aw = float(aw)
Wm = self.GAB_parameters["Wm"]
C = self.GAB_parameters["C"]
K = self.GAB_parameters["K"]
water_content = (Wm*C*K*aw) / ((1-K*aw) * (1-K*aw+C*K*aw)) * 1.e3
return water_content
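# Worked example of the GAB relation above (hypothetical constants, added for
# illustration only): with Wm=0.15, C=20, K=0.8 and aw=0.3,
#   K*aw = 0.24, numerator = Wm*C*K*aw = 0.72,
#   denominator = (1 - 0.24) * (1 - 0.24 + 20*0.8*0.3) = 0.76 * 5.56 = 4.2256,
# so water_content = 0.72 / 4.2256 * 1e3 ~= 170 mg water per g dry desiccant.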
| bsd-2-clause |
ryanpdwyer/myscipkg2 | setup.py | 1 | 1557 | # -*- coding: utf-8 -*-
import sys
import io
try:
from setuptools import setup, find_packages
except ImportError:
print("Please install or upgrade setuptools or pip")
sys.exit(1)
readme = io.open('README.rst', mode='r', encoding='utf-8').read()
doclink = """
Documentation
-------------
The full documentation is at http://myscipkg2.rtfd.org."""
history = io.open('HISTORY.rst', mode='r',
encoding='utf-8').read().replace('.. :changelog:', '')
setup(
name='myscipkg2',
version='0.1.3',
description='A package for science using numpy, matplotlib, readthedocs, etc.',
long_description=readme + '\n\n' + doclink + '\n\n',
license='MIT',
author='Ryan Dwyer',
author_email='ryanpdwyer@gmail.com',
url='https://github.com/ryanpdwyer/myscipkg2',
zip_safe=False,
include_package_data=True,
# This lets setuptools include_package_data work with git
setup_requires=["setuptools_git >= 0.3"],
packages=find_packages(),
# Add requirements here. If the requirement is difficult to install,
# add to docs/conf.py MAGIC_MOCK, and .travis.yml 'conda install ...'
install_requires=[],
tests_require=['nose'],
test_suite='nose.collector',
keywords='myscipkg2',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
)
| mit |
bgris/ODL_bgris | lib/python3.5/site-packages/ipykernel/pylab/config.py | 10 | 4485 | """Configurable for configuring the IPython inline backend
This module does not import anything from matplotlib.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from traitlets.config.configurable import SingletonConfigurable
from traitlets import (
Dict, Instance, Set, Bool, TraitError, Unicode
)
#-----------------------------------------------------------------------------
# Configurable for inline backend options
#-----------------------------------------------------------------------------
def pil_available():
"""Test if PIL/Pillow is available"""
out = False
try:
from PIL import Image
out = True
except:
pass
return out
# inherit from InlineBackendConfig for deprecation purposes
class InlineBackendConfig(SingletonConfigurable):
pass
class InlineBackend(InlineBackendConfig):
"""An object to store configuration of the inline backend."""
# The typical default figure size is too large for inline use,
# so we shrink the figure size to 6x4, and tweak fonts to
# make that fit.
rc = Dict({'figure.figsize': (6.0,4.0),
# play nicely with white background in the Qt and notebook frontend
'figure.facecolor': (1,1,1,0),
'figure.edgecolor': (1,1,1,0),
# 12pt labels get cutoff on 6x4 logplots, so use 10pt.
'font.size': 10,
# 72 dpi matches SVG/qtconsole
# this only affects PNG export, as SVG has no dpi setting
'figure.dpi': 72,
# 10pt still needs a little more room on the xlabel:
'figure.subplot.bottom' : .125
},
help="""Subset of matplotlib rcParams that should be different for the
inline backend."""
).tag(config=True)
figure_formats = Set({'png'},
help="""A set of figure formats to enable: 'png',
'retina', 'jpeg', 'svg', 'pdf'.""").tag(config=True)
def _update_figure_formatters(self):
if self.shell is not None:
from IPython.core.pylabtools import select_figure_formats
select_figure_formats(self.shell, self.figure_formats, **self.print_figure_kwargs)
def _figure_formats_changed(self, name, old, new):
if 'jpg' in new or 'jpeg' in new:
if not pil_available():
raise TraitError("Requires PIL/Pillow for JPG figures")
self._update_figure_formatters()
figure_format = Unicode(help="""The figure format to enable (deprecated
use `figure_formats` instead)""").tag(config=True)
def _figure_format_changed(self, name, old, new):
if new:
self.figure_formats = {new}
print_figure_kwargs = Dict({'bbox_inches' : 'tight'},
help="""Extra kwargs to be passed to fig.canvas.print_figure.
Logical examples include: bbox_inches, quality (for jpeg figures), etc.
"""
).tag(config=True)
_print_figure_kwargs_changed = _update_figure_formatters
close_figures = Bool(True,
help="""Close all figures at the end of each cell.
When True, ensures that each cell starts with no active figures, but it
also means that one must keep track of references in order to edit or
redraw figures in subsequent cells. This mode is ideal for the notebook,
where residual plots from other cells might be surprising.
When False, one must call figure() to create new figures. This means
that gcf() and getfigs() can reference figures created in other cells,
and the active figure can continue to be edited with pylab/pyplot
methods that reference the current active figure. This mode facilitates
iterative editing of figures, and behaves most consistently with
other matplotlib backends, but figure barriers between cells must
be explicit.
""").tag(config=True)
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
allow_none=True)
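# Hedged configuration sketch (added; not part of the original module): these
# traits are usually set through an IPython config object, e.g. in
# ipython_kernel_config.py::
#
#     c.InlineBackend.figure_formats = {'png', 'retina'}
#     c.InlineBackend.rc = {'figure.figsize': (8.0, 5.0)}
#     c.InlineBackend.close_figures = False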
| gpl-3.0 |
karoraw1/GLM_Wrapper | bin/CleanUpMetaboliteData.py | 1 | 1977 | # -*- coding: utf-8 -*-
"""
Created on Mon May 16 15:47:12 2016
@author: login
"""
import pandas as pd
import numpy as np
import os, sys
def strip(text):
try:
return text.strip()
except AttributeError:
return text
data_fn = "MysticLakeIons_Clean.csv"
data2_fn = "MysticLakeIons_Part2_Clean.csv"
data_path = os.path.join("..","waterData", data_fn)
data2_path = os.path.join("..","waterData", data2_fn)
ions_df = pd.read_csv(data_path, parse_dates=["Collection Date"],
converters = {'Sample Name' : strip})
ions2_df = pd.read_csv(data2_path, parse_dates=["Collection_Date"],
converters = {'Sample Name' : strip})
matchDepthandDate = {}
counter = 0
for i in xrange(ions2_df.shape[0]):
new_meas = ions2_df.iloc[i,:]
test1=False
test2=False
for j in xrange(ions_df.shape[0]):
old_meas = ions_df.iloc[j, :]
test1=np.equal(new_meas.Collection_Date, old_meas["Collection Date"])
try:
o_m = int(old_meas["Depth"])
n_m = int(new_meas["Depth"])
test2=np.equal(n_m, o_m)
if (test1 and test2) == True:
matchDepthandDate[counter] = (new_meas.Collection_Date,
new_meas["Depth"],
old_meas["Collection Date"],
old_meas["Depth"])
counter+=1
except ValueError:
o_m = str(old_meas["Depth"])
n_m = str(new_meas["Depth"])
test2= n_m == o_m
if (test1 and test2) == True:
matchDepthandDate[counter] = (new_meas.Collection_Date,
new_meas["Depth"],
old_meas["Collection Date"],
old_meas["Depth"])
counter+=1
| mit |
StratsOn/zipline | tests/test_rolling_panel.py | 20 | 7005 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from collections import deque
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from zipline.utils.data import MutableIndexRollingPanel, RollingPanel
from zipline.finance.trading import with_environment
class TestRollingPanel(unittest.TestCase):
@with_environment()
def test_alignment(self, env):
items = ('a', 'b')
sids = (1, 2)
dts = env.market_minute_window(
env.open_and_closes.market_open[0], 4,
).values
rp = RollingPanel(2, items, sids, initial_dates=dts[1:-1])
frame = pd.DataFrame(
data=np.arange(4).reshape((2, 2)),
columns=sids,
index=items,
)
nan_arr = np.empty((2, 6))
nan_arr.fill(np.nan)
rp.add_frame(dts[-1], frame)
cur = rp.get_current()
data = np.array((((np.nan, np.nan),
(0, 1)),
((np.nan, np.nan),
(2, 3))),
float)
expected = pd.Panel(
data,
major_axis=dts[2:],
minor_axis=sids,
items=items,
)
expected.major_axis = expected.major_axis.tz_localize('utc')
tm.assert_panel_equal(
cur,
expected,
)
rp.extend_back(dts[:-2])
cur = rp.get_current()
data = np.array((((np.nan, np.nan),
(np.nan, np.nan),
(np.nan, np.nan),
(0, 1)),
((np.nan, np.nan),
(np.nan, np.nan),
(np.nan, np.nan),
(2, 3))),
float)
expected = pd.Panel(
data,
major_axis=dts,
minor_axis=sids,
items=items,
)
expected.major_axis = expected.major_axis.tz_localize('utc')
tm.assert_panel_equal(
cur,
expected,
)
@with_environment()
def test_get_current_multiple_call_same_tick(self, env):
"""
        In the old get_current, each call to get_current would copy the data,
        so changing the returned object would have no side effects.
To keep the same api, make sure that the raw option returns a copy too.
"""
def data_id(values):
return values.__array_interface__['data']
items = ('a', 'b')
sids = (1, 2)
dts = env.market_minute_window(
env.open_and_closes.market_open[0], 4,
).values
rp = RollingPanel(2, items, sids, initial_dates=dts[1:-1])
frame = pd.DataFrame(
data=np.arange(4).reshape((2, 2)),
columns=sids,
index=items,
)
nan_arr = np.empty((2, 6))
nan_arr.fill(np.nan)
rp.add_frame(dts[-1], frame)
        # each get_current call makes a copy
cur = rp.get_current()
cur2 = rp.get_current()
assert data_id(cur.values) != data_id(cur2.values)
# make sure raw follow same logic
raw = rp.get_current(raw=True)
raw2 = rp.get_current(raw=True)
assert data_id(raw) != data_id(raw2)
class TestMutableIndexRollingPanel(unittest.TestCase):
def test_basics(self, window=10):
items = ['bar', 'baz', 'foo']
minor = ['A', 'B', 'C', 'D']
rp = MutableIndexRollingPanel(window, items, minor, cap_multiple=2)
dates = pd.date_range('2000-01-01', periods=30, tz='utc')
major_deque = deque(maxlen=window)
frames = {}
for i, date in enumerate(dates):
frame = pd.DataFrame(np.random.randn(3, 4), index=items,
columns=minor)
rp.add_frame(date, frame)
frames[date] = frame
major_deque.append(date)
result = rp.get_current()
expected = pd.Panel(frames, items=list(major_deque),
major_axis=items, minor_axis=minor)
tm.assert_panel_equal(result, expected.swapaxes(0, 1))
def test_adding_and_dropping_items(self, n_items=5, n_minor=10, window=10,
periods=30):
np.random.seed(123)
items = deque(range(n_items))
minor = deque(range(n_minor))
expected_items = deque(range(n_items))
expected_minor = deque(range(n_minor))
first_non_existant = max(n_items, n_minor) + 1
# We want to add new columns with random order
add_items = np.arange(first_non_existant, first_non_existant + periods)
np.random.shuffle(add_items)
rp = MutableIndexRollingPanel(window, items, minor, cap_multiple=2)
dates = pd.date_range('2000-01-01', periods=periods, tz='utc')
frames = {}
expected_frames = deque(maxlen=window)
expected_dates = deque()
for i, (date, add_item) in enumerate(zip(dates, add_items)):
frame = pd.DataFrame(np.random.randn(n_items, n_minor),
index=items, columns=minor)
if i >= window:
# Old labels and dates should start to get dropped at every
# call
del frames[expected_dates.popleft()]
expected_minor.popleft()
expected_items.popleft()
expected_frames.append(frame)
expected_dates.append(date)
rp.add_frame(date, frame)
frames[date] = frame
result = rp.get_current()
np.testing.assert_array_equal(sorted(result.minor_axis.values),
sorted(expected_minor))
np.testing.assert_array_equal(sorted(result.items.values),
sorted(expected_items))
tm.assert_frame_equal(frame.T,
result.ix[frame.index, -1, frame.columns])
expected_result = pd.Panel(frames).swapaxes(0, 1)
tm.assert_panel_equal(expected_result,
result)
# Insert new items
minor.popleft()
minor.append(add_item)
items.popleft()
items.append(add_item)
expected_minor.append(add_item)
expected_items.append(add_item)
| apache-2.0 |
madjelan/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 127 | 37672 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset.
for loss in ('deviance', 'exponential'):
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
for loss in ('deviance', 'exponential'):
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09, \
"GB(loss={}) failed with error {}".format(loss, error_rate)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
"failed with error {}".format(loss, error_rate))
def test_boston():
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
for loss in ("ls", "lad", "huber"):
for subsample in (1.0, 0.5):
last_y_pred = None
for i, sample_weight in enumerate(
(None, np.ones(len(boston.target)),
2 * np.ones(len(boston.target)))):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4, subsample=subsample,
min_samples_split=1,
random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and " \
"mse = %.4f" % (loss, mse)
if last_y_pred is not None:
np.testing.assert_array_almost_equal(
last_y_pred, y_pred,
err_msg='pred_%d does not match last pred_%d for loss %r and subsample %r. '
% (i, i - 1, loss, subsample))
last_y_pred = y_pred
def test_iris():
# Check consistency on dataset iris.
for subsample in (1.0, 0.5):
for sample_weight in (None, np.ones(len(iris.target))):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
# "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict(rng.rand(2))
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict(rng.rand(2)))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test that a column-vector y raises a DataConversionWarning.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise; otherwise the warning gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test that oob_improvement_ has the correct shape; includes a hard-coded regression check.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test that accessing oob_improvement_ raises when subsample=1.0 (no OOB samples).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
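# A ``monitor`` passed to ``fit`` can be any callable with the signature
# ``monitor(i, est, locals)``; returning True stops boosting early. A minimal
# sketch of a reusable factory (``stop_after`` is illustrative, not part of
# the library):
#
#   def stop_after(n):
#       def _monitor(i, est, locals_):
#           return i + 1 >= n  # stop once n stages have been fit
#       return _monitor
#
#   # usage: est.fit(X, y, monitor=stop_after(5))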
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_min_weight_leaf():
# Regression test for issue #4447
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = GradientBoostingRegressor(n_estimators=5, min_weight_fraction_leaf=0.1)
gb.fit(X, y, sample_weight=sample_weight)
assert_true(gb.predict([[1, 0]])[0] > 0.5)
assert_almost_equal(gb.estimators_[0, 0].splitter.min_weight_leaf, 0.2)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
| bsd-3-clause |
cython-testbed/pandas | pandas/core/indexes/timedeltas.py | 1 | 26186 | """ implement the TimedeltaIndex """
from datetime import datetime
import numpy as np
from pandas.core.dtypes.common import (
_TD_DTYPE,
is_integer,
is_float,
is_list_like,
is_scalar,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
pandas_dtype,
ensure_int64)
from pandas.core.dtypes.missing import isna
from pandas.core.arrays.timedeltas import (
TimedeltaArrayMixin, _is_convertible_to_td, _to_m8)
from pandas.core.arrays import datetimelike as dtl
from pandas.core.indexes.base import Index
from pandas.core.indexes.numeric import Int64Index
import pandas.compat as compat
from pandas.tseries.frequencies import to_offset
from pandas.core.base import _shared_docs
from pandas.core.indexes.base import _index_shared_docs
import pandas.core.common as com
import pandas.core.dtypes.concat as _concat
from pandas.util._decorators import Appender, Substitution
from pandas.core.indexes.datetimelike import (
TimelikeOps, DatetimeIndexOpsMixin, wrap_arithmetic_op,
wrap_array_method, wrap_field_accessor)
from pandas.core.tools.timedeltas import (
to_timedelta, _coerce_scalar_to_timedelta_type)
from pandas._libs import (lib, index as libindex,
join as libjoin, Timedelta, NaT)
class TimedeltaIndex(TimedeltaArrayMixin, DatetimeIndexOpsMixin,
TimelikeOps, Int64Index):
"""
Immutable ndarray of timedelta64 data, represented internally as int64, and
which can be boxed to timedelta objects
Parameters
----------
data : array-like (1-dimensional), optional
Optional timedelta-like data to construct index with
unit : string, optional
Unit of the data (D, h, m, s, ms, us, ns), used when the data is an integer/float number
freq : string or pandas offset object, optional
One of pandas date offset strings or corresponding objects. The string
'infer' can be passed in order to set the frequency of the index as the
inferred frequency upon creation
copy : bool
Make a copy of input ndarray
start : starting value, timedelta-like, optional
If data is None, start is used as the start point in generating regular
timedelta data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, timedelta-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
name : object
Name to be stored in the index
Notes
-----
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
See Also
---------
Index : The base pandas Index type
Timedelta : Represents a duration between two dates or times.
DatetimeIndex : Index of datetime64 data
PeriodIndex : Index of Period data
Attributes
----------
days
seconds
microseconds
nanoseconds
components
inferred_freq
Methods
-------
to_pytimedelta
to_series
round
floor
ceil
to_frame
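Examples
--------
A minimal construction sketch (the repr shown is indicative and may differ
slightly between pandas versions):
>>> pd.TimedeltaIndex(['1 days', '2 days', '3 days'])
TimedeltaIndex(['1 days', '2 days', '3 days'],
dtype='timedelta64[ns]', freq=None)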
"""
_typ = 'timedeltaindex'
_join_precedence = 10
def _join_i8_wrapper(joinf, **kwargs):
return DatetimeIndexOpsMixin._join_i8_wrapper(
joinf, dtype='m8[ns]', **kwargs)
_inner_indexer = _join_i8_wrapper(libjoin.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(libjoin.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(libjoin.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
libjoin.left_join_indexer_unique_int64, with_indexers=False)
# define my properties & methods for delegation
_other_ops = []
_bool_ops = []
_object_ops = ['freq']
_field_ops = ['days', 'seconds', 'microseconds', 'nanoseconds']
_datetimelike_ops = _field_ops + _object_ops + _bool_ops
_datetimelike_methods = ["to_pytimedelta", "total_seconds",
"round", "floor", "ceil"]
_engine_type = libindex.TimedeltaEngine
_comparables = ['name', 'freq']
_attributes = ['name', 'freq']
_is_numeric_dtype = True
_infer_as_myclass = True
_freq = None
def __new__(cls, data=None, unit=None, freq=None, start=None, end=None,
periods=None, closed=None, dtype=None, copy=False,
name=None, verify_integrity=True):
if isinstance(data, TimedeltaIndex) and freq is None and name is None:
if copy:
return data.copy()
else:
return data._shallow_copy()
freq, freq_infer = dtl.maybe_infer_freq(freq)
if data is None:
# TODO: Remove this block and associated kwargs; GH#20535
if freq is None and com._any_none(periods, start, end):
raise ValueError('Must provide freq argument if no data is '
'supplied')
periods = dtl.validate_periods(periods)
return cls._generate_range(start, end, periods, name, freq,
closed=closed)
if unit is not None:
data = to_timedelta(data, unit=unit, box=False)
if is_scalar(data):
raise ValueError('TimedeltaIndex() must be called with a '
'collection of some kind, {data} was passed'
.format(data=repr(data)))
# convert if not already
if getattr(data, 'dtype', None) != _TD_DTYPE:
data = to_timedelta(data, unit=unit, box=False)
elif copy:
data = np.array(data, copy=True)
subarr = cls._simple_new(data, name=name, freq=freq)
# check that we are matching freqs
if verify_integrity and len(subarr) > 0:
if freq is not None and not freq_infer:
cls._validate_frequency(subarr, freq)
if freq_infer:
inferred = subarr.inferred_freq
if inferred:
subarr.freq = to_offset(inferred)
return subarr
@classmethod
def _generate_range(cls, start, end, periods,
name=None, freq=None, closed=None):
# TimedeltaArray gets `name` via **kwargs, so we need to explicitly
# override it if name is passed as a positional argument
return super(TimedeltaIndex, cls)._generate_range(start, end,
periods, freq,
name=name,
closed=closed)
@classmethod
def _simple_new(cls, values, name=None, freq=None, **kwargs):
result = super(TimedeltaIndex, cls)._simple_new(values, freq, **kwargs)
result.name = name
result._reset_identity()
return result
@property
def _formatter_func(self):
from pandas.io.formats.format import _get_format_timedelta64
return _get_format_timedelta64(self, box=True)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
super(TimedeltaIndex, self).__setstate__(state)
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def _maybe_update_attributes(self, attrs):
""" Update Index attributes (e.g. freq) depending on op """
freq = attrs.get('freq', None)
if freq is not None:
# no need to infer if freq is None
attrs['freq'] = 'infer'
return attrs
def _evaluate_with_timedelta_like(self, other, op):
result = TimedeltaArrayMixin._evaluate_with_timedelta_like(self, other,
op)
return wrap_arithmetic_op(self, other, result)
def _format_native_types(self, na_rep=u'NaT', date_format=None, **kwargs):
from pandas.io.formats.format import Timedelta64Formatter
return Timedelta64Formatter(values=self,
nat_rep=na_rep,
justify='all').get_result()
days = wrap_field_accessor(TimedeltaArrayMixin.days)
seconds = wrap_field_accessor(TimedeltaArrayMixin.seconds)
microseconds = wrap_field_accessor(TimedeltaArrayMixin.microseconds)
nanoseconds = wrap_field_accessor(TimedeltaArrayMixin.nanoseconds)
total_seconds = wrap_array_method(TimedeltaArrayMixin.total_seconds, True)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_timedelta64_dtype(dtype) and not is_timedelta64_ns_dtype(dtype):
# return an index (essentially this is division)
result = self.values.astype(dtype, copy=copy)
if self.hasnans:
values = self._maybe_mask_results(result, convert='float64')
return Index(values, name=self.name)
return Index(result.astype('i8'), name=self.name)
return super(TimedeltaIndex, self).astype(dtype, copy=copy)
def union(self, other):
"""
Specialized union for TimedeltaIndex objects. If combining
overlapping ranges with the same DateOffset, this will be much
faster than Index.union
Parameters
----------
other : TimedeltaIndex or array-like
Returns
-------
y : Index or TimedeltaIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, TimedeltaIndex):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
this, other = self, other
if this._can_fast_union(other):
return this._fast_union(other)
else:
result = Index.union(this, other)
if isinstance(result, TimedeltaIndex):
if result.freq is None:
result.freq = to_offset(result.inferred_freq)
return result
def join(self, other, how='left', level=None, return_indexers=False,
sort=False):
"""
See Index.join
"""
if _is_convertible_to_index(other):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
return Index.join(self, other, how=how, level=level,
return_indexers=return_indexers,
sort=sort)
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (isinstance(other, TimedeltaIndex) and self.freq == other.freq and
self._can_fast_union(other)):
joined = self._shallow_copy(joined, name=name)
return joined
else:
return self._simple_new(joined, name)
def _can_fast_union(self, other):
if not isinstance(other, TimedeltaIndex):
return False
freq = self.freq
if freq is None or freq != other.freq:
return False
if not self.is_monotonic or not other.is_monotonic:
return False
if len(self) == 0 or len(other) == 0:
return True
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
right_start = right[0]
left_end = left[-1]
# Only need to "adjoin", not overlap
return (right_start == left_end + freq) or right_start in left
def _fast_union(self, other):
if len(other) == 0:
return self.view(type(self))
if len(self) == 0:
return other.view(type(self))
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
left_end = left[-1]
right_end = right[-1]
# concatenate
if left_end < right_end:
loc = right.searchsorted(left_end, side='right')
right_chunk = right.values[loc:]
dates = _concat._concat_compat((left.values, right_chunk))
return self._shallow_copy(dates)
else:
return left
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self._simple_new(result, name=name, freq=None)
def intersection(self, other):
"""
Specialized intersection for TimedeltaIndex objects. May be much faster
than Index.intersection
Parameters
----------
other : TimedeltaIndex or array-like
Returns
-------
y : Index or TimedeltaIndex
"""
self._assert_can_do_setop(other)
if not isinstance(other, TimedeltaIndex):
try:
other = TimedeltaIndex(other)
except (TypeError, ValueError):
pass
result = Index.intersection(self, other)
return result
if len(self) == 0:
return self
if len(other) == 0:
return other
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
end = min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._shallow_copy(left_chunk)
def _maybe_promote(self, other):
if other.inferred_type == 'timedelta':
other = TimedeltaIndex(other)
return self, other
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
if _is_convertible_to_td(key):
key = Timedelta(key)
return self.get_value_maybe_box(series, key)
try:
return com.maybe_box(self, Index.get_value(self, series, key),
series, key)
except KeyError:
try:
loc = self._get_string_slice(key)
return series[loc]
except (TypeError, ValueError, KeyError):
pass
try:
return self.get_value_maybe_box(series, key)
except (TypeError, ValueError, KeyError):
raise KeyError(key)
def get_value_maybe_box(self, series, key):
if not isinstance(key, Timedelta):
key = Timedelta(key)
values = self._engine.get_value(com.values_from_object(series), key)
return com.maybe_box(self, values, series, key)
def get_loc(self, key, method=None, tolerance=None):
"""
Get integer location for requested label
Returns
-------
loc : int
"""
if is_list_like(key) or (isinstance(key, datetime) and key is not NaT):
# GH#20464 datetime check here is to ensure we don't allow
# datetime objects to be incorrectly treated as timedelta
# objects; NaT is a special case because it plays a double role
# as Not-A-Timedelta
raise TypeError
if isna(key):
key = NaT
if tolerance is not None:
# try converting tolerance now, so errors don't get swallowed by
# the try/except clauses below
tolerance = self._convert_tolerance(tolerance, np.asarray(key))
if _is_convertible_to_td(key):
key = Timedelta(key)
return Index.get_loc(self, key, method, tolerance)
try:
return Index.get_loc(self, key, method, tolerance)
except (KeyError, ValueError, TypeError):
try:
return self._get_string_slice(key)
except (TypeError, KeyError, ValueError):
pass
try:
stamp = Timedelta(key)
return Index.get_loc(self, stamp, method, tolerance)
except (KeyError, ValueError):
raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
"""
If label is a string, cast it to timedelta according to resolution.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
"""
assert kind in ['ix', 'loc', 'getitem', None]
if isinstance(label, compat.string_types):
parsed = _coerce_scalar_to_timedelta_type(label, box=True)
lbound = parsed.round(parsed.resolution)
if side == 'left':
return lbound
else:
return (lbound + to_offset(parsed.resolution) -
Timedelta(1, 'ns'))
elif ((is_integer(label) or is_float(label)) and
not is_timedelta64_dtype(label)):
self._invalid_indexer('slice', label)
return label
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
freq = getattr(self, 'freqstr',
getattr(self, 'inferred_freq', None))
if is_integer(key) or is_float(key) or key is NaT:
self._invalid_indexer('slice', key)
loc = self._partial_td_slice(key, freq, use_lhs=use_lhs,
use_rhs=use_rhs)
return loc
def _partial_td_slice(self, key, freq, use_lhs=True, use_rhs=True):
# given a key, try to figure out a location for a partial slice
if not isinstance(key, compat.string_types):
return key
raise NotImplementedError
# TODO(wesm): dead code
# parsed = _coerce_scalar_to_timedelta_type(key, box=True)
# is_monotonic = self.is_monotonic
# # figure out the resolution of the passed td
# # and round to it
# # t1 = parsed.round(reso)
# t2 = t1 + to_offset(parsed.resolution) - Timedelta(1, 'ns')
# stamps = self.asi8
# if is_monotonic:
# # we are out of range
# if (len(stamps) and ((use_lhs and t1.value < stamps[0] and
# t2.value < stamps[0]) or
# ((use_rhs and t1.value > stamps[-1] and
# t2.value > stamps[-1])))):
# raise KeyError
# # a monotonic (sorted) series can be sliced
# left = (stamps.searchsorted(t1.value, side='left')
# if use_lhs else None)
# right = (stamps.searchsorted(t2.value, side='right')
# if use_rhs else None)
# return slice(left, right)
# lhs_mask = (stamps >= t1.value) if use_lhs else True
# rhs_mask = (stamps <= t2.value) if use_rhs else True
# # try to find a the dates
# return (lhs_mask & rhs_mask).nonzero()[0]
@Substitution(klass='TimedeltaIndex')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
if isinstance(value, (np.ndarray, Index)):
value = np.array(value, dtype=_TD_DTYPE, copy=False)
else:
value = _to_m8(value)
return self.values.searchsorted(value, side=side, sorter=sorter)
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'timedelta'
@property
def inferred_type(self):
return 'timedelta64'
@property
def is_all_dates(self):
return True
def insert(self, loc, item):
"""
Make new Index inserting new item at location
Parameters
----------
loc : int
item : object
if not either a timedelta-like or a numpy integer-like, the returned
Index dtype will be object rather than timedelta64.
Returns
-------
new_index : Index
"""
# try to convert if possible
if _is_convertible_to_td(item):
try:
item = Timedelta(item)
except Exception:
pass
elif is_scalar(item) and isna(item):
# GH 18295
item = self._na_value
freq = None
if isinstance(item, Timedelta) or (is_scalar(item) and isna(item)):
# check freq can be preserved on edge cases
if self.freq is not None:
if ((loc == 0 or loc == -len(self)) and
item + self.freq == self[0]):
freq = self.freq
elif (loc == len(self)) and item - self.freq == self[-1]:
freq = self.freq
item = _to_m8(item)
try:
new_tds = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
self[loc:].asi8))
return self._shallow_copy(new_tds, freq=freq)
except (AttributeError, TypeError):
# fall back to object index
if isinstance(item, compat.string_types):
return self.astype(object).insert(loc, item)
raise TypeError(
"cannot insert TimedeltaIndex with incompatible label")
def delete(self, loc):
"""
Make a new TimedeltaIndex with passed location(s) deleted.
Parameters
----------
loc: int, slice or array of ints
Indicate which sub-arrays to remove.
Returns
-------
new_index : TimedeltaIndex
"""
new_tds = np.delete(self.asi8, loc)
freq = 'infer'
if is_integer(loc):
if loc in (0, -len(self), -1, len(self) - 1):
freq = self.freq
else:
if is_list_like(loc):
loc = lib.maybe_indices_to_slice(
ensure_int64(np.array(loc)), len(self))
if isinstance(loc, slice) and loc.step in (1, None):
if (loc.start in (0, None) or loc.stop in (len(self), None)):
freq = self.freq
return TimedeltaIndex(new_tds, name=self.name, freq=freq)
TimedeltaIndex._add_comparison_ops()
TimedeltaIndex._add_numeric_methods()
TimedeltaIndex._add_logical_methods_disabled()
TimedeltaIndex._add_datetimelike_methods()
def _is_convertible_to_index(other):
"""
return a boolean whether I can attempt conversion to a TimedeltaIndex
"""
if isinstance(other, TimedeltaIndex):
return True
elif (len(other) > 0 and
other.inferred_type not in ('floating', 'mixed-integer', 'integer',
'mixed-integer-float', 'mixed')):
return True
return False
def timedelta_range(start=None, end=None, periods=None, freq=None,
name=None, closed=None):
"""
Return a fixed frequency TimedeltaIndex, with day as the default
frequency
Parameters
----------
start : string or timedelta-like, default None
Left bound for generating timedeltas
end : string or timedelta-like, default None
Right bound for generating timedeltas
periods : integer, default None
Number of periods to generate
freq : string or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5H'
name : string, default None
Name of the resulting TimedeltaIndex
closed : string, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
Returns
-------
rng : TimedeltaIndex
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``TimedeltaIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end`` (closed on both sides).
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Examples
--------
>>> pd.timedelta_range(start='1 day', periods=4)
TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``closed`` parameter specifies which endpoint is included. The default
behavior is to include both endpoints.
>>> pd.timedelta_range(start='1 day', periods=4, closed='right')
TimedeltaIndex(['2 days', '3 days', '4 days'],
dtype='timedelta64[ns]', freq='D')
The ``freq`` parameter specifies the frequency of the TimedeltaIndex.
Only fixed frequencies can be passed, non-fixed frequencies such as
'M' (month end) will raise.
>>> pd.timedelta_range(start='1 day', end='2 days', freq='6H')
TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',
'1 days 18:00:00', '2 days 00:00:00'],
dtype='timedelta64[ns]', freq='6H')
Specify ``start``, ``end``, and ``periods``; the frequency is generated
automatically (linearly spaced).
>>> pd.timedelta_range(start='1 day', end='5 days', periods=4)
TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',
'5 days 00:00:00'],
dtype='timedelta64[ns]', freq=None)
"""
if freq is None and com._any_none(periods, start, end):
freq = 'D'
return TimedeltaIndex(start=start, end=end, periods=periods,
freq=freq, name=name, closed=closed)
| bsd-3-clause |
mifads/pyscripts | emxgeo/readKoeppenGeiger.py | 1 | 3213 | #!/usr/bin/env python3
from collections import OrderedDict as odict
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
home=os.environ['HOME']
ifile= home + '/Work/LANDUSE/KoeppenGeiger/Koeppen-Geiger-ASCII.txt'
# Mar 2019, not used (yet)
#KG_boreal = 1
#KG_temperate = 2
#KG_medit = 3
#KG_tropical = 4
#KG_list = []
def getKoeppenGeiger(ifile=ifile,groupsWanted=[],plotsWanted=False):
""" File has:
Lat Lon Cls
-89.75 -179.75 EF
-89.75 -179.25 EF
-89.75 -178.75 EF
By default returns all 29 flags (Af, Am, ...),
but can ask for just the main groups, e.g. ['A', 'B']
"""
x=np.genfromtxt(ifile,names=True,dtype=None)
xlat=x['Lat']
xlon=x['Lon']
CLs = x['Cls'].astype('str') # default was S3
clu_list = list( np.unique(CLs) )
long_name = clu_list.copy() # Can expand one day
if len(groupsWanted) > 0:
nOut = 1 # Will merge eg. A+B+C
outList = [ ''.join(groupsWanted) ] # e.g. AB from A+B
else:
outList = clu_list.copy()
nOut = len(clu_list)
print('Wanted clus:', nOut, ':', outList)
#sys.exit()
lons = np.linspace(-179.75,179.75,720)
lats = np.linspace(-89.75,89.75,360)
print(type(clu_list), len(clu_list), len(lons), len(lats) )
# Could use bool, but int may simplify import one day
KoppenGieger=np.zeros([nOut,len(lats),len(lons)]) #,dtype=np.int)
#KG = np.zeros([360,720], dtype=np.int)
for n in range(len(xlat)):
ix = int ( (xlon[n]+180)*2 )
iy = int ( (xlat[n]+ 90)*2 )
c = CLs[n] # eg Dfc
c_index = clu_list.index(c)
if nOut == 1:
for code in groupsWanted:
if c.startswith(code):
#print( 'CODE ', c, c_index, code)
KoppenGieger[0,iy,ix] += 1
else:
KoppenGieger[c_index,iy,ix] += 1
# if c.startswith(('E','Dfc')): # typ= KG_boreal # polar, snow -> boreal
# elif c.startswith(('A','BWh','BSh')): # typ= KG_tropical # equatorial, arid-desert-hot -> trop
# elif c.startswith(('Csa','Csb')): # typ= KG_medit # Medit
# elif c.startswith(('C','D','B')): # B..s? steppe,
# typ= KG_temperate # temperate
# else:
# sys.exit()
#KG[iy,ix] = typ
outputs = odict()
#for n, clu in enumerate( clu_list ):
for n, clu in enumerate( outList ):
print('SUM ', n, clu, np.sum(KoppenGieger[n,:,:]) )
outputs[clu]=dict(units='frac',long_name=long_name[n],
data=KoppenGieger[n,:,:])
if plotsWanted:
plt.imshow(KoppenGieger[n,:,:],origin='lower',vmin=0.0,vmax=1.0)
plt.title(clu)
#plt.colorbar(shrink=0.5)
plt.savefig('PlotKoppenGieger_%s.png' % clu )
plt.close()
print('END', type(outputs), len(outputs) )
# return lons, lats, KG
return lons, lats, outputs
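# Usage sketch (illustrative values): merge the main A and B groups into one
# fractional 0.5-degree mask; the output key 'AB' follows from
# ''.join(groupsWanted).
#   lons, lats, kg = getKoeppenGeiger(groupsWanted=['A', 'B'])
#   ab_mask = kg['AB']['data']  # (360, 720) array, 1.0 where the class starts with A or B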
if __name__ == '__main__':
import emxcdf.makecdf as emx # Creates cdf file for a simple lonlat projection
lons, lats, kgdata = getKoeppenGeiger()
emx.create_cdf(kgdata,'Koeppen-Geiger-Mar2019.nc','i4',lons,lats,dbg=False)
# just test emx.create_cdf(kgdata,'Koeppen-Geiger-Aug2020.nc','i4',lons,lats,dbg=False)
# print('KG ', KG_boreal)
| gpl-3.0 |
hyperspy/hyperspy | hyperspy/tests/signals/test_2D_tools.py | 2 | 6999 | # Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from unittest import mock
import numpy as np
import numpy.testing as npt
import pytest
from scipy.misc import ascent, face
from scipy.ndimage import fourier_shift
import hyperspy.api as hs
from hyperspy.decorators import lazifyTestClass
def _generate_parameters():
parameters = []
for normalize_corr in [False, True]:
for reference in ['current', 'cascade', 'stat']:
parameters.append([normalize_corr, reference])
return parameters
@lazifyTestClass
class TestSubPixelAlign:
def setup_method(self, method):
ref_image = ascent()
center = np.array((256, 256))
shifts = np.array([(0.0, 0.0), (4.3, 2.13), (1.65, 3.58),
(-2.3, 2.9), (5.2, -2.1), (2.7, 2.9),
(5.0, 6.8), (-9.1, -9.5), (-9.0, -9.9),
(-6.3, -9.2)])
s = hs.signals.Signal2D(np.zeros((10, 100, 100)))
for i in range(10):
# Apply each sub-pixel shift using FFT and InverseFFT
offset_image = fourier_shift(np.fft.fftn(ref_image), shifts[i])
offset_image = np.fft.ifftn(offset_image).real
# Crop central regions of shifted images to avoid wrap around
s.data[i, ...] = offset_image[center[0]:center[0] + 100,
center[1]:center[1] + 100]
self.signal = s
self.shifts = shifts
def test_align_subpix(self):
# Align signal
s = self.signal
shifts = self.shifts
s.align2D(shifts=shifts)
# Compare by broadcasting
np.testing.assert_allclose(s.data[4], s.data[0], rtol=0.5)
s.estimate_shift2D(reference='cascade', sub_pixel_factor=10)
@pytest.mark.parametrize(("normalize_corr", "reference"),
_generate_parameters())
def test_estimate_subpix(self, normalize_corr, reference):
s = self.signal
shifts = s.estimate_shift2D(sub_pixel_factor=200,
normalize_corr=normalize_corr)
np.testing.assert_allclose(shifts, self.shifts, rtol=0.2, atol=0.2,
verbose=True)
@pytest.mark.parametrize(("plot"), [True, 'reuse'])
def test_estimate_subpix_plot(self, plot):
# To avoid this function plotting many figures and holding the test, we
# make sure the backend is set to `agg` in case it is set to something
# else in the testing environment
import matplotlib.pyplot as plt
plt.switch_backend('agg')
s = self.signal
s.estimate_shift2D(sub_pixel_factor=200, plot=plot)
def test_align_crop_error(self):
s = self.signal
shifts = self.shifts
s_size = np.array(s.axes_manager.signal_shape)
shifts[0] = s_size + 1
with pytest.raises(ValueError, match="Cannot crop signal"):
s.align2D(shifts=shifts, crop=True)
@lazifyTestClass
class TestAlignTools:
def setup_method(self, method):
im = face(gray=True)
self.ascent_offset = np.array((256, 256))
s = hs.signals.Signal2D(np.zeros((10, 100, 100)))
self.scales = np.array((0.1, 0.3))
self.offsets = np.array((-2, -3))
izlp = []
for ax, offset, scale in zip(
s.axes_manager.signal_axes, self.offsets, self.scales):
ax.scale = scale
ax.offset = offset
izlp.append(ax.value2index(0))
self.izlp = izlp
self.ishifts = np.array([(0, 0), (4, 2), (1, 3), (-2, 2), (5, -2),
(2, 2), (5, 6), (-9, -9), (-9, -9), (-6, -9)])
self.new_offsets = self.offsets - self.ishifts.min(0) * self.scales
zlp_pos = self.ishifts + self.izlp
for i in range(10):
slices = self.ascent_offset - zlp_pos[i, ...]
s.data[i, ...] = im[slices[0]:slices[0] + 100,
slices[1]:slices[1] + 100]
self.signal = s
# How the image should look after successful alignment
smin = self.ishifts.min(0)
smax = self.ishifts.max(0)
offsets = self.ascent_offset + self.offsets / self.scales - smin
size = np.array((100, 100)) - (smax - smin)
self.aligned = im[int(offsets[0]):int(offsets[0] + size[0]),
int(offsets[1]):int(offsets[1] + size[1])]
def test_estimate_shift(self):
s = self.signal
shifts = s.estimate_shift2D()
np.testing.assert_allclose(shifts, self.ishifts)
def test_align_no_shift(self):
s = self.signal
shifts = s.estimate_shift2D()
shifts.fill(0)
with pytest.warns(UserWarning, match="provided shifts are all zero"):
shifts = s.align2D(shifts=shifts)
assert shifts is None
def test_align_twice(self):
s = self.signal
s.align2D()
with pytest.warns(UserWarning, match="the images are already aligned"):
shifts = s.align2D()
assert shifts.sum() == 0
def test_align(self):
# Align signal
m = mock.Mock()
s = self.signal
s.events.data_changed.connect(m.data_changed)
s.align2D()
# Compare by broadcasting
assert np.all(s.data == self.aligned)
assert m.data_changed.called
def test_align_expand(self):
s = self.signal
s.align2D(expand=True)
# Check the numbers of NaNs to make sure expansion happened properly
ds = self.ishifts.max(0) - self.ishifts.min(0)
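# Expansion pads the 100x100 frames by ds[0] rows and ds[1] columns of NaNs,
# i.e. sum(ds) * 100 edge pixels plus the ds[0] x ds[1] corner block.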
Nnan = np.sum(ds) * 100 + np.prod(ds)
Nnan_data = np.sum(1 * np.isnan(s.data), axis=(1, 2))
# Due to interpolation, the number of NaNs in the data might
# be 2 higher (left and right side) than expected
assert np.all(Nnan_data - Nnan <= 2)
# Check alignment is correct
d_al = s.data[:, ds[0]:-ds[0], ds[1]:-ds[1]]
assert np.all(d_al == self.aligned)
def test_add_ramp():
s = hs.signals.Signal2D(np.indices((3, 3)).sum(axis=0) + 4)
s.add_ramp(-1, -1, -4)
npt.assert_allclose(s.data, 0)
def test_add_ramp_lazy():
s = hs.signals.Signal2D(np.indices((3, 3)).sum(axis=0) + 4).as_lazy()
s.add_ramp(-1, -1, -4)
npt.assert_almost_equal(s.data.compute(), 0)
| gpl-3.0 |
miloharper/neural-network-animation | matplotlib/tests/test_text.py | 9 | 12004 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
import numpy as np
from numpy.testing import assert_almost_equal
from nose.tools import eq_
from matplotlib.transforms import Bbox
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison, cleanup
from matplotlib.figure import Figure
from matplotlib.text import Annotation, Text
from matplotlib.backends.backend_agg import RendererAgg
@image_comparison(baseline_images=['font_styles'])
def test_font_styles():
from matplotlib import _get_data_path
data_path = _get_data_path()
def find_matplotlib_font(**kw):
prop = FontProperties(**kw)
path = findfont(prop, directory=data_path)
return FontProperties(fname=path)
from matplotlib.font_manager import FontProperties, findfont
warnings.filterwarnings(
'ignore',
('findfont: Font family \[u?\'Foo\'\] not found. Falling back to .'),
UserWarning,
module='matplotlib.font_manager')
plt.figure()
ax = plt.subplot(1, 1, 1)
normalFont = find_matplotlib_font(
family="sans-serif",
style="normal",
variant="normal",
size=14)
ax.annotate(
"Normal Font",
(0.1, 0.1),
xycoords='axes fraction',
fontproperties=normalFont)
boldFont = find_matplotlib_font(
family="Foo",
style="normal",
variant="normal",
weight="bold",
stretch=500,
size=14)
ax.annotate(
"Bold Font",
(0.1, 0.2),
xycoords='axes fraction',
fontproperties=boldFont)
boldItemFont = find_matplotlib_font(
family="sans serif",
style="italic",
variant="normal",
weight=750,
stretch=500,
size=14)
ax.annotate(
"Bold Italic Font",
(0.1, 0.3),
xycoords='axes fraction',
fontproperties=boldItemFont)
lightFont = find_matplotlib_font(
family="sans-serif",
style="normal",
variant="normal",
weight=200,
stretch=500,
size=14)
ax.annotate(
"Light Font",
(0.1, 0.4),
xycoords='axes fraction',
fontproperties=lightFont)
condensedFont = find_matplotlib_font(
family="sans-serif",
style="normal",
variant="normal",
weight=500,
stretch=100,
size=14)
ax.annotate(
"Condensed Font",
(0.1, 0.5),
xycoords='axes fraction',
fontproperties=condensedFont)
ax.set_xticks([])
ax.set_yticks([])
@image_comparison(baseline_images=['multiline'])
def test_multiline():
plt.figure()
ax = plt.subplot(1, 1, 1)
ax.set_title("multiline\ntext alignment")
plt.text(
0.2, 0.5, "TpTpTp\n$M$\nTpTpTp", size=20, ha="center", va="top")
plt.text(
0.5, 0.5, "TpTpTp\n$M^{M^{M^{M}}}$\nTpTpTp", size=20,
ha="center", va="top")
plt.text(
0.8, 0.5, "TpTpTp\n$M_{q_{q_{q}}}$\nTpTpTp", size=20,
ha="center", va="top")
plt.xlim(0, 1)
plt.ylim(0, 0.8)
ax.set_xticks([])
ax.set_yticks([])
@image_comparison(baseline_images=['antialiased'], extensions=['png'])
def test_antialiasing():
matplotlib.rcParams['text.antialiased'] = True
fig = plt.figure(figsize=(5.25, 0.75))
fig.text(0.5, 0.75, "antialiased", horizontalalignment='center',
verticalalignment='center')
fig.text(0.5, 0.25, "$\sqrt{x}$", horizontalalignment='center',
verticalalignment='center')
# NOTE: We don't need to restore the rcParams here, because the
# test cleanup will do it for us. In fact, if we do it here, it
# will turn antialiasing back off before the images are actually
# rendered.
def test_afm_kerning():
from matplotlib.afm import AFM
from matplotlib.font_manager import findfont
fn = findfont("Helvetica", fontext="afm")
with open(fn, 'rb') as fh:
afm = AFM(fh)
assert afm.string_width_height('VAVAVAVAVAVA') == (7174.0, 718)
@image_comparison(baseline_images=['text_contains'], extensions=['png'])
def test_contains():
import matplotlib.backend_bases as mbackend
fig = plt.figure()
ax = plt.axes()
mevent = mbackend.MouseEvent(
'button_press_event', fig.canvas, 0.5, 0.5, 1, None)
xs = np.linspace(0.25, 0.75, 30)
ys = np.linspace(0.25, 0.75, 30)
xs, ys = np.meshgrid(xs, ys)
txt = plt.text(
0.48, 0.52, 'hello world', ha='center', fontsize=30, rotation=30)
# uncomment to draw the text's bounding box
# txt.set_bbox(dict(edgecolor='black', facecolor='none'))
# draw the text. This is important, as the contains method can only work
# when a renderer exists.
plt.draw()
for x, y in zip(xs.flat, ys.flat):
mevent.x, mevent.y = plt.gca().transAxes.transform_point([x, y])
contains, _ = txt.contains(mevent)
color = 'yellow' if contains else 'red'
# capture the viewLim, plot a point, and reset the viewLim
vl = ax.viewLim.frozen()
ax.plot(x, y, 'o', color=color)
ax.viewLim.set(vl)
@image_comparison(baseline_images=['titles'])
def test_titles():
# left and right side titles
plt.figure()
ax = plt.subplot(1, 1, 1)
ax.set_title("left title", loc="left")
ax.set_title("right title", loc="right")
ax.set_xticks([])
ax.set_yticks([])
@image_comparison(baseline_images=['text_alignment'])
def test_alignment():
plt.figure()
ax = plt.subplot(1, 1, 1)
x = 0.1
for rotation in (0, 30):
for alignment in ('top', 'bottom', 'baseline', 'center'):
ax.text(
x, 0.5, alignment + " Tj", va=alignment, rotation=rotation,
bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5))
ax.text(
x, 1.0, r'$\sum_{i=0}^{j}$', va=alignment, rotation=rotation)
x += 0.1
ax.plot([0, 1], [0.5, 0.5])
ax.plot([0, 1], [1.0, 1.0])
ax.set_xlim([0, 1])
ax.set_ylim([0, 1.5])
ax.set_xticks([])
ax.set_yticks([])
@image_comparison(baseline_images=['axes_titles'], extensions=['png'])
def test_axes_titles():
# Related to issue #3327
plt.figure()
ax = plt.subplot(1, 1, 1)
ax.set_title('center', loc='center', fontsize=20, fontweight=700)
ax.set_title('left', loc='left', fontsize=12, fontweight=400)
ax.set_title('right', loc='right', fontsize=12, fontweight=400)
@cleanup
def test_set_position():
fig, ax = plt.subplots()
# test set_position
ann = ax.annotate(
'test', (0, 0), xytext=(0, 0), textcoords='figure pixels')
plt.draw()
init_pos = ann.get_window_extent(fig.canvas.renderer)
shift_val = 15
ann.set_position((shift_val, shift_val))
plt.draw()
post_pos = ann.get_window_extent(fig.canvas.renderer)
for a, b in zip(init_pos.min, post_pos.min):
assert a + shift_val == b
# test xyann
ann = ax.annotate(
'test', (0, 0), xytext=(0, 0), textcoords='figure pixels')
plt.draw()
init_pos = ann.get_window_extent(fig.canvas.renderer)
shift_val = 15
ann.xyann = (shift_val, shift_val)
plt.draw()
post_pos = ann.get_window_extent(fig.canvas.renderer)
for a, b in zip(init_pos.min, post_pos.min):
assert a + shift_val == b
@image_comparison(baseline_images=['text_bboxclip'])
def test_bbox_clipping():
plt.text(0.9, 0.2, 'Is bbox clipped?', backgroundcolor='r', clip_on=True)
t = plt.text(0.9, 0.5, 'Is fancy bbox clipped?', clip_on=True)
t.set_bbox({"boxstyle": "round, pad=0.1"})
@image_comparison(baseline_images=['annotation_negative_coords'],
extensions=['png'])
def test_annotation_negative_coords():
fig = plt.figure()
ax = plt.subplot(1, 1, 1)
ax.annotate("+fpt", (15, 40), xycoords="figure points")
ax.annotate("+fpx", (25, 30), xycoords="figure pixels")
ax.annotate("+apt", (35, 20), xycoords="axes points")
ax.annotate("+apx", (45, 10), xycoords="axes pixels")
ax.annotate("-fpt", (-55, -40), xycoords="figure points")
ax.annotate("-fpx", (-45, -30), xycoords="figure pixels")
ax.annotate("-apt", (-35, -20), xycoords="axes points")
ax.annotate("-apx", (-25, -10), xycoords="axes pixels")
@cleanup
def test_text_annotation_get_window_extent():
figure = Figure(dpi=100)
renderer = RendererAgg(200, 200, 100)
# Only text annotation
annotation = Annotation('test', xy=(0, 0))
annotation.set_figure(figure)
text = Text(text='test', x=0, y=0)
text.set_figure(figure)
bbox = annotation.get_window_extent(renderer=renderer)
text_bbox = text.get_window_extent(renderer=renderer)
eq_(bbox.width, text_bbox.width)
eq_(bbox.height, text_bbox.height)
_, _, d = renderer.get_text_width_height_descent(
'text', annotation._fontproperties, ismath=False)
_, _, lp_d = renderer.get_text_width_height_descent(
'lp', annotation._fontproperties, ismath=False)
below_line = max(d, lp_d)
# These numbers are specific to the current implementation of Text
points = bbox.get_points()
eq_(points[0, 0], 0.0)
eq_(points[1, 0], text_bbox.width)
eq_(points[0, 1], -below_line)
eq_(points[1, 1], text_bbox.height - below_line)
@cleanup
def test_text_with_arrow_annotation_get_window_extent():
headwidth = 21
fig, ax = plt.subplots(dpi=100)
txt = ax.text(s='test', x=0, y=0)
ann = ax.annotate('test',
xy=(0.0, 50.0),
xytext=(50.0, 50.0), xycoords='figure pixels',
arrowprops={
'facecolor': 'black', 'width': 2,
'headwidth': headwidth, 'shrink': 0.0})
plt.draw()
renderer = fig.canvas.renderer
# bounding box of text
text_bbox = txt.get_window_extent(renderer=renderer)
# bounding box of annotation (text + arrow)
bbox = ann.get_window_extent(renderer=renderer)
# bounding box of arrow
arrow_bbox = ann.arrow.get_window_extent(renderer)
# bounding box of annotation text
ann_txt_bbox = Text.get_window_extent(ann)
# make sure the annotation is 50 px wider than
# just the text
eq_(bbox.width, text_bbox.width + 50.0)
# make sure the annotation text bounding box is same size
# as the bounding box of the same string as a Text object
eq_(ann_txt_bbox.height, text_bbox.height)
eq_(ann_txt_bbox.width, text_bbox.width)
# compute the expected bounding box of arrow + text
expected_bbox = Bbox.union([ann_txt_bbox, arrow_bbox])
assert_almost_equal(bbox.height, expected_bbox.height)
@cleanup
def test_arrow_annotation_get_window_extent():
figure = Figure(dpi=100)
figure.set_figwidth(2.0)
figure.set_figheight(2.0)
renderer = RendererAgg(200, 200, 100)
# Text annotation with arrow
annotation = Annotation(
'', xy=(0.0, 50.0), xytext=(50.0, 50.0), xycoords='figure pixels',
arrowprops={
'facecolor': 'black', 'width': 8, 'headwidth': 10, 'shrink': 0.0})
annotation.set_figure(figure)
annotation.draw(renderer)
bbox = annotation.get_window_extent()
points = bbox.get_points()
eq_(bbox.width, 50.0)
assert_almost_equal(bbox.height, 10.0 / 0.72)
eq_(points[0, 0], 0.0)
eq_(points[0, 1], 50.0 - 5 / 0.72)
@cleanup
def test_empty_annotation_get_window_extent():
figure = Figure(dpi=100)
figure.set_figwidth(2.0)
figure.set_figheight(2.0)
renderer = RendererAgg(200, 200, 100)
# Text annotation with arrow
annotation = Annotation(
'', xy=(0.0, 50.0), xytext=(0.0, 50.0), xycoords='figure pixels')
annotation.set_figure(figure)
annotation.draw(renderer)
bbox = annotation.get_window_extent()
points = bbox.get_points()
eq_(points[0, 0], 0.0)
eq_(points[1, 0], 0.0)
eq_(points[1, 1], 50.0)
eq_(points[0, 1], 50.0)
| mit |
Scapogo/zipline | tests/finance/test_transaction.py | 5 | 1137 | #
# Copyright 2017 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from unittest import TestCase
from zipline.assets import Equity
from zipline.finance.transaction import Transaction
class TransactionTestCase(TestCase):
def test_transaction_repr(self):
dt = pd.Timestamp('2017-01-01')
asset = Equity(1, exchange='test')
txn = Transaction(asset, amount=100, dt=dt, price=10, order_id=0)
expected = (
"Transaction(asset=Equity(1), dt=2017-01-01 00:00:00,"
" amount=100, price=10)"
)
self.assertEqual(repr(txn), expected)
| apache-2.0 |
untom/scikit-learn | examples/cluster/plot_cluster_comparison.py | 246 | 4684 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
gustable/Img-Recog | nl_dbn.py | 1 | 1729 |
import cPickle
import numpy as np
from sklearn.externals import joblib
from nolearn.dbn import DBN
def load(name):
with open(name, 'rb') as f:
return cPickle.load(f)
dataset1 = load('/home/gp/data/cifar-10-batches-py/data_batch_1')
dataset2 = load('/home/gp/data/cifar-10-batches-py/data_batch_2')
dataset3 = load('/home/gp/data/cifar-10-batches-py/data_batch_3')
dataset4 = load('/home/gp/data/cifar-10-batches-py/data_batch_4')
dataset5 = load('/home/gp/data/cifar-10-batches-py/data_batch_5')
test_batch = load('/home/gp/data/cifar-10-batches-py/test_batch')
data_train = np.vstack([dataset1['data'], dataset2['data'], dataset3['data'],dataset4['data'],dataset5['data']])
labels_train = np.hstack([dataset1['labels'], dataset2['labels'],dataset3['labels'],dataset4['labels'],dataset5['labels']])
data_train = data_train.astype('float') / 255.
labels_train = labels_train
data_test = test_batch['data'].astype('float') / 255.
labels_test = np.array(test_batch['labels'])
n_feat = data_train.shape[1]
n_targets = labels_train.max() + 1
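# Network layout: the visible layer has one unit per raw pixel value
# (32 x 32 x 3 = 3072 for CIFAR-10), one hidden layer of n_feat / 3 units,
# and an output layer with one unit per class.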
net = DBN(
[n_feat, n_feat / 3, n_targets],
epochs=100,
learn_rates=0.01,
learn_rate_decays=0.99,
learn_rate_minimums=0.005,
verbose=1,
)
net.fit(data_train, labels_train)
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
expected = labels_test
predicted = net.predict(data_test)
print "Classification report for classifier %s:\n%s\n" % (
net, classification_report(expected, predicted))
print "Confusion matrix:\n%s" % confusion_matrix(expected, predicted)
print "prediction over expected" % predicted/expected
joblib.dump(net, 'nl_dbn.pkl', compress=9)
#nl_clone = joblib.load('nl_dbn.pkl')
| mit |
YubinXie/Computational-Pathology | Sample_Preprocess.py | 1 | 5387 | import cv2
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
from skimage.morphology import thin, closing, square
from skimage.util import invert
from skimage.color import rgb2gray, label2rgb
from skimage import data,img_as_ubyte
from skimage.filters import threshold_otsu
from skimage.segmentation import clear_border
from skimage.measure import label, regionprops
from PIL import Image
import scipy
OutputFolder = 'Output/'
InputFolder = '../RawInput/Tissue/'
#ImageList= ["459591"]#, "406786" ,"423690", "410200"]
image="459591"
def main(InputFolder,image,OutputFolder):
kernel = np.ones((5,5),np.uint8)
kernel_size=str(kernel.shape)
ReginThreshold=5000
print image
#Input sample and label files
Sample_Img = cv2.imread(InputFolder + image + ".jpg" , 0)
#Lable_Img = cv2.imread(InputFolder + image + ".svs_labels.bmp" , 0)
#Org_Lable_Img= img_as_ubyte(Image.open(InputFolder + image + ".svs_labels.bmp"))
Org_Sample_Img = img_as_ubyte(Image.open(InputFolder + image + ".jpg"))
## Sample thinning
# Otsu's thresholding after Gaussian filtering
blur = cv2.GaussianBlur(Sample_Img,(25,25),0)
ret1,th1 = cv2.threshold(blur,0,255,cv2.THRESH_OTSU)
Sample_Closing = cv2.morphologyEx(th1, cv2.MORPH_CLOSE, kernel)
Sample_Closing_Inverted = invert(Sample_Closing)
Sample_Closing_Inverted_Gray = rgb2gray(Sample_Closing_Inverted)
Sample_Closing_Inverted_Binary=np.where(Sample_Closing_Inverted_Gray>np.mean(Sample_Closing_Inverted_Gray),1,0)
Sample_Thinned = thin(Sample_Closing_Inverted_Binary,max_iter=10000 )
##Lale Process
#Label_Gray = rgb2gray(Lable_Img)
#Lable_Gray_Inverted = invert(Label_Gray)
#ret10,Lable_thresholding = cv2.threshold(Lable_Gray_Inverted,10,255,cv2.THRESH_BINARY) #Important
#Lable_thresholding_Binary=np.where(Lable_thresholding > np.mean(Lable_thresholding),1,0)
#BoundingBox(ReginThreshold):
thresh = threshold_otsu(Sample_Closing_Inverted_Binary)
bw = closing(Sample_Closing_Inverted_Binary > thresh, square(3))
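# Morphological closing with a 3x3 square structuring element fills small
# holes in the Otsu-thresholded mask before the connected regions are labelled.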
## remove artifacts connected to image border
cleared = clear_border(bw)
Sample_Thinned = np.where(Sample_Thinned>np.mean(Sample_Thinned),1,0)
## label image regions
label_image = label(cleared)
image_label_overlay = label2rgb(label_image, image=Sample_Closing_Inverted_Binary)
number=0
fig, ax = plt.subplots(figsize=(10, 6))
#NewImage=Sample_Closing_Inverted_Binary
#w,l=Sample_Closing_Inverted_Binary.shape
#for i in range(w):
# for j in range(l):
# NewImage[i,j]=0
ax.imshow(image_label_overlay,cmap=plt.cm.gray)
#plt.imsave(OutputFolder+"Test_"+image+"_"+str(number) +".png",Sample_Closing,cmap=plt.cm.gray)
for region in regionprops(label_image):
#ax.imshow(region,cmap=plt.cm.gray)
#print region
if region.area >= ReginThreshold: #600
# draw a rectangle around each segmented region
#Coords= region.coords
#print Coords
#for list in range(len(region.coords)):
# NewImage[ region.coords[list][0],region.coords[list][1]]=1
#scipy.misc.imsave(str(number)+'outfile.jpg', NewImage)
minr, minc, maxr, maxc = region.bbox
Box_Sample_OrgImg = Org_Sample_Img[minr:maxr, minc:maxc]
#Box_Label_OrgImg = Org_Lable_Img[minr:maxr, minc:maxc]
Box_Sample = Sample_Thinned[minr:maxr, minc:maxc]
#Box_Label = Lable_thresholding_Binary[minr:maxr, minc:maxc]
#Box_Mixed= cv2.bitwise_or(img_as_ubyte(Box_Sample), img_as_ubyte(Box_Label))
width, length, height= Box_Sample_OrgImg.shape
rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
fill=False, edgecolor='red', linewidth=1)
ax.add_patch(rect)
#Mix the sample and label file
#Box_OrgMixed=np.zeros(shape=(width, length, height))
Sample_Mixed=np.zeros(shape=(width, length, height))
for l in range(length):
for w in range(width):
#if Box_Label_OrgImg[w,l,0]>=255 & Box_Label_OrgImg[w,l,1]>=255 & Box_Label_OrgImg[w,l,2]>=255:
#Box_OrgMixed[w,l]=(Box_Sample_OrgImg[w,l])
#else:
#Box_OrgMixed[w,l]=(Box_Label_OrgImg[w,l])
if Box_Sample[w,l]==0:
Sample_Mixed[w,l]=(Box_Sample_OrgImg[w,l])
else:
Sample_Mixed[w,l]=(0,0,0)
#Reformat the image
#Box_OrgMixed=Box_OrgMixed.astype(np.uint8)
Sample_Mixed=Sample_Mixed.astype(np.uint8)
#plt.imsave(OutputFolder+"Segmentated_Thinned"+image+"_"+str(number) +".png",Box_Sample,cmap=plt.cm.gray)
##plt.imsave(OutputFolder+"Segmentated_Thinned"+image+"_"+str(number) +"_Label.png",Box_Label,cmap=plt.cm.gray)
#plt.imsave(OutputFolder+"Segmentated_Mixed_Thinned"+image+"_"+str(number) +".png",Sample_Mixed,cmap=plt.cm.gray)
number+=1
ax.set_axis_off()
plt.tight_layout()
#plt.show()
plt.savefig(OutputFolder + "BoundingBox_" + image+ "_RegionThreshold_" +str(ReginThreshold) +"kel25",dpi=300)
if __name__ == '__main__':
main(InputFolder,image,OutputFolder)
| gpl-2.0 |
JPFrancoia/scikit-learn | sklearn/utils/tests/test_metaestimators.py | 86 | 2304 | from sklearn.utils.testing import assert_true, assert_false
from sklearn.utils.metaestimators import if_delegate_has_method
class Prefix(object):
def func(self):
pass
class MockMetaEstimator(object):
"""This is a mock meta estimator"""
a_prefix = Prefix()
@if_delegate_has_method(delegate="a_prefix")
def func(self):
"""This is a mock delegated function"""
pass
def test_delegated_docstring():
assert_true("This is a mock delegated function"
in str(MockMetaEstimator.__dict__['func'].__doc__))
assert_true("This is a mock delegated function"
in str(MockMetaEstimator.func.__doc__))
assert_true("This is a mock delegated function"
in str(MockMetaEstimator().func.__doc__))
class MetaEst(object):
"""A mock meta estimator"""
def __init__(self, sub_est, better_sub_est=None):
self.sub_est = sub_est
self.better_sub_est = better_sub_est
@if_delegate_has_method(delegate='sub_est')
def predict(self):
pass
class MetaEstTestTuple(MetaEst):
"""A mock meta estimator to test passing a tuple of delegates"""
@if_delegate_has_method(delegate=('sub_est', 'better_sub_est'))
def predict(self):
pass
class MetaEstTestList(MetaEst):
"""A mock meta estimator to test passing a list of delegates"""
@if_delegate_has_method(delegate=['sub_est', 'better_sub_est'])
def predict(self):
pass
class HasPredict(object):
"""A mock sub-estimator with predict method"""
def predict(self):
pass
class HasNoPredict(object):
"""A mock sub-estimator with no predict method"""
pass
def test_if_delegate_has_method():
assert_true(hasattr(MetaEst(HasPredict()), 'predict'))
assert_false(hasattr(MetaEst(HasNoPredict()), 'predict'))
assert_false(
hasattr(MetaEstTestTuple(HasNoPredict(), HasNoPredict()), 'predict'))
assert_true(
hasattr(MetaEstTestTuple(HasPredict(), HasNoPredict()), 'predict'))
assert_false(
hasattr(MetaEstTestTuple(HasNoPredict(), HasPredict()), 'predict'))
assert_false(
hasattr(MetaEstTestList(HasNoPredict(), HasPredict()), 'predict'))
assert_true(
hasattr(MetaEstTestList(HasPredict(), HasPredict()), 'predict'))
| bsd-3-clause |
Insight-book/data-science-from-scratch | first-edition-ko/code-python3/gradient_descent.py | 12 | 5816 | from collections import Counter
from linear_algebra import distance, vector_subtract, scalar_multiply
from functools import reduce
import math, random
def sum_of_squares(v):
"""computes the sum of squared elements in v"""
return sum(v_i ** 2 for v_i in v)
def difference_quotient(f, x, h):
return (f(x + h) - f(x)) / h
def plot_estimated_derivative():
def square(x):
return x * x
def derivative(x):
return 2 * x
derivative_estimate = lambda x: difference_quotient(square, x, h=0.00001)
# plot to show they're basically the same
import matplotlib.pyplot as plt
x = range(-10,10)
plt.plot(x, list(map(derivative, x)), 'rx') # red x
plt.plot(x, list(map(derivative_estimate, x)), 'b+') # blue +
plt.show() # purple *, hopefully
def partial_difference_quotient(f, v, i, h):
# add h to just the i-th element of v
w = [v_j + (h if j == i else 0)
for j, v_j in enumerate(v)]
return (f(w) - f(v)) / h
def estimate_gradient(f, v, h=0.00001):
return [partial_difference_quotient(f, v, i, h)
for i, _ in enumerate(v)]
def step(v, direction, step_size):
"""move step_size in the direction from v"""
return [v_i + step_size * direction_i
for v_i, direction_i in zip(v, direction)]
def sum_of_squares_gradient(v):
return [2 * v_i for v_i in v]
def safe(f):
"""define a new function that wraps f and return it"""
def safe_f(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
return float('inf') # this means "infinity" in Python
return safe_f
#
#
# minimize / maximize batch
#
#
def minimize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
"""use gradient descent to find theta that minimizes target function"""
step_sizes = [100, 10, 1, 0.1, 0.01, 0.001, 0.0001, 0.00001]
theta = theta_0 # set theta to initial value
target_fn = safe(target_fn) # safe version of target_fn
value = target_fn(theta) # value we're minimizing
while True:
gradient = gradient_fn(theta)
next_thetas = [step(theta, gradient, -step_size)
for step_size in step_sizes]
# choose the one that minimizes the error function
next_theta = min(next_thetas, key=target_fn)
next_value = target_fn(next_theta)
# stop if we're "converging"
if abs(value - next_value) < tolerance:
return theta
else:
theta, value = next_theta, next_value
def negate(f):
"""return a function that for any input x returns -f(x)"""
return lambda *args, **kwargs: -f(*args, **kwargs)
def negate_all(f):
"""the same when f returns a list of numbers"""
return lambda *args, **kwargs: [-y for y in f(*args, **kwargs)]
def maximize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
return minimize_batch(negate(target_fn),
negate_all(gradient_fn),
theta_0,
tolerance)
#
# minimize / maximize stochastic
#
def in_random_order(data):
"""generator that returns the elements of data in random order"""
indexes = [i for i, _ in enumerate(data)] # create a list of indexes
random.shuffle(indexes) # shuffle them
for i in indexes: # return the data in that order
yield data[i]
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
data = list(zip(x, y))
theta = theta_0 # initial guess
alpha = alpha_0 # initial step size
min_theta, min_value = None, float("inf") # the minimum so far
iterations_with_no_improvement = 0
# if we ever go 100 iterations with no improvement, stop
while iterations_with_no_improvement < 100:
value = sum( target_fn(x_i, y_i, theta) for x_i, y_i in data )
if value < min_value:
# if we've found a new minimum, remember it
# and go back to the original step size
min_theta, min_value = theta, value
iterations_with_no_improvement = 0
alpha = alpha_0
else:
# otherwise we're not improving, so try shrinking the step size
iterations_with_no_improvement += 1
alpha *= 0.9
# and take a gradient step for each of the data points
for x_i, y_i in in_random_order(data):
gradient_i = gradient_fn(x_i, y_i, theta)
theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))
return min_theta
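# Illustrative sketch (not part of the original module): minimize_stochastic
# can fit a simple least-squares model y ~ alpha + beta * x, where x and y are
# sequences of data, by passing per-point error and gradient functions:
#
#     def squared_error(x_i, y_i, theta):
#         alpha, beta = theta
#         return (y_i - (beta * x_i + alpha)) ** 2
#
#     def squared_error_gradient(x_i, y_i, theta):
#         alpha, beta = theta
#         return [-2 * (y_i - (beta * x_i + alpha)),
#                 -2 * (y_i - (beta * x_i + alpha)) * x_i]
#
#     theta = minimize_stochastic(squared_error, squared_error_gradient,
#                                 x, y, [random.random(), random.random()])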
def maximize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
return minimize_stochastic(negate(target_fn),
negate_all(gradient_fn),
x, y, theta_0, alpha_0)
if __name__ == "__main__":
print("using the gradient")
v = [random.randint(-10,10) for i in range(3)]
tolerance = 0.0000001
while True:
#print v, sum_of_squares(v)
gradient = sum_of_squares_gradient(v) # compute the gradient at v
next_v = step(v, gradient, -0.01) # take a negative gradient step
if distance(next_v, v) < tolerance: # stop if we're converging
break
v = next_v # continue if we're not
print("minimum v", v)
print("minimum value", sum_of_squares(v))
print()
print("using minimize_batch")
v = [random.randint(-10,10) for i in range(3)]
v = minimize_batch(sum_of_squares, sum_of_squares_gradient, v)
print("minimum v", v)
print("minimum value", sum_of_squares(v))
| unlicense |
antoinecarme/sklearn2sql_heroku | tests/databases/test_client_bad_model.py | 1 | 1084 |
import pickle, json, requests, base64
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data
Y = iris.target
# print(iris.DESCR)
from sklearn.neural_network import MLPClassifier
clf = MLPClassifier()
clf.fit(X, Y)
def test_ws_sql_gen(pickle_data):
WS_URL="https://sklearn2sql.herokuapp.com/model"
b64_data = base64.b64encode(pickle_data).decode('utf-8')
data={"Name":"model1", "PickleData":b64_data , "SQLDialect":"sqlite"}
r = requests.post(WS_URL, json=data)
print(r.__dict__.keys())
print("STATUS_CODE_REASON", r.status_code , r.reason, r._content)
r.raise_for_status()
content = r.json()
# print(content.keys())
# print(content)
lSQL = content["model"]["SQLGenrationResult"][0]["SQL"]
return lSQL;
pickle_data = pickle.dumps(iris)
try:
lSQL = test_ws_sql_gen(pickle_data)
print(lSQL)
raise Exception("SHOULD_HAVE_FAIlED")
except Exception as e:
print(str(e))
if(str(e).startswith('500 Server Error')):
pass
else:
raise Exception("SHOULD_HAVE_FAIlED")
| bsd-3-clause |
CVML/scikit-learn | examples/cluster/plot_segmentation_toy.py | 258 | 3336 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (ie balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we make it only weakly
# dependent on the gradient, so the segmentation is close to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
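# The resulting edge weights are exp(-|gradient| / sigma): neighbouring pixels
# with similar intensities get weights close to 1, strong gradients give
# weights close to 0.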
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
djgagne/scikit-learn | examples/cluster/plot_affinity_propagation.py | 349 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
waterponey/scikit-learn | sklearn/manifold/setup.py | 43 | 1283 | import os
from os.path import join
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
config = Configuration("manifold", parent_package, top_path)
libraries = []
if os.name == 'posix':
libraries.append('m')
config.add_extension("_utils",
sources=["_utils.pyx"],
include_dirs=[numpy.get_include()],
libraries=libraries,
extra_compile_args=["-O3"])
cblas_libs, blas_info = get_blas_info()
eca = blas_info.pop('extra_compile_args', [])
eca.append("-O4")
config.add_extension("_barnes_hut_tsne",
libraries=cblas_libs,
sources=["_barnes_hut_tsne.pyx"],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=eca, **blas_info)
config.add_subpackage('tests')
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
| bsd-3-clause |
carpyncho/feets | res/paper/reports/features_montecarlo.py | 1 | 1591 |
import sys
import time as tmod
import warnings
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import pandas as pd
warnings.simplefilter("ignore")
sys.path.insert(0, "../FATS/")
import FATS
iterations = 100000
lc_size = 1000
random = np.random.RandomState(42)
results = {
"StetsonK": np.empty(iterations),
"StetsonJ": np.empty(iterations),
"AndersonDarling": np.empty(iterations)}
for it in range(iterations):
fs = FATS.FeatureSpace(featureList=list(results.keys()))
# a simple time array from 0 to 99.9 with steps of 0.1
time = np.arange(0, 100, 100. / lc_size)
# create 1000 magnitudes with mu 0 and std 1
mags = random.normal(size=lc_size)
# create 1000 magnitudes rescaled to at most 1% of mags
mags2 = mags * random.uniform(0, 0.01, mags.size)
# create two errors for the magnitudes equivalent to the 0.001%
# of the magnitudes
errors = random.normal(scale=0.00001, size=lc_size)
errors2 = random.normal(scale=0.00001, size=lc_size)
lc = np.array([
mags, # magnitude
time, # time
errors, # error
mags, # magnitude2
mags, # aligned_magnitude
mags, # aligned_magnitude2
time, # aligned_time
errors, # aligned_error
errors # aligned_error2
])
fs.calculateFeature(lc)
for k, v in fs.result("dict").items():
results[k][it] = v
df = pd.DataFrame(results).describe()
print df
df.to_latex("features_montecarlo.tex", float_format='%.4f')
| mit |
KshitijT/fundamentals_of_interferometry | data/scripts/plotUVcoverage.py | 4 | 3438 | #!/usr/bin/python
"""Plot UVW positions from an MS"""
import matplotlib
matplotlib.rc('xtick', labelsize=15)
matplotlib.rc('ytick', labelsize=15)
import numpy as np
import matplotlib.pyplot as plt
import sys
import pyrap.tables as tbls
cc = 299792458.
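# speed of light in m/s, used below to convert baseline lengths from metres
# to wavelengths (uv distance in lambda = uv distance in metres / (c / freq))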
if __name__ == '__main__':
from optparse import OptionParser
o = OptionParser()
o.set_usage('%prog [options] MS_FILE')
o.set_description(__doc__)
o.add_option('-d', '--uvdist', dest='uvdist', action='store_true',
help='Plot a histogram of the number of samples based on their UV distance')
o.add_option('-f', '--freqs', dest='freqs', action='store_true',
help='Include frequency information in plot')
o.add_option('-l', '--limit', dest='limit', default=None,
help='Set the uv coverage limit, no default')
o.add_option('-s', '--savefig', dest='savefig', default=None,
help='Save figure')
opts, args = o.parse_args(sys.argv[1:])
ms = tbls.table(args[0], readonly=True)
uvw = ms.getcol('UVW')
ms.close()
ms = tbls.table(args[0]+'/SPECTRAL_WINDOW', readonly=True)
freqs = ms.getcol('CHAN_FREQ')[0]
ms.close()
fig = plt.figure( figsize=(8.5,8) ) #(width, height)
if opts.uvdist:
if opts.freqs:
uvdist = []
for freq in freqs:
wl = cc / freq
uvdist.append(np.sqrt( ((uvw[:,0] / wl)**2) + ((uvw[:,1] / wl)**2) ))
uvdist = np.array(uvdist).flatten()
plt.hist(uvdist, bins=50, alpha=.5)
plt.xlabel('uv Distance ($\lambda$)', fontsize=20)
else:
uvdist = np.sqrt((uvw[:,0]**2) + (uvw[:,1]**2))
plt.hist(uvdist, bins=50, alpha=.5)
plt.xlabel('uv Distance (m)', fontsize=20)
plt.ylabel('# Baselines', fontsize=20)
plt.title('uv Distribution', fontsize=20)
else:
axes = plt.axes()
if opts.freqs:
if len(freqs)==1:
wl = cc/freqs[0]
plt.scatter(uvw[:,0]/wl, uvw[:,1]/wl, marker='s', edgecolor='none', c=(0.0,0.0,1.0), alpha=0.25)
plt.scatter(-1.*uvw[:,0]/wl, -1.*uvw[:,1]/wl, marker='s', edgecolor='none', c=(0.0,0.0,1.0), alpha=0.25)
else:
crange = freqs - np.min(freqs)
crange /= np.max(crange)
for color,freq in zip(crange,freqs):
wl = cc/freq
plt.scatter(uvw[:,0]/wl, uvw[:,1]/wl, marker='s', edgecolor='none', c=(1.0-color,0.0,color), alpha=0.25)
plt.scatter(-1.*uvw[:,0]/wl, -1.*uvw[:,1]/wl, marker='s', edgecolor='none', c=(1.0-color,0.0,color), alpha=0.25)
plt.xlabel("uu ($\lambda$)", fontsize=20)
plt.ylabel("vv ($\lambda$)", fontsize=20)
else:
plt.plot(uvw[:,0], uvw[:,1], 'k.')
plt.plot(-1.*uvw[:,0], -1.*uvw[:,1], 'k.')
plt.xlabel("uu (m)", fontsize=20)
plt.ylabel("vv (m)", fontsize=20)
if not opts.limit is None:
limit = float(opts.limit)
plt.xlim(-1.*limit, limit)
plt.ylim(-1.*limit, limit)
ax = plt.gca()
ax.set_aspect('equal')
plt.grid(True)
plt.title('uv Coverage')
if opts.savefig: plt.savefig(opts.savefig)
else: plt.show()
| gpl-2.0 |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/errors/__init__.py | 6 | 1478 | # flake8: noqa
""" expose public exceptions & warnings """
from pandas._libs.tslib import OutOfBoundsDatetime
class PerformanceWarning(Warning):
"""
Warnings shown when there is a possible performance
impact.
"""
class UnsupportedFunctionCall(ValueError):
"""
If attempting to call a numpy function on a pandas
object. For example using ``np.cumsum(groupby_object)``.
"""
class UnsortedIndexError(KeyError):
"""
Error raised when attempting to get a slice of a MultiIndex
and the index has not been lexsorted. Subclass of `KeyError`.
.. versionadded:: 0.20.0
"""
class ParserError(ValueError):
"""
Exception that is thrown by an error is encountered in `pd.read_csv`
"""
class DtypeWarning(Warning):
"""
Warning that is raised for a dtype incompatibility. This
can happen whenever `pd.read_csv` encounters non-
uniform dtypes in a column(s) of a given CSV file
"""
class EmptyDataError(ValueError):
"""
Exception that is thrown in `pd.read_csv` (by both the C and
Python engines) when empty data or header is encountered
"""
class ParserWarning(Warning):
"""
Warning that is raised in `pd.read_csv` whenever it is necessary
to change parsers (generally from 'c' to 'python') contrary to the
one specified by the user due to lack of support or functionality for
parsing particular attributes of a CSV file with the requsted engine
"""
| mit |
Djabbz/scikit-learn | doc/sphinxext/gen_rst.py | 106 | 40198 | """
Example generation for scikit-learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from sklearn.externals import six
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename, encoding='utf-8') as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redict streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://)"')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
cobi['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
# replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
if six.PY2:
lines = open(filename).readlines()
else:
lines = open(filename, encoding='utf-8').readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery.\n"
"Please check the layout of your"
" example file:\n {}\n and make sure"
" it's correct".format(filename))
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
display: none;
}
</style>
.. _examples-index:
Examples
========
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for directory in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, directory)):
generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
if six.PY2:
lines = open(example_file).readlines()
else:
lines = open(example_file, encoding='utf-8').readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif (tok_type == 'STRING') and check_docstring:
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
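# Hedged illustration (file names are hypothetical): line_count_sort keeps only
# the .py entries and orders them by the number of code lines that follow the
# module docstring, so the shortest examples are listed first in the gallery.
# line_count_sort(['plot_long_example.py', 'plot_short_example.py', 'README.txt'],
#                 target_dir=src_dir)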
def _thumbnail_div(subdir, full_dir, fname, snippet, is_backref=False):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer" tooltip="{}">
""".format(snippet))
out.append('.. only:: html\n\n')
out.append(' .. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
</div>
""" % (ref_name))
if is_backref:
out.append('.. only:: not html\n\n * :ref:`example_%s`' % ref_name)
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet, is_backref=True))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
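# Hedged usage sketch (paths are hypothetical): fit a generated figure into a
# 400x280 white canvas while preserving its aspect ratio, as done for the
# gallery thumbnails above.
# make_thumbnail('images/plot_example_001.png',
#                'images/thumb/plot_example.png', width=400, height=280)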
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
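# Hedged example (assumes the object is re-exported by a parent package on the
# current install): the loop keeps shortening the module path while the import
# still succeeds, so something like
# get_short_module_name('sklearn.cluster.k_means_', 'KMeans')
# is expected to return 'sklearn.cluster' rather than the full private path.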
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, base_image_name + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
# don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
fig = plt.figure(fig_mngr.num)
kwargs = {}
to_rgba = matplotlib.colors.colorConverter.to_rgba
for attr in ['facecolor', 'edgecolor']:
fig_attr = getattr(fig, 'get_' + attr)()
default_attr = matplotlib.rcParams['figure.' + attr]
if to_rgba(fig_attr) != to_rgba(default_attr):
kwargs[attr] = fig_attr
fig.savefig(image_path % fig_mngr.num, **kwargs)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
# Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all as some contained whitespace due to their default gallery
thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, base_image_name + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, base_image_name + '.rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
if six.PY2:
example_code_obj = identify_names(open(example_file).read())
else:
example_code_obj = \
identify_names(open(example_file, encoding='utf-8').read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
if app.builder.name == 'latex':
# Don't embed hyperlinks when a latex builder is used.
return
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
resolver_urls = {
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
}
for this_module, url in resolver_urls.items():
try:
doc_resolvers[this_module] = SphinxDocLinkResolver(url)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding `{0}` links due to a URL "
"Error:\n".format(this_module))
print(e.args)
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
try:
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
except (HTTPError, URLError) as e:
print("The following error has occurred:\n")
print(repr(e))
continue
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
| bsd-3-clause |
ishanic/scikit-learn | examples/linear_model/plot_sgd_separating_hyperplane.py | 260 | 1219 | """
=========================================
SGD: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a linear Support Vector Machines classifier
trained using SGD.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs
# we create 50 separable points
X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=200, fit_intercept=True)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-1, 5, 10)
yy = np.linspace(-1, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i, j), val in np.ndenumerate(X1):
x1 = val
x2 = X2[i, j]
p = clf.decision_function([x1, x2])
Z[i, j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
plt.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
hyperspy/hyperspy | hyperspy/tests/learn/test_cluster.py | 2 | 16658 | # -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import pytest
from hyperspy import signals
from hyperspy.misc.machine_learning import import_sklearn
sklearn = pytest.importorskip("sklearn", reason="sklearn not installed")
if import_sklearn.sklearn_installed:
# Create the data once, since the parametrizations
# will repeat the decomposition and BSS unnecessarily
rng1 = np.random.RandomState(123)
signal1 = signals.Signal1D(rng1.uniform(size=(7, 5, 7)))
signal1.decomposition()
signal1.blind_source_separation(number_of_components=3)
rng2 = np.random.RandomState(123)
signal2 = signals.Signal2D(rng2.uniform(size=(7, 5, 7)))
signal2.decomposition()
signal2.blind_source_separation(number_of_components=3)
else:
# No need to create the data, since BSS will fail
# if sklearn is missing, and the pytest.importorskip
# will skip the rest of the file anyway
pass
class TestCluster1D:
def setup_method(self):
self.signal = signal1.deepcopy()
self.navigation_mask = np.zeros((7, 5), dtype=bool)
self.navigation_mask[4:6, 1:4] = True
self.signal_mask = np.zeros((7,), dtype=bool)
self.signal_mask[2:6] = True
@pytest.mark.parametrize("algorithm", (None, "agglomerative", "spectralclustering"))
@pytest.mark.parametrize("cluster_source", ("signal", "bss", "decomposition"))
@pytest.mark.parametrize(
"source_for_centers", (None, "signal", "bss", "decomposition")
)
@pytest.mark.parametrize("preprocessing", (None, "standard", "norm", "minmax"))
@pytest.mark.parametrize("use_masks", (True, False))
def test_combinations(
self, algorithm, cluster_source, preprocessing, source_for_centers, use_masks
):
if use_masks:
navigation_mask = self.navigation_mask
signal_mask = self.signal_mask
else:
navigation_mask = None
signal_mask = None
self.signal.cluster_analysis(
cluster_source,
n_clusters=3,
source_for_centers=source_for_centers,
preprocessing=preprocessing,
navigation_mask=navigation_mask,
signal_mask=signal_mask,
algorithm=algorithm,
)
np.testing.assert_array_equal(
self.signal.learning_results.cluster_labels.shape, (3, 35)
)
np.testing.assert_array_equal(
self.signal.learning_results.cluster_centroid_signals.shape, (3, 7)
)
np.testing.assert_array_equal(
self.signal.learning_results.cluster_sum_signals.shape, (3, 7)
)
self.signal.get_cluster_labels()
self.signal.get_cluster_signals()
def test_custom_algorithm(self):
self.signal.cluster_analysis(
"signal", n_clusters=3, preprocessing="norm",
)
np.testing.assert_array_equal(
self.signal.learning_results.cluster_labels.shape, (3, 35)
)
np.testing.assert_array_equal(
self.signal.learning_results.cluster_centroid_signals.shape, (3, 7)
)
np.testing.assert_array_equal(
self.signal.learning_results.cluster_sum_signals.shape, (3, 7)
)
def test_custom_preprocessing(self):
custom_method = import_sklearn.sklearn.preprocessing.Normalizer()
self.signal.cluster_analysis(
"signal", n_clusters=3, preprocessing=custom_method, algorithm="kmeans"
)
np.testing.assert_array_equal(
self.signal.learning_results.cluster_labels.shape, (3, 35)
)
np.testing.assert_array_equal(
self.signal.learning_results.cluster_centroid_signals.shape, (3, 7)
)
np.testing.assert_array_equal(
self.signal.learning_results.cluster_sum_signals.shape, (3, 7)
)
class TestClusterSignalSources:
def setup_method(self):
self.signal = signal2.deepcopy()
self.navigation_mask = np.zeros((7,), dtype=bool)
self.navigation_mask[4:6] = True
self.signal_mask = np.zeros((5, 7), dtype=bool)
self.signal_mask[1:4, 2:6] = True
@pytest.mark.parametrize("use_masks", (True, False))
def test_cluster_source(self, use_masks):
if use_masks:
navigation_mask = self.navigation_mask
signal_mask = self.signal_mask
else:
navigation_mask = None
signal_mask = None
# test using cluster source centre is a signal
signal_copy = self.signal.deepcopy()
self.signal.cluster_analysis(
signal_copy,
n_clusters=3,
source_for_centers="signal",
preprocessing="norm",
navigation_mask=navigation_mask,
signal_mask=signal_mask,
algorithm="kmeans",
)
np.testing.assert_array_equal(
self.signal.learning_results.cluster_labels.shape, (3, 7)
)
np.testing.assert_array_equal(
self.signal.learning_results.cluster_centroid_signals.shape, (3, 35)
)
np.testing.assert_array_equal(
self.signal.learning_results.cluster_sum_signals.shape, (3, 35)
)
@pytest.mark.parametrize("use_masks", (True, False))
def test_source_center(self, use_masks):
if use_masks:
navigation_mask = self.navigation_mask
signal_mask = self.signal_mask
else:
navigation_mask = None
signal_mask = None
# test using cluster source centre is a signal
signal_copy = self.signal.deepcopy()
self.signal.cluster_analysis(
"signal",
n_clusters=3,
source_for_centers=signal_copy,
preprocessing="norm",
navigation_mask=navigation_mask,
signal_mask=signal_mask,
algorithm="kmeans",
)
np.testing.assert_array_equal(
self.signal.learning_results.cluster_labels.shape, (3, 7)
)
np.testing.assert_array_equal(
self.signal.learning_results.cluster_centroid_signals.shape, (3, 35)
)
np.testing.assert_array_equal(
self.signal.learning_results.cluster_sum_signals.shape, (3, 35)
)
@pytest.mark.filterwarnings("ignore:FastICA did not converge")
class TestClusterEstimate:
def setup_method(self):
rng = np.random.RandomState(123)
# Use prime numbers to avoid fluke equivalences
# create 3 random clusters
n_samples = [100] * 3
std = [0.05] * 3
X = []
centers = np.array(
[[-1.0, -1.0, 1, 1], [1.0, -1.0, -1.0, -1], [-1.0, 1.0, 1.0, -1.0]]
)
for i, (n, std) in enumerate(zip(n_samples, std)):
X.append(centers[i] + rng.normal(scale=std, size=(n, 4)))
X = np.concatenate(X)
rng.shuffle(X)
self.signal = signals.Signal1D(X)
self.signal.decomposition()
self.signal.blind_source_separation(number_of_components=3)
@pytest.mark.parametrize("metric", ("elbow", "silhouette", "gap"))
def test_metric(self, metric):
max_clusters = 6
self.signal.estimate_number_of_clusters(
"signal",
max_clusters=max_clusters,
preprocessing="norm",
algorithm="kmeans",
metric=metric,
)
k_range = self.signal.learning_results.cluster_metric_index
best_k = self.signal.learning_results.estimated_number_of_clusters
if isinstance(best_k, list):
best_k = best_k[0]
test_k_range = list(range(1, max_clusters + 1))
if metric == "silhouette":
test_k_range = list(range(2, max_clusters + 1))
np.testing.assert_allclose(k_range, test_k_range)
np.testing.assert_allclose(best_k, 3)
@pytest.mark.parametrize("algorithm", ("kmeans", "agglomerative"))
def test_cluster_algorithm(self, algorithm):
max_clusters = 6
self.signal.estimate_number_of_clusters(
"signal",
max_clusters=max_clusters,
preprocessing="norm",
algorithm=algorithm,
metric="elbow",
)
k_range = self.signal.learning_results.cluster_metric_index
best_k = self.signal.learning_results.estimated_number_of_clusters
if isinstance(best_k, list):
best_k = best_k[0]
test_k_range = list(range(1, max_clusters + 1))
if algorithm == "agglomerative":
test_k_range = list(range(2, max_clusters + 1))
np.testing.assert_allclose(k_range, test_k_range)
np.testing.assert_allclose(best_k, 3)
class DummyClusterAlgorithm:
def __init__(self):
self.test = None
def fit(self, X):
pass
class DummyScalingAlgorithm:
def __init__(self):
self.test = None
def fit(self, X):
pass
class TestClusterExceptions:
def setup_method(self):
self.rng = np.random.RandomState(123)
self.s = signals.Signal1D(self.rng.random_sample(size=(20, 100)))
def test_cluster_source_error(self):
with pytest.raises(
ValueError,
match="cluster source needs to be set "
"to `decomposition` , `signal` , `bss` "
"or a suitable Signal",
):
self.s.cluster_analysis("randtest", n_clusters=2)
def test_cluster_source_size_error(self):
x2 = self.rng.random_sample(size=(10, 80))
s2 = signals.Signal1D(x2)
with pytest.raises(
ValueError,
match="cluster_source does not have the same "
"navigation size as the this signal",
):
self.s.cluster_analysis(s2, n_clusters=2)
def test_cluster_source_center_size_error(self):
x2 = self.rng.random_sample(size=(10, 80))
s2 = signals.Signal1D(x2)
with pytest.raises(
ValueError,
match="cluster_source does not have the same "
"navigation size as the this signal",
):
self.s.cluster_analysis("signal", n_clusters=2, source_for_centers=s2)
def test_cluster_bss_error(self):
with pytest.raises(
ValueError,
match="A cluster source has been set to bss "
" but no blind source separation results found. "
" Please run blind source separation method first",
):
self.s.cluster_analysis("bss", n_clusters=2)
def test_cluster_decomposition_error(self):
with pytest.raises(
ValueError,
match="A cluster source has been set to "
"decomposition but no decomposition results found. "
"Please run decomposition method first",
):
self.s.cluster_analysis("decomposition", n_clusters=2)
def test_cluster_nav_mask_error(self):
nav_mask = np.zeros((11,), dtype=bool)
with pytest.raises(
ValueError,
match="Navigation mask size does not match " "signal navigation size",
):
self.s.cluster_analysis("signal", n_clusters=2, navigation_mask=nav_mask)
def test_cluster_sig_mask_error(self):
sig_mask = np.zeros((11,), dtype=bool)
with pytest.raises(
ValueError,
match="signal mask size does not match your " "cluster source signal size",
):
self.s.cluster_analysis("signal", n_clusters=2, signal_mask=sig_mask)
def test_cluster_basesig_mask_error(self):
sig_mask = np.zeros((11,), dtype=bool)
with pytest.raises(
ValueError,
match="signal mask size does not match your " "cluster source signal size",
):
self.s.cluster_analysis(
self.s.deepcopy(), n_clusters=2, signal_mask=sig_mask
)
def test_max_cluster_error(self):
max_clusters = 1
with pytest.raises(
ValueError,
match="The max number of clusters, max_clusters, "
"must be specified and be >= 2.",
):
self.s.estimate_number_of_clusters(
"signal",
max_clusters=max_clusters,
preprocessing=None,
algorithm="kmeans",
metric="elbow",
)
def test_cluster_preprocessing_object_error(self):
preprocessing = object()
with pytest.raises(
ValueError, match=r"The cluster preprocessing method should be either \w*"
):
self.s.cluster_analysis("signal", n_clusters=2, preprocessing=preprocessing)
def test_clustering_object_error(self):
empty_object = object()
with pytest.raises(
ValueError, match=r"The clustering method should be either \w*"
):
self.s.cluster_analysis("signal", n_clusters=2, algorithm=empty_object)
def test_estimate_alg_error(self):
with pytest.raises(
ValueError,
match="Estimate number of clusters only works with "
"supported clustering algorithms",
):
self.s.estimate_number_of_clusters("signal", algorithm="orange")
def test_estimate_pre_error(self):
with pytest.raises(
ValueError,
match="Estimate number of clusters only works with "
"supported preprocessing algorithms",
):
self.s.estimate_number_of_clusters("signal", preprocessing="orange")
def test_sklearn_exception(self):
import_sklearn.sklearn_installed = False
with pytest.raises(ImportError):
self.s.cluster_analysis("signal", n_clusters=2)
import_sklearn.sklearn_installed = True
def test_sklearn_exception2(self):
import_sklearn.sklearn_installed = False
with pytest.raises(ImportError):
self.s._get_cluster_algorithm("kmeans")
import_sklearn.sklearn_installed = True
def test_sklearn_exception3(self):
import_sklearn.sklearn_installed = False
with pytest.raises(ImportError):
self.s._get_cluster_preprocessing_algorithm("norm")
import_sklearn.sklearn_installed = True
def test_preprocess_alg_exception(self):
sc = DummyScalingAlgorithm()
with pytest.raises(
ValueError, match=r"The cluster preprocessing method should be \w*"
):
self.s.cluster_analysis("signal", n_clusters=2, preprocessing=sc)
def test_cluster_alg_exception(self):
sc = DummyClusterAlgorithm()
with pytest.raises(AttributeError, match=r"Fited cluster estimator \w*"):
self.s.cluster_analysis("signal", n_clusters=2, algorithm=sc)
def test_get_methods():
rng = np.random.RandomState(123)
signal = signals.Signal1D(rng.random_sample(size=(11, 5, 7)))
signal.decomposition()
signal.cluster_analysis("signal", n_clusters=2)
signal.unfold()
cl = signal.get_cluster_labels(merged=True)
np.testing.assert_array_equal(
cl.data,
(signal.learning_results.cluster_labels * np.arange(2)[:, np.newaxis]).sum(0),
)
cl = signal.get_cluster_labels(merged=False)
np.testing.assert_array_equal(cl.data, signal.learning_results.cluster_labels)
cl = signal.get_cluster_signals(signal="sum")
np.testing.assert_array_equal(cl.data, signal.learning_results.cluster_sum_signals)
cl = signal.get_cluster_signals(signal="centroid")
np.testing.assert_array_equal(
cl.data, signal.learning_results.cluster_centroid_signals
)
cl = signal.get_cluster_signals(signal="mean")
np.testing.assert_array_equal(
cl.data,
signal.learning_results.cluster_sum_signals
/ signal.learning_results.cluster_labels.sum(1, keepdims=True),
)
cl = signal.get_cluster_distances()
np.testing.assert_array_equal(cl.data, signal.learning_results.cluster_distances)
| gpl-3.0 |
FluVigilanciaBR/seasonality | methods/data_filter/contingency_level.py | 1 | 8367 | # coding:utf8
__author__ = 'Marcelo Ferreira da Costa Gomes'
import pandas as pd
import numpy as np
from fludashboard.libs.flu_data import FluDB
db = FluDB()
def get_all_territories_and_years(filtertype: str='srag'):
table_suffix = {
'srag': '',
'sragnofever': '_sragnofever',
'hospdeath': '_hospdeath'
}
df = db.read_data(
table_name='current_estimated_values%s' % table_suffix[filtertype],
dataset_id=1, scale_id=1, territory_id=0
)
list_of_years = list(set(df.epiyear))
with db.conn.connect() as conn:
sql = '''
SELECT id FROM territory
'''
list_of_territories = list(set(pd.read_sql(sql, conn).id))
df = pd.DataFrame(
[
{'territory_id': t, 'epiyear': y} for t in set(list_of_territories) - {9, 99, 9999}
for y in list_of_years
]
)
return df
def contingency_trigger(dataset_id: int, year: int, territory_id: int, filtertype: str='srag'):
"""
:param dataset_id:
:param year:
:param territory_id:
:param filtertype:
:return:
"""
df = db.get_data(
dataset_id=dataset_id, scale_id=1, year=year,
territory_id=territory_id, filter_type=filtertype
)[['estimated_cases', 'typical_median', 'typical_low', 'typical_high']]
# get_data stores the difference between typical levels in each column:
df.typical_high += df.typical_median + df.typical_low
# If not the obitoflu dataset (dataset_id 3), use the last 4 weeks; otherwise use 3:
if dataset_id < 3:
wdw = 4
else:
wdw = 3
weeks = df.shape[0]
alert_zone = False
data_increase = False
if weeks < wdw+1:
alert_zone = False
data_increase = False
else:
for i in range(wdw+1, weeks+1):
alert_zone = any(df.estimated_cases[(i-wdw):i] > df.typical_high[(i-wdw):i])
data_increase = all(
df.estimated_cases[(i-wdw):i].values -
df.estimated_cases[(i-wdw - 1):(i-1)].values > 0
)
if alert_zone & data_increase:
return alert_zone & data_increase, i-1
return alert_zone & data_increase, 1
def check_contingency_decrease(year: int, territory_id: int, cont_level: int, week: int, filtertype: str='srag'):
dataset_id = cont_level - 1
df = db.get_data(
dataset_id=dataset_id, scale_id=1, year=year,
territory_id=territory_id, filter_type=filtertype
).loc[lambda dftmp: (dftmp['situation_id'] == 2) | (dftmp['situation_id'] == 3),
['estimated_cases', 'typical_median', 'typical_low']]
# get_data stores the difference between typical levels in each column:
df.typical_median += df.typical_low
weeks = df.shape[0]
for i in range(week, weeks):
if all(df.estimated_cases[i:(i+2)] <= df.typical_median[i:(i+2)]):
cont_level -= 1
week = i+1
if cont_level > 1:
check_contingency_decrease(year=year, territory_id=territory_id, cont_level=cont_level, week=week,
filtertype=filtertype)
else:
break
return cont_level
def contingency_level(year: int, territory_id: int, maximum=False, filtertype: str='srag'):
for dataset_id in range(3, 0, -1):
alert, week = contingency_trigger(dataset_id=dataset_id, year=year, territory_id=territory_id,
filtertype=filtertype)
if alert & (not maximum):
return check_contingency_decrease(year=year, territory_id=territory_id,
cont_level=(dataset_id+1), week=week, filtertype=filtertype)
elif alert:
return(dataset_id+1)
return (1)
def calc_weekly_alert_level(se: pd.Series):
_max = max([
se.very_high_level, se.high_level, se.epidemic_level, se.low_level
])
return (
0 if np.isnan(_max) else
1 if se.low_level == _max else
2 if se.epidemic_level == _max else
3 if se.high_level == _max else
4
)
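# Hedged example (values are hypothetical): the weekly alert is the index of the
# dominant threshold column, so a week where high_level carries the maximum
# activity maps to level 3.
# row = pd.Series({'low_level': 0.1, 'epidemic_level': 0.4,
#                  'high_level': 0.8, 'very_high_level': 0.2})
# calc_weekly_alert_level(row)  # -> 3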
def apply_filter_alert_by_epiweek(year: int, territory_id: int, filtertype: str='srag'):
df = pd.DataFrame()
for dataset_id in range(1, 7):
df = df.append(
db.get_data(
dataset_id=dataset_id, scale_id=1, year=year, territory_id=territory_id, filter_type=filtertype
)[['dataset_id', 'territory_id', 'epiyear', 'epiweek', 'low_level', 'epidemic_level', 'high_level',
'very_high_level', 'situation_id']],
ignore_index=True
)
if year > 2009:
df = df.append(
db.get_data(
dataset_id=dataset_id, scale_id=1, year=year-1, territory_id=territory_id, filter_type=filtertype
).loc[lambda df: df.epiweek == max(df.epiweek[~df.situation_id.isin([1, 4])]),
['dataset_id', 'territory_id', 'epiyear', 'epiweek',
'low_level', 'epidemic_level', 'high_level',
'very_high_level', 'situation_id']],
ignore_index=True
)
epiweek = max(df.epiweek[~df.situation_id.isin([1, 4]) & (df.epiyear == year)])
df.loc[(df.situation_id.isin([1, 4])) & (df.epiweek > epiweek - 2) & (df.epiyear == year),
['low_level', 'epidemic_level', 'high_level', 'very_high_level']] = None
df['alert'] = df.apply(calc_weekly_alert_level, axis=1)
df.sort_values(by=['territory_id', 'dataset_id', 'epiyear', 'epiweek'], inplace=True)
df.loc[df.alert == 0, 'alert'] = None
df.alert = df.alert.fillna(method='ffill').astype(int)
df = df[df.epiyear == year]
return df[['dataset_id', 'territory_id', 'epiyear', 'epiweek', 'alert', 'low_level', 'epidemic_level',
'high_level', 'very_high_level']]
def weekly_alert_table_all(df, filtertype: str='srag'):
df_alert = pd.DataFrame()
for territory_id in sorted(df.territory_id.unique()):
for epiyear in sorted(df.epiyear.unique()):
df_alert = df_alert.append(apply_filter_alert_by_epiweek(year=epiyear, territory_id=territory_id,
filtertype=filtertype),
ignore_index=True)
return df_alert
def season_alert_level(se):
alert_counts = se.value_counts()
ix_max = max(alert_counts.index)
if ix_max in [3, 4]:
try:
high_threshold = alert_counts[3] + alert_counts[4]
except:
high_threshold = alert_counts[max(alert_counts.index)]
else:
high_threshold = 0
return (
4 if high_threshold >= 5 else
3 if high_threshold >= 1 else
2 if 2 in alert_counts.index else
1
)
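# Hedged example (weekly alert codes are hypothetical): five or more weeks at
# level 3/4 give a season level of 4, at least one such week gives 3, otherwise
# 2 if any week reached level 2, else 1.
# season_alert_level(pd.Series([2, 3, 3, 4, 4, 4, 2, 1]))  # -> 4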
def calc_season_contingency(filtertype: str='srag'):
df_contingency = get_all_territories_and_years()
df_contingency['contingency'] = df_contingency.apply(lambda x: contingency_level(year=x.epiyear,
territory_id=x.territory_id,
filtertype=filtertype),
axis=1)
df_contingency['contingency_max'] = df_contingency.apply(lambda x: contingency_level(year=x.epiyear,
territory_id=x.territory_id,
maximum=True,
filtertype=filtertype),
axis=1)
return df_contingency
def calc_season_alert(df_alert_weekly):
df_alert_season = df_alert_weekly[['dataset_id', 'territory_id', 'epiyear']].drop_duplicates()
df_alert_season['season_level'] = df_alert_season.apply(lambda se: season_alert_level(
df_alert_weekly.alert[
(df_alert_weekly.dataset_id == se.dataset_id) &
(df_alert_weekly.territory_id == se.territory_id) &
(df_alert_weekly.epiyear == se.epiyear)]
), axis=1)
return df_alert_season
| gpl-3.0 |
chrisdembia/agent-bicycle | randlov1998/analysis.py | 1 | 2255 | """Functions for plotting results, etc.
"""
import numpy as np
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import pylab as pl
from scipy import r_
from pybrain.tools.customxml.networkreader import NetworkReader
from pybrain.utilities import one_to_n
def plot_nfq_action_value_history(network_name_prefix, count, state=[0, 0, 0, 0, 0], n_actions=9):
"""Example::
>>> plot_nfq_action_value_history('randlov_actionvaluenetwork_',
np.arange(0, 40, 10))
This will plot the data from the files:
randlov_actionvaluenetwork_0.xml
randlov_actionvaluenetwork_10.xml
randlov_actionvaluenetwork_20.xml
randlov_actionvaluenetwork_30.xml
"""
# TODO any file naming.
n_times = len(count)
actionvalues = np.empty((n_times, n_actions))
for i in range(n_times):
fname = network_name_prefix + '%i.xml' % count[i]
actionvalues[i, :] = nfq_action_value(fname)
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
actions = np.arange(n_actions)
X, Y = np.meshgrid(actions, count)
#ax.plot_surface(X, Y, actionvalues)
ax.plot_wireframe(X, Y, actionvalues)
plt.show()
def plot_nfq_action_value(network_name, state=[0, 0, 0, 0, 0]):
"""Plots Q(a) for the given state. Must provide a network serialization
(.xml). Assumes there are 9 action values.
Example::
>>> plot_nfq_action_value('randlov_actionvaluenetwork.xml', [0, 0, 0, 0, 0])
"""
pl.ion()
n_actions = 9
actionvalues = nfq_action_value(network_name, state)
actions = np.arange(len(actionvalues))
bar_width = 0.35
pl.bar(actions, actionvalues, bar_width)
pl.xticks(actions + 0.5 * bar_width, actions)
pl.show()
def nfq_action_value(network_fname, state=[0, 0, 0, 0, 0]):
# TODO generalize away from 9 action values. Ask the network how many
# discrete action values there are.
n_actions = 9
network = NetworkReader.readFrom(network_fname)
actionvalues = np.empty(n_actions)
for i_action in range(n_actions):
network_input = r_[state, one_to_n(i_action, n_actions)]
actionvalues[i_action] = network.activate(network_input)
return actionvalues
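# Hedged usage sketch (the file name is hypothetical): load a serialized action
# value network and pick the greedy action for the all-zero state.
# values = nfq_action_value('randlov_actionvaluenetwork_100.xml')
# best_action = values.argmax()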
| mit |
Evfro/polara | polara/datasets/yahoo.py | 1 | 1594 | import tarfile
import pandas as pd
def get_yahoo_music_data(path=None, fileid=0, include_test=True, read_attributes=False, read_genres=False):
res = []
if path:
data_folder = 'ydata-ymusic-user-song-ratings-meta-v1_0'
col_names = ['userid', 'songid', 'rating']
with tarfile.open(path, 'r:gz') as tar:
handle = tar.getmember(f'{data_folder}/train_{fileid}.txt')
file = tar.extractfile(handle)
data = pd.read_csv(file, sep='\t', header=None, names=col_names)
res.append(data)
if include_test:
handle = tar.getmember(f'{data_folder}/test_{fileid}.txt')
file = tar.extractfile(handle)
data = pd.read_csv(file, sep='\t', header=None, names=col_names)
res.append(data)
if read_attributes:
handle = tar.getmember(f'{data_folder}/song-attributes.txt')
file = tar.extractfile(handle)
attr = pd.read_csv(file, sep='\t', header=None, index_col=0,
names=['songid', 'albumid', 'artistid', 'genreid'])
res.append(attr)
if read_genres:
# the genre hierarchy ships as its own file in the Yahoo! archive
handle = tar.getmember(f'{data_folder}/genre-hierarchy.txt')
file = tar.extractfile(handle)
genres = pd.read_csv(file, sep='\t', header=None, index_col=0,
names=['genreid', 'parent_genre', 'level', 'genre_name'])
res.append(genres)
if len(res) == 1:
res = res[0]
return res
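# Hedged usage sketch (the archive path is hypothetical): with include_test and
# read_attributes enabled the function returns the train ratings, test ratings
# and song attributes, in that order.
# train, test, song_attrs = get_yahoo_music_data('ydata-ymusic-ratings.tar.gz',
#                                                include_test=True,
#                                                read_attributes=True)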
| mit |
xiaoxiamii/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
| bsd-3-clause |
MiguelAguilera/critical-learning | Mountain-Car/train.py | 1 | 7919 | #!/usr/bin/env python
from embodied_ising import ising
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# plt.switch_backend('agg')
import ising_correlations as ic
from copy import copy
import numpy.random as rnd
plt.rc('text', usetex=True)
font = {'family': 'serif', 'size': 15, 'serif': ['computer modern roman']}
plt.rc('font', **font)
plt.rc('legend', **{'fontsize': 13})
###############################
"""
Variables
"""
########## Network ##########
size = 4
Nsensors = 1
Nmotors = 1
########## Lattice reconfiguration ##########
L = 100 # Lattice size
corrs = 1 # number of lattice reconfigurations
########## Microbial Genetic Algorithm ##########
P = 1 # Population size
TourBatchSize = 10 # number of microbial tournament executions per repetition
########## Critical Cognitive Learning ##########
T = 500 # number of samples/iteration
Iterations = 100
repetitions = 10
###############################
"""
Method to calculate the level of adjustment between
real and reference correlations
Parameters:
I: Ising model with actual and reference correlations
Returns:
fit: the fitness
i: the index of the element of the network with the worst correlations
"""
def calculateCorrelationFitness(I):
fit = np.amax(np.abs(I.C_ref - I.C))
diffC = np.abs(I.C_ref - I.C)
# looking for the element with the worst correlations
s = np.sum(diffC, axis=0)
i = s.argmax()
return fit, i
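# Hedged illustration (not in the original script): the returned fit is the
# largest absolute gap between the reference and measured correlation matrices,
# and the index marks the unit whose correlations deviate the most.
# fit, worst_unit = calculateCorrelationFitness(I)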
###############################
"""
Method to find a better lattice configuration in order to approximate
the reference correlations to those generated by a given Ising model
"""
def adjustRefCorrelations(I, mode='all'):
fit_old, index = calculateCorrelationFitness(I)
I_new = copy(I)
# I_new.m_ref = ic.random_means(I.size)
I_new.m_ref = np.zeros(I.size)
I_new.pos = ic.move_one_position(I_new, mode=mode)
I_new.C_ref = ic.ising_correlations(I_new.pos, I_new.m_ref)
fit_new, index_new = calculateCorrelationFitness(I_new)
diffFit = fit_new - fit_old
# print("diffFit: "+str(diffFit))
if(diffFit < 0): # the new fit is better
# print("corr fit: "+str(fit_new))
return I_new
else:
# print("corr fit: "+str(fit_old))
return I
###############################
"""
Microbial genetic algorithm
used to find the best agent
"""
def microbial_tournament(Population, fit):
# Recombination and mutation rates
REC = 0.5
MUT = 0.1
# Randomly select two cells for the tournament
i = np.random.randint(len(Population))
j = np.random.randint(len(Population))
while j == i:
j = np.random.randint(len(Population))
# Tournament
# Fitness evaluation for each configuration
fit[i] = calculateCognitiveFitness(Population[i])
fit[j] = calculateCognitiveFitness(Population[j])
if fit[j] > fit[i]: # swap the indices so that j remains the loser
i1 = i
i = j
j = i1
I = Population[i]
J = Population[j]
#Recombination and mutation
for ind in range(I.size**2):
r1, r2 = np.unravel_index(ind, (I.size, I.size))
if np.random.rand(1) < REC:
J.J[r1, r2] = I.J[r1, r2]
if np.random.rand(1) < MUT:
J.J[r1, r2] *= rnd.uniform(-2, 2)
###############################
"""
Method to calculate the performance of the ising model
in terms of the cognitive task
Task: maximize the time in the right hill
"""
def calculateCognitiveFitness(I, nSamples=1000, plot=False):
data = I.simulate(nSamples, plot)
fit = np.mean(data > 0.8)
return fit
###############################
"""
Initialize population
"""
Population = []
fitness = np.zeros(P)
for i in range(P):
I = ising(size, Nsensors, Nmotors)
# I.m_ref = ic.random_means(size)
I.m_ref = np.zeros(size)
I.pos = ic.random_positions(L, L, size)
I.C_ref = ic.ising_correlations(I.pos, I.m_ref)
Population.append(I)
# fitness[i]=calculateCognitiveFitness(I)
###############################
"""
Update correlation (Population level)
"""
def updateCorrelations(Population, fitness):
reconfErrors = np.zeros(corrs)
criticalErrors = np.zeros(Iterations)
for p in range(P):
I = Population[p]
criticalErrors = I.CriticalLearning(Iterations, T)
printRefPos(I.pos)
printDistanceMatrix(I.size, I.C_ref)
printDistanceMatrix(I.size, I.C)
input("press a key...")
# print("critical errors")
# print(criticalErrors)
for i in range(corrs):
I = adjustRefCorrelations(I, mode='hidden')
fit, index = calculateCorrelationFitness(I)
reconfErrors[i] = fit
Population[p] = I
# best = bestFitness.argmax()
return reconfErrors, criticalErrors
###############################
"""
Update cognitive fitness (Population level)
"""
def updateCognitiveFitness(Population, fitness):
for p in range(P):
I = Population[p]
fitness[p] = calculateCognitiveFitness(I)
###############################
"""
"""
def printDistanceMatrix(size, C):
distanceMatrix = np.zeros((size, size))
for i in range(size):
for j in range(i + 1, size):
distanceMatrix[i, j] = np.floor(np.abs(C[i, j] / 0.9)**-4)
print(distanceMatrix)
###############################
"""
"""
def printRefPos(pos):
distances = np.zeros((len(pos), len(pos)))
for i in range(len(pos)):
for j in range(i + 1, len(pos)):
distances[i, j] = np.abs(
pos[i, 0] - pos[j, 0]) + np.abs(pos[i, 1] - pos[j, 1])
print(distances)
###############################
"""
Training algorithm
"""
cogFitness = np.zeros(repetitions * TourBatchSize)
reconfErrors = np.zeros(repetitions * corrs)
criticalErrors = np.zeros(repetitions * Iterations)
for rep in range(repetitions):
reconfE, criticalE = updateCorrelations(Population, fitness)
reconfErrors[rep * corrs:(rep + 1) * corrs] = reconfE
criticalErrors[rep * Iterations:(rep + 1) * Iterations] = criticalE
# plt.scatter(rep,corrFitness[rep],color='k')
# plt.pause(0.05)
# updateCognitiveFitness(Population,fitness)
# for i in range(TourBatchSize):
# microbial_tournament(Population,fitness)
# best = fitness.argmax()
# f=fitness[best]
# x=rep*TourBatchSize+i
# cogFitness[x]=f
# print("Best fitness: "+str(f))
plt.plot(range(len(reconfErrors)), reconfErrors)
plt.xlabel("Reconfiguration index")
plt.ylabel("Error")
plt.title('Correlation errors during lattice reconfiguration')
xcoords = []
for i in range(repetitions):
xcoords.append(i * corrs)
for xc in xcoords:
plt.axvline(x=xc, color='gray', linestyle='--', linewidth=0.5)
plt.savefig("./reconfigurationErrors" + str(size) + ".pdf")
plt.figure()
plt.plot(range(len(criticalErrors)), criticalErrors)
plt.xlabel("Iteration")
plt.ylabel("Error")
plt.title('Correlation errors during Critical Learning')
xcoords = []
for i in range(repetitions):
xcoords.append(i * Iterations)
for xc in xcoords:
plt.axvline(x=xc, color='gray', linestyle='--', linewidth=0.5)
plt.savefig("./LearningErrors" + str(size) + ".pdf")
# plt.plot(cogFitness)
#plt.title('Cognitive Fitness')
#best = fitness.argmax()
#print("Best fitness: "+str(fitness[best]))
#Ibest = Population[best]
# Ibest.render()
# j=rep*corrs+i
# y[j]=fit
# plt.scatter(j,y[j],color='k')
# plt.pause(0.05)
# plt.plot(y)
# print("Calculating Performance...")
# fit = calculateCognitiveFitness(I,1000,False)
# print(fit)
###############################
"""
Save model
"""
# filename = 'files/network-size_' + str(size) + '-sensors_' + str(Nsensors) + '-motors_' + str(
# Nmotors) + '-T_' + str(T) + '-Iterations_' + str(Iterations) + '-ind_' + str(rep) + '.npz'
#np.savez(filename, J=I.J, h=I.h, m1=I.m_ref, Cint=I.C_ref)
| gpl-3.0 |
neskk/PokemonGo-Map | pogom/geofence.py | 14 | 5762 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import timeit
import logging
from .utils import get_args
log = logging.getLogger(__name__)
args = get_args()
# Trying to import matplotlib, which is not compatible with all hardware.
# Matplotlib is faster for big calculations.
try:
from matplotlib.path import Path
except ImportError as e:
# Pass as this is an optional requirement. We're going to check later if it
# was properly imported and only use it if it's installed.
pass
class Geofences:
def __init__(self):
self.geofenced_areas = []
self.excluded_areas = []
self.use_matplotlib = 'matplotlib' in sys.modules
if args.geofence_file or args.geofence_excluded_file:
log.info('Loading geofenced or excluded areas.')
self.geofenced_areas = self.parse_geofences_file(
args.geofence_file, excluded=False)
self.excluded_areas = self.parse_geofences_file(
args.geofence_excluded_file, excluded=True)
log.info('Loaded %d geofenced and %d excluded areas.',
len(self.geofenced_areas),
len(self.excluded_areas))
def is_enabled(self):
return (self.geofenced_areas or self.excluded_areas)
def get_geofenced_coordinates(self, coordinates):
log.info('Using matplotlib: %s.', self.use_matplotlib)
log.info('Found %d coordinates to geofence.', len(coordinates))
geofenced_coordinates = []
startTime = timeit.default_timer()
for c in coordinates:
# Coordinate is not valid if in one excluded area.
if self._is_excluded(c):
continue
# Coordinate is geofenced if in one geofenced area.
if self.geofenced_areas:
for va in self.geofenced_areas:
if self._in_area(c, va):
geofenced_coordinates.append(c)
break
else:
geofenced_coordinates.append(c)
elapsedTime = timeit.default_timer() - startTime
log.info('Geofenced to %s coordinates in %.2fs.',
len(geofenced_coordinates), elapsedTime)
return geofenced_coordinates
def _is_excluded(self, coordinate):
for ea in self.excluded_areas:
if self._in_area(coordinate, ea):
return True
return False
def _in_area(self, coordinate, area):
if args.spawnpoint_scanning:
point = {'lat': coordinate['lat'], 'lon': coordinate['lng']}
else:
point = {'lat': coordinate[0], 'lon': coordinate[1]}
polygon = area['polygon']
if self.use_matplotlib:
return self.is_point_in_polygon_matplotlib(point, polygon)
else:
return self.is_point_in_polygon_custom(point, polygon)
@staticmethod
def parse_geofences_file(geofence_file, excluded):
geofences = []
# Read coordinates of excluded areas from file.
if geofence_file:
with open(geofence_file) as f:
for line in f:
line = line.strip()
if len(line) == 0: # Empty line.
continue
elif line.startswith("["): # Name line.
name = line.replace("[", "").replace("]", "")
geofences.append({
'excluded': excluded,
'name': name,
'polygon': []
})
log.debug('Found geofence: %s.', name)
else: # Coordinate line.
lat, lon = line.split(",")
LatLon = {'lat': float(lat), 'lon': float(lon)}
geofences[-1]['polygon'].append(LatLon)
return geofences
@staticmethod
def is_point_in_polygon_matplotlib(point, polygon):
pointTuple = (point['lat'], point['lon'])
polygonTupleList = []
for c in polygon:
coordinateTuple = (c['lat'], c['lon'])
polygonTupleList.append(coordinateTuple)
polygonTupleList.append(polygonTupleList[0])
path = Path(polygonTupleList)
return path.contains_point(pointTuple)
@staticmethod
def is_point_in_polygon_custom(point, polygon):
# Initialize first coordinate as default.
maxLat = polygon[0]['lat']
minLat = polygon[0]['lat']
maxLon = polygon[0]['lon']
minLon = polygon[0]['lon']
for coords in polygon:
maxLat = max(coords['lat'], maxLat)
minLat = min(coords['lat'], minLat)
maxLon = max(coords['lon'], maxLon)
minLon = min(coords['lon'], minLon)
if ((point['lat'] > maxLat) or (point['lat'] < minLat) or
(point['lon'] > maxLon) or (point['lon'] < minLon)):
return False
inside = False
lat1, lon1 = polygon[0]['lat'], polygon[0]['lon']
N = len(polygon)
for n in range(1, N+1):
lat2, lon2 = polygon[n % N]['lat'], polygon[n % N]['lon']
if (min(lon1, lon2) < point['lon'] <= max(lon1, lon2) and
point['lat'] <= max(lat1, lat2)):
if lon1 != lon2:
latIntersection = (
(point['lon'] - lon1) *
(lat2 - lat1) / (lon2 - lon1) +
lat1)
if lat1 == lat2 or point['lat'] <= latIntersection:
inside = not inside
lat1, lon1 = lat2, lon2
return inside
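# Illustrative sketch (not part of the original module): a minimal sanity
# check of the custom ray-casting test above, using a unit square. The helper
# name ``_demo_point_in_polygon`` is hypothetical.
def _demo_point_in_polygon():
    """Return (True, False) for a point inside / outside a unit square."""
    square = [{'lat': 0.0, 'lon': 0.0}, {'lat': 0.0, 'lon': 1.0},
              {'lat': 1.0, 'lon': 1.0}, {'lat': 1.0, 'lon': 0.0}]
    inside = {'lat': 0.5, 'lon': 0.5}
    outside = {'lat': 2.0, 'lon': 2.0}
    # The first point is inside the square; the second already fails the
    # bounding-box pre-check.
    return (Geofences.is_point_in_polygon_custom(inside, square),
            Geofences.is_point_in_polygon_custom(outside, square))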
| agpl-3.0 |
vortex-ape/scikit-learn | sklearn/tests/test_init.py | 4 | 2258 | # Basic unittests to test functioning of module's top-level
import subprocess
import pkgutil
import pytest
import sklearn
from sklearn.utils.testing import assert_equal
__author__ = 'Yaroslav Halchenko'
__license__ = 'BSD'
try:
from sklearn import * # noqa
_top_import_error = None
except Exception as e:
_top_import_error = e
def test_import_skl():
    # Test whether the above import has failed for some reason
# "import *" is discouraged outside of the module level, hence we
# rely on setting up the variable above
assert_equal(_top_import_error, None)
def test_import_sklearn_no_warnings():
# Test that importing scikit-learn main modules doesn't raise any warnings.
try:
pkgs = pkgutil.iter_modules(path=sklearn.__path__, prefix='sklearn.')
import_modules = '; '.join(['import ' + modname
for _, modname, _ in pkgs
if (not modname.startswith('_') and
# add deprecated top level modules
# below to ignore them
modname not in [])])
message = subprocess.check_output(['python', '-Wdefault',
'-c', import_modules],
stderr=subprocess.STDOUT)
message = message.decode("utf-8")
message = '\n'.join([line for line in message.splitlines()
if not (
# ignore ImportWarning due to Cython
"ImportWarning" in line or
# ignore DeprecationWarning due to pytest
"pytest" in line or
# ignore DeprecationWarnings due to
# numpy.oldnumeric
"oldnumeric" in line
)])
assert 'Warning' not in message
assert 'Error' not in message
except Exception as e:
pytest.skip('soft-failed test_import_sklearn_no_warnings.\n'
' %s, \n %s' % (e, message))
| bsd-3-clause |
Tomasuh/Tomasuh.github.io | files/cyclic/analyse.py | 1 | 1922 | import pandas
import sqlite3
import dbcommands
import numpy as np
import time
import matplotlib.pyplot as plt
db_obj = dbcommands.the_db()
posts = db_obj.fetch_posts()
df = pandas.DataFrame(data=posts, columns = ["key",\
"title",\
"user",\
"date",\
"size",\
"syntax",\
"expire",\
"scrape_url",\
"full_url"])
df["size"] = df["size"].astype(int)
df = df.sort_values(by='date',ascending=True)
threshold = 10 - 1
max_diff_sec = 60
complete_ranges = []
for _, user_df in df.groupby("user"):
user = user_df["user"].iloc[0]
diff_middle = None
timestamps = []
repeated_intervals = 0
the_range = []
# Loop over grouped users
for __, row in user_df.iterrows():
current_ts = row["date"]
success = True
nr = 0
while nr < len(timestamps):
diff_to = (current_ts - timestamps[nr]).total_seconds()
avg_diff = diff_to/(len(timestamps) - nr)
if len(timestamps) == 1:
diff_middle = avg_diff
elif avg_diff > diff_middle + max_diff_sec or avg_diff < diff_middle - max_diff_sec:
success = False
break
nr += 1
if success:
timestamps.append(current_ts)
the_range.append((row["title"], current_ts, row["full_url"]))
repeated_intervals += 1
else:
if repeated_intervals >= threshold:
complete_ranges.append((row["user"], diff_middle, the_range))
the_range = the_range[:-1]
repeated_intervals = 0
timestamps = timestamps[:-1]
for user, diff_avg, cyclic_range in complete_ranges:
df_c = pandas.DataFrame(data=cyclic_range, columns = ["title",\
"time",\
"url"])
for title, time, url in cyclic_range:
print "%s %s %s %s" % (user, time, url, title)
plt.plot(df_c['time'], [diff_avg/60] * len(df_c['time']), 'ro')
plt.ylabel("Avg interval minutes")
plt.text(0.6, 0.8, user, fontsize=14, transform=plt.gcf().transFigure)
plt.show()
| mit |
cwhanse/pvlib-python | pvlib/atmosphere.py | 3 | 24256 | """
The ``atmosphere`` module contains methods to calculate relative and
absolute airmass and to determine pressure from altitude or vice versa.
"""
from warnings import warn
import numpy as np
import pandas as pd
APPARENT_ZENITH_MODELS = ('simple', 'kasten1966', 'kastenyoung1989',
'gueymard1993', 'pickering2002')
TRUE_ZENITH_MODELS = ('youngirvine1967', 'young1994')
AIRMASS_MODELS = APPARENT_ZENITH_MODELS + TRUE_ZENITH_MODELS
def pres2alt(pressure):
'''
Determine altitude from site pressure.
Parameters
----------
pressure : numeric
Atmospheric pressure. [Pa]
Returns
-------
altitude : numeric
Altitude above sea level. [m]
Notes
------
The following assumptions are made
============================ ================
Parameter Value
============================ ================
Base pressure 101325 Pa
Temperature at zero altitude 288.15 K
Gravitational acceleration 9.80665 m/s^2
Lapse rate -6.5E-3 K/m
Gas constant for air 287.053 J/(kg K)
Relative Humidity 0%
============================ ================
References
-----------
.. [1] "A Quick Derivation relating altitude to air pressure" from
Portland State Aerospace Society, Version 1.03, 12/22/2004.
'''
alt = 44331.5 - 4946.62 * pressure ** (0.190263)
return alt
def alt2pres(altitude):
'''
Determine site pressure from altitude.
Parameters
----------
altitude : numeric
Altitude above sea level. [m]
Returns
-------
pressure : numeric
Atmospheric pressure. [Pa]
Notes
------
The following assumptions are made
============================ ================
Parameter Value
============================ ================
Base pressure 101325 Pa
Temperature at zero altitude 288.15 K
Gravitational acceleration 9.80665 m/s^2
Lapse rate -6.5E-3 K/m
Gas constant for air 287.053 J/(kg K)
Relative Humidity 0%
============================ ================
References
-----------
.. [1] "A Quick Derivation relating altitude to air pressure" from
Portland State Aerospace Society, Version 1.03, 12/22/2004.
'''
press = 100 * ((44331.514 - altitude) / 11880.516) ** (1 / 0.1902632)
return press
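# Illustrative sketch (not part of the original pvlib API): a small round-trip
# check of the two conversions above. The helper name
# ``_demo_pressure_altitude_roundtrip`` is hypothetical.
def _demo_pressure_altitude_roundtrip(pressure=101325.):
    """Return True if alt2pres(pres2alt(p)) recovers p to within ~0.1%."""
    altitude = pres2alt(pressure)
    recovered = alt2pres(altitude)
    # The two fits use slightly different constants, so the round trip is only
    # approximate, but near sea-level pressures it agrees to well within 0.1%.
    return abs(recovered - pressure) / pressure < 1e-3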
def get_absolute_airmass(airmass_relative, pressure=101325.):
r'''
Determine absolute (pressure-adjusted) airmass from relative
airmass and pressure.
The calculation for absolute airmass (:math:`AM_a`) is
.. math::
AM_a = AM_r \frac{P}{101325}
where :math:`AM_r` is relative air mass at sea level and :math:`P` is
atmospheric pressure.
Parameters
----------
airmass_relative : numeric
The airmass at sea level. [unitless]
pressure : numeric, default 101325
Atmospheric pressure. [Pa]
Returns
-------
airmass_absolute : numeric
Absolute (pressure-adjusted) airmass
References
----------
.. [1] C. Gueymard, "Critical analysis and performance assessment of
clear sky solar irradiance models using theoretical and measured
data," Solar Energy, vol. 51, pp. 121-138, 1993.
'''
airmass_absolute = airmass_relative * pressure / 101325.
return airmass_absolute
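# Illustrative sketch (hypothetical helper, not part of the original module):
# chain ``alt2pres`` with ``get_absolute_airmass`` to pressure-correct a
# relative airmass for a site at altitude.
def _demo_absolute_airmass(airmass_relative=2., altitude=1500.):
    """Pressure-adjust a relative airmass for a site at ``altitude`` [m]."""
    pressure = alt2pres(altitude)
    # At about 1500 m the pressure is roughly 84-85 kPa, so a relative
    # airmass of 2 maps to an absolute airmass of roughly 1.7.
    return get_absolute_airmass(airmass_relative, pressure)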
def get_relative_airmass(zenith, model='kastenyoung1989'):
'''
Calculate relative (not pressure-adjusted) airmass at sea level.
Parameter ``model`` allows selection of different airmass models.
Parameters
----------
zenith : numeric
Zenith angle of the sun. [degrees]
model : string, default 'kastenyoung1989'
Available models include the following:
* 'simple' - secant(apparent zenith angle) -
Note that this gives -Inf at zenith=90
* 'kasten1966' - See reference [1] -
requires apparent sun zenith
* 'youngirvine1967' - See reference [2] -
requires true sun zenith
* 'kastenyoung1989' (default) - See reference [3] -
requires apparent sun zenith
* 'gueymard1993' - See reference [4] -
requires apparent sun zenith
* 'young1994' - See reference [5] -
          requires true sun zenith
* 'pickering2002' - See reference [6] -
requires apparent sun zenith
Returns
-------
airmass_relative : numeric
Relative airmass at sea level. Returns NaN values for any
zenith angle greater than 90 degrees. [unitless]
Notes
-----
Some models use apparent (refraction-adjusted) zenith angle while
other models use true (not refraction-adjusted) zenith angle. Apparent
zenith angles should be calculated at sea level.
References
----------
.. [1] Fritz Kasten. "A New Table and Approximation Formula for the
Relative Optical Air Mass". Technical Report 136, Hanover, N.H.:
U.S. Army Material Command, CRREL.
.. [2] A. T. Young and W. M. Irvine, "Multicolor Photoelectric
Photometry of the Brighter Planets," The Astronomical Journal, vol.
72, pp. 945-950, 1967.
.. [3] Fritz Kasten and Andrew Young. "Revised optical air mass tables
and approximation formula". Applied Optics 28:4735-4738
.. [4] C. Gueymard, "Critical analysis and performance assessment of
clear sky solar irradiance models using theoretical and measured
data," Solar Energy, vol. 51, pp. 121-138, 1993.
.. [5] A. T. Young, "AIR-MASS AND REFRACTION," Applied Optics, vol. 33,
pp. 1108-1110, Feb 1994.
.. [6] Keith A. Pickering. "The Ancient Star Catalog". DIO 12:1, 20,
.. [7] Matthew J. Reno, Clifford W. Hansen and Joshua S. Stein, "Global
Horizontal Irradiance Clear Sky Models: Implementation and Analysis"
Sandia Report, (2012).
'''
# set zenith values greater than 90 to nans
z = np.where(zenith > 90, np.nan, zenith)
zenith_rad = np.radians(z)
model = model.lower()
if 'kastenyoung1989' == model:
am = (1.0 / (np.cos(zenith_rad) +
0.50572*((6.07995 + (90 - z)) ** - 1.6364)))
elif 'kasten1966' == model:
am = 1.0 / (np.cos(zenith_rad) + 0.15*((93.885 - z) ** - 1.253))
elif 'simple' == model:
am = 1.0 / np.cos(zenith_rad)
elif 'pickering2002' == model:
am = (1.0 / (np.sin(np.radians(90 - z +
244.0 / (165 + 47.0 * (90 - z) ** 1.1)))))
elif 'youngirvine1967' == model:
sec_zen = 1.0 / np.cos(zenith_rad)
am = sec_zen * (1 - 0.0012 * (sec_zen * sec_zen - 1))
elif 'young1994' == model:
am = ((1.002432*((np.cos(zenith_rad)) ** 2) +
0.148386*(np.cos(zenith_rad)) + 0.0096467) /
(np.cos(zenith_rad) ** 3 +
0.149864*(np.cos(zenith_rad) ** 2) +
0.0102963*(np.cos(zenith_rad)) + 0.000303978))
elif 'gueymard1993' == model:
am = (1.0 / (np.cos(zenith_rad) +
0.00176759*(z)*((94.37515 - z) ** - 1.21563)))
else:
        raise ValueError('%s is not a valid model for relativeairmass' % model)
if isinstance(zenith, pd.Series):
am = pd.Series(am, index=zenith.index)
return am
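# Illustrative sketch (hypothetical helper, not part of the original module):
# relative airmass for a few zenith angles with the default model.
def _demo_relative_airmass():
    """Relative airmass at 0, 60 and 95 degrees zenith ('kastenyoung1989')."""
    zenith = np.array([0., 60., 95.])
    # Expect roughly 1 overhead, roughly 2 at 60 degrees, and NaN beyond 90.
    return get_relative_airmass(zenith)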
def gueymard94_pw(temp_air, relative_humidity):
r"""
Calculates precipitable water (cm) from ambient air temperature (C)
    and relative humidity (%) using an empirical model. The
accuracy of this method is approximately 20% for moderate PW (1-3
cm) and less accurate otherwise.
The model was developed by expanding Eq. 1 in [2]_:
.. math::
Pw = 0.1 H_v \rho_v
using Eq. 2 in [2]_
.. math::
\rho_v = 216.7 R_H e_s /T
:math:`Pw` is the precipitable water (cm), :math:`H_v` is the apparent
water vapor scale height (km) and :math:`\rho_v` is the surface water
    vapor density (g/m^3). The expression for :math:`H_v` is Eq. 4 in [2]_:
.. math::
H_v = 0.4976 + 1.5265 \frac{T}{273.15}
+ \exp \left(13.6897 \frac{T}{273.15}
- 14.9188 \left( \frac{T}{273.15} \right)^3 \right)
In the expression for :math:`\rho_v`, :math:`e_s` is the saturation water
vapor pressure (millibar). The expression for :math:`e_s` is Eq. 1 in [3]_
.. math::
e_s = \exp \left(22.330 - 49.140 \frac{100}{T} -
10.922 \left(\frac{100}{T}\right)^2 -
0.39015 \frac{T}{100} \right)
Parameters
----------
temp_air : numeric
ambient air temperature :math:`T` at the surface. [C]
relative_humidity : numeric
relative humidity :math:`R_H` at the surface. [%]
Returns
-------
pw : numeric
precipitable water. [cm]
References
----------
.. [1] W. M. Keogh and A. W. Blakers, Accurate Measurement, Using Natural
Sunlight, of Silicon Solar Cells, Prog. in Photovoltaics: Res.
and Appl. 2004, vol 12, pp. 1-19 (:doi:`10.1002/pip.517`)
.. [2] C. Gueymard, Analysis of Monthly Average Atmospheric Precipitable
Water and Turbidity in Canada and Northern United States,
Solar Energy vol 53(1), pp. 57-71, 1994.
.. [3] C. Gueymard, Assessment of the Accuracy and Computing Speed of
simplified saturation vapor equations using a new reference
dataset, J. of Applied Meteorology 1993, vol. 32(7), pp.
1294-1300.
"""
T = temp_air + 273.15 # Convert to Kelvin # noqa: N806
RH = relative_humidity # noqa: N806
theta = T / 273.15
# Eq. 1 from Keogh and Blakers
pw = (
0.1 *
(0.4976 + 1.5265*theta + np.exp(13.6897*theta - 14.9188*(theta)**3)) *
(216.7*RH/(100*T)*np.exp(22.330 - 49.140*(100/T) -
10.922*(100/T)**2 - 0.39015*T/100)))
pw = np.maximum(pw, 0.1)
return pw
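# Illustrative sketch (hypothetical helper, not part of the original module):
# precipitable water from near-surface temperature and relative humidity.
def _demo_precipitable_water(temp_air=20., relative_humidity=50.):
    """Estimate Pw [cm]; moderate conditions give on the order of 1-3 cm."""
    return gueymard94_pw(temp_air, relative_humidity)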
def first_solar_spectral_correction(pw, airmass_absolute,
module_type=None, coefficients=None,
min_pw=0.1, max_pw=8):
r"""
Spectral mismatch modifier based on precipitable water and absolute
(pressure-adjusted) airmass.
Estimates a spectral mismatch modifier :math:`M` representing the effect on
module short circuit current of variation in the spectral
    irradiance. :math:`M` is estimated from absolute (pressure corrected) air
mass, :math:`AM_a`, and precipitable water, :math:`Pw`, using the following
function:
.. math::
M = c_1 + c_2 AM_a + c_3 Pw + c_4 AM_a^{0.5}
+ c_5 Pw^{0.5} + c_6 \frac{AM_a} {Pw^{0.5}}
Default coefficients are determined for several cell types with
known quantum efficiency curves, by using the Simple Model of the
Atmospheric Radiative Transfer of Sunshine (SMARTS) [1]_. Using
SMARTS, spectrums are simulated with all combinations of AMa and
Pw where:
* :math:`0.5 \textrm{cm} <= Pw <= 5 \textrm{cm}`
* :math:`1.0 <= AM_a <= 5.0`
* Spectral range is limited to that of CMP11 (280 nm to 2800 nm)
* spectrum simulated on a plane normal to the sun
* All other parameters fixed at G173 standard
From these simulated spectra, M is calculated using the known
quantum efficiency curves. Multiple linear regression is then
applied to fit Eq. 1 to determine the coefficients for each module.
Based on the PVLIB Matlab function ``pvl_FSspeccorr`` by Mitchell
Lee and Alex Panchula of First Solar, 2016 [2]_.
Parameters
----------
pw : array-like
atmospheric precipitable water. [cm]
airmass_absolute : array-like
absolute (pressure-adjusted) airmass. [unitless]
min_pw : float, default 0.1
minimum atmospheric precipitable water. Any pw value lower than min_pw
is set to min_pw to avoid model divergence. [cm]
max_pw : float, default 8
maximum atmospheric precipitable water. Any pw value higher than max_pw
is set to NaN to avoid model divergence. [cm]
module_type : None or string, default None
a string specifying a cell type. Values of 'cdte', 'monosi', 'xsi',
'multisi', and 'polysi' (can be lower or upper case). If provided,
module_type selects default coefficients for the following modules:
* 'cdte' - First Solar Series 4-2 CdTe module.
* 'monosi', 'xsi' - First Solar TetraSun module.
* 'multisi', 'polysi' - anonymous multi-crystalline silicon module.
* 'cigs' - anonymous copper indium gallium selenide module.
* 'asi' - anonymous amorphous silicon module.
The module used to calculate the spectral correction
coefficients corresponds to the Multi-crystalline silicon
Manufacturer 2 Model C from [3]_. The spectral response (SR) of CIGS
and a-Si modules used to derive coefficients can be found in [4]_
coefficients : None or array-like, default None
Allows for entry of user-defined spectral correction
coefficients. Coefficients must be of length 6. Derivation of
coefficients requires use of SMARTS and PV module quantum
efficiency curve. Useful for modeling PV module types which are
not included as defaults, or to fine tune the spectral
correction to a particular PV module. Note that the parameters for
modules with very similar quantum efficiency should be similar,
in most cases limiting the need for module specific coefficients.
Returns
-------
modifier: array-like
        spectral mismatch factor (unitless) which can be multiplied
with broadband irradiance reaching a module's cells to estimate
effective irradiance, i.e., the irradiance that is converted to
electrical current.
References
----------
.. [1] Gueymard, Christian. SMARTS2: a simple model of the atmospheric
radiative transfer of sunshine: algorithms and performance
assessment. Cocoa, FL: Florida Solar Energy Center, 1995.
.. [2] Lee, Mitchell, and Panchula, Alex. "Spectral Correction for
Photovoltaic Module Performance Based on Air Mass and Precipitable
Water." IEEE Photovoltaic Specialists Conference, Portland, 2016
.. [3] Marion, William F., et al. User's Manual for Data for Validating
Models for PV Module Performance. National Renewable Energy
Laboratory, 2014. http://www.nrel.gov/docs/fy14osti/61610.pdf
.. [4] Schweiger, M. and Hermann, W, Influence of Spectral Effects
on Energy Yield of Different PV Modules: Comparison of Pwat and
MMF Approach, TUV Rheinland Energy GmbH report 21237296.003,
January 2017
"""
# --- Screen Input Data ---
# *** Pw ***
# Replace Pw Values below 0.1 cm with 0.1 cm to prevent model from
# diverging"
pw = np.atleast_1d(pw)
pw = pw.astype('float64')
if np.min(pw) < min_pw:
pw = np.maximum(pw, min_pw)
warn(f'Exceptionally low pw values replaced with {min_pw} cm to '
'prevent model divergence')
# Warn user about Pw data that is exceptionally high
if np.max(pw) > max_pw:
pw[pw > max_pw] = np.nan
warn('Exceptionally high pw values replaced by np.nan: '
'check input data.')
# *** AMa ***
# Replace Extremely High AM with AM 10 to prevent model divergence
# AM > 10 will only occur very close to sunset
if np.max(airmass_absolute) > 10:
airmass_absolute = np.minimum(airmass_absolute, 10)
# Warn user about AMa data that is exceptionally low
if np.min(airmass_absolute) < 0.58:
warn('Exceptionally low air mass: ' +
'model not intended for extra-terrestrial use')
# pvl_absoluteairmass(1,pvl_alt2pres(4340)) = 0.58 Elevation of
    # Mina Pirquita, Argentina = 4340 m. Highest elevation city with
# population over 50,000.
_coefficients = {}
_coefficients['cdte'] = (
0.86273, -0.038948, -0.012506, 0.098871, 0.084658, -0.0042948)
_coefficients['monosi'] = (
0.85914, -0.020880, -0.0058853, 0.12029, 0.026814, -0.0017810)
_coefficients['xsi'] = _coefficients['monosi']
_coefficients['polysi'] = (
0.84090, -0.027539, -0.0079224, 0.13570, 0.038024, -0.0021218)
_coefficients['multisi'] = _coefficients['polysi']
_coefficients['cigs'] = (
0.85252, -0.022314, -0.0047216, 0.13666, 0.013342, -0.0008945)
_coefficients['asi'] = (
1.12094, -0.047620, -0.0083627, -0.10443, 0.098382, -0.0033818)
if module_type is not None and coefficients is None:
coefficients = _coefficients[module_type.lower()]
elif module_type is None and coefficients is not None:
pass
elif module_type is None and coefficients is None:
raise TypeError('No valid input provided, both module_type and ' +
'coefficients are None')
else:
raise TypeError('Cannot resolve input, must supply only one of ' +
'module_type and coefficients')
# Evaluate Spectral Shift
coeff = coefficients
ama = airmass_absolute
modifier = (
coeff[0] + coeff[1]*ama + coeff[2]*pw + coeff[3]*np.sqrt(ama) +
coeff[4]*np.sqrt(pw) + coeff[5]*ama/np.sqrt(pw))
return modifier
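# Illustrative sketch (hypothetical helper, not part of the original module):
# spectral mismatch modifier for a CdTe module over a range of conditions.
def _demo_spectral_correction():
    """Spectral modifier for a CdTe module; values are typically near 1."""
    pw = np.array([0.5, 1.0, 3.0])               # precipitable water [cm]
    airmass_absolute = np.array([1.0, 1.5, 3.0])
    return first_solar_spectral_correction(pw, airmass_absolute,
                                           module_type='cdte')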
def bird_hulstrom80_aod_bb(aod380, aod500):
"""
Approximate broadband aerosol optical depth.
Bird and Hulstrom developed a correlation for broadband aerosol optical
depth (AOD) using two wavelengths, 380 nm and 500 nm.
Parameters
----------
aod380 : numeric
AOD measured at 380 nm. [unitless]
aod500 : numeric
AOD measured at 500 nm. [unitless]
Returns
-------
aod_bb : numeric
Broadband AOD. [unitless]
See also
--------
pvlib.atmosphere.kasten96_lt
References
----------
.. [1] Bird and Hulstrom, "Direct Insolation Models" (1980)
`SERI/TR-335-344 <http://www.nrel.gov/docs/legosti/old/344.pdf>`_
.. [2] R. E. Bird and R. L. Hulstrom, "Review, Evaluation, and Improvement
of Direct Irradiance Models", Journal of Solar Energy Engineering
103(3), pp. 182-192 (1981)
:doi:`10.1115/1.3266239`
"""
# approximate broadband AOD using (Bird-Hulstrom 1980)
return 0.27583 * aod380 + 0.35 * aod500
def kasten96_lt(airmass_absolute, precipitable_water, aod_bb):
"""
Calculate Linke turbidity using Kasten pyrheliometric formula.
Note that broadband aerosol optical depth (AOD) can be approximated by AOD
    measured at 700 nm according to Molineaux [4]. Bird and Hulstrom offer an
alternate approximation using AOD measured at 380 nm and 500 nm.
Based on original implementation by Armel Oumbe.
.. warning::
These calculations are only valid for airmass less than 5 and
precipitable water less than 5 cm.
Parameters
----------
airmass_absolute : numeric
Pressure-adjusted airmass. [unitless]
precipitable_water : numeric
Precipitable water. [cm]
aod_bb : numeric
broadband AOD. [unitless]
Returns
-------
lt : numeric
Linke turbidity. [unitless]
See also
--------
pvlib.atmosphere.bird_hulstrom80_aod_bb
pvlib.atmosphere.angstrom_aod_at_lambda
References
----------
.. [1] F. Linke, "Transmissions-Koeffizient und Trubungsfaktor", Beitrage
zur Physik der Atmosphare, Vol 10, pp. 91-103 (1922)
.. [2] F. Kasten, "A simple parameterization of the pyrheliometric formula
for determining the Linke turbidity factor", Meteorologische Rundschau
33, pp. 124-127 (1980)
.. [3] Kasten, "The Linke turbidity factor based on improved values of the
integral Rayleigh optical thickness", Solar Energy, Vol. 56, No. 3,
pp. 239-244 (1996)
:doi:`10.1016/0038-092X(95)00114-7`
.. [4] B. Molineaux, P. Ineichen, N. O'Neill, "Equivalence of
pyrheliometric and monochromatic aerosol optical depths at a single key
wavelength", Applied Optics Vol. 37, issue 10, 7008-7018 (1998)
:doi:`10.1364/AO.37.007008`
.. [5] P. Ineichen, "Conversion function between the Linke turbidity and
the atmospheric water vapor and aerosol content", Solar Energy 82,
pp. 1095-1097 (2008)
:doi:`10.1016/j.solener.2008.04.010`
.. [6] P. Ineichen and R. Perez, "A new airmass independent formulation for
the Linke Turbidity coefficient", Solar Energy, Vol. 73, no. 3,
pp. 151-157 (2002)
:doi:`10.1016/S0038-092X(02)00045-2`
"""
# "From numerically integrated spectral simulations done with Modtran
# (Berk, 1989), Molineaux (1998) obtained for the broadband optical depth
    # of a clean and dry atmosphere (fictitious atmosphere that comprises only
# the effects of Rayleigh scattering and absorption by the atmosphere gases
# other than the water vapor) the following expression"
# - P. Ineichen (2008)
delta_cda = -0.101 + 0.235 * airmass_absolute ** (-0.16)
# "and the broadband water vapor optical depth where pwat is the integrated
# precipitable water vapor content of the atmosphere expressed in cm and am
# the optical air mass. The precision of these fits is better than 1% when
# compared with Modtran simulations in the range 1 < am < 5 and
# 0 < pwat < 5 cm at sea level" - P. Ineichen (2008)
delta_w = 0.112 * airmass_absolute ** (-0.55) * precipitable_water ** 0.34
# broadband AOD
delta_a = aod_bb
# "Then using the Kasten pyrheliometric formula (1980, 1996), the Linke
# turbidity at am = 2 can be written. The extension of the Linke turbidity
# coefficient to other values of air mass was published by Ineichen and
# Perez (2002)" - P. Ineichen (2008)
lt = -(9.4 + 0.9 * airmass_absolute) * np.log(
np.exp(-airmass_absolute * (delta_cda + delta_w + delta_a))
) / airmass_absolute
# filter out of extrapolated values
return lt
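# Illustrative sketch (hypothetical helper, not part of the original module):
# combine the broadband-AOD approximation above with the Kasten
# pyrheliometric formula to estimate a Linke turbidity.
def _demo_linke_turbidity():
    """Linke turbidity for a fairly clear sky; typical values are about 2-5."""
    aod_bb = bird_hulstrom80_aod_bb(aod380=0.15, aod500=0.10)
    return kasten96_lt(airmass_absolute=2.0, precipitable_water=1.0,
                       aod_bb=aod_bb)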
def angstrom_aod_at_lambda(aod0, lambda0, alpha=1.14, lambda1=700.0):
r"""
Get AOD at specified wavelength using Angstrom turbidity model.
Parameters
----------
aod0 : numeric
Aerosol optical depth (AOD) measured at wavelength ``lambda0``.
[unitless]
lambda0 : numeric
Wavelength corresponding to ``aod0``. [nm]
alpha : numeric, default 1.14
Angstrom :math:`\alpha` exponent corresponding to ``aod0``. [unitless]
lambda1 : numeric, default 700
Desired wavelength. [nm]
Returns
-------
aod1 : numeric
AOD at desired wavelength ``lambda1``. [unitless]
See also
--------
pvlib.atmosphere.angstrom_alpha
References
----------
.. [1] Anders Angstrom, "On the Atmospheric Transmission of Sun Radiation
and On Dust in the Air", Geografiska Annaler Vol. 11, pp. 156-166 (1929)
JSTOR
:doi:`10.2307/519399`
.. [2] Anders Angstrom, "Techniques of Determining the Turbidity of the
Atmosphere", Tellus 13:2, pp. 214-223 (1961) Taylor & Francis
:doi:`10.3402/tellusa.v13i2.9493` and Co-Action Publishing
:doi:`10.1111/j.2153-3490.1961.tb00078.x`
"""
return aod0 * ((lambda1 / lambda0) ** (-alpha))
def angstrom_alpha(aod1, lambda1, aod2, lambda2):
r"""
Calculate Angstrom alpha exponent.
Parameters
----------
aod1 : numeric
Aerosol optical depth at wavelength ``lambda1``. [unitless]
lambda1 : numeric
Wavelength corresponding to ``aod1``. [nm]
aod2 : numeric
Aerosol optical depth at wavelength ``lambda2``. [unitless]
lambda2 : numeric
Wavelength corresponding to ``aod2``. [nm]
Returns
-------
alpha : numeric
Angstrom :math:`\alpha` exponent for wavelength in
``(lambda1, lambda2)``. [unitless]
See also
--------
pvlib.atmosphere.angstrom_aod_at_lambda
"""
return - np.log(aod1 / aod2) / np.log(lambda1 / lambda2)
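# Illustrative sketch (hypothetical helper, not part of the original module):
# fit an Angstrom alpha from AOD measured at two wavelengths, then extrapolate
# both measurements to 700 nm; by construction the two extrapolations agree.
def _demo_angstrom_extrapolation(aod380=0.3, aod500=0.2):
    """Extrapolate AOD at 380 nm and 500 nm to 700 nm via a fitted alpha."""
    alpha = angstrom_alpha(aod380, 380.0, aod500, 500.0)
    aod700_from_380 = angstrom_aod_at_lambda(aod380, 380.0, alpha, 700.0)
    aod700_from_500 = angstrom_aod_at_lambda(aod500, 500.0, alpha, 700.0)
    return alpha, aod700_from_380, aod700_from_500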
| bsd-3-clause |
Ambuj-UF/ConCat-1.0 | src/Utils/Bio/Phylo/_utils.py | 1 | 20969 | # Copyright (C) 2009 by Eric Talevich (eric.talevich@gmail.com)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
import sys
# Add path to Bio
sys.path.append('../..')
"""Utilities for handling, displaying and exporting Phylo trees.
Third-party libraries are loaded when the corresponding function is called.
"""
__docformat__ = "restructuredtext en"
import math
import sys
def to_networkx(tree):
"""Convert a Tree object to a networkx graph.
The result is useful for graph-oriented analysis, and also interactive
plotting with pylab, matplotlib or pygraphviz, though the resulting diagram
is usually not ideal for displaying a phylogeny.
Requires NetworkX version 0.99 or later.
"""
try:
import networkx
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install NetworkX if you want to use to_networkx.")
# NB (1/2010): the networkx API stabilized at v.1.0
# 1.0+: edges accept arbitrary data as kwargs, weights are floats
# 0.99: edges accept weight as a string, nothing else
# pre-0.99: edges accept no additional data
# Ubuntu Lucid LTS uses v0.99, let's support everything
if networkx.__version__ >= '1.0':
def add_edge(graph, n1, n2):
graph.add_edge(n1, n2, weight=n2.branch_length or 1.0)
# Copy branch color value as hex, if available
if hasattr(n2, 'color') and n2.color is not None:
graph[n1][n2]['color'] = n2.color.to_hex()
elif hasattr(n1, 'color') and n1.color is not None:
# Cascading color attributes
graph[n1][n2]['color'] = n1.color.to_hex()
n2.color = n1.color
# Copy branch weight value (float) if available
if hasattr(n2, 'width') and n2.width is not None:
graph[n1][n2]['width'] = n2.width
elif hasattr(n1, 'width') and n1.width is not None:
# Cascading width attributes
graph[n1][n2]['width'] = n1.width
n2.width = n1.width
elif networkx.__version__ >= '0.99':
def add_edge(graph, n1, n2):
graph.add_edge(n1, n2, (n2.branch_length or 1.0))
else:
def add_edge(graph, n1, n2):
graph.add_edge(n1, n2)
def build_subgraph(graph, top):
"""Walk down the Tree, building graphs, edges and nodes."""
for clade in top:
graph.add_node(clade.root)
add_edge(graph, top.root, clade.root)
build_subgraph(graph, clade)
if tree.rooted:
G = networkx.DiGraph()
else:
G = networkx.Graph()
G.add_node(tree.root)
build_subgraph(G, tree.root)
return G
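# Illustrative sketch (not part of the original Biopython module, Python 3
# style): convert a tiny Newick tree into a networkx graph. The helper name
# ``_demo_to_networkx`` is hypothetical and assumes NetworkX is installed.
def _demo_to_networkx():
    """Build the graph of a three-taxon tree: 5 clades, 4 branches."""
    from io import StringIO
    from Bio import Phylo
    tree = Phylo.read(StringIO("((A:1,B:2):1,C:3);"), "newick")
    graph = to_networkx(tree)
    return graph.number_of_nodes(), graph.number_of_edges()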
def draw_graphviz(tree, label_func=str, prog='twopi', args='',
node_color='#c0deff', **kwargs):
"""Display a tree or clade as a graph, using the graphviz engine.
Requires NetworkX, matplotlib, Graphviz and either PyGraphviz or pydot.
The third and fourth parameters apply to Graphviz, and the remaining
arbitrary keyword arguments are passed directly to networkx.draw(), which
in turn mostly wraps matplotlib/pylab. See the documentation for Graphviz
and networkx for detailed explanations.
The NetworkX/matplotlib parameters are described in the docstrings for
networkx.draw() and pylab.scatter(), but the most reasonable options to try
are: *alpha, node_color, node_size, node_shape, edge_color, style,
font_size, font_color, font_weight, font_family*
:Parameters:
label_func : callable
A function to extract a label from a node. By default this is str(),
but you can use a different function to select another string
associated with each node. If this function returns None for a node,
no label will be shown for that node.
            The label will also be silently skipped if label_func throws an exception
related to ordinary attribute access (LookupError, AttributeError,
ValueError); all other exception types will still be raised. This
means you can use a lambda expression that simply attempts to look
up the desired value without checking if the intermediate attributes
are available:
>>> Phylo.draw_graphviz(tree, lambda n: n.taxonomies[0].code)
prog : string
The Graphviz program to use when rendering the graph. 'twopi'
behaves the best for large graphs, reliably avoiding crossing edges,
but for moderate graphs 'neato' looks a bit nicer. For small
directed graphs, 'dot' may produce a normal-looking cladogram, but
will cross and distort edges in larger graphs. (The programs 'circo'
and 'fdp' are not recommended.)
args : string
Options passed to the external graphviz program. Normally not
needed, but offered here for completeness.
Example
-------
>>> import pylab
>>> from Bio import Phylo
>>> tree = Phylo.read('ex/apaf.xml', 'phyloxml')
>>> Phylo.draw_graphviz(tree)
>>> pylab.show()
>>> pylab.savefig('apaf.png')
"""
try:
import networkx
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install NetworkX if you want to use to_networkx.")
G = to_networkx(tree)
try:
# NetworkX version 1.8 or later (2013-01-20)
Gi = networkx.convert_node_labels_to_integers(G,
label_attribute='label')
int_labels = {}
for integer, nodeattrs in Gi.node.items():
int_labels[nodeattrs['label']] = integer
except TypeError:
# Older NetworkX versions (before 1.8)
Gi = networkx.convert_node_labels_to_integers(G,
discard_old_labels=False)
int_labels = Gi.node_labels
try:
posi = networkx.graphviz_layout(Gi, prog, args=args)
    except ImportError:
        from Bio import MissingPythonDependencyError
        raise MissingPythonDependencyError(
            "Install PyGraphviz or pydot if you want to use draw_graphviz.")
def get_label_mapping(G, selection):
"""Apply the user-specified node relabeling."""
for node in G.nodes():
if (selection is None) or (node in selection):
try:
label = label_func(node)
if label not in (None, node.__class__.__name__):
yield (node, label)
except (LookupError, AttributeError, ValueError):
pass
if 'nodelist' in kwargs:
labels = dict(get_label_mapping(G, set(kwargs['nodelist'])))
else:
labels = dict(get_label_mapping(G, None))
kwargs['nodelist'] = list(labels.keys())
if 'edge_color' not in kwargs:
kwargs['edge_color'] = [isinstance(e[2], dict) and
e[2].get('color', 'k') or 'k'
for e in G.edges(data=True)]
if 'width' not in kwargs:
kwargs['width'] = [isinstance(e[2], dict) and
e[2].get('width', 1.0) or 1.0
for e in G.edges(data=True)]
posn = dict((n, posi[int_labels[n]]) for n in G)
networkx.draw(G, posn, labels=labels, node_color=node_color, **kwargs)
def draw_ascii(tree, file=None, column_width=80):
"""Draw an ascii-art phylogram of the given tree.
The printed result looks like::
_________ Orange
______________|
| |______________ Tangerine
______________|
| | _________________________ Grapefruit
_| |_________|
| |______________ Pummelo
|
|__________________________________ Apple
:Parameters:
file : file-like object
File handle opened for writing the output drawing. (Default:
standard output)
column_width : int
Total number of text columns used by the drawing.
"""
if file is None:
file = sys.stdout
taxa = tree.get_terminals()
# Some constants for the drawing calculations
max_label_width = max(len(str(taxon)) for taxon in taxa)
drawing_width = column_width - max_label_width - 1
drawing_height = 2 * len(taxa) - 1
def get_col_positions(tree):
"""Create a mapping of each clade to its column position."""
depths = tree.depths()
# If there are no branch lengths, assume unit branch lengths
if not max(depths.values()):
depths = tree.depths(unit_branch_lengths=True)
# Potential drawing overflow due to rounding -- 1 char per tree layer
fudge_margin = int(math.ceil(math.log(len(taxa), 2)))
cols_per_branch_unit = ((drawing_width - fudge_margin)
/ float(max(depths.values())))
return dict((clade, int(blen * cols_per_branch_unit + 1.0))
for clade, blen in depths.items())
def get_row_positions(tree):
positions = dict((taxon, 2 * idx) for idx, taxon in enumerate(taxa))
def calc_row(clade):
for subclade in clade:
if subclade not in positions:
calc_row(subclade)
positions[clade] = ((positions[clade.clades[0]] +
positions[clade.clades[-1]]) // 2)
calc_row(tree.root)
return positions
col_positions = get_col_positions(tree)
row_positions = get_row_positions(tree)
char_matrix = [[' ' for x in range(drawing_width)]
for y in range(drawing_height)]
def draw_clade(clade, startcol):
thiscol = col_positions[clade]
thisrow = row_positions[clade]
# Draw a horizontal line
for col in range(startcol, thiscol):
char_matrix[thisrow][col] = '_'
if clade.clades:
# Draw a vertical line
toprow = row_positions[clade.clades[0]]
botrow = row_positions[clade.clades[-1]]
for row in range(toprow + 1, botrow + 1):
char_matrix[row][thiscol] = '|'
# NB: Short terminal branches need something to stop rstrip()
if (col_positions[clade.clades[0]] - thiscol) < 2:
char_matrix[toprow][thiscol] = ','
# Draw descendents
for child in clade:
draw_clade(child, thiscol + 1)
draw_clade(tree.root, 0)
# Print the complete drawing
for idx, row in enumerate(char_matrix):
line = ''.join(row).rstrip()
# Add labels for terminal taxa in the right margin
if idx % 2 == 0:
line += ' ' + str(taxa[idx // 2])
file.write(line + '\n')
file.write('\n')
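# Illustrative sketch (not part of the original Biopython module, Python 3
# style): render a small Newick tree with ``draw_ascii``. The helper name
# ``_demo_draw_ascii`` is hypothetical.
def _demo_draw_ascii():
    """Print an ascii-art phylogram of a three-taxon tree to stdout."""
    from io import StringIO
    from Bio import Phylo
    tree = Phylo.read(StringIO("((Orange:1,Tangerine:1):1,Apple:2);"), "newick")
    draw_ascii(tree, column_width=60)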
def draw(tree, label_func=str, do_show=True, show_confidence=True,
# For power users
axes=None, branch_labels=None, *args, **kwargs):
"""Plot the given tree using matplotlib (or pylab).
The graphic is a rooted tree, drawn with roughly the same algorithm as
draw_ascii.
Additional keyword arguments passed into this function are used as pyplot
options. The input format should be in the form of:
pyplot_option_name=(tuple), pyplot_option_name=(tuple, dict), or
pyplot_option_name=(dict).
Example using the pyplot options 'axhspan' and 'axvline':
>>> Phylo.draw(tree, axhspan=((0.25, 7.75), {'facecolor':'0.5'}),
... axvline={'x':'0', 'ymin':'0', 'ymax':'1'})
Visual aspects of the plot can also be modified using pyplot's own functions
and objects (via pylab or matplotlib). In particular, the pyplot.rcParams
object can be used to scale the font size (rcParams["font.size"]) and line
width (rcParams["lines.linewidth"]).
:Parameters:
label_func : callable
A function to extract a label from a node. By default this is str(),
but you can use a different function to select another string
associated with each node. If this function returns None for a node,
no label will be shown for that node.
do_show : bool
Whether to show() the plot automatically.
show_confidence : bool
Whether to display confidence values, if present on the tree.
axes : matplotlib/pylab axes
If a valid matplotlib.axes.Axes instance, the phylogram is plotted
in that Axes. By default (None), a new figure is created.
branch_labels : dict or callable
A mapping of each clade to the label that will be shown along the
branch leading to it. By default this is the confidence value(s) of
the clade, taken from the ``confidence`` attribute, and can be
easily toggled off with this function's ``show_confidence`` option.
But if you would like to alter the formatting of confidence values,
or label the branches with something other than confidence, then use
this option.
"""
try:
import matplotlib.pyplot as plt
except ImportError:
try:
import pylab as plt
except ImportError:
from Bio import MissingPythonDependencyError
raise MissingPythonDependencyError(
"Install matplotlib or pylab if you want to use draw.")
import matplotlib.collections as mpcollections
# Arrays that store lines for the plot of clades
horizontal_linecollections = []
vertical_linecollections = []
# Options for displaying branch labels / confidence
def conf2str(conf):
if int(conf) == conf:
return str(int(conf))
return str(conf)
if not branch_labels:
if show_confidence:
def format_branch_label(clade):
if hasattr(clade, 'confidences'):
# phyloXML supports multiple confidences
return '/'.join(conf2str(cnf.value)
for cnf in clade.confidences)
if clade.confidence:
return conf2str(clade.confidence)
return None
else:
def format_branch_label(clade):
return None
elif isinstance(branch_labels, dict):
def format_branch_label(clade):
return branch_labels.get(clade)
else:
assert callable(branch_labels), \
"branch_labels must be either a dict or a callable (function)"
format_branch_label = branch_labels
# Layout
def get_x_positions(tree):
"""Create a mapping of each clade to its horizontal position.
Dict of {clade: x-coord}
"""
depths = tree.depths()
# If there are no branch lengths, assume unit branch lengths
if not max(depths.values()):
depths = tree.depths(unit_branch_lengths=True)
return depths
def get_y_positions(tree):
"""Create a mapping of each clade to its vertical position.
Dict of {clade: y-coord}.
Coordinates are negative, and integers for tips.
"""
maxheight = tree.count_terminals()
# Rows are defined by the tips
heights = dict((tip, maxheight - i)
for i, tip in enumerate(reversed(tree.get_terminals())))
# Internal nodes: place at midpoint of children
def calc_row(clade):
for subclade in clade:
if subclade not in heights:
calc_row(subclade)
# Closure over heights
heights[clade] = (heights[clade.clades[0]] +
heights[clade.clades[-1]]) / 2.0
if tree.root.clades:
calc_row(tree.root)
return heights
x_posns = get_x_positions(tree)
y_posns = get_y_positions(tree)
# The function draw_clade closes over the axes object
if axes is None:
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
elif not isinstance(axes, plt.matplotlib.axes.Axes):
raise ValueError("Invalid argument for axes: %s" % axes)
def draw_clade_lines(use_linecollection=False, orientation='horizontal',
y_here=0, x_start=0, x_here=0, y_bot=0, y_top=0,
color='black', lw='.1'):
"""Create a line with or without a line collection object.
Graphical formatting of the lines representing clades in the plot can be
customized by altering this function.
"""
if (use_linecollection is False and orientation == 'horizontal'):
axes.hlines(y_here, x_start, x_here, color=color, lw=lw)
elif (use_linecollection is True and orientation == 'horizontal'):
horizontal_linecollections.append(mpcollections.LineCollection(
[[(x_start, y_here), (x_here, y_here)]], color=color, lw=lw),)
elif (use_linecollection is False and orientation == 'vertical'):
axes.vlines(x_here, y_bot, y_top, color=color)
elif (use_linecollection is True and orientation == 'vertical'):
vertical_linecollections.append(mpcollections.LineCollection(
[[(x_here, y_bot), (x_here, y_top)]], color=color, lw=lw),)
def draw_clade(clade, x_start, color, lw):
"""Recursively draw a tree, down from the given clade."""
x_here = x_posns[clade]
y_here = y_posns[clade]
# phyloXML-only graphics annotations
if hasattr(clade, 'color') and clade.color is not None:
color = clade.color.to_hex()
if hasattr(clade, 'width') and clade.width is not None:
lw = clade.width * plt.rcParams['lines.linewidth']
# Draw a horizontal line from start to here
draw_clade_lines(use_linecollection=True, orientation='horizontal',
y_here=y_here, x_start=x_start, x_here=x_here, color=color, lw=lw)
# Add node/taxon labels
label = label_func(clade)
if label not in (None, clade.__class__.__name__):
axes.text(x_here, y_here, ' %s' %
label, verticalalignment='center')
# Add label above the branch (optional)
conf_label = format_branch_label(clade)
if conf_label:
axes.text(0.5 * (x_start + x_here), y_here, conf_label,
fontsize='small', horizontalalignment='center')
if clade.clades:
# Draw a vertical line connecting all children
y_top = y_posns[clade.clades[0]]
y_bot = y_posns[clade.clades[-1]]
# Only apply widths to horizontal lines, like Archaeopteryx
draw_clade_lines(use_linecollection=True, orientation='vertical',
x_here=x_here, y_bot=y_bot, y_top=y_top, color=color, lw=lw)
# Draw descendents
for child in clade:
draw_clade(child, x_here, color, lw)
draw_clade(tree.root, 0, 'k', plt.rcParams['lines.linewidth'])
# If line collections were used to create clade lines, here they are added
# to the pyplot plot.
for i in horizontal_linecollections:
axes.add_collection(i)
for i in vertical_linecollections:
axes.add_collection(i)
# Aesthetics
if hasattr(tree, 'name') and tree.name:
axes.set_title(tree.name)
axes.set_xlabel('branch length')
axes.set_ylabel('taxa')
# Add margins around the tree to prevent overlapping the axes
xmax = max(x_posns.values())
axes.set_xlim(-0.05 * xmax, 1.25 * xmax)
# Also invert the y-axis (origin at the top)
# Add a small vertical margin, but avoid including 0 and N+1 on the y axis
axes.set_ylim(max(y_posns.values()) + 0.8, 0.2)
# Parse and process key word arguments as pyplot options
for key, value in kwargs.items():
try:
# Check that the pyplot option input is iterable, as required
[i for i in value]
except TypeError:
raise ValueError('Keyword argument "%s=%s" is not in the format '
'pyplot_option_name=(tuple), pyplot_option_name=(tuple, dict),'
' or pyplot_option_name=(dict) '
% (key, value))
if isinstance(value, dict):
getattr(plt, str(key))(**dict(value))
elif not (isinstance(value[0], tuple)):
getattr(plt, str(key))(*value)
elif (isinstance(value[0], tuple)):
getattr(plt, str(key))(*value[0], **dict(value[1]))
if do_show:
plt.show()
| gpl-2.0 |
jpautom/scikit-learn | benchmarks/bench_plot_omp_lars.py | 266 | 4447 | """Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but has a fat, infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
n_informative = n_features / 10
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (with Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (without Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp[i_f, i_s] = delta
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
if __name__ == '__main__':
samples_range = np.linspace(1000, 5000, 5).astype(np.int)
features_range = np.linspace(1000, 5000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.values())
import pylab as pl
fig = pl.figure('scikit-learn OMP vs. LARS benchmark results')
for i, (label, timings) in enumerate(sorted(results.iteritems())):
ax = fig.add_subplot(1, 2, i)
vmax = max(1 - timings.min(), -1 + timings.max())
pl.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
ax.set_xticklabels([''] + map(str, samples_range))
ax.set_yticklabels([''] + map(str, features_range))
pl.xlabel('n_samples')
pl.ylabel('n_features')
pl.title(label)
pl.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = pl.axes([0.1, 0.08, 0.8, 0.06])
pl.colorbar(cax=ax, orientation='horizontal')
pl.show()
| bsd-3-clause |
FourthCohortAwesome/NightThree | exercise_gd.py | 1 | 1821 | import pandas as pd
import csv
def length_3(data):
"""
    This function reads a .csv file and determines corrections
:param data: csv file
:return: write a new file without missing values
"""
df = pd.read_csv(data)
headings = ['One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven']
row = ['1', '2', '3', '4', '5', '6', '7']
for line in df.index:
df.loc[line] = row
df.to_csv('Dest_gd_alt3.tsv', sep='\t', index=False)
def length_7(data):
"""
    This function reads a .csv file and determines corrections
:param data: csv file
:return: write a new file without missing values
"""
df = pd.read_csv(data)
headings = ['One', 'Two', 'Three', 'Four']
row = ['1', '2', '3', '4']
for line in df.index:
df.loc[line] = row
df.to_csv('Dest_gd_source.tsv', sep='\t', index=False)
def length_9(data):
"""
    This function reads a .csv file and determines corrections
:param data: csv file
:return: write a new file without missing values
"""
df = pd.read_csv('alt2.csv', sep=':')
row1 = ['1', '3']
row2 = ['2', '4']
row3 = ['3', '6']
for i, row in df.iterrows():
if row['ONE'] == 1 or row['TWO'] == 3:
df.loc[i] = row1
elif row['ONE'] == 2 or row['TWO'] == 4:
df.loc[i] = row2
else:
df.loc[i] = row3
df.to_csv('Dest_gd_alt2.tsv', sep='\t', index=False)
def parse_file(file):
"""
This function takes a file and determines which
function will correct the file.
:param file: csv file
:return: None
"""
df = pd.read_csv(file)
length = len(df)
if length == 3:
length_3(file)
if length == 7:
length_7(file)
else:
length_9(df)
parse_file('source.csv')
| mit |
rochefort-lab/fissa | fissa/neuropil.py | 1 | 9035 | """
Functions for removal of neuropil from calcium signals.
Authors:
- Sander W Keemink <swkeemink@scimail.eu>
- Scott C Lowe <scott.code.lowe@gmail.com>
Created:
2015-05-15
"""
import numpy as np
import numpy.random as rand
import sklearn.decomposition
def separate(
S,
sep_method="nmf",
n=None,
maxiter=10000,
tol=1e-4,
random_state=892,
maxtries=10,
W0=None,
H0=None,
alpha=0.1,
):
"""
Find independent signals, sorted by matching score against the first input signal.
Parameters
----------
S : :term:`array_like` shaped (signals, observations)
2-d array containing mixed input signals.
Each column of `S` should be a different signal, and each row an
observation of the signals. For ``S[i, j]``, ``j`` is a signal, and
``i`` is an observation.
The first column, ``j = 0``, is considered the primary signal and the
one for which we will try to extract a decontaminated equivalent.
sep_method : {"ica", "nmf"}
Which source separation method to use, either ICA or NMF.
- ``"ica"``: Independent Component Analysis
- ``"nmf"``: Non-negative Matrix Factorization
n : int, optional
How many components to estimate. If ``None`` (default), for the NMF
method, ``n`` is the number of input signals; for the ICA method,
we use PCA to estimate how many components would explain at least 99%
of the variance and adopt this value for ``n``.
maxiter : int, optional
Number of maximally allowed iterations. Default is ``10000``.
tol : float, optional
Error tolerance for termination. Default is ``1e-4``.
random_state : int or None, optional
Initial state for the random number generator. Set to ``None`` to use
the numpy.random default. Default seed is ``892``.
maxtries : int, optional
Maximum number of tries before algorithm should terminate.
Default is ``10``.
W0 : :term:`array_like`, optional
Optional starting condition for ``W`` in NMF algorithm.
(Ignored when using the ICA method.)
H0 : :term:`array_like`, optional
Optional starting condition for ``H`` in NMF algorithm.
(Ignored when using the ICA method.)
alpha : float, optional
        Sparsity regularization weight for NMF algorithm. Set to zero to
remove regularization. Default is ``0.1``.
(Ignored when using the ICA method.)
Returns
-------
S_sep : :class:`numpy.ndarray` shaped (signals, observations)
The raw separated traces.
S_matched : :class:`numpy.ndarray` shaped (signals, observations)
The separated traces matched to the primary signal, in order
of matching quality (see Notes below).
A_sep : :class:`numpy.ndarray` shaped (signals, signals)
Mixing matrix.
convergence : dict
Metadata for the convergence result, with the following keys and
values:
``convergence["random_state"]``
Seed for estimator initiation.
``convergence["iterations"]``
Number of iterations needed for convergence.
``convergence["max_iterations"]``
Maximum number of iterations allowed.
``convergence["converged"]``
Whether the algorithm converged or not (:class:`bool`).
Notes
-----
To identify which independent signal matches the primary signal best,
we first normalize the columns in the output mixing matrix `A` such that
``sum(A[:, separated]) = 1``. This results in a relative score of how
strongly each raw signal contributes to each separated signal. From this,
we find the separated signal to which the ROI trace makes the largest
(relative) contribution.
See Also
--------
sklearn.decomposition.NMF, sklearn.decomposition.FastICA
"""
# TODO for edge cases, reduce the number of npil regions according to
# possible orientations
# TODO split into several functions. Maybe turn into a class.
# Ensure array_like input is a numpy.ndarray
S = np.asarray(S)
# normalize
median = np.median(S)
S /= median
# estimate number of signals to find, if not given
if n is None:
if sep_method.lower() == "ica":
# Perform PCA
pca = sklearn.decomposition.PCA(whiten=False)
pca.fit(S.T)
# find number of components with at least x percent explained var
n = sum(pca.explained_variance_ratio_ > 0.01)
else:
n = S.shape[0]
for i_try in range(maxtries):
if sep_method.lower() in {"ica", "fastica"}:
# Use sklearn's implementation of ICA.
# Make an instance of the FastICA class. We can do whitening of
# the data now.
estimator = sklearn.decomposition.FastICA(
n_components=n,
whiten=True,
max_iter=maxiter,
tol=tol,
random_state=random_state,
)
# Perform ICA and find separated signals
S_sep = estimator.fit_transform(S.T)
elif sep_method.lower() in {"nmf", "nnmf"}:
# Make an instance of the sklearn NMF class
estimator = sklearn.decomposition.NMF(
init="nndsvdar" if W0 is None and H0 is None else "custom",
n_components=n,
alpha=alpha,
l1_ratio=0.5,
tol=tol,
max_iter=maxiter,
random_state=random_state,
)
# Perform NMF and find separated signals
S_sep = estimator.fit_transform(S.T, W=W0, H=H0)
elif hasattr(sklearn.decomposition, sep_method):
print(
"Using ad hoc signal decomposition method"
" sklearn.decomposition.{}. Only NMF and ICA are officially"
" supported.".format(sep_method)
)
# Load up arbitrary decomposition algorithm from sklearn
estimator = getattr(sklearn.decomposition, sep_method)(
n_components=n,
tol=tol,
max_iter=maxiter,
random_state=random_state,
)
S_sep = estimator.fit_transform(S.T)
else:
raise ValueError('Unknown separation method "{}".'.format(sep_method))
# check if max number of iterations was reached
if estimator.n_iter_ < maxiter:
print(
"{} converged after {} iterations.".format(
repr(estimator).split("(")[0], estimator.n_iter_
)
)
break
print(
"Attempt {} failed to converge at {} iterations.".format(
i_try + 1, estimator.n_iter_
)
)
if i_try + 1 < maxtries:
print("Trying a new random state.")
# Change to a new random_state
if random_state is not None:
random_state = (random_state + 1) % 2 ** 32
if estimator.n_iter_ == maxiter:
        print(
            "Warning: failed to converge within {} iterations, even after"
            " {} attempts with different random states.".format(
                estimator.n_iter_, i_try + 1
            )
        )
if hasattr(estimator, "mixing_"):
A_sep = estimator.mixing_
else:
A_sep = estimator.components_.T
# Normalize the columns in A so that sum(column)=1 (can be done in one line
# too).
# This results in a relative score of how strongly each separated signal
# is represented in each ROI signal.
#
# Our mixing matrix is shaped (input/raw, output/separated). For each
# separated (output) signal, we find how much weighting each input (raw)
# signal contributes to that separated signal, relative to the other input
# signals.
A = abs(np.copy(A_sep))
for j in range(n):
if np.sum(A[:, j]) != 0:
A[:, j] /= np.sum(A[:, j])
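    # For example, an absolute-weight column [2, 1, 1] becomes
    # [0.5, 0.25, 0.25]: the first raw trace contributes half of the weight
    # for that separated signal.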
# get the scores for the somatic signal
scores = A[0, :]
# Rank the separated signals in descending ordering of their score.
# The separated signal to which the somatic signal makes up the largest
# contribution is sorted first.
order = np.argsort(scores)[::-1]
# Order the signals according to their scores, and scale the magnitude
# back to the original magnitude.
S_matched = np.zeros_like(S_sep)
for j in range(n):
S_matched[:, j] = A_sep[0, order[j]] * S_sep[:, order[j]]
# save the algorithm convergence info
convergence = {}
convergence["max_iterations"] = maxiter
convergence["random_state"] = random_state
convergence["iterations"] = estimator.n_iter_
convergence["converged"] = estimator.n_iter_ != maxiter
# scale back to raw magnitudes
S_matched *= median
S *= median
return S_sep.T, S_matched.T, A_sep, convergence
| gpl-3.0 |
loli/sklearn-ensembletrees | benchmarks/bench_sgd_regression.py | 14 | 4594 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
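            # keep the total number of SGD weight updates roughly constant
            # (about 10 ** 4) regardless of the training set size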
clf = SGDRegressor(alpha=alpha, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| bsd-3-clause |
jbogaardt/chainladder-python | chainladder/tails/bondy.py | 1 | 5896 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
import numpy as np
import pandas as pd
from scipy.optimize import least_squares
from chainladder.tails import TailBase
from chainladder.development import DevelopmentBase, Development
class TailBondy(TailBase):
"""Estimator for the Generalized Bondy tail factor.
.. versionadded:: 0.6.0
Parameters
----------
earliest_age : int
The earliest age from which the Bondy exponent is to be calculated.
Defaults to earliest available in the Triangle. Any available development
age can be used.
attachment_age: int (default=None)
The age at which to attach the fitted curve. If None, then the latest
age is used. Measures of variability from original ``ldf_`` are retained
when being used in conjunction with the MackChainladder method.
Attributes
----------
ldf_ : Triangle
ldf with tail applied.
cdf_ : Triangle
cdf with tail applied.
tail_ : DataFrame
Point estimate of tail at latest maturity available in the Triangle.
b_ : DataFrame
The Bondy exponent
earliest_ldf_ : DataFrame
The LDF associated with the ``earliest_age`` pick.
sigma_ : Triangle
sigma with tail factor applied.
std_err_ : Triangle
std_err with tail factor applied
earliest_ldf_ : DataFrame
Based on the ``earliest_age`` selection, this shows the seed ``ldf_`` used
in fitting the Bondy exponent.
See also
--------
TailCurve
"""
def __init__(self, earliest_age=None, attachment_age=None):
self.earliest_age = earliest_age
self.attachment_age = attachment_age
def fit(self, X, y=None, sample_weight=None):
"""Fit the model with X.
Parameters
----------
X : Triangle-like
Set of LDFs to which the tail will be applied.
y : Ignored
sample_weight : Ignored
Returns
-------
self : object
Returns the instance itself.
"""
if self.attachment_age and self.attachment_age < self.earliest_age:
raise ValueError("attachment_age must not be before earliest_age.")
backend = X.array_backend
if X.array_backend == "cupy":
X = X.set_backend("numpy", deep=True)
else:
X = X.set_backend("numpy")
xp = X.get_array_module()
super().fit(X, y, sample_weight)
if self.earliest_age is None:
earliest_age = X.ddims[0]
else:
earliest_age = X.ddims[
int(
self.earliest_age / ({"Y": 12, "Q": 3, "M": 1}[X.development_grain])
)
- 1
]
attachment_age = self.attachment_age if self.attachment_age else X.ddims[-2]
obj = Development().fit_transform(X) if "ldf_" not in X else X
b_optimized = []
initial = xp.where(obj.ddims == earliest_age)[0][0] if earliest_age else 0
for num in range(len(obj.vdims)):
b0 = (xp.ones(obj.shape[0]) * 0.5)[:, None]
data = xp.log(obj.ldf_.values[:, num, 0, initial:])
b0 = xp.concatenate((b0, data[..., 0:1]), axis=1)
b_optimized.append(
least_squares(
TailBondy._solver, x0=b0.flatten(), kwargs={"data": data, "xp": xp}
).x
)
self.b_ = xp.concatenate(
[item.reshape(-1, 2)[:, 0:1] for item in b_optimized], axis=1
)[..., None, None]
self.earliest_ldf_ = xp.exp(
xp.concatenate(
[item.reshape(-1, 2)[:, 1:2] for item in b_optimized], axis=1
)[..., None, None]
)
if sum(X.ddims > earliest_age) > 1:
tail = xp.exp(self.earliest_ldf_ * self.b_ ** (len(obj.ldf_.ddims) - 1))
else:
tail = self.ldf_.values[..., 0, initial]
tail = tail ** (self.b_ / (1 - self.b_))
f0 = self.ldf_.values[..., 0:1, initial : initial + 1]
fitted = f0 ** (
self.b_ ** (np.arange(sum(X.ddims >= earliest_age))[None, None, None, :])
)
fitted = xp.concatenate(
(fitted, fitted[..., -1:] ** (self.b_ / (1 - self.b_))), axis=-1
)
fitted = xp.repeat(fitted, self.ldf_.shape[2], axis=2)
rows = X.index.set_index(X.key_labels).index
self.b_ = pd.DataFrame(self.b_[..., 0, 0], index=rows, columns=X.vdims)
self.earliest_ldf_ = pd.DataFrame(
self.earliest_ldf_[..., 0, 0], index=rows, columns=X.vdims
)
self.ldf_.values = xp.concatenate(
(
self.ldf_.values[..., : sum(X.ddims <= attachment_age)],
fitted[..., -sum(X.ddims >= attachment_age) :],
),
axis=-1,
)
self._get_tail_stats(obj)
if backend == "cupy":
self = self.set_backend(backend, inplace=True, deep=True)
return self
def transform(self, X):
"""Transform X.
Parameters
----------
X : Triangle
Triangle must contain the ``ldf_`` development attribute.
Returns
-------
X_new : Triangle
New Triangle with tail factor applied to its development
attributes.
"""
X_new = super().transform(X)
X_new.b_ = self.b_
X_new.earliest_ldf_ = self.earliest_ldf_
return X_new
@staticmethod
def _solver(b, data, xp):
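        # Residuals for the Generalized Bondy model: with b[:, 0] the Bondy
        # exponent B and b[:, 1] the log of the seed LDF, the model assumes
        # log(LDF_k) ~= log(LDF_0) * B ** k, i.e. development factors decay
        # geometrically on the log scale.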
b = b.reshape(-1, 2)
arange = xp.repeat(xp.arange(data.shape[-1])[None, :], data.shape[0], 0)
out = data - (b[:, 1:2]) * b[:, 0:1] ** (arange)
return out.flatten()
| mit |
meduz/scikit-learn | examples/model_selection/grid_search_text_feature_extraction.py | 99 | 4163 |
"""
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get all 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
from __future__ import print_function
from pprint import pprint
from time import time
import logging
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()
###############################################################################
# define a pipeline combining a text feature extractor with a simple
# classifier
pipeline = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', SGDClassifier()),
])
# uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way
parameters = {
'vect__max_df': (0.5, 0.75, 1.0),
#'vect__max_features': (None, 5000, 10000, 50000),
'vect__ngram_range': ((1, 1), (1, 2)), # unigrams or bigrams
#'tfidf__use_idf': (True, False),
#'tfidf__norm': ('l1', 'l2'),
'clf__alpha': (0.00001, 0.000001),
'clf__penalty': ('l2', 'elasticnet'),
#'clf__n_iter': (10, 50, 80),
}
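# With only the uncommented settings above, the grid has 3 * 2 * 2 * 2 = 24
# parameter combinations; each one is refit per CV fold (e.g. 72 fits with
# 3-fold cross-validation).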
if __name__ == "__main__":
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
print("Performing grid search...")
print("pipeline:", [name for name, _ in pipeline.steps])
print("parameters:")
pprint(parameters)
t0 = time()
grid_search.fit(data.data, data.target)
print("done in %0.3fs" % (time() - t0))
print()
print("Best score: %0.3f" % grid_search.best_score_)
print("Best parameters set:")
best_parameters = grid_search.best_estimator_.get_params()
for param_name in sorted(parameters.keys()):
print("\t%s: %r" % (param_name, best_parameters[param_name]))
| bsd-3-clause |
ElDeveloper/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveforms 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, which characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
so the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
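    # Normalize by the largest inter-class distance so that the matrices for
    # the different metrics are plotted on a comparable [0, 1] scale.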
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
GoogleCloudPlatform/professional-services | tools/ml-auto-eda/ml_eda/analysis/quantitative_analyzer.py | 1 | 5502 | # Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Holds the main logics of quantitative analysis."""
from __future__ import absolute_import
from __future__ import print_function
from typing import Dict
import numpy as np
import pandas as pd
from scipy.stats import chi2, f
from ml_eda.preprocessing.analysis_query import query_constants
from ml_eda.analysis import utils
class QuantitativeAnalyzer:
"""Holds the main logics of quantitative analysis."""
@staticmethod
def anova_one_way(anova_df: pd.DataFrame) -> float:
"""Calculate the F-statistic over an ANOVA dataframe.
Args:
anova_df: (pandas.DataFrame), the pre-aggregated result from
bigquery. The header of the DataFrame is:
[
{anova_categorical},
{anova_count_per_class},
{anova_mean_per_class},
{anova_variance_per_class},
{anova_df_group},
{anova_df_error}
]
Returns:
P-value, (float)
"""
anova_mean_overall = anova_df[query_constants.ANOVA_MEAN_PER_CLASS].mean()
ssg = ((anova_df[
query_constants.ANOVA_MEAN_PER_CLASS] - anova_mean_overall) ** 2 *
anova_df[query_constants.ANOVA_COUNT_PER_CLASS]).sum()
sse = (anova_df[query_constants.ANOVA_VARIANCE_PER_CLASS] *
(anova_df[query_constants.ANOVA_COUNT_PER_CLASS] - 1)).sum()
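    # SSG: between-group (explained) sum of squares; SSE: within-group
    # (residual) sum of squares; F = (SSG / df_group) / (SSE / df_error).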
df_group = anova_df[query_constants.ANOVA_DF_GROUP][0]
df_error = anova_df[query_constants.ANOVA_DF_ERROR][0]
inter_class_means_variation = ssg / df_group
intra_class_variation = sse / df_error
f_result = inter_class_means_variation / intra_class_variation
# given f-stats, find the p value
f_rv = f(df_group, df_error)
p_value = 1 - f_rv.cdf(f_result)
return p_value
@staticmethod
def chi_square(chi_square_df: pd.DataFrame) -> float:
"""Perform chi-square statistic test computation over an pre-aggregated
DataFrame.
Args:
chi_square_df: (pd.DataFrame), the pre-aggregated result from
bigquery.
Returns:
float
The DataFrame is in the format of
Col1 Col2 frequency
0 co1_v1 co2_v1 5
1 co1_v1 co2_v2 8602
2 co1_v1 co2_v3 707
3 co1_v2 co2_v1 4
4 co1_v2 co2_v2 42194
4 co1_v2 co2_v3 42194
"""
index_name, column_name, _ = chi_square_df.columns
# re-organize the dataframe
pv_df = chi_square_df.pivot_table(index=index_name,
columns=column_name,
values='frequency',
fill_value=0)
# total count
total = pv_df.sum().sum()
# compute the occurrence probability of each unique value of indexes
column_prob = pv_df.sum() / total
row_prob = pv_df.sum(axis=1) / total
# compute the expected occurrence table
expected_df = pd.DataFrame(np.outer(row_prob, column_prob) * total,
index=row_prob.index,
columns=column_prob.index,
dtype=np.int)
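    # Under independence the expected count in cell (i, j) is
    # total * P(row_i) * P(col_j); the statistic below is
    # sum((observed - expected) ** 2 / expected).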
# compute chi-square stats
diff_df = expected_df - pv_df
# plus one here is for stability
chi_square_stats = (np.power(diff_df, 2) / (
expected_df + 1)).sum().sum()
# given chi-square stats, find the p value
dof = (len(column_prob) - 1) * (len(row_prob) - 1)
chi_square_rv = chi2(df=dof)
p_value = 1 - chi_square_rv.cdf(chi_square_stats)
return p_value
@staticmethod
def information_gain(ig_df: pd.DataFrame) -> float:
"""Compute information gain over an pre-aggregated DataFrame.
Args:
ig_df: (pd.DataFrame), the pre-aggregated result from
bigquery.
Returns:
float
The DataFrame is in the format of
Col1 Col2 frequency
0 co1_v1 co2_v1 5
1 co1_v1 co2_v2 8602
2 co1_v1 co2_v3 707
3 co1_v2 co2_v1 4
4 co1_v2 co2_v2 42194
4 co1_v2 co2_v3 42194
"""
index_name, column_name, _ = ig_df.columns
entropy = utils.compute_entropy(
frequency_series=ig_df.groupby(index_name).sum()['frequency'])
condition_entropy = utils.compute_conditional_entropy(
aggregate_df=ig_df,
condition_column=column_name,
entropy_column=index_name
)
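    # Information gain is the entropy reduction of the index column once the
    # condition column is known: IG = H(index) - H(index | column).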
information_gain = entropy - condition_entropy
return information_gain
@staticmethod
def pearson_correlation(corr_df: pd.DataFrame) -> Dict:
"""The entire pearson correlation is done in BQ, therefore, only
DataFrame to dict conversion is done here
Args:
corr_df: (pd.DataFrame), the computed correlation result from
bigquery.
Returns:
dict
"""
return corr_df.iloc[0, :].to_dict()
| apache-2.0 |
gfyoung/pandas | pandas/tests/test_sorting.py | 2 | 18315 | from collections import defaultdict
from datetime import datetime
from itertools import product
import numpy as np
import pytest
from pandas import DataFrame, MultiIndex, Series, array, concat, merge
import pandas._testing as tm
from pandas.core.algorithms import safe_sort
import pandas.core.common as com
from pandas.core.sorting import (
decons_group_index,
get_group_index,
is_int64_overflow_possible,
lexsort_indexer,
nargsort,
)
class TestSorting:
@pytest.mark.slow
def test_int64_overflow(self):
B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500)))
A = np.arange(2500)
df = DataFrame(
{
"A": A,
"B": B,
"C": A,
"D": B,
"E": A,
"F": B,
"G": A,
"H": B,
"values": np.random.randn(2500),
}
)
lg = df.groupby(["A", "B", "C", "D", "E", "F", "G", "H"])
rg = df.groupby(["H", "G", "F", "E", "D", "C", "B", "A"])
left = lg.sum()["values"]
right = rg.sum()["values"]
exp_index, _ = left.index.sortlevel()
tm.assert_index_equal(left.index, exp_index)
exp_index, _ = right.index.sortlevel(0)
tm.assert_index_equal(right.index, exp_index)
tups = list(map(tuple, df[["A", "B", "C", "D", "E", "F", "G", "H"]].values))
tups = com.asarray_tuplesafe(tups)
expected = df.groupby(tups).sum()["values"]
for k, v in expected.items():
assert left[k] == right[k[::-1]]
assert left[k] == v
assert len(left) == len(right)
@pytest.mark.arm_slow
def test_int64_overflow_moar(self):
# GH9096
values = range(55109)
data = DataFrame.from_dict({"a": values, "b": values, "c": values, "d": values})
grouped = data.groupby(["a", "b", "c", "d"])
assert len(grouped) == len(values)
arr = np.random.randint(-1 << 12, 1 << 12, (1 << 15, 5))
i = np.random.choice(len(arr), len(arr) * 4)
        arr = np.vstack((arr, arr[i])) # add some duplicate rows
i = np.random.permutation(len(arr))
arr = arr[i] # shuffle rows
df = DataFrame(arr, columns=list("abcde"))
df["jim"], df["joe"] = np.random.randn(2, len(df)) * 10
gr = df.groupby(list("abcde"))
# verify this is testing what it is supposed to test!
assert is_int64_overflow_possible(gr.grouper.shape)
# manually compute groupings
jim, joe = defaultdict(list), defaultdict(list)
for key, a, b in zip(map(tuple, arr), df["jim"], df["joe"]):
jim[key].append(a)
joe[key].append(b)
assert len(gr) == len(jim)
mi = MultiIndex.from_tuples(jim.keys(), names=list("abcde"))
def aggr(func):
f = lambda a: np.fromiter(map(func, a), dtype="f8")
arr = np.vstack((f(jim.values()), f(joe.values()))).T
res = DataFrame(arr, columns=["jim", "joe"], index=mi)
return res.sort_index()
tm.assert_frame_equal(gr.mean(), aggr(np.mean))
tm.assert_frame_equal(gr.median(), aggr(np.median))
def test_lexsort_indexer(self):
keys = [[np.nan] * 5 + list(range(100)) + [np.nan] * 5]
# orders=True, na_position='last'
result = lexsort_indexer(keys, orders=True, na_position="last")
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=True, na_position='first'
result = lexsort_indexer(keys, orders=True, na_position="first")
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=False, na_position='last'
result = lexsort_indexer(keys, orders=False, na_position="last")
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
# orders=False, na_position='first'
result = lexsort_indexer(keys, orders=False, na_position="first")
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp))
def test_nargsort(self):
# np.argsort(items) places NaNs last
items = [np.nan] * 5 + list(range(100)) + [np.nan] * 5
# np.argsort(items2) may not place NaNs first
items2 = np.array(items, dtype="O")
# mergesort is the most difficult to get right because we want it to be
# stable.
# According to numpy/core/tests/test_multiarray, """The number of
# sorted items must be greater than ~50 to check the actual algorithm
# because quick and merge sort fall over to insertion sort for small
# arrays."""
# mergesort, ascending=True, na_position='last'
result = nargsort(items, kind="mergesort", ascending=True, na_position="last")
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='first'
result = nargsort(items, kind="mergesort", ascending=True, na_position="first")
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='last'
result = nargsort(items, kind="mergesort", ascending=False, na_position="last")
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='first'
result = nargsort(items, kind="mergesort", ascending=False, na_position="first")
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='last'
result = nargsort(items2, kind="mergesort", ascending=True, na_position="last")
exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=True, na_position='first'
result = nargsort(items2, kind="mergesort", ascending=True, na_position="first")
exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='last'
result = nargsort(items2, kind="mergesort", ascending=False, na_position="last")
exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
# mergesort, ascending=False, na_position='first'
result = nargsort(
items2, kind="mergesort", ascending=False, na_position="first"
)
exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1))
tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False)
class TestMerge:
@pytest.mark.slow
def test_int64_overflow_issues(self):
# #2690, combinatorial explosion
df1 = DataFrame(np.random.randn(1000, 7), columns=list("ABCDEF") + ["G1"])
df2 = DataFrame(np.random.randn(1000, 7), columns=list("ABCDEF") + ["G2"])
# it works!
result = merge(df1, df2, how="outer")
assert len(result) == 2000
low, high, n = -1 << 10, 1 << 10, 1 << 20
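        # values in [-1024, 1024) over 7 key columns and ~1M rows, so the
        # full cross-product of key values cannot be encoded in int64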
left = DataFrame(np.random.randint(low, high, (n, 7)), columns=list("ABCDEFG"))
left["left"] = left.sum(axis=1)
# one-2-one match
i = np.random.permutation(len(left))
right = left.iloc[i].copy()
right.columns = right.columns[:-1].tolist() + ["right"]
right.index = np.arange(len(right))
right["right"] *= -1
out = merge(left, right, how="outer")
assert len(out) == len(left)
tm.assert_series_equal(out["left"], -out["right"], check_names=False)
result = out.iloc[:, :-2].sum(axis=1)
tm.assert_series_equal(out["left"], result, check_names=False)
assert result.name is None
out.sort_values(out.columns.tolist(), inplace=True)
out.index = np.arange(len(out))
for how in ["left", "right", "outer", "inner"]:
tm.assert_frame_equal(out, merge(left, right, how=how, sort=True))
# check that left merge w/ sort=False maintains left frame order
out = merge(left, right, how="left", sort=False)
tm.assert_frame_equal(left, out[left.columns.tolist()])
out = merge(right, left, how="left", sort=False)
tm.assert_frame_equal(right, out[right.columns.tolist()])
# one-2-many/none match
n = 1 << 11
left = DataFrame(
np.random.randint(low, high, (n, 7)).astype("int64"),
columns=list("ABCDEFG"),
)
# confirm that this is checking what it is supposed to check
shape = left.apply(Series.nunique).values
assert is_int64_overflow_possible(shape)
# add duplicates to left frame
left = concat([left, left], ignore_index=True)
right = DataFrame(
np.random.randint(low, high, (n // 2, 7)).astype("int64"),
columns=list("ABCDEFG"),
)
# add duplicates & overlap with left to the right frame
i = np.random.choice(len(left), n)
right = concat([right, right, left.iloc[i]], ignore_index=True)
left["left"] = np.random.randn(len(left))
right["right"] = np.random.randn(len(right))
# shuffle left & right frames
i = np.random.permutation(len(left))
left = left.iloc[i].copy()
left.index = np.arange(len(left))
i = np.random.permutation(len(right))
right = right.iloc[i].copy()
right.index = np.arange(len(right))
# manually compute outer merge
ldict, rdict = defaultdict(list), defaultdict(list)
for idx, row in left.set_index(list("ABCDEFG")).iterrows():
ldict[idx].append(row["left"])
for idx, row in right.set_index(list("ABCDEFG")).iterrows():
rdict[idx].append(row["right"])
vals = []
for k, lval in ldict.items():
rval = rdict.get(k, [np.nan])
for lv, rv in product(lval, rval):
vals.append(
k
+ (
lv,
rv,
)
)
for k, rval in rdict.items():
if k not in ldict:
for rv in rval:
vals.append(
k
+ (
np.nan,
rv,
)
)
def align(df):
df = df.sort_values(df.columns.tolist())
df.index = np.arange(len(df))
return df
def verify_order(df):
kcols = list("ABCDEFG")
tm.assert_frame_equal(
df[kcols].copy(), df[kcols].sort_values(kcols, kind="mergesort")
)
out = DataFrame(vals, columns=list("ABCDEFG") + ["left", "right"])
out = align(out)
jmask = {
"left": out["left"].notna(),
"right": out["right"].notna(),
"inner": out["left"].notna() & out["right"].notna(),
"outer": np.ones(len(out), dtype="bool"),
}
for how in ["left", "right", "outer", "inner"]:
mask = jmask[how]
frame = align(out[mask].copy())
assert mask.all() ^ mask.any() or how == "outer"
for sort in [False, True]:
res = merge(left, right, how=how, sort=sort)
if sort:
verify_order(res)
# as in GH9092 dtypes break with outer/right join
tm.assert_frame_equal(
frame, align(res), check_dtype=how not in ("right", "outer")
)
def test_decons():
def testit(codes_list, shape):
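        # round trip: get_group_index packs the label arrays into a single
        # int64 group index, and decons_group_index must recover them exactly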
group_index = get_group_index(codes_list, shape, sort=True, xnull=True)
codes_list2 = decons_group_index(group_index, shape)
for a, b in zip(codes_list, codes_list2):
tm.assert_numpy_array_equal(a, b)
shape = (4, 5, 6)
codes_list = [
np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100).astype(np.int64),
np.tile([0, 2, 4, 3, 0, 1, 2, 3], 100).astype(np.int64),
np.tile([5, 1, 0, 2, 3, 0, 5, 4], 100).astype(np.int64),
]
testit(codes_list, shape)
shape = (10000, 10000)
codes_list = [
np.tile(np.arange(10000, dtype=np.int64), 5),
np.tile(np.arange(10000, dtype=np.int64), 5),
]
testit(codes_list, shape)
class TestSafeSort:
def test_basic_sort(self):
values = [3, 1, 2, 0, 4]
result = safe_sort(values)
expected = np.array([0, 1, 2, 3, 4])
tm.assert_numpy_array_equal(result, expected)
values = list("baaacb")
result = safe_sort(values)
expected = np.array(list("aaabbc"), dtype="object")
tm.assert_numpy_array_equal(result, expected)
values = []
result = safe_sort(values)
expected = np.array([])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("verify", [True, False])
def test_codes(self, verify):
values = [3, 1, 2, 0, 4]
expected = np.array([0, 1, 2, 3, 4])
codes = [0, 1, 1, 2, 3, 0, -1, 4]
result, result_codes = safe_sort(values, codes, verify=verify)
expected_codes = np.array([3, 1, 1, 2, 0, 3, -1, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_codes, expected_codes)
# na_sentinel
codes = [0, 1, 1, 2, 3, 0, 99, 4]
result, result_codes = safe_sort(values, codes, na_sentinel=99, verify=verify)
expected_codes = np.array([3, 1, 1, 2, 0, 3, 99, 4], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_codes, expected_codes)
codes = []
result, result_codes = safe_sort(values, codes, verify=verify)
expected_codes = np.array([], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_codes, expected_codes)
@pytest.mark.parametrize("na_sentinel", [-1, 99])
def test_codes_out_of_bound(self, na_sentinel):
values = [3, 1, 2, 0, 4]
expected = np.array([0, 1, 2, 3, 4])
# out of bound indices
codes = [0, 101, 102, 2, 3, 0, 99, 4]
result, result_codes = safe_sort(values, codes, na_sentinel=na_sentinel)
expected_codes = np.array(
[3, na_sentinel, na_sentinel, 2, 0, 3, na_sentinel, 4], dtype=np.intp
)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_codes, expected_codes)
def test_mixed_integer(self):
values = np.array(["b", 1, 0, "a", 0, "b"], dtype=object)
result = safe_sort(values)
expected = np.array([0, 0, 1, "a", "b", "b"], dtype=object)
tm.assert_numpy_array_equal(result, expected)
values = np.array(["b", 1, 0, "a"], dtype=object)
codes = [0, 1, 2, 3, 0, -1, 1]
result, result_codes = safe_sort(values, codes)
expected = np.array([0, 1, "a", "b"], dtype=object)
expected_codes = np.array([3, 1, 0, 2, 3, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
tm.assert_numpy_array_equal(result_codes, expected_codes)
def test_mixed_integer_from_list(self):
values = ["b", 1, 0, "a", 0, "b"]
result = safe_sort(values)
expected = np.array([0, 0, 1, "a", "b", "b"], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_unsortable(self):
# GH 13714
arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object)
msg = (
"unorderable types: .* [<>] .*"
"|" # the above case happens for numpy < 1.14
"'[<>]' not supported between instances of .*"
)
with pytest.raises(TypeError, match=msg):
safe_sort(arr)
def test_exceptions(self):
with pytest.raises(TypeError, match="Only list-like objects are allowed"):
safe_sort(values=1)
with pytest.raises(TypeError, match="Only list-like objects or None"):
safe_sort(values=[0, 1, 2], codes=1)
with pytest.raises(ValueError, match="values should be unique"):
safe_sort(values=[0, 1, 2, 1], codes=[0, 1])
def test_extension_array(self):
# a = array([1, 3, np.nan, 2], dtype='Int64')
a = array([1, 3, 2], dtype="Int64")
result = safe_sort(a)
# expected = array([1, 2, 3, np.nan], dtype='Int64')
expected = array([1, 2, 3], dtype="Int64")
tm.assert_extension_array_equal(result, expected)
@pytest.mark.parametrize("verify", [True, False])
@pytest.mark.parametrize("na_sentinel", [-1, 99])
def test_extension_array_codes(self, verify, na_sentinel):
a = array([1, 3, 2], dtype="Int64")
result, codes = safe_sort(
a, [0, 1, na_sentinel, 2], na_sentinel=na_sentinel, verify=verify
)
expected_values = array([1, 2, 3], dtype="Int64")
expected_codes = np.array([0, 2, na_sentinel, 1], dtype=np.intp)
tm.assert_extension_array_equal(result, expected_values)
tm.assert_numpy_array_equal(codes, expected_codes)
def test_mixed_str_nan():
values = np.array(["b", np.nan, "a", "b"], dtype=object)
result = safe_sort(values)
expected = np.array([np.nan, "a", "b", "b"], dtype=object)
tm.assert_numpy_array_equal(result, expected)
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/misc/findobj_demo.py | 1 | 1660 | """
============
Findobj Demo
============
Recursively find all objects that match some criteria
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.text as text
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
a = np.arange(0, 3, .02)
b = np.arange(0, 3, .02)
c = np.exp(a)
d = c[::-1]
fig, ax = plt.subplots()
plt.plot(a, c, 'k--', a, d, 'k:', a, c + d, 'k')
plt.legend(('Model length', 'Data length', 'Total message length'),
loc='upper center', shadow=True)
plt.ylim([-1, 20])
plt.grid(False)
plt.xlabel('Model complexity --->')
plt.ylabel('Message length --->')
plt.title('Minimum Message Length')
# match on arbitrary function
def myfunc(x):
return hasattr(x, 'set_color') and not hasattr(x, 'set_facecolor')
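# Roughly: this matches artists with a plain color but no face color
# (e.g. lines and text), so patches are left alone.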
for o in fig.findobj(myfunc):
o.set_color('blue')
# match on class instances
for o in fig.findobj(text.Text):
o.set_fontstyle('italic')
pltshow(plt)
| mit |
dyermd/legos | marketing/temp.py | 1 | 1914 | #generate all the plots for the market project
from optparse import OptionParser
import matplotlib.pyplot as plt
__author__ = 'mattdyer'
# add labels to a plot
# @param points The plot object
# @param axis The axis object
def autolabel(points, axis, data):
# attach some text labels
for i, point in enumerate(points):
height = float(point.get_height())
axis.text(point.get_x()+point.get_width()/2., 1.05*height, '%.1f'%data[i],
ha='center', va='bottom')
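# (autolabel is a helper for annotating bar-style artists, i.e. anything with
# get_height/get_x/get_width such as the patches returned by a bar or hist
# call, e.g. something like autolabel(ax1.patches, ax1, data); it is not
# called anywhere below.)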
#start here when the script is launched
if (__name__ == '__main__'):
#set up the option parser
parser = OptionParser()
#add the options to parse
parser.add_option('-o', '--output', dest='output', help='The output directory')
(options, args) = parser.parse_args()
############
#Figure X - histogram
############
fix, ax1 = plt.subplots()
data = [4.30 , 5.37 , 11.11 , 13.25 , 13.25 , 18.99 , 21.14 , 21.14 , 21.49 , 22.57 , 23.64 , 23.64 , 24.00 , 25.08 , 25.79 , 30.09 , 35.11 , 36.18 , 36.90 , 36.90 , 39.76 , 40.12 , 40.12 , 40.12 , 42.27 , 44.06 , 46.93 , 47.29 , 47.64 , 48.36 , 49.08 , 51.23 , 51.94 , 52.66 , 53.02 , 58.75 , 59.11 , 59.11 , 60.18 , 60.54 , 61.97 , 62.33 , 64.48 , 67.71 , 69.85 , 72.00 , 72.00 , 74.51 , 74.87 , 77.02 , 77.38 , 78.09 , 80.24 , 80.96 , 83.11 , 86.69 , 87.77 , 88.48 , 90.63 , 90.99 , 91.71 , 92.78 , 92.78 , 94.21 , 94.93 , 97.08 , 97.80 , 97.80 , 101.74 , 103.53 , 103.89 , 116.78 , 117.50 , 117.86 , 121.44 , 121.44 , 123.95 , 150.81 , 151.89 , 152.96 , 161.56 , 164.78 , 169.80 , 174.82 , 193.08 , 195.23 , 227.47 , 248.61 , 112.84, 135.05, 142.57, 176.25, 179.47, 183.05, 193.80, 195.95, 207.41, 208.13, 239.65, 270.82, 279.42, 290.16, 312.37]
ax1.hist(data, color='b', alpha=0.4)
ax1.set_xlabel('Technical Fee ($)')
ax1.set_ylabel('CPT Codes')
plt.savefig('%s/figureX.png' % (options.output))
#plt.show() | gpl-2.0 |
itu-oss-project-team/oss-github-analysis-project | github_analysis_tool/analyzer/classification.py | 1 | 6943 | import collections
import numpy as np
import os.path
from sklearn import neighbors
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import *
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from github_analysis_tool.services.database_service import DatabaseService
from github_analysis_tool.analyzer.analysis_utilities import AnalysisUtilities
class Classification:
def __init__(self):
self.__analysis_utilities = AnalysisUtilities()
self.__database_service = DatabaseService()
def set_language_labels(self, df):
threshold = 3
headers, repos, observations = self.__analysis_utilities.decompose_df(df)
language_counts = {}
repo_labels = {}
# Which languages should be treated as one
language_groups = [["JavaScript", "TypeScript", "CoffeeScript"]]
# {"language_a" : "language_a,language_b,language_c"}
group_language_mapping = {}
# Fill language -> group mapping
for language_group in language_groups:
group_label = '-'.join(language_group)
for language in language_group:
group_language_mapping[language] = group_label
        # Find the language of every repo and count repos per language, taking language groups into account
for repo in repos:
repo_lang = self.__database_service.get_language_by_repo_full_name(repo)
if repo_lang in group_language_mapping:
# This language is a part of group use group label
repo_labels[repo] = group_language_mapping[repo_lang]
else:
repo_labels[repo] = repo_lang
if repo_labels[repo] not in language_counts:
language_counts[repo_labels[repo]] = 1
else:
language_counts[repo_labels[repo]] += 1
# Find languages with repo counts lower than our threshold
ignored_languages = [language for language in language_counts if language_counts[language] < threshold]
# Find repos with ignored languages
ignored_repos = [repo for repo in repos if repo_labels[repo] in ignored_languages]
# We should no longer keep language information of ignored repos
for repo in ignored_repos:
repo_labels.pop(repo, None)
labels = []
for repo in repo_labels:
labels.append(repo_labels[repo])
return labels, repo_labels, ignored_repos
def set_two_class_language_labels(self, df):
headers, repos, observations = self.__analysis_utilities.decompose_df(df)
labels = []
repo_labels = collections.OrderedDict()
for repo in repos:
language = self.__database_service.get_language_by_repo_full_name(repo)
if language == "JavaScript" or language == "TypeScript" or language == "CoffeeScript" \
or language == "HTML" or language == "CSS" or language == "PHP":
label = "Web"
else:
label = "Non-Web"
repo_labels[repo] = label
labels.append(label)
return labels, repo_labels, [] # No ignored repos
def set_star_labels(self, df):
headers, repos, observations = self.__analysis_utilities.decompose_df(df)
labels = []
repo_labels = collections.OrderedDict()
for repo in repos:
result = self.__database_service.get_repo_by_full_name(repo)
stars = result["stargazers_count"]
if stars > 50000:
label = "50k+"
elif 30000 <= stars < 50000:
label = "30k-50k"
elif 20000 <= stars < 30000:
label = "20k-30k"
elif 15000 <= stars < 20000:
label = "15k-20k"
elif stars < 15000:
label = "15k-"
repo_labels[repo] = label
labels.append(label)
return labels, repo_labels, [] # No ignored repos
def set_no_of_commits_labels(self, df):
headers, repos, observations = self.__analysis_utilities.decompose_df(df)
labels = []
repo_labels = collections.OrderedDict()
for repo in repos:
result = self.__database_service.get_repo_stats(repo_full_name=repo)
no_of_commits = result["no_of_commits"]
if no_of_commits >= 20000:
label = "20k+"
elif 10000 <= no_of_commits < 20000:
label = "10k-20k"
elif 5000 <= no_of_commits < 10000:
label = "5k-10k"
elif 2500 <= no_of_commits < 5000:
label = "2.5k-5k"
elif 1000 <= no_of_commits < 2500:
label = "1k-2.5k"
elif 500 <= no_of_commits < 1000:
label = "500-1k"
else:
label = "500-"
repo_labels[repo] = label
labels.append(label)
return labels, repo_labels, [] # No ignored repos
def __retrieve_confusion_matrix(self, labels, predicted, out_file_pre_path):
success = accuracy_score(labels, predicted, normalize=False)
fail = len(labels) - success
ratio = accuracy_score(labels, predicted)
print(success, fail, ratio)
label_names = np.unique(labels)
conf_matrix = confusion_matrix(labels, predicted, label_names)
#self.__analysis_utilities.export_confusion_matrix(out_file_pre_path, conf_matrix,
#label_names, success, fail)
return conf_matrix
def knn_classify(self, out_folder_path, training_set, test_set, training_labels, test_labels, k=1, msg=""):
print("message: " + msg)
out_file_pre_path = os.path.join(out_folder_path, "knn" + str(k) + msg) # Any output file should extend this path
knn_classifier = neighbors.KNeighborsClassifier(k, weights='distance')
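        # weights='distance' makes closer neighbors count more heavily in the
        # vote than distant ones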
knn_classifier.fit(training_set, training_labels)
predicted = knn_classifier.predict(test_set)
success = accuracy_score(test_labels, predicted, normalize=False)
conf_matrix = self.__retrieve_confusion_matrix(test_labels, predicted, out_file_pre_path)
return conf_matrix, success
def classify(self, classifier, clsf_name, out_folder_path, training_set, training_labels, test_set, test_labels, msg=""):
print(clsf_name + " : " + msg)
out_file_pre_path = os.path.join(out_folder_path, clsf_name + msg) # Any output file should extend this path
classifier.fit(training_set, training_labels)
predicted = classifier.predict(test_set)
success = accuracy_score(test_labels, predicted, normalize=False)
conf_matrix = self.__retrieve_confusion_matrix(test_labels, predicted, out_file_pre_path)
return conf_matrix, success
| mit |
zooniverse/aggregation | experimental/penguins/clusterAnalysis/distance_.py | 2 | 3492 | #!/usr/bin/env python
__author__ = 'greghines'
import numpy as np
import os
import sys
import cPickle as pickle
import math
import matplotlib.pyplot as plt
import pymongo
import urllib
import matplotlib.cbook as cbook
if os.path.exists("/home/ggdhines"):
sys.path.append("/home/ggdhines/PycharmProjects/reduction/experimental/clusteringAlg")
else:
sys.path.append("/home/greg/github/reduction/experimental/clusteringAlg")
#from divisiveDBSCAN import DivisiveDBSCAN
#from divisiveDBSCAN_multi import DivisiveDBSCAN
#from clusterCompare import metric,metric2
if os.path.exists("/home/ggdhines"):
base_directory = "/home/ggdhines"
else:
base_directory = "/home/greg"
penguins = pickle.load(open(base_directory+"/Databases/penguins_vote__.pickle","rb"))
#does this cluster have a corresponding cluster in the gold standard data?
#ie. does this cluster represent an actual penguin?
# #user penguins for first image - with 5 images
# print len(penguins[5][0])
# #user data
# print penguins[5][0][0]
# #gold standard data
# #print penguins[5][0][1]
#
# #users who annotated the first "penguin" in the first image
# print penguins[5][0][0][0][1]
# #and their corresponding points
# print penguins[5][0][0][0][0]
client = pymongo.MongoClient()
db = client['penguin_2014-10-22']
subject_collection = db["penguin_subjects"]
lowest_cluster = float("inf")
highest_cluster = -float('inf')
#print gold_standard
#RESET
max_users = 20
cluster_list = []
image = penguins[max_users][0]
for image in penguins[max_users]:
#first - create a list of ALL users - so we can figure out who has annotated a "penguin" or hasn't
zooniverse_id = image[0]
for cluster in image[1]:
X = np.mean(zip(*cluster[0])[0])
Y = np.mean(zip(*cluster[0])[1])
cluster_list.append((X,Y,cluster[1]))
subject = subject_collection.find_one({"zooniverse_id":zooniverse_id})
url = subject["location"]["standard"]
object_id= str(subject["_id"])
image_path = base_directory+"/Databases/penguins/images/"+object_id+".JPG"
if not(os.path.isfile(image_path)):
urllib.urlretrieve(url, image_path)
image_file = cbook.get_sample_data(base_directory + "/Databases/penguins/images/"+object_id+".JPG")
image = plt.imread(image_file)
fig, ax = plt.subplots()
im = ax.imshow(image)
to_plot = False
for i in range(len(cluster_list)):
for j in range(i+1,len(cluster_list)):
users_1 = cluster_list[i][2]
users_2 = cluster_list[j][2]
overlap = len([u for u in users_1 if (u in users_2)])
if overlap == 1:
x_1,y_1 = cluster_list[i][0],cluster_list[i][1]
x_2,y_2 = cluster_list[j][0],cluster_list[j][1]
first_dist = math.sqrt((x_1-x_2)**2 + (y_1-y_2)**2)
min_dist = float("inf")
#find the closest or next-closest cluster
for k in range(len(cluster_list)):
if k in [i,j]:
continue
x_3,y_3 = cluster_list[k][0],cluster_list[k][1]
dist = math.sqrt((x_1-x_3)**2 + (y_1-y_3)**2)
min_dist = min(min_dist,dist)
if first_dist< (min_dist):
to_plot = True
print first_dist,min_dist
plt.plot((x_1,x_2),(y_1,y_2))
if to_plot:
plt.show()
plt.close()
else:
plt.close()
print "====" | apache-2.0 |
sstoma/CellProfiler | cellprofiler/modules/colortogray.py | 2 | 21721 | '''
<b> Color to Gray</b> converts an image with three color channels to a set of individual
grayscale images.
<hr>
This module converts RGB (Red, Green, Blue) color images to grayscale. All channels
can be merged into one grayscale image (<i>Combine</i>), or each channel
can be extracted into a separate grayscale image (<i>Split</i>). If you use <i>Combine</i>,
the relative weights will adjust the contribution of the colors relative to each other.<br>
<br>
<i>Note:</i>All <b>Identify</b> modules require grayscale images.
<p>See also <b>GrayToColor</b>.
'''
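# A rough illustration of the Combine option, assuming the weighted sum is
# normalized by the total weight (as the relative-weight settings imply):
# with weights (1, 1, 2), a pixel (R, G, B) = (0.2, 0.4, 0.8) becomes
# (1*0.2 + 1*0.4 + 2*0.8) / (1 + 1 + 2) = 0.55 in the grayscale output.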
# CellProfiler is distributed under the GNU General Public License.
# See the accompanying file LICENSE for details.
#
# Copyright (c) 2003-2009 Massachusetts Institute of Technology
# Copyright (c) 2009-2015 Broad Institute
#
# Please see the AUTHORS file for credits.
#
# Website: http://www.cellprofiler.org
import numpy as np
import re
import matplotlib.colors
import cellprofiler.cpmodule as cpm
import cellprofiler.settings as cps
import cellprofiler.cpimage as cpi
COMBINE = "Combine"
SPLIT = "Split"
CH_RGB = "RGB"
CH_HSV = "HSV"
CH_CHANNELS = "Channels"
SLOT_CHANNEL_COUNT = 19
SLOT_FIXED_COUNT = 20
SLOTS_PER_CHANNEL = 3
SLOT_CHANNEL_CHOICE = 0
class ColorToGray(cpm.CPModule):
module_name = "ColorToGray"
variable_revision_number = 3
category = "Image Processing"
def create_settings(self):
self.image_name = cps.ImageNameSubscriber(
"Select the input image",cps.NONE)
self.combine_or_split = cps.Choice(
"Conversion method",
[COMBINE,SPLIT],doc='''
How do you want to convert the color image?
<ul>
<li><i>%(SPLIT)s:</i> Splits the three channels
(red, green, blue) of a color image into three separate grayscale images. </li>
            <li><i>%(COMBINE)s:</i> Converts a color image to a grayscale
image by combining the three channels (red, green, blue) together.</li>
</ul>'''%globals())
self.rgb_or_channels = cps.Choice(
"Image type", [CH_RGB, CH_HSV, CH_CHANNELS],doc = """
Many images contain color channels other than red, green
and blue. For instance, GIF and PNG formats can have an alpha
channel that encodes transparency. TIF formats can have an arbitrary
number of channels which represent pixel measurements made by
different detectors, filters or lighting conditions. This setting
provides three options to choose from:
<ul>
<li><i>%(CH_RGB)s:</i> The RGB (red,green,blue) color space is the typical model in which color images are stored. Choosing this option
will split the image into any of the red, green and blue component images.</li>
            <li><i>%(CH_HSV)s:</i> The HSV (hue, saturation, value) color space is based on more intuitive color characteristics such as
            tint, shade and tone. Choosing
            this option will split the image into any of the hue, saturation, and value component images.</li>
            <li><i>%(CH_CHANNELS)s:</i> This is a more general model for images that contain more than three channels.</li>
</ul>""" % globals())
# The following settings are used for the combine option
self.grayscale_name = cps.ImageNameProvider(
"Name the output image","OrigGray")
self.red_contribution = cps.Float(
"Relative weight of the red channel",
1,0,doc='''
<i>(Used only when combining channels)</i><br>
Relative weights: If all relative weights are equal, all three
colors contribute equally in the final image. To weight colors relative
to each other, increase or decrease the relative weights.''')
self.green_contribution = cps.Float(
"Relative weight of the green channel",
1,0,doc='''
<i>(Used only when combining channels)</i><br>
Relative weights: If all relative weights are equal, all three
colors contribute equally in the final image. To weight colors relative
to each other, increase or decrease the relative weights.''')
self.blue_contribution = cps.Float(
"Relative weight of the blue channel",
1,0,doc='''
<i>(Used only when combining channels)</i><br>
Relative weights: If all relative weights are equal, all three
colors contribute equally in the final image. To weight colors relative
to each other, increase or decrease the relative weights.''')
# The following settings are used for the split RGB option
self.use_red = cps.Binary('Convert red to gray?',True)
self.red_name = cps.ImageNameProvider( 'Name the output image', "OrigRed")
self.use_green = cps.Binary('Convert green to gray?',True)
self.green_name = cps.ImageNameProvider('Name the output image', "OrigGreen")
self.use_blue = cps.Binary('Convert blue to gray?',True)
self.blue_name = cps.ImageNameProvider('Name the output image', "OrigBlue")
        # The following settings are used for the split HSV option
self.use_hue = cps.Binary('Convert hue to gray?',True)
self.hue_name = cps.ImageNameProvider('Name the output image',"OrigHue")
self.use_saturation = cps.Binary('Convert saturation to gray?',True)
self.saturation_name = cps.ImageNameProvider('Name the output image', "OrigSaturation")
self.use_value = cps.Binary('Convert value to gray?',True)
self.value_name = cps.ImageNameProvider('Name the output image',"OrigValue")
# The alternative model:
self.channels = []
self.add_channel(False)
self.channel_button = cps.DoSomething(
"","Add another channel", self.add_channel)
self.channel_count = cps.HiddenCount(self.channels, "Channel count")
channel_names = (["Red: 1", "Green: 2", "Blue: 3", "Alpha: 4"] +
[str(x) for x in range(5,20)])
def add_channel(self, can_remove = True):
'''Add another channel to the channels list'''
group = cps.SettingsGroup()
group.can_remove = can_remove
group.append("channel_choice", cps.Choice(
"Channel number", self.channel_names,
self.channel_names[len(self.channels) % len(self.channel_names)],doc = """
This setting chooses a channel to be processed.
<i>Red: 1</i> is the first channel in a .TIF or the red channel
in a traditional image file. <i>Green: 2</i> and <i>Blue: 3</i>
are the second and third channels of a TIF or the green and blue
channels in other formats. <i>Alpha: 4</i> is the transparency
channel for image formats that support transparency and is
channel # 4 for a .TIF file.
<b>ColorToGray</b> will fail to process an image if you select
a channel that is not supported by that image, for example, "5"
for a .PNG file"""))
group.append("contribution", cps.Float(
"Relative weight of the channel", 1,0,doc='''
<i>(Used only when combining channels)</i><br>
Relative weights: If all relative weights are equal, all three
colors contribute equally in the final image. To weight colors relative
to each other, increase or decrease the relative weights.'''))
group.append("image_name", cps.ImageNameProvider(
"Image name", value="Channel%d" % (len(self.channels)+1),doc = """
This is the name of the grayscale image that holds
the image data from the chosen channel."""))
if group.can_remove:
group.append("remover", cps.RemoveSettingButton(
"", "Remove this channel", self.channels, group))
self.channels.append(group)
def visible_settings(self):
"""Return either the "combine" or the "split" settings"""
vv = [self.image_name, self.combine_or_split]
if self.should_combine():
vv += [self.grayscale_name, self.rgb_or_channels]
if self.rgb_or_channels in (CH_RGB, CH_HSV):
vv.extend([self.red_contribution,
self.green_contribution, self.blue_contribution])
else:
for channel in self.channels:
vv += [channel.channel_choice, channel.contribution]
if channel.can_remove:
vv += [channel.remover]
vv += [self.channel_button]
else:
vv += [ self.rgb_or_channels ]
if self.rgb_or_channels == CH_RGB:
for v_use,v_name in ((self.use_red ,self.red_name),
(self.use_green,self.green_name),
(self.use_blue ,self.blue_name)):
vv.append(v_use)
if v_use.value:
vv.append(v_name)
elif self.rgb_or_channels == CH_HSV:
for v_use,v_name in ((self.use_hue ,self.hue_name),
(self.use_saturation,self.saturation_name),
(self.use_value ,self.value_name)):
vv.append(v_use)
if v_use.value:
vv.append(v_name)
else:
for channel in self.channels:
vv += [channel.channel_choice, channel.image_name]
if channel.can_remove:
vv += [channel.remover]
vv += [self.channel_button]
return vv
def settings(self):
"""Return all of the settings in a consistent order"""
return [self.image_name, self.combine_or_split,
self.rgb_or_channels,
self.grayscale_name, self.red_contribution,
self.green_contribution, self.blue_contribution,
self.use_red, self.red_name,
self.use_green, self.green_name,
self.use_blue, self.blue_name,
self.use_hue, self.hue_name,
self.use_saturation, self.saturation_name,
self.use_value, self.value_name,
self.channel_count
] + sum([ [channel.channel_choice, channel.contribution,
channel.image_name] for channel in self.channels],
[])
def should_combine(self):
"""True if we are supposed to combine RGB to gray"""
return self.combine_or_split == COMBINE
def should_split(self):
"""True if we are supposed to split each color into an image"""
return self.combine_or_split == SPLIT
def validate_module(self,pipeline):
"""Test to see if the module is in a valid state to run
Throw a ValidationError exception with an explanation if a module is not valid.
Make sure that we output at least one image if split
"""
if self.should_split():
if (self.rgb_or_channels == CH_RGB) and not any([self.use_red.value, self.use_blue.value, self.use_green.value]):
raise cps.ValidationError("You must output at least one of the color images when in split mode",
self.use_red)
if (self.rgb_or_channels == CH_HSV) and not any([self.use_hue.value, self.use_saturation.value, self.use_value.value]):
raise cps.ValidationError("You must output at least one of the color images when in split mode",
self.use_hue)
def channels_and_contributions(self):
"""Return tuples of channel indexes and their relative contributions
Used when combining channels to find the channels to combine
"""
if self.rgb_or_channels in (CH_RGB,CH_HSV):
return [ (i, contribution.value) for i,contribution in enumerate(
(self.red_contribution, self.green_contribution,
self.blue_contribution))]
return [ (self.channel_names.index(channel.channel_choice),
channel.contribution.value) for channel in self.channels ]
@staticmethod
def get_channel_idx_from_choice(choice):
'''Convert one of the channel choice strings to a channel index
choice - one of the strings from channel_choices or similar
(string ending in a one-based index)
returns the zero-based index of the channel.
'''
return int(re.search("[0-9]+$", choice).group()) - 1
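# Example: get_channel_idx_from_choice("Red: 1") returns 0,
# get_channel_idx_from_choice("Alpha: 4") returns 3 and a bare "5" returns 4.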
def channels_and_image_names(self):
"""Return tuples of channel indexes and the image names for output"""
if self.rgb_or_channels == CH_RGB:
rgb = ((self.use_red.value, self.red_name.value, "Red"),
(self.use_green.value, self.green_name.value, "Green"),
(self.use_blue.value, self.blue_name.value, "Blue"))
return [ (i, name, title) for i, (use_it, name, title)
in enumerate(rgb) if use_it ]
if self.rgb_or_channels == CH_HSV:
hsv = ((self.use_hue.value, self.hue_name.value, "Hue"),
(self.use_saturation.value, self.saturation_name.value, "Saturation"),
(self.use_value.value, self.value_name.value, "Value"))
return [ (i, name, title) for i, (use_it, name, title)
in enumerate(hsv) if use_it ]
result = []
for channel in self.channels:
choice = channel.channel_choice.value
channel_idx = self.get_channel_idx_from_choice(choice)
result.append((channel_idx, channel.image_name.value,
channel.channel_choice.value))
return result
def run(self,workspace):
"""Run the module
pipeline - instance of CellProfiler.Pipeline for this run
workspace - the workspace contains:
image_set - the images in the image set being processed
object_set - the objects (labeled masks) in this image set
measurements - the measurements for this run
frame - display within this frame (or None to not display)
"""
image = workspace.image_set.get_image(self.image_name.value,
must_be_color=True)
if self.should_combine():
self.run_combine(workspace, image)
else:
self.run_split(workspace, image)
def display(self, workspace, figure):
if self.should_combine():
self.display_combine(workspace, figure)
else:
self.display_split(workspace, figure)
def run_combine(self, workspace, image):
"""Combine images to make a grayscale one
"""
input_image = image.pixel_data
channels, contributions = zip(*self.channels_and_contributions())
denominator = sum(contributions)
channels = np.array(channels, int)
contributions = np.array(contributions) / denominator
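# The grayscale output is a weighted average over the selected channels:
# output[y, x] = sum_c(input[y, x, channel_c] * weight_c) / sum_c(weight_c)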
output_image = np.sum(input_image[:, :, channels] *
contributions[np.newaxis, np.newaxis, :], 2)
image = cpi.Image(output_image,parent_image=image)
workspace.image_set.add(self.grayscale_name.value,image)
workspace.display_data.input_image = input_image
workspace.display_data.output_image = output_image
def display_combine(self, workspace, figure):
import matplotlib.cm
input_image = workspace.display_data.input_image
output_image = workspace.display_data.output_image
figure.set_subplots((1, 2))
figure.subplot_imshow(0, 0, input_image,
title = "Original image: %s"%(self.image_name))
figure.subplot_imshow(0, 1, output_image,
title = "Grayscale image: %s"%(self.grayscale_name),
colormap = matplotlib.cm.Greys_r,
sharexy = figure.subplot(0,0))
def run_split(self, workspace, image):
"""Split image into individual components
"""
input_image = image.pixel_data
disp_collection = []
if self.rgb_or_channels in (CH_RGB,CH_CHANNELS):
for index, name, title in self.channels_and_image_names():
output_image = input_image[:,:,index]
workspace.image_set.add(name, cpi.Image(output_image,parent_image=image))
disp_collection.append([output_image, title])
elif self.rgb_or_channels == CH_HSV:
output_image = matplotlib.colors.rgb_to_hsv(input_image)
for index, name, title in self.channels_and_image_names():
workspace.image_set.add(name, cpi.Image(output_image[:,:,index],parent_image=image))
disp_collection.append([output_image[:,:,index], title])
workspace.display_data.input_image = input_image
workspace.display_data.disp_collection = disp_collection
def display_split(self, workspace, figure):
import matplotlib.cm
input_image = workspace.display_data.input_image
disp_collection = workspace.display_data.disp_collection
ndisp = len(disp_collection)
if ndisp == 1:
subplots = (1,2)
else:
subplots = (2,2)
figure.set_subplots(subplots)
figure.subplot_imshow(0, 0, input_image,
title = "Original image")
if ndisp == 1:
layout = [(0,1)]
elif ndisp == 2:
layout = [ (1,0),(0,1)]
else:
layout = [(1,0),(0,1),(1,1)]
for xy, disp in zip(layout, disp_collection):
figure.subplot_imshow(xy[0], xy[1], disp[0],
title = "%s image"%(disp[1]),
colormap = matplotlib.cm.Greys_r,
sharexy = figure.subplot(0,0))
def prepare_settings(self, setting_values):
'''Prepare the module to receive the settings
setting_values - one string per setting to be initialized
Adjust the number of channels to match the number indicated in
the settings.
'''
del self.channels[1:]
nchannels = int(setting_values[SLOT_CHANNEL_COUNT])
while len(self.channels) < nchannels:
self.add_channel()
def upgrade_settings(self,
setting_values,
variable_revision_number,
module_name,
from_matlab):
if from_matlab and variable_revision_number == 1:
new_setting_values = [ setting_values[0], # image name
setting_values[1], # combine or split
# blank slot for text: "Combine options"
setting_values[3], # grayscale name
setting_values[4], # red contribution
setting_values[5], # green contribution
setting_values[6] # blue contribution
# blank slot for text: "Split options"
]
for i in range(3):
vv = setting_values[i+8]
use_it = ((vv == cps.DO_NOT_USE or vv == "N") and cps.NO) or cps.YES
new_setting_values.append(use_it)
new_setting_values.append(vv)
setting_values = new_setting_values
module_name = self.module_class()
variable_revision_number = 1
from_matlab = False
if not from_matlab and variable_revision_number == 1:
#
# Added rgb_or_channels at position # 2, added channel count
# at end.
#
setting_values = (
setting_values[:2] + [ CH_RGB ] + setting_values[2:] +
[ "1", "Red: 1", "1", "Channel1"])
variable_revision_number = 2
if not from_matlab and variable_revision_number == 2:
#
# Added HSV settings
#
setting_values = (setting_values[:13] +
[cps.YES,"OrigHue",cps.YES,"OrigSaturation",cps.YES,"OrigValue"] +
setting_values[13:])
variable_revision_number = 3
#
# Standardize the channel choices
#
setting_values = list(setting_values)
nchannels = int(setting_values[SLOT_CHANNEL_COUNT])
for i in range(nchannels):
idx = SLOT_FIXED_COUNT + SLOT_CHANNEL_CHOICE + i * SLOTS_PER_CHANNEL
channel_idx = self.get_channel_idx_from_choice(setting_values[idx])
setting_values[idx] = self.channel_names[channel_idx]
return setting_values, variable_revision_number, from_matlab
| gpl-2.0 |
xavierfav/freesound-python | manager.py | 1 | 75825 | """
Upgrade of the python client for freesound
Find the API documentation at http://www.freesound.org/docs/api/.
This lib provides methods for managing files and data with local data storage
"""
import sys
sys.path.append('/home/xavier/Documents/dev/freesound-python/')
import copy
import freesound
import os
import json
import ijson
import simplejson
from numpy import array
import numpy as np
from functools import reduce
from collections import defaultdict # used by TfidfEmbeddingVectorizer.fit
import cPickle
from urllib2 import URLError
reload(sys)
sys.setdefaultencoding("utf-8")
import subprocess
import ast
from time import sleep, gmtime, strftime
import psycopg2
import requests
from math import ceil
import datetime
import csv
from sklearn import preprocessing
from scipy import spatial
import scipy
from sklearn.metrics.pairwise import cosine_similarity
import re
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import CountVectorizer
from stop_words import get_stop_words
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim.models.word2vec import Word2Vec
sys.path.append('/home/xavier/Documents/freesound-data/query flow') # note: this does not allow running a script located in that folder with 'run <script.py>' from ipython...
import pandas as pd
import operator
import networkx as nx
from sklearn.decomposition import LatentDirichletAllocation
LENGTH_BAR = 30 # length of the progress bar
class SettingsSingleton(object):
"""
Singleton object pattern to access/modify settings from everywhere
"""
class __OnlyOne:
def __init__(self):
self.local_sounds = []
self.local_analysis = []
self.local_baskets = []
self.local_baskets_pickle = []
self.autoSave = True
instance = None
def __new__(cls): # __new__ always a classmethod
if not SettingsSingleton.instance:
SettingsSingleton.instance = SettingsSingleton.__OnlyOne()
return SettingsSingleton.instance
def __getattr__(self, name):
return getattr(self.instance, name)
def __setattr__(self, name, value):
return setattr(self.instance, name, value)
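# Usage sketch: every instantiation shares the same underlying state, e.g.
# s1 = SettingsSingleton(); s2 = SettingsSingleton()
# s1.autoSave = False   # s2.autoSave now also reads False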
#_________________________________________________________________#
# Client class #
#_________________________________________________________________#
class Client(freesound.FreesoundClient):
"""
Create FreesoundClient and set authentication
The first time you create a client, it will ask for your Freesound id, api key and set up authentication
>>> import manager
>>> c = manager.Client()
Enter your client id: xxx
Enter your api key: xxx
...
If Freesound server is down, you can create a client without authentication:
>>> c = manager.Client(authentication = False)
"""
def __init__(self, authentication=True):
self._scan_folder()
if authentication:
self._init_oauth()
# ________________________________________________________________________#
#____________________________ local folders ______________________________#
def _local_(self, what):
settings = SettingsSingleton()
return getattr(settings, what)
@property
def local_baskets(self):
return self._local_('local_baskets')
@property
def local_sounds(self):
return self._local_('local_sounds')
@property
def local_analysis(self):
return self._local_('local_analysis')
@property
def local_analysis_stats(self):
return self._local_('local_analysis_stats')
@property
def local_baskets_pickle(self):
return self._local_('local_baskets_pickle')
#________________________________________________________________________#
# __________________________ Users functions ____________________________#
def my_text_search(self, **param):
"""
Call text_search method from freesound.py and add all the defaults fields and page size parameters
TODO : make the default params more flexible (store them in a param file - add the api_key in a .py file)
>>> import manager
>>> c = manager.Client()
>>> result = c.my_text_search(query="wind")
"""
fields = 'id,'
try:
fields += param['fields']
param.pop('fields')
except:
pass
results_pager = self.text_search(fields=fields, page_size=150, **param)
#self.text_search(fields="id,name,url,tags,description,type,previews,filesize,bitrate,bitdepth,duration,samplerate,username,comments,num_comments,analysis_frames",page_size=150,**param)
return results_pager
def my_get_sound(self,idToLoad):
"""
Use this method to get a sound from local or freesound if not in local
>>> sound = c.my_get_sound(id)
"""
settings = SettingsSingleton()
if idToLoad not in settings.local_sounds:
sound = self._load_sound_freesound(idToLoad)
if settings.autoSave:
self._save_sound_json(sound) # save it
else:
sound = self._load_sound_json(idToLoad)
return sound
def my_get_sounds(self,idsToLoad):
"""
Use this method to get many sounds from local or freesound
"""
sounds = []
nbSound = len(idsToLoad)
Bar = ProgressBar(nbSound,LENGTH_BAR,'Loading sounds')
Bar.update(0)
for i in range(nbSound):
sounds.append(self.my_get_sound(idsToLoad[i]))
Bar.update(i+1)
return sounds
def my_get_analysis(self, idToLoad, descriptor):
"""
Use this method to get all frames from an analysis type 'descriptor'
>>> analysis = c.my_get_analysis(id, 'lowlevel.mfcc')
"""
settings = SettingsSingleton()
analysis = None
if idToLoad not in settings.local_analysis:
allAnalysis = self._load_analysis_freesound(idToLoad)
if settings.autoSave:
self._save_analysis_json(allAnalysis, idToLoad)
if allAnalysis:
splitDescriptors = descriptor.split(".")
analysis = allAnalysis
for desc in splitDescriptors:
analysis = getattr(analysis, desc)
else:
analysis = self._load_analysis_descriptor_json(idToLoad, descriptor)
return analysis
# TODO: this does not work
def my_get_analysiss(self, idsToLoad):
"""
TODO : adapt it to return an Analysis object
Use this method to get many analysis from local or freesound
"""
analysis = []
nbAnalysis = len(idsToLoad)
Bar = ProgressBar(nbAnalysis,LENGTH_BAR,'Loading sounds')
Bar.update(0)
for i in range(nbAnalysis):
analysis.append(self.my_get_analysis(idsToLoad[i]))
Bar.update(i+1)
return analysis
# TODO: don't load the sound if the sound is given as argument instead of the id
def my_get_analysis_stats(self, idToLoad):
settings = SettingsSingleton()
analysis = None
if idToLoad not in settings.local_analysis_stats:
analysis = self._load_analysis_stats_freesound(idToLoad)
if settings.autoSave:
self._save_analysis_stats_json(analysis, idToLoad)
else:
analysis = self._load_analysis_stats_json(idToLoad)
return analysis
def my_get_one_analysis_stats(self, idToLoad, descriptor):
settings = SettingsSingleton()
analysis = None
if idToLoad not in settings.local_analysis_stats:
allAnalysis = self._load_analysis_stats_freesound(idToLoad)
if settings.autoSave:
self._save_analysis_stats_json(allAnalysis, idToLoad)
else:
allAnalysis = self._load_analysis_stats_json(idToLoad)
if allAnalysis:
splitDescriptors = descriptor.split(".")
analysis = allAnalysis
for desc in splitDescriptors:
analysis = getattr(analysis, desc)
return analysis
def new_basket(self):
"""
Create a new Basket
"""
basket = Basket(self)
return basket
def load_basket_pickle(self, name):
"""
Load a basket from pickle
"""
settings = SettingsSingleton()
if name and name in settings.local_baskets_pickle:
nameFile = 'baskets_pickle/' + name
with open(nameFile) as infile:
obj = cPickle.load(infile)
obj.parent_client = self
obj._actualize()
return obj
else:
print '%s basket does not exist' % name
@staticmethod
def save_pickle(obj, name, path=''):
"""
Use this method to save an object with pickle
"""
nameFile = path + name
with open(nameFile, 'w') as outfile:
cPickle.dump(obj, outfile)
@staticmethod
def load_pickle(nameFile):
"""
Use this method to load an object from pickle
"""
with open(nameFile) as infile:
obj = cPickle.load(infile)
return obj
@staticmethod
def save_csv(obj, name, path='/csv/'):
nameFile = path + name + '.csv'
with open(nameFile, 'w') as out:
csv_out = csv.writer(out)
for row in obj:
csv_out.writerow(row)
# ________________________________________________________________________#
# _______________________ Private functions ______________________________#
# ____ save/load json local/Freesound, authentication, scan folder _______#
def _save_sound_json(self, sound):
"""
Save a sound into a json file
TODO : add overwrite option...
"""
settings = SettingsSingleton()
if sound and not(sound.id in settings.local_sounds):
nameFile = 'sounds/' + str(sound.id) + '.json'
with open(nameFile, 'w') as outfile:
json.dump(sound.as_dict(), outfile)
settings.local_sounds.append(int(sound.id))
settings.local_sounds.sort()
def _load_sound_json(self, idToLoad):
"""
Load a sound from local json
"""
settings = SettingsSingleton()
if idToLoad in settings.local_sounds:
nameFile = 'sounds/' + str(idToLoad) + '.json'
with open(nameFile) as infile:
sound = freesound.Sound(simplejson.load(infile), self)
return sound
else:
return None
def _load_sound_freesound(self, idToLoad):
count = 0
while 1: # maybe use decorator to add this to all function that can fail sometimes...
count += 1
if count > 4:
print 'sound ' + str(idToLoad) + ' not found (tried 4 times)'
return None
try:
sound = self.get_sound(idToLoad)
return sound
except ValueError:
return None
except URLError as e:
sleep(0.5)
print e, 'id ' + str(idToLoad)
except freesound.FreesoundException as e:
sleep(0.5)
print e, 'id ' + str(idToLoad)
def _save_analysis_json(self, analysis, idSound):
"""
Save an analysis into a json file
TODO : add overwrite option...
"""
settings = SettingsSingleton()
if analysis and not(idSound in settings.local_analysis):
nameFile = 'analysis/' + str(idSound) + '.json'
with open(nameFile, 'w') as outfile:
json.dump(analysis.as_dict(), outfile)
settings.local_analysis.append(int(idSound))
settings.local_analysis.sort()
def _load_analysis_json(self, idToLoad):
"""
Load analysis from local json file
"""
settings = SettingsSingleton()
if idToLoad in settings.local_analysis:
nameFile = 'analysis/' + str(idToLoad) + '.json'
with open(nameFile) as infile:
analysis = freesound.FreesoundObject(simplejson.load(infile),self)
return analysis
else:
return None
def _load_analysis_freesound(self, idToLoad):
"""
Load an analysis file from Freesound database
"""
sound = self.my_get_sound(idToLoad)
try:
allAnalysis = sound.get_analysis_frames()
return allAnalysis
except ValueError:
return None
except freesound.FreesoundException:
return None
except URLError:
return None
def _load_analysis_descriptor_json(self, idToLoad, descriptor):
"""
load analysis frames of a descriptor
TODO : add possible descriptors
"""
analysis = []
settings = SettingsSingleton()
if idToLoad in settings.local_analysis:
nameFile = 'analysis/' + str(idToLoad) + '.json'
with open(nameFile) as infile:
parser = ijson.items(infile, descriptor)
analysis = []
for i in parser:
analysis.append(i)
analysis = array(analysis[0],float)
return analysis
else:
return None
def _save_analysis_stats_json(self, analysis, idSound):
settings = SettingsSingleton()
if analysis and not (idSound in settings.local_analysis_stats):
nameFile = 'analysis_stats/' + str(idSound) + '.json'
with open(nameFile, 'w') as outfile:
json.dump(analysis.as_dict(), outfile)
settings.local_analysis_stats.append(int(idSound))
settings.local_analysis_stats.sort()
def _load_analysis_stats_freesound(self, idToLoad):
"""
Load analysis stats from Freesound
"""
sound = self.my_get_sound(idToLoad)
try:
analysis = sound.get_analysis()
return analysis
except ValueError:
return None
except freesound.FreesoundException:
return None
except URLError:
return None
def _load_analysis_stats_json(self, idToLoad):
"""
Load analysis from local json file
"""
settings = SettingsSingleton()
if idToLoad in settings.local_analysis_stats:
nameFile = 'analysis_stats/' + str(idToLoad) + '.json'
with open(nameFile) as infile:
analysis = freesound.FreesoundObject(json.load(infile), self)
return analysis
else:
return None
@staticmethod
def _scan_folder():
"""
This method is used to scan all content folders
"""
settings = SettingsSingleton()
# Check if the storing folder are here
if not os.path.exists('sounds'):
os.makedirs('sounds')
if not os.path.exists('analysis'):
os.makedirs('analysis')
if not os.path.exists('baskets'):
os.makedirs('baskets')
if not os.path.exists('baskets_pickle'):
os.makedirs('baskets_pickle')
if not os.path.exists('previews'):
os.makedirs('previews')
if not os.path.exists('analysis_stats'):
os.makedirs('analysis_stats')
# create variable with present local sounds & analysis
# (reduce time consumption for function loading json files)
files_sounds = os.listdir('./sounds/')
files_analysis = os.listdir('./analysis/')
files_baskets = os.listdir('./baskets/')
files_baskets_pickle = os.listdir('./baskets_pickle/')
files_analysis_stats = os.listdir('./analysis_stats/')
settings = SettingsSingleton()
settings.local_sounds = []
settings.local_analysis = []
settings.local_baskets = []
settings.local_baskets_pickle = []
settings.local_analysis_stats = []
for i in files_sounds:
settings.local_sounds.append(int(i[:-5]))
for j in files_analysis:
settings.local_analysis.append(int(j[:-5]))
for m in files_baskets:
settings.local_baskets.append(m[:-5])
for n in files_baskets_pickle:
settings.local_baskets_pickle.append(n)
for k in files_analysis_stats:
settings.local_analysis_stats.append(int(k[:-5]))
settings.local_sounds.sort()
settings.local_analysis.sort()
settings.local_analysis_stats.sort()
def _init_oauth(self):
try:
import api_key
reload(api_key)
client_id = api_key.client_id
token = api_key.token
refresh_oauth = api_key.refresh_oauth
print ' Authenticating:\n'
req = 'curl -X POST -d "client_id=' + client_id + '&client_secret=' + token + \
'&grant_type=refresh_token&refresh_token=' + refresh_oauth + '" ' + \
'"https://www.freesound.org/apiv2/oauth2/access_token/"'
output = subprocess.check_output(req, shell=True)
output = ast.literal_eval(output)
access_oauth = output['access_token']
refresh_oauth = output['refresh_token']
self._write_api_key(client_id, token, access_oauth, refresh_oauth)
self.token = token
self.client_id = client_id
self.access_oauth = access_oauth
except ImportError:
client_id = raw_input('Enter your client id: ')
token = raw_input('Enter your api key: ')
code = raw_input('Please go to: https://www.freesound.org/apiv2/oauth2/authorize/?client_id=' + client_id + \
'&response_type=code&state=xyz and enter the given code: ')
print '\n Authenticating:\n'
req = 'curl -X POST -d "client_id=' + client_id + '&client_secret=' + token + \
'&grant_type=authorization_code&code=' + code + '" ' + \
'"https://www.freesound.org/apiv2/oauth2/access_token/"'
output = subprocess.check_output(req, shell=True)
output = ast.literal_eval(output)
access_oauth = output['access_token']
refresh_oauth = output['refresh_token']
self._write_api_key(client_id, token, access_oauth, refresh_oauth)
self.token = token
self.client_id = client_id
self.access_oauth = access_oauth
except:
print 'Could not authenticate'
return
self._set_oauth()
print '\n Congrats ! You are now authenticated \n'
print freesound_rocks_ascii_art
@staticmethod
def _write_api_key(client_id, token, access_oauth, refresh_oauth):
file = open('api_key.py', 'w')
file.write('client_id = "' + client_id + '"')
file.write('\n')
file.write('token = "' + token + '"')
file.write('\n')
file.write('access_oauth = "' + access_oauth + '"')
file.write('\n')
file.write('refresh_oauth = "' + refresh_oauth + '"')
file.close()
def _set_oauth(self):
self.set_token(self.access_oauth, auth_type='oauth')
def _set_token(self):
self.set_token(self.token)
#_________________________________________________________________#
# Analysis class #
#_________________________________________________________________#
class Analysis():
"""
Analysis nested object. Holds all the analysis of many sounds
"""
def __init__(self, json_dict = None):
if not json_dict:
with open('analysis_template.json') as infile:
json_dict = simplejson.load(infile)
self.json_dict = json_dict
def replace_dashes(d):
for k, v in d.items():
if "-" in k:
d[k.replace("-", "_")] = d[k]
del d[k]
if isinstance(v, dict): replace_dashes(v)
replace_dashes(json_dict)
self.__dict__.update(json_dict)
for k, v in json_dict.items():
if isinstance(v, dict):
self.__dict__[k] = Analysis(v)
def rsetattr(self, attr, val):
pre, _, post = attr.rpartition('.')
return setattr(self.rgetattr(pre) if pre else self, post, val)
sentinel = object()
def rgetattr(self, attr, default=sentinel):
if default is self.sentinel:
_getattr = getattr
else:
def _getattr(obj, name):
return getattr(obj, name, default)
return reduce(_getattr, [self] + attr.split('.'))
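# Example: for a nested Analysis object a, a.rgetattr('lowlevel.mfcc') walks
# the dotted path and returns a.lowlevel.mfcc, while
# a.rsetattr('lowlevel.mfcc', frames) assigns to the same nested attribute.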
def remove(self, index, descriptor):
if index == 'all':
self.rsetattr(descriptor, [])
else:
analysis = self.rgetattr(descriptor)
del analysis[index]
#_________________________________________________________________#
# Basket class #
#_________________________________________________________________#
class Basket:
"""
A basket where sounds and analysis can be loaded
>>> c = manager.Client()
>>> b = c.new_basket()
TODO : add comments attribute, title...
"""
def __init__(self, client):
self.sounds = []
self.analysis = Analysis() # the use of the nested object is not really good...
self.analysis_stats = []
self.analysis_stats_names = []
self.ids = []
self.analysis_names = []
self.parent_client = client
self._update_sound_client()
def __add__(self, other):
"""
Concatenate two baskets
TODO : adapt it to new changes & make sure the order is not broken
"""
sumBasket = copy.deepcopy(self)
for i in range(len(other.sounds)):
sumBasket.ids.append(other.ids[i])
sumBasket.sounds.append(other.sounds[i])
sumBasket._remove_duplicate()
return sumBasket
def __sub__(self, other):
"""
Return a basket with elements of self that are not in other
"""
subBasket = copy.deepcopy(self)
idx_to_remove = [x[0] for x in enumerate(self.ids) if x[1] in other.ids]
subBasket.remove(idx_to_remove)
return subBasket
def __len__(self):
return len(self.ids)
def _actualize(self): # used when an old basket is loaded from pickle
if not hasattr(self, 'analysis_stats'):
self.analysis_stats = []
def _update_sound_client(self):
for i, sound in enumerate(self.sounds):
if sound is not None:
sound.client = self.parent_client
def _remove_duplicate(self):
# TODO : add method to concatenate analysis in Analysis() (won't have to reload json...)
ids_old = self.ids
sounds_old = self.sounds
self.ids = []
self.sounds = []
nbSounds = len(ids_old)
for i in range(nbSounds):
if ids_old[i] not in self.ids:
self.ids.append(ids_old[i])
self.sounds.append(sounds_old[i])
self.update_analysis()
#________________________________________________________________________#
# __________________________ Users functions ____________________________#
def push(self, sound, analysis_stat=None):
"""
>>> sound = c.my_get_sound(sound_id)
>>> b.push(sound)
"""
#sound.name = strip_non_ascii(sound.name)
self.sounds.append(sound)
self.analysis_stats.append(analysis_stat)
if sound is not None:
self.ids.append(sound.id)
else:
self.ids.append(None)
def push_list_id(self, sounds_id):
Bar = ProgressBar(len(sounds_id), LENGTH_BAR, 'Loading sounds')
Bar.update(0)
for idx, id in enumerate(sounds_id):
sound = self.parent_client.my_get_sound(id)
self.push(sound)
Bar.update(idx+1)
def remove(self, index_list):
index_list = sorted(index_list, reverse=True)
for i in index_list:
del self.ids[i]
del self.sounds[i]
try:
del self.analysis_stats[i]
except IndexError:
pass
if hasattr(self, 'clas'):
del self.clas[i]
for descriptor in self.analysis_names:
self.analysis.remove(i, descriptor)
def remove_sounds_with_no_analysis(self):
list_idx_to_remove = []
for idx, analysis in enumerate(self.analysis_stats):
if analysis is None:
list_idx_to_remove.append(idx)
self.remove(list_idx_to_remove)
def update_sounds(self):
"""
Use this method to load the sounds which ids are in the basket
"""
nbSound = len(self.ids)
Bar = ProgressBar(nbSound, LENGTH_BAR, 'Loading sounds')
Bar.update(0)
for i in range(nbSound):
self.sounds.append(self.parent_client.my_get_sound(self.ids[i]))
Bar.update(i+1)
def add_analysis(self, descriptor):
"""
Use this method to add the analysis frames of a descriptor.
If the descriptor is already loaded, nothing is done; otherwise
the analysis of all the loaded sound ids are loaded.
>>> results_pager = c.my_text_search(query='wind')
>>> b.load_sounds(results_pager)
>>> b.add_analysis('lowlevel.mfcc')
"""
if descriptor in self.analysis_names:
print 'The %s analysis are already loaded' % descriptor
else:
nbSound = len(self.ids)
allFrames = []
Bar = ProgressBar(nbSound,LENGTH_BAR, 'Loading ' + descriptor + ' analysis')
Bar.update(0)
for i in range(nbSound):
allFrames.append(self.parent_client.my_get_analysis(self.ids[i], descriptor))
Bar.update(i+1)
self.analysis_names.append(descriptor)
self.analysis.rsetattr(descriptor, allFrames)
def update_analysis(self):
for nameAnalysis in self.analysis_names:
allFrames = self.analysis.rgetattr(nameAnalysis)
nbAnalysis = len(allFrames)
nbAnalysisToLoad = len(self.ids) - nbAnalysis
Bar = ProgressBar(nbAnalysisToLoad, LENGTH_BAR, 'Loading ' + nameAnalysis + ' analysis')
Bar.update(0)
for i in range(nbAnalysisToLoad):
Bar.update(i + 1)
allFrames.append(self.parent_client.my_get_analysis(self.ids[i+nbAnalysis], nameAnalysis))
def add_analysis_stats(self):
"""
Use this method to add all analysis stats to all sounds in the basket
(means and var of descriptors)
"""
#self.analysis_stats = []
nbSounds = len(self.sounds)
Bar = ProgressBar(nbSounds, LENGTH_BAR, 'Loading analysis stats')
Bar.update(0)
for i, sound in enumerate(self.sounds):
Bar.update(i + 1)
if sound is not None:
analysis = self.parent_client.my_get_analysis_stats(sound.id)
self.analysis_stats[i] = analysis
else:
self.analysis_stats[i] = None # HERE CHANGED APPEND TO I, is it ok ?
# try:
# self.analysis_stats.append(sound.get_analysis())
# except freesound.FreesoundException:
# pass
# FUNCTION FOR ADDING STATS OF ONLY ONE ANALYSIS
def add_one_analysis_stats(self, descriptor):
nbSounds = len(self.sounds)
Bar = ProgressBar(nbSounds, LENGTH_BAR, 'Loading analysis stats')
Bar.update(0)
for i, sound in enumerate(self.sounds):
Bar.update(i + 1)
if sound is not None:
analysis = self.parent_client.my_get_one_analysis_stats(sound.id, descriptor)
self.analysis_stats[i] = analysis
else:
self.analysis_stats[i] = None
def remove_analysis(self, descriptor):
if descriptor in self.analysis_names:
self.analysis.remove('all', descriptor)
self.analysis_names.remove(descriptor)
def load_sounds_(self, results_pager, begin_idx=0, debugger=None):
"""
IN PROGRESS
This function is used when the data to load in the basket is already in the pager (and not just the ids, as in the load_sounds() method)
"""
nbSound = results_pager.count
numSound = begin_idx # for iteration
results_pager_last = results_pager
Bar = ProgressBar(nbSound,LENGTH_BAR,'Loading sounds')
Bar.update(0)
# 1st iteration # maybe there is a better way to iterate through pages...
for sound in results_pager:
try:
self.push(sound, sound.analysis)
except AttributeError:
self.push(sound)
numSound = numSound+1
Bar.update(numSound+1)
# next iteration
while (numSound<nbSound):
count = 0
while 1: # care with this infinite loop...
count += 1
if count>10: # MAYBE SOME BUG HERE
print 'could not get more sounds'
break
try:
results_pager = results_pager_last.next_page()
if debugger:
debugger.append(results_pager)
break
except:
exc_info = sys.exc_info()
sleep(1)
print exc_info
for sound in results_pager:
try:
self.push(sound, sound.analysis)
except AttributeError:
self.push(sound)
numSound = numSound+1
Bar.update(numSound+1)
results_pager_last = results_pager
def extract_descriptor_stats(self, scale=False):
"""
Returns a list of the scaled and concatenated descriptor stats - mean and var - (all the ones loaded in the Basket) for all sounds in the Basket.
"""
feature_vector = []
for analysis_stats in self.analysis_stats:
feature_vector_single_sound = []
for k, v in analysis_stats.as_dict().iteritems():
if k == 'lowlevel':
for k_, v_ in v.iteritems():
try: # some lowlevel descriptors do not have 'mean' 'var' field (eg average_loudness)
# barkbands_kurtosis has 0 variance and that bring dvar and dvar2 to be None...
if isinstance(v_['mean'], list):
feature_vector_single_sound += v_['mean'] # take the mean
feature_vector_single_sound += v_['dmean']
feature_vector_single_sound += v_['dmean2']
feature_vector_single_sound += v_['var'] # var
feature_vector_single_sound += v_['dvar']
feature_vector_single_sound += v_['dvar2']
elif isinstance(v_['mean'], float):
feature_vector_single_sound.append(v_['mean']) # for non array
feature_vector_single_sound.append(v_['dmean'])
feature_vector_single_sound.append(v_['dmean2'])
feature_vector_single_sound.append(v_['var'])
if k_ != 'barkbands_kurtosis': # this descriptor has variance = 0 => produce None values for dvar and dvar2
feature_vector_single_sound.append(v_['dvar'])
feature_vector_single_sound.append(v_['dvar2'])
except: # here we suppose that v_ is already a number to be stored
if isinstance(v_, list):
feature_vector_single_sound += v_
elif isinstance(v_, float):
feature_vector_single_sound.append(v_)
elif k == 'other cat of descriptors':
# sfx, tonal, rhythm
pass
feature_vector.append(feature_vector_single_sound)
if scale:
return preprocessing.scale(feature_vector)
else:
return feature_vector
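# Usage sketch (assuming a basket b whose stats were loaded with
# b.add_analysis_stats() and cleaned with b.remove_sounds_with_no_analysis()):
# features = b.extract_descriptor_stats(scale=True)
# features[i] is then the scaled lowlevel feature vector of b.sounds[i]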
def extract_one_descriptor_stats(self, scale=False):
"""
A bit dirty. Maybe review the concept of analysis_stat and analysis objects
"""
feature_vector = []
for analysis_stats in self.analysis_stats:
feature_vector_single_sound = []
if isinstance(getattr(analysis_stats,'mean'), list):
feature_vector_single_sound += getattr(analysis_stats,'mean') # take the mean
feature_vector_single_sound += getattr(analysis_stats,'dmean')
feature_vector_single_sound += getattr(analysis_stats,'dmean2')
feature_vector_single_sound += getattr(analysis_stats,'var') # var
feature_vector_single_sound += getattr(analysis_stats,'dvar')
feature_vector_single_sound += getattr(analysis_stats,'dvar2')
elif isinstance(getattr(analysis_stats,'mean'), float):
feature_vector_single_sound.append(getattr(analysis_stats,'mean')) # for non array
feature_vector_single_sound.append(getattr(analysis_stats,'dmean'))
feature_vector_single_sound.append(getattr(analysis_stats,'dmean2'))
feature_vector_single_sound.append(getattr(analysis_stats,'var'))
# NOTE: the descriptor name is not known here, so the barkbands_kurtosis
# special case of extract_descriptor_stats cannot be checked (the original
# test used an undefined k_ and raised a NameError); dvar and dvar2 are
# therefore always appended.
feature_vector_single_sound.append(getattr(analysis_stats,'dvar'))
feature_vector_single_sound.append(getattr(analysis_stats,'dvar2'))
feature_vector.append(feature_vector_single_sound)
if scale:
return preprocessing.scale(feature_vector)
else:
return feature_vector
def load_sounds(self, results_pager, begin_idx=0, debugger=None):
"""
Use this method to load all the sounds from a results pager into the basket.
This method does not take the objects from the pager but uses my_get_sound(), which returns a sound with all the fields.
>>> results_pager = c.my_text_search(query='wind')
>>> b.load_sounds(results_pager)
"""
nbSound = results_pager.count
numSound = begin_idx # for iteration
results_pager_last = results_pager
Bar = ProgressBar(nbSound,LENGTH_BAR,'Loading sounds')
Bar.update(0)
# 1st iteration # maybe there is a better way to iterate through pages...
for i in results_pager:
self.push(self.parent_client.my_get_sound(i.id),analysis_stat=None)
numSound = numSound+1
Bar.update(numSound+1)
# next iteration
while (numSound<nbSound):
count = 0
while 1: # care with this infinite loop...
count += 1
if count>10: # MAYBE SOME BUG HERE
print 'could not get more sounds'
break
try:
results_pager = results_pager_last.next_page()
if debugger:
debugger.append(results_pager)
break
except:
exc_info = sys.exc_info()
sleep(1)
print exc_info
for i in results_pager:
self.push(self.parent_client.my_get_sound(i.id),analysis_stat=None)
numSound = numSound+1
Bar.update(numSound+1)
results_pager_last = results_pager
def retrieve_previews(self, new_folder = None):
folder = './previews/'
if new_folder is not None:
folder += new_folder
if not os.path.exists(folder):
os.makedirs(folder)
nbSounds = len(self.sounds)
Bar = ProgressBar(nbSounds, LENGTH_BAR, 'Downloading previews')
Bar.update(0)
for i in range(nbSounds):
Bar.update(i+1)
self.sounds[i].retrieve_preview(folder)
def save(self, name):
"""
Use this method to save a basket
Only ids and analysis name(s) are saved in a list [ [id1,...idn], [analysis, ...] ]
TODO : change it and save it as a dict (more flexible and stable regarding changes)
"""
settings = SettingsSingleton()
if name and not (name in settings.local_baskets):
basket = [self.ids]
basket.append(self.analysis_names)
nameFile = 'baskets/' + name + '.json'
with open(nameFile, 'w') as outfile:
json.dump(basket, outfile)
settings.local_baskets.append(name)
else:
overwrite = raw_input(name + ' basket already exists. Do you want to replace it ? (y/n)')
if overwrite == 'y':
settings.local_baskets.remove(name)
self.save(name)
else:
print 'Basket was not saved'
def load(self,name):
"""
Use this method to load a basket from json files
"""
self.sounds = []
settings = SettingsSingleton()
if name and name in settings.local_baskets:
nameFile = 'baskets/' + name + '.json'
with open(nameFile) as infile:
basket = simplejson.load(infile)
ids = basket[0]
nbSounds = len(ids)
for i in range(nbSounds):
self.ids.append(ids[i])
self.update_sounds()
self.analysis_names = basket[1]
self.update_analysis()
else:
print '%s basket does not exist' % name
def save_pickle(self, name):
settings = SettingsSingleton()
if name and not (name in settings.local_baskets_pickle):
self.parent_client.save_pickle(self, name, 'baskets_pickle/')
settings.local_baskets_pickle.append(name)
else:
overwrite = raw_input(name + ' basket already exists. Do you want to replace it ? (y/n)')
if overwrite == 'y':
self.parent_client.save_pickle(self, name, 'baskets_pickle/')
else:
print 'Basket was not saved'
#________________________________________________________________________#
# __________________________ Language tools _____________________________#
# TODO: CREATE A CLASS FOR THESE TOOLS, AND SEPARATE FROM BASKET
def tags_lower(self):
for idx, s in enumerate(self.sounds):
self.sounds[idx].tags = [t.lower() for t in s.tags]
def text_preprocessing(self):
stemmer = PorterStemmer()
for idx, s in enumerate(self.sounds):
self.sounds[idx].tags = [stemmer.stem(t.lower()) for t in s.tags]
def return_tags_occurrences_dict(self):
#tags = self.tags_extract_all()
tags = list(set(flat_list([sound.tags for sound in self.sounds])))
#default_value = [0, []]
tags_occurrences = {key:[0, []] for key in tags}
Bar = ProgressBar(len(self.sounds), LENGTH_BAR, 'Counting')
Bar.update(0)
for idx, sound in enumerate(self.sounds):
Bar.update(idx+1)
for tag in sound.tags:
tags_occurrences[tag][0] += 1
tags_occurrences[tag][1].append(sound.id)
return tags_occurrences
def return_tags_occurrences(self):
#tags = self.tags_extract_all()
tags = list(set(flat_list([sound.tags for sound in self.sounds])))
#default_value = [0, []]
tags_occurrences = {key:[0, []] for key in tags}
Bar = ProgressBar(len(self.sounds), LENGTH_BAR, 'Counting')
Bar.update(0)
for idx, sound in enumerate(self.sounds):
Bar.update(idx+1)
for tag in sound.tags:
tags_occurrences[tag][0] += 1
tags_occurrences[tag][1].append(sound.id)
# putting it in the old format for compatibility with other methods:
#list of tuples (tag, nb_occurrences, [sound ids])
all_tags_occurrences = []
for t in tags_occurrences.keys():
all_tags_occurrences.append((t, tags_occurrences[t][0], tags_occurrences[t][1]))
all_tags_occurrences = sorted(all_tags_occurrences, key=lambda oc: oc[1])
all_tags_occurrences.reverse()
return all_tags_occurrences
def tags_occurrences(self):
"""
Returns a list of tuples (tag, nb_occurrences, [sound ids])
The list is sorted by number of occurrences of tags
"""
all_tags_occurrences = []
tags = self.tags_extract_all()
Bar = ProgressBar(len(tags), LENGTH_BAR, 'Thinking ...')
Bar.update(0)
for idx, tag in enumerate(tags):
Bar.update(idx+1)
tag_occurrences = self.tag_occurrences(tag)
all_tags_occurrences.append((tag, tag_occurrences[0], tag_occurrences[1]))
all_tags_occurrences = sorted(all_tags_occurrences, key=lambda oc: oc[1])
all_tags_occurrences.reverse()
return all_tags_occurrences
def terms_occurrences(self, terms_sounds):
"""
Input: list of list of terms for each sound
Returns a list of tuples (terms, nb_occurrences, [sound ids])
The list is sorted by number of occurrences of tags
Typically: t = basket.preprocessing_tag_description()
t_o = basket.terms_occurrences(t)
nlp(basket, t_o)
WARNING: Nlp checks the tags only!
"""
all_terms_occurrences = []
terms = list(set([item for sublist in terms_sounds for item in sublist]))
Bar = ProgressBar(len(terms), LENGTH_BAR, 'Thinking ...')
Bar.update(0)
for idx, term in enumerate(terms):
Bar.update(idx+1)
term_occurrences = self.term_occurrences(terms_sounds, term)
all_terms_occurrences.append((term, term_occurrences[0], term_occurrences[1]))
all_terms_occurrences = sorted(all_terms_occurrences, key=lambda oc: oc[1])
all_terms_occurrences.reverse()
return all_terms_occurrences
def term_occurrences(self, l, term):
ids = []
for i, sound_terms in enumerate(l):
if term in sound_terms:
ids.append(i)
number = len(ids)
return number, ids
def tag_occurrences(self, tag):
ids = []
for i, sound in enumerate(self.sounds):
if sound is not None:
if tag in sound.tags:
ids.append(i)
number = len(ids)
return number, ids
def description_occurrences(self, stri):
ids = []
for i in range(len(self.sounds)):
if stri in self.sounds[i].description:
ids.append(i)
number = len(ids)
return number, ids
def tags_extract_all(self):
tags = []
Bar = ProgressBar(len(self.sounds), LENGTH_BAR, 'Extracting tags')
Bar.update(0)
for idx, sound in enumerate(self.sounds):
Bar.update(idx + 1)
if sound is not None:
for tag in sound.tags:
if tag not in tags:
tags.append(tag)
return tags
def create_sound_tag_dict(self):
"""
Returns a dictionary with sound id in keys and tags in values
"""
sound_tag_dict = {}
for sound in self.sounds:
sound_tag_dict[sound.id] = sound.tags
return sound_tag_dict
def get_preprocessed_descriptions_word2vec(self):
"""
Returns a list of sentences from sound descriptions in the basket.
Preprocessing is done (remove special characters, Porter Stemming, lower case)
"""
stemmer = PorterStemmer()
delimiters = '.', '?', '!', ':'
def split(delimiters, string, maxsplit=0):
regexPattern = '|'.join(map(re.escape, delimiters))
return re.split(regexPattern, string, maxsplit)
all_descriptions = [a.description.lower() for a in self.sounds]
sentences = []
for description in all_descriptions:
string = description.replace('\r\n', ' ')
string = string.replace('(', ' ')
string = string.replace(')', ' ')
string = string.replace('*', '')
string = string.replace('-', '')
string = string.replace('#', '')
string = string.replace(',', '')
string = string.replace('/', '')
string = re.sub('<a href(.)+>', ' ', string)
string = split(delimiters, string)
for string_sentence in string:
if string_sentence is not u'':
terms_to_append = [stemmer.stem(a) for a in string_sentence.split()]
sentences.append(terms_to_append)
return sentences
def word2vec(self, sentences, size=50):
from gensim.models import Word2Vec
return Word2Vec(sentences, size=size, window=500, min_count=10, workers=8)
def doc2vec(self, documents, size=50):
"""
This method seems to give worse result on returning most similar terms for violin, bright
"""
from gensim.models import Doc2Vec
return Doc2Vec(documents, size=size, window=500, min_count=10, workers=8)
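# Usage sketch (assuming gensim is installed and b is a loaded Basket):
# sentences = b.get_preprocessed_descriptions_word2vec()
# model = b.word2vec(sentences, size=50)
# model.most_similar('violin')  # standard gensim Word2Vec query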
def preprocessing_tag_description(self):
"""
Preprocessing tags and descriptions
Returns an array containing arrays of terms for each sound
Steps for descriptions : Lower case, remove urls, Tokenization, remove stop words, Stemming (Porter)
tags : Lower case, Stemming
"""
stemmer = PorterStemmer()
en_stop = get_stop_words('en') + ['freesound', 'org']
all_descriptions = [[stemmer.stem(word) for word in CountVectorizer().build_tokenizer()(re.sub('<a href(.)+/a>', ' ', sound.description.lower())) if word not in en_stop] for sound in self.sounds]
all_tags = [[stemmer.stem(tag.lower()) for tag in sound.tags] for sound in self.sounds]
return [tag + description for tag, description in zip(all_tags, all_descriptions)]
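# Example (sketch): a sound tagged ['Violins', 'Bright'] and described as
# "A bright violin note." would roughly become
# ['violin', 'bright', 'bright', 'violin', 'note']
# after lower-casing, stop-word removal and Porter stemming.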
def preprocessing_tag(self):
stemmer = PorterStemmer()
return [[stemmer.stem(tag.lower()) for tag in sound.tags] for sound in self.sounds]
def preprocessing_doc2vec(self):
from gensim.models.doc2vec import TaggedDocument
stemmer = PorterStemmer()
en_stop = get_stop_words('en') + ['freesound', 'org']
all_descriptions = [[stemmer.stem(word) for word in CountVectorizer().build_tokenizer()(re.sub('<a href(.)+/a>', ' ', sound.description.lower())) if word not in en_stop] for sound in self.sounds]
all_tags = [[stemmer.stem(tag.lower()) for tag in sound.tags] for sound in self.sounds]
return [TaggedDocument(words, tags) for words, tags in zip(all_descriptions, all_tags)]
class TfidfEmbeddingVectorizer(object):
def __init__(self, w2v_model):
self.word2vec = dict(zip(w2v_model.index2word, w2v_model.syn0))
self.word2weight = None
self.dim = len(w2v_model.syn0[0])
def fit(self, X, y):
tfidf = TfidfVectorizer(analyzer=lambda x: x)
tfidf.fit(X)
# if a word was never seen - it must be at least as infrequent
# as any of the known words - so the default idf is the max of
# known idf's
max_idf = max(tfidf.idf_)
self.word2weight = defaultdict(
lambda: max_idf,
[(w, tfidf.idf_[i]) for w, i in tfidf.vocabulary_.items()])
return self
def transform(self, X):
return np.array([
np.mean([self.word2vec[w] * self.word2weight[w]
for w in words if w in self.word2vec] or
[np.zeros(self.dim)], axis=0)
for words in X
])
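# Usage sketch (hypothetical, assuming a trained gensim Word2Vec model w2v and
# terms = basket.preprocessing_tag_description()):
# vectorizer = TfidfEmbeddingVectorizer(w2v)
# sound_vectors = vectorizer.fit(terms, None).transform(terms)
# each row is the idf-weighted average of the word vectors of one sound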
#_________________________________________________________________#
# NLP class #
#_________________________________________________________________#
class Nlp:
"""
Methods for creating sparse occurrences matrix, similarity, graphs, etc...
"""
def __init__(self, basket, tags_occurrences = None):
if tags_occurrences:
self.tags_occurrences = tags_occurrences
else:
self.tags_occurrences = basket.tags_occurrences()
self.set_tags = [tag[0] for tag in self.tags_occurrences]
self.freesound_sound_id = [sound.id for sound in basket.sounds]
self.sound_tags = [sound.tags for sound in basket.sounds]
self.inverted_tag_index = self._inverted_tag_index(self.set_tags)
self.nb_sound = len(self.freesound_sound_id)
self.nb_tag = len(self.set_tags)
def _inverted_tag_index(self, set_tags):
inverted_tag_index = dict()
for idx, tag in enumerate(set_tags):
inverted_tag_index[tag] = idx
return inverted_tag_index
def create_sound_tag_matrix(self):
"""
Returns scipy sparse matrix sound id / tag (2d array) - lil_matrix
Sounds are ordered like in the Basket (=self object)
Tags are ordered like in the tags_occurrences list
"""
Bar = ProgressBar(self.nb_sound, LENGTH_BAR, 'Creating matrix...')
Bar.update(0)
self.sound_tag_matrix = scipy.sparse.lil_matrix((self.nb_sound,self.nb_tag), dtype=int)
for idx_sound, tags in enumerate(self.sound_tags):
Bar.update(idx_sound+1)
for tag in tags:
try:
self.sound_tag_matrix[idx_sound, self.inverted_tag_index[tag]] = 1
except KeyError:
pass
def return_tag_cooccurrences_matrix(self):
"""
Returns the tag-to-tag cooccurrence matrix by computing A_transpose * A, where A is the sound-to-tag occurrence matrix
"""
try:
return self.sound_tag_matrix.transpose() * self.sound_tag_matrix
except:
print 'Create first the sound tag matrix using create_sound_tag_matrix method'
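# Note: with A the binary sound-to-tag matrix, entry (i, j) of A_transpose * A
# counts how many sounds carry both tag i and tag j; the diagonal holds the
# per-tag occurrence counts.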
@staticmethod
def return_similarity_matrix_tags(tag_something_matrix):
"""
Returns a tag similarity matrix computed with cosine distance from the given matrix
MemoryError problem
"""
tag_similarity_matrix = cosine_similarity(tag_something_matrix)
return tag_similarity_matrix
def return_my_similarity_matrix_tags(self, tag_something_matrix):
"""
TOO SLOW !!!
Returns a tag similarity matrix computed with cosine distance from the given matrix
"""
size_matrix = tag_something_matrix.shape[0]
tag_similarity_matrix = np.zeros(shape=(size_matrix,size_matrix), dtype='float32')
Bar = ProgressBar(size_matrix*size_matrix/2, LENGTH_BAR, 'Calculating similarities...')
Bar.update(0)
for i0 in range(size_matrix):
row0 = tag_something_matrix.getrow(i0).toarray()
for i1 in range(i0):
Bar.update(i0*size_matrix + i1 + 1)
row1 = tag_something_matrix.getrow(i1).toarray()
tag_similarity_matrix[i0][i1] = 1 - spatial.distance.cosine(row0, row1)
tag_similarity_matrix[i1][i0] = tag_similarity_matrix[i0][i1]
return tag_similarity_matrix
"""
PRINT SOME SIMILARITIES BTW TAGS:
for i in range(200):
print str(i).ljust(10) + set_tags[i].ljust(30) + str(sim[67,i])
"""
def create_tag_sound_matrix(self, tags_occurrences):
"""
DO NOT USE THIS - TODO: implement it like sound_tag_matrix. Or just call create_sound_tag_matrix and transpose it...
Returns a matrix tag / sound id
Ordered like in tags_occurrences and in the Basket (=self)
"""
tag_sound_matrix = []
for tag in tags_occurrences:
sound_vect = [0] * self.nb_sound # nb_sound is already an int
for sound_id_in_basket in tag[2]:
sound_vect[sound_id_in_basket] = 1
tag_sound_matrix.append(sound_vect)
return tag_sound_matrix
@staticmethod
def nearest_neighbors(similarity_matrix, idx, k):
distances = []
for x in range(len(similarity_matrix)):
distances.append((x,similarity_matrix[idx][x]))
distances.sort(key=operator.itemgetter(1), reverse=True)
return [d[0] for d in distances[0:k]]
def knn(self, similarity_matrix, idx, k, freesound_ids):
distances = zip(freesound_ids, similarity_matrix[idx]) # zip together ids and similarity
distances.sort(key=operator.itemgetter(1), reverse=True)
return [d for d in distances[0:k]]
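# Usage sketch: given a row-wise similarity matrix sim aligned with
# self.freesound_sound_id,
# nlp.knn(sim, idx, 10, nlp.freesound_sound_id)
# returns the 10 most similar (freesound_id, similarity) pairs for sound idx
# (including the sound itself unless the diagonal has been zeroed).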
def compute_knn_similarities(self, feature_matrix):
"""
Created to compute, for each sound in the Freesound DB, its 10000 most similar sounds by pairwise cosine similarity on text features.
Takes too much memory, so results are stored in the db as they are computed...
"""
sql = SQLManager('freesound_similarities')
#dict_nn = {}
Bar = ProgressBar(feature_matrix.shape[0], LENGTH_BAR, 'Computing similarities')
Bar.update(0)
bar_k = 0
for k in range(int(np.ceil(feature_matrix.shape[0]/1000.0))):
pairwise = cosine_similarity(feature_matrix[1000*k:1000*(k+1)], feature_matrix)
for idx in range(pairwise.shape[0]):
#dict_nn[self.freesound_sound_id[idx+k*1000]] = self.knn(pairwise,idx,10000)
sql.cur.execute('insert into nearest2(freesound_id, data) values(%s, %s)', (self.freesound_sound_id[idx+k*1000], json.dumps(self.knn(pairwise,idx,10000, self.freesound_sound_id))))
bar_k +=1
Bar.update(bar_k)
sql.conn.commit()
#return dict_nn
def create_graph_from_nearest(self):
k_nn = 100
sql = SQLManager('freesound_similarities')
g = nx.Graph()
fs_ids = [r[0] for r in sql.command('select freesound_id from nearest2 order by freesound_id asc')]
nb_sound = len(fs_ids)
Bar = ProgressBar(nb_sound, LENGTH_BAR, 'Creating Graph')
Bar.update(0)
g.add_nodes_from(fs_ids)
for i, fs_id in enumerate(fs_ids):
Bar.update(i+1)
g.add_edges_from([(fs_id, r[0]) for r in sql.command('select data from nearest2 where freesound_id = %s', (str(fs_id),))[0][0]][:k_nn])
return g
def create_graph_text_file(self):
# problem with idx! gen_louvain needs idx in range
k_nn = 100
sql = SQLManager('freesound_similarities')
fs_ids = [r[0] for r in sql.command('select freesound_id from nearest2 order by freesound_id asc')]
nb_sound = len(fs_ids)
f = open('graph.txt', 'w')
Bar = ProgressBar(nb_sound, LENGTH_BAR, 'Generating text file')
Bar.update(0)
for i, fs_id in enumerate(fs_ids):
Bar.update(i+1)
list_edges = ''.join([str(str(fs_id) + ' ' + str(r[0]) + '\n') for r in sql.command('select data from nearest2 where freesound_id = %s', (str(fs_id),))[0][0]][:k_nn])
f.write(list_edges)
f.close()
def create_weighted_graph_text_file(self):
threshold = 0.0
k_nn = 5000
sql = SQLManager('freesound_similarities')
fs_ids = [r[0] for r in sql.command('select freesound_id from nearest2 order by freesound_id asc')]
fs_id_to_id = {}
for idx,i in enumerate(fs_ids):
fs_id_to_id[i]=idx
nb_sound = len(fs_ids)
f = open('graph.txt', 'w')
Bar = ProgressBar(nb_sound, LENGTH_BAR, 'Generating text file')
Bar.update(0)
for i, fs_id in enumerate(fs_ids):
Bar.update(i+1)
list_edges = ''.join([str(str(fs_id_to_id[fs_id]) + ' ' + str(fs_id_to_id[r[0]]) + ' ' + str(r[1]) + '\n') for r in sql.command('select data from nearest2 where freesound_id = %s', (str(fs_id),))[0][0] if r[1]>threshold][:k_nn])
f.write(list_edges)
f.close()
def return_lda_model(self, sound_tag_matrix, n_topics):
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online',
learning_offset=50.,
random_state=0,
n_jobs=1)
lda.fit(sound_tag_matrix)
return lda
#return lda.transform(sound_tag_matrix)
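# Usage sketch (assuming create_sound_tag_matrix() was called first):
# lda = nlp.return_lda_model(nlp.sound_tag_matrix, n_topics=20)
# topic_distributions = lda.transform(nlp.sound_tag_matrix)
# each row is then the topic distribution of one sound over the 20 tag topics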
# __________________ GRAPH __________________ #
# def create_knn_graph_igraph(self, similarity_matrix, k):
# """ Returns a knn graph from a similarity matrix - igraph module """
# np.fill_diagonal(similarity_matrix, 0) # for removing the 1 from diagonal
# g = ig.Graph(directed=True)
# g.add_vertices(len(similarity_matrix))
# g.vs["b_id"] = range(len(similarity_matrix))
# for idx in range(len(similarity_matrix)):
# g.add_edges([(idx, i) for i in self.nearest_neighbors(similarity_matrix, idx, k)])
# print idx, self.nearest_neighbors(similarity_matrix, idx, k)
# return g
def create_knn_graph(self, similarity_matrix, k):
""" Returns a knn graph from a similarity matrix - NetworkX module """
np.fill_diagonal(similarity_matrix, 0) # for removing the 1 from diagonal
g = nx.Graph()
g.add_nodes_from(range(len(similarity_matrix)))
for idx in range(len(similarity_matrix)):
g.add_edges_from([(idx, i) for i in self.nearest_neighbors(similarity_matrix, idx, k)])
print idx, self.nearest_neighbors(similarity_matrix, idx, k)
return g
# OLD
def create_tag_similarity_graph(self, tag_similarity_matrix, tag_names, threshold):
"""
TODO : ADAPT IT FOR NetworkX package
Returns the tag similarity graph (unweighted) from the tag similarity matrix
"""
g = Graph()
g.add_vertices(len(tag_names))
g.vs["name"] = tag_names
g.vs["label"] = g.vs["name"]
for tag_i in range(len(tag_similarity_matrix)):
for tag_j in range(len(tag_similarity_matrix)):
if tag_i < tag_j:
if tag_similarity_matrix[tag_i][tag_j] > threshold:
g.add_edge(tag_i, tag_j)
return g
def get_centrality_from_graph(self, graph):
return graph.evcent()
# TODO : ORDER TAG BY CENTRALITY
# name_cent = [ (t[i], cent[i]) for i in range(len(t))]
# name_cent.sort(key=lambda x: x[1], reverse=True)
# TODO : CREATE FUNCTION FOR CREATION OF TAXONOMY
# g2 = Graph.Tree(2,1)
# g2.add_vertices(58978)
# g2.vs["name"] = names
# g2.vs["label"] = g2.vs["name"]
# list_tags_in_tax = [0]
#
# for idx in range(58978):
# idx = idx + 1
# maxCandidateVal = 0
# for tag_1 in list_tags_in_tax:
# if not tag_1 == idx:
# if s_m_t[idx][tag_1] > maxCandidateVal:
# maxCandidateVal = s_m_t[idx][tag_1]
# maxCandidate = tag_1
# if maxCandidateVal > 0.5:
# g2.add_edge(tag_1+1,idx+1)
# print 'added edge'
# print maxCandidateVal
# else:
# g2.add_edge(0,idx+1)
# print 'added edge to root'
# list_tags_in_tax.append(idx)
# TODO : PUT THIS GRAPH THINGS IN AN OTHER CLASS
# _________________________________________________________________#
# SQL class #
# _________________________________________________________________#
class SQLManager:
"""
This class uses psycopg2 to access psql database
"""
def __init__(self, db_name ='freesound'):
self.db_name = db_name # the db had been previously imported with psql
self.conn_text = 'dbname=' + self.db_name + ' user=xavier' # the user has been created and has access to the db
self.connect()
def connect(self):
self.conn = psycopg2.connect(self.conn_text)
self.cur = self.conn.cursor()
def disconnect(self):
self.cur.close()
self.conn.close()
def command(self, command, option = None):
self.cur.execute(command,option)
return self.cur.fetchall()
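# Illustrative usage sketch (the table name below is an assumption, not part of the code):
# >>> sql = SQLManager('freesound')
# >>> rows = sql.command('select count(*) from sounds')
# >>> sql.disconnect()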
# _________________________________________________________________#
# Graylog API class #
# _________________________________________________________________#
class GraylogManager:
def __init__(self):
self.limit_item = 5000
self.auth = self._get_auth()
self.url = 'http://logserver.mtg.upf.edu/graylog/api/'
self.url_search_query = '/search/universal/absolute?query=query&' \
'&limit=' + str(int(self.limit_item)) + '&sort=timestamp%3Aasc&fields=message'
self.sql = SQLManager('freesound_queries')
self.date_last_query_in_db = self._get_date_last_query_in_db()
def restart(self):
self.sql.disconnect()
self.sql = SQLManager('freesound_queries')
def _get_auth(self):
import graylog_auth # create a graylog_auth.py file that defines auth = (user, password)
return graylog_auth.auth
def _get_date_last_query_in_db(self):
last_date = self.sql.command('select timestamp from queries3 order by id desc limit 1')
try:
last_date = last_date[0][0].isoformat()
last_date = last_date[:-3]
last_date = last_date[:-1] + str(int(last_date[-1])+1) + 'Z' # one millisecond is added so the last stored query is not fetched again
# last_date = last_date[0][0].isoformat()
# last_date = last_date[:-1] + str(int(last_date[-1])+1) + '.000Z'
except IndexError:
last_date = '2016-05-11T11:20:24.000Z'
return last_date
def _get_date_first_query_in_db(self):
last_date = self.sql.command('select timestamp from queries3 order by timestamp asc limit 1')
try:
last_date = last_date[0][0].isoformat()
last_date = last_date[:-3]
last_date = last_date[:-1] + str(int(last_date[-1])) + 'Z'
except IndexError:
last_date = None
return last_date
def _get_last_index(self):
try:
last_idx = self.sql.command('select id from queries3 order by id desc limit 1')
last_idx = last_idx[0][0]
except IndexError:
last_idx = -1
return last_idx
def _get_time(self):
timetup = gmtime()
return strftime('%Y-%m-%dT%H:%M:%S.000Z', timetup)
@staticmethod
def _request(u, auth):
headers = {'Content-type': 'application/json', 'Accept': 'application/json'}
counter = 0
while 1: # retry the request up to 10 times with a 1-second delay between attempts
counter += 1
if counter > 10:
r = None
break
try:
r = requests.get(u, auth=auth, headers = headers)
break
except Exception as e:
print e
sleep(1)
return r
def get_users_search_queries(self, from_date, to_date, offset=0, tot_results=None):
u = self.url + self.url_search_query + '&offset=' + str(offset) + '&'\
+ 'from=' + str(from_date) + '&to=' \
+ str(to_date)
r = self._request(u, self.auth)
#print u
# Reshape the response: the dict returned by the Graylog API is deeply
# nested, so only the timestamp and message fields are extracted below.
if r is not None:
try:
r = r.json()
except ValueError as e:
print 'No JSON object could be decoded'
return [], 0
try:
total_results = r['total_results']
except KeyError:
total_results = tot_results
#print r
r = r['messages']
nb = len(r)
list_queries = [(r[i]['message']['timestamp'],
r[i]['message']['message']) for i in range(nb)]
else:
list_queries = []
total_results = 0
return list_queries, total_results
def get_all_search_queries(self):
from_date = '2014-01-23T15:34:49.000Z'
to_date = self._get_time()
list_queries, total_results = self.get_users_search_queries(from_date, to_date)
all_queries = list_queries
nb_pages = int(ceil(total_results / float(self.limit_item)))
Bar = ProgressBar(nb_pages, LENGTH_BAR, 'Requests on Graylog')
Bar.update(0)
for i in range(nb_pages-1):
Bar.update(i+1)
list_queries, total_results = self.get_users_search_queries(from_date, to_date, (i+1)*self.limit_item)
all_queries = all_queries + list_queries
return all_queries
def get_new_users_search_queries(self, offset=0):
from_date = self._get_date_last_query_in_db()
to_date = self._get_time()
print from_date, to_date
list_queries, total_results = self.get_users_search_queries(from_date, to_date, offset)
return list_queries, total_results
def get_all_new_search_queries(self):
list_queries, total_results = self.get_new_users_search_queries()
all_queries = list_queries
nb_pages = int(ceil(total_results / float(self.limit_item)))
Bar = ProgressBar(nb_pages, LENGTH_BAR, 'Requests on Graylog')
Bar.update(1)
for i in range(nb_pages-1):
list_queries, total_results = self.get_new_users_search_queries((i+1)*self.limit_item)
all_queries = all_queries + list_queries
Bar.update(i + 2)
return all_queries
@staticmethod
def organize_all_queries(all_queries, progressbar=None):
# TODO: use regular expressions here to simplify the parsing
if progressbar:
Bar = ProgressBar(len(all_queries), LENGTH_BAR, 'Organizing queries')
Bar.update(0)
all_queries_organized = []
for i, item in enumerate(all_queries):
search = item[1]
if len(search) > 7: # str too short means that the log is not from a search query
if search[8] == '!': # when it is a query from the API, there is a '!' in position 8
search_split = search.split(' #!# ')
if search_split[2] == '': # there are two cases : One with options and one without
search = json.loads(search_split[1])
else:
search = json.loads(search_split[1])
search.update(json.loads(search_split[2])) # merge the options into the query dict
all_queries_organized.append((item[0], search, 'api'))
elif search[:6] == 'Search': # a query from web send a log with 'Search' (upper S)
search_split = json.loads(search.split('Search (')[1][:-1])
all_queries_organized.append((item[0], search_split, 'web'))
if progressbar:
Bar.update(i+1)
return all_queries_organized
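# The organized entries are (timestamp, query_dict, source) tuples, e.g.
# (illustrative values only):
# ('2016-05-11T11:20:24.000Z', {'page': '1', ...}, 'web')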
def fill_freesound_queries_db(self, all_queries, progressbar=None):
# the db has been previously built with a 'queries' table : (id int, timestamp timestamp, data jsonb, api char(50))
if progressbar:
Bar = ProgressBar(len(all_queries), LENGTH_BAR, 'Filling psql db')
Bar.update(0)
last_idx = self._get_last_index()
cur = self.sql.cur
for i, item in enumerate(all_queries):
if item[1] is not None:
t = item[0].replace('T', ' ')
t = t.replace('Z', '')
idx = i + last_idx + 1
try:
cur.execute('insert into queries3(timestamp, data, api) values(%s, %s, %s)',
(t, json.dumps(item[1]), item[2]))
except Exception as e:
print e
self.restart()
cur = self.sql.cur
print ' One query is dropped %s' % item[1]
idx = idx - 1
if progressbar:
Bar.update(i+1)
#self.sql.conn.commit()
def update_freesound_queries_db(self):
self.date_last_query_in_db = self._get_date_last_query_in_db()
all_queries = self.get_all_new_search_queries()
all_queries = self.organize_all_queries(all_queries)
self.fill_freesound_queries_db(all_queries, True)
def update_freesound_queries_db_page_by_page(self): # for big amount of data
from_date = self._get_date_last_query_in_db()
to_date = self._get_time()
list_queries, total_results = self.get_users_search_queries(from_date, to_date)
nb_pages = int(ceil(total_results / float(self.limit_item)))
Bar = ProgressBar(nb_pages, LENGTH_BAR, 'Updating the DB')
all_queries = self.organize_all_queries(list_queries)
self.fill_freesound_queries_db(all_queries)
self.sql.conn.commit()
Bar.update(1)
for i in range(nb_pages - 1):
list_queries, total_results = self.get_users_search_queries(from_date, to_date, (i + 1) * self.limit_item, tot_results = total_results)
all_queries = self.organize_all_queries(list_queries)
self.fill_freesound_queries_db(all_queries)
self.sql.conn.commit()
Bar.update(i + 2)
def _grayDate_to_psqlDate(self, date):
new_date = date.replace('T', ' ').replace('Z', '')
return new_date
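# Example (illustrative): '2016-05-11T11:20:24.000Z' -> '2016-05-11 11:20:24.000'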
def _psqlDate_to_grayDate(self, date):
new_date = date
new_date = new_date[:-3]
new_date = new_date[:-1] + new_date[-1] + 'Z'
return new_date
def query_number_by_day(self, start_date = '2016-05-01', end_date = None):
"""
Returns the number of queries per day
WARNING: THE PAGE CONDITION MAY FAIL FOR OLD LOGS
"""
if end_date is None:
end_date = self._get_time()
dates = np.array(pd.date_range(start = start_date, end = end_date , freq = 'D'))
Bar = ProgressBar(len(dates)-1, LENGTH_BAR, 'Requesting DB')
Bar.update(0)
count_per_days = []
for idx, d in enumerate(dates[:-1]):
Bar.update(idx+1)
count_per_days.append(self.sql.command("select count(*) from (select data from queries3 where timestamp > %s and timestamp < %s) as foo where data->>'page'='1'", (str(d), str(dates[idx+1]))))
return [int(ss[0][0]) for ss in count_per_days]
def query_profile_per_week(self):
first_date = self._grayDate_to_psqlDate(self._get_date_first_query_in_db())
last_date = self._grayDate_to_psqlDate(self._get_date_last_query_in_db())
first_year = first_date[:4] + '-W' + '1'
last_year = last_date[:4] + '-W' + '52'
diff_years = int(last_year[:4]) - int(first_year[:4])
list_weeks = []
for i in range(diff_years+1):
for j in range(52):
if i == diff_years and j > int(last_year[6:]):
break
list_weeks.append(str(int(first_year[:4]) + i) + '-W' + str(j+1))
nb_query_per_week = []
for i in range(len(list_weeks)-1):
count = self.sql.command('select count(*) from queries3 where timestamp > %s and timestamp < %s',
(datetime.datetime.strptime(list_weeks[i] + '-0', "%Y-W%W-%w"),
datetime.datetime.strptime(list_weeks[i+1] + '-0', "%Y-W%W-%w")))
nb_query_per_week.append(int(count[0][0]))
return nb_query_per_week
# TODO : create a class for utilities
#
#_________________________________________________________________#
# UTILS #
#_________________________________________________________________#
class DictObject:
def __init__(self, json_dict=None):
if not json_dict:
with open('analysis_template.json') as infile:
json_dict = simplejson.load(infile)
self.json_dict = json_dict
def replace_dashes(d):
for k, v in d.items():
if "-" in k:
d[k.replace("-", "_")] = d[k]
del d[k]
if isinstance(v, dict): replace_dashes(v)
replace_dashes(json_dict)
self.__dict__.update(json_dict)
for k, v in json_dict.items():
if isinstance(v, dict):
self.__dict__[k] = DictObject(v)
def flat_list(l):
""" Convert a nested list to a flat list """
try:
return [item for sublist in l for item in sublist]
except:
return l
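# Example (illustrative): flat_list([[1, 2], [3]]) returns [1, 2, 3]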
class ProgressBar:
"""
Progress bar
"""
def __init__ (self, valmax, maxbar, title):
if valmax == 0: valmax = 1
if maxbar > 200: maxbar = 200
self.valmax = valmax
self.maxbar = maxbar
self.title = title
print ''
def update(self, val):
import sys
# format
if val > self.valmax: val = self.valmax
# process
perc = round((float(val) / float(self.valmax)) * 100)
scale = 100.0 / float(self.maxbar)
bar = int(perc / scale)
# render
out = '\r %20s [%s%s] %3d / %3d' % (self.title, '=' * bar, ' ' * (self.maxbar - bar), val, self.valmax)
sys.stdout.write(out)
sys.stdout.flush()
# Needed to remove non asci characters in names
def strip_non_ascii(string):
''' Returns the string without non-ASCII characters'''
stripped = (c for c in string if 0 < ord(c) < 127)
return ''.join(stripped)
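# Example (illustrative): strip_non_ascii('café sound') returns 'caf sound'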
freesound_rocks_ascii_art = \
" ______ _ _____ _ \n \
| ____| | | | __ \ | | \n \
| |__ _ __ ___ ___ ___ ___ _ _ _ __ __| | | |__) |___ ___| | _____ \n \
| __| '__/ _ \/ _ \/ __|/ _ \| | | | '_ \ / _` | | _ // _ \ / __| |/ / __| \n \
| | | | | __/ __/\__ \ (_) | |_| | | | | (_| | | | \ \ (_) | (__| <\__ \ \n \
|_| |_| \___|\___||___/\___/ \__,_|_| |_|\__,_| |_| \_\___/ \___|_|\_\___/ \n"
| mit |
eramirem/astroML | book_figures/chapter9/fig_svm_diagram.py | 3 | 2321 | """
SVM Diagram
-----------
Figure 9.9
Illustration of SVM. The region between the dashed lines is the margin, and
the points which the dashed lines touch are called the support vectors.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from sklearn import svm
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Create the data
np.random.seed(1)
N1 = 10
N2 = 10
mu1 = np.array([0, 0])
mu2 = np.array([2.0, 2.0])
Cov1 = np.array([[1, -0.5],
[-0.5, 1]])
Cov2 = Cov1
X = np.vstack([np.random.multivariate_normal(mu1, Cov1, N1),
np.random.multivariate_normal(mu2, Cov2, N2)])
y = np.hstack([np.zeros(N1), np.ones(N2)])
#------------------------------------------------------------
# Perform an SVM classification
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
xx = np.linspace(-5, 5)
w = clf.coef_[0]
m = -w[0] / w[1]
b = - clf.intercept_[0] / w[1]
yy = m * xx + b
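# The fitted hyperplane satisfies w . x + intercept = 0, so in the (x, y) plane
# the boundary is y = -(w[0] / w[1]) * x - intercept / w[1], i.e. the slope m
# and offset b computed above.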
#------------------------------------------------------------
# find support vectors
i1 = np.argmax(np.dot(X[:N1], w))
i2 = N1 + np.argmin(np.dot(X[N1:], w))
db1 = X[i1, 1] - (m * X[i1, 0] + b)
db2 = X[i2, 1] - (m * X[i2, 0] + b)
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
ax = fig.add_subplot(111, aspect='equal')
ax.scatter(X[:, 0], X[:, 1], c=y, s=30, cmap=plt.cm.binary)
ax.plot(xx, yy, '-k')
ax.plot(xx, yy + db1, '--k')
ax.plot(xx, yy + db2, '--k')
ax.set_ylim(-1.5, 4)
ax.set_xlim(-3, 4)
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
plt.show()
| bsd-2-clause |
zejin/water | visualizer.py | 10 | 2005 | #!/usr/bin/env python
"""
Visualize shallow water simulation results.
NB: Requires a modern Matplotlib version; also needs
either FFmpeg (for MP4) or ImageMagick (for GIF)
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.animation as manimation
import sys
def main(infile="waves.out", outfile="out.mp4", startpic="start.png"):
"""Visualize shallow water simulation results.
Args:
infile: Name of input file generated by simulator
outfile: Desired output file (mp4 or gif)
startpic: Name of picture generated at first frame
"""
u = np.fromfile(infile, dtype=np.dtype('f4'))
nx = int(u[0])
ny = int(u[1])
x = range(0,nx)
y = range(0,ny)
u = u[2:]
nframe = len(u) // (nx*ny)
stride = nx // 20
u = np.reshape(u, (nframe,nx,ny))
X, Y = np.meshgrid(x,y)
fig = plt.figure(figsize=(10,10))
def plot_frame(i, stride=5):
ax = fig.add_subplot(111, projection='3d')
ax.set_zlim(0, 2)
Z = u[i,:,:];
ax.plot_surface(X, Y, Z, rstride=stride, cstride=stride)
return ax
if startpic:
ax = plot_frame(0)
plt.savefig(startpic)
plt.delaxes(ax)
metadata = dict(title='Wave animation', artist='Matplotlib')
if outfile[-4:] == ".mp4":
Writer = manimation.writers['ffmpeg']
writer = Writer(fps=15, metadata=metadata,
extra_args=["-r", "30",
"-c:v", "libx264",
"-pix_fmt", "yuv420p"])
elif outfile[-4:] == ".gif":
Writer = manimation.writers['imagemagick']
writer = Writer(fps=15, metadata=metadata)
with writer.saving(fig, outfile, nframe):
for i in range(nframe):
ax = plot_frame(i)
writer.grab_frame()
plt.delaxes(ax)
if __name__ == "__main__":
main(*sys.argv[1:])
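# Example invocation (illustrative; the arguments shown are simply main()'s defaults):
# python visualizer.py waves.out out.mp4 start.png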
| mit |
YinongLong/scikit-learn | sklearn/mixture/dpgmm.py | 5 | 35315 | """Bayesian Gaussian Mixture Models and
Dirichlet Process Gaussian Mixture Models"""
from __future__ import print_function
# Author: Alexandre Passos (alexandre.tp@gmail.com)
# Bertrand Thirion <bertrand.thirion@inria.fr>
#
# Based on mixture.py by:
# Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# Important note for the deprecation cleaning of 0.20 :
# All the function and classes of this file have been deprecated in 0.18.
# When you remove this file please also remove the related files
# - 'sklearn/mixture/gmm.py'
# - 'sklearn/mixture/test_dpgmm.py'
# - 'sklearn/mixture/test_gmm.py'
import numpy as np
from scipy.special import digamma as _digamma, gammaln as _gammaln
from scipy import linalg
from scipy.spatial.distance import cdist
from ..externals.six.moves import xrange
from ..utils import check_random_state, check_array, deprecated
from ..utils.extmath import logsumexp, pinvh, squared_norm
from ..utils.validation import check_is_fitted
from .. import cluster
from .gmm import _GMMBase
@deprecated("The function digamma is deprecated in 0.18 and "
"will be removed in 0.20. Use scipy.special.digamma instead.")
def digamma(x):
return _digamma(x + np.finfo(np.float32).eps)
@deprecated("The function gammaln is deprecated in 0.18 and "
"will be removed in 0.20. Use scipy.special.gammaln instead.")
def gammaln(x):
return _gammaln(x + np.finfo(np.float32).eps)
@deprecated("The function log_normalize is deprecated in 0.18 and "
"will be removed in 0.20.")
def log_normalize(v, axis=0):
"""Normalized probabilities from unnormalized log-probabilites"""
v = np.rollaxis(v, axis)
v = v.copy()
v -= v.max(axis=0)
out = logsumexp(v)
v = np.exp(v - out)
v += np.finfo(np.float32).eps
v /= np.sum(v, axis=0)
return np.swapaxes(v, 0, axis)
@deprecated("The function wishart_log_det is deprecated in 0.18 and "
"will be removed in 0.20.")
def wishart_log_det(a, b, detB, n_features):
"""Expected value of the log of the determinant of a Wishart
The expected value of the logarithm of the determinant of a
wishart-distributed random variable with the specified parameters."""
l = np.sum(digamma(0.5 * (a - np.arange(-1, n_features - 1))))
l += n_features * np.log(2)
return l + detB
@deprecated("The function wishart_logz is deprecated in 0.18 and "
"will be removed in 0.20.")
def wishart_logz(v, s, dets, n_features):
"The logarithm of the normalization constant for the wishart distribution"
z = 0.
z += 0.5 * v * n_features * np.log(2)
z += (0.25 * (n_features * (n_features - 1)) * np.log(np.pi))
z += 0.5 * v * np.log(dets)
z += np.sum(gammaln(0.5 * (v - np.arange(n_features) + 1)))
return z
def _bound_wishart(a, B, detB):
"""Returns a function of the dof, scale matrix and its determinant
used as an upper bound in variational approximation of the evidence"""
n_features = B.shape[0]
logprior = wishart_logz(a, B, detB, n_features)
logprior -= wishart_logz(n_features,
np.identity(n_features),
1, n_features)
logprior += 0.5 * (a - 1) * wishart_log_det(a, B, detB, n_features)
logprior += 0.5 * a * np.trace(B)
return logprior
##############################################################################
# Variational bound on the log likelihood of each class
##############################################################################
def _sym_quad_form(x, mu, A):
"""helper function to calculate symmetric quadratic form x.T * A * x"""
q = (cdist(x, mu[np.newaxis], "mahalanobis", VI=A) ** 2).reshape(-1)
return q
def _bound_state_log_lik(X, initial_bound, precs, means, covariance_type):
"""Update the bound with likelihood terms, for standard covariance types"""
n_components, n_features = means.shape
n_samples = X.shape[0]
bound = np.empty((n_samples, n_components))
bound[:] = initial_bound
if covariance_type in ['diag', 'spherical']:
for k in range(n_components):
d = X - means[k]
bound[:, k] -= 0.5 * np.sum(d * d * precs[k], axis=1)
elif covariance_type == 'tied':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs)
elif covariance_type == 'full':
for k in range(n_components):
bound[:, k] -= 0.5 * _sym_quad_form(X, means[k], precs[k])
return bound
class _DPGMMBase(_GMMBase):
"""Variational Inference for the Infinite Gaussian Mixture Model.
DPGMM stands for Dirichlet Process Gaussian Mixture Model, and it
is an infinite mixture model with the Dirichlet Process as a prior
distribution on the number of clusters. In practice the
approximate inference algorithm uses a truncated distribution with
a fixed maximum number of components, but almost always the number
of components actually used depends on the data.
Stick-breaking Representation of a Gaussian mixture model
probability distribution. This class allows for easy and efficient
inference of an approximate posterior distribution over the
parameters of a Gaussian mixture model with a variable number of
components (smaller than the truncation parameter n_components).
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <dpgmm>`.
Parameters
----------
n_components: int, default 1
Number of mixture components.
covariance_type: string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha: float, default 1
Real number representing the concentration parameter of
the dirichlet process. Intuitively, the Dirichlet Process
is as likely to start a new cluster for a point as it is
to add that point to a cluster with alpha elements. A
higher alpha means more clusters, as the expected number
of clusters is ``alpha*log(N)``.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_components : int
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
VBGMM : Finite Gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0, min_covar=None,
n_iter=10, params='wmc', init_params='wmc'):
self.alpha = alpha
super(_DPGMMBase, self).__init__(n_components, covariance_type,
random_state=random_state,
tol=tol, min_covar=min_covar,
n_iter=n_iter, params=params,
init_params=init_params,
verbose=verbose)
def _get_precisions(self):
"""Return precisions as a full matrix."""
if self.covariance_type == 'full':
return self.precs_
elif self.covariance_type in ['diag', 'spherical']:
return [np.diag(cov) for cov in self.precs_]
elif self.covariance_type == 'tied':
return [self.precs_] * self.n_components
def _get_covars(self):
return [pinvh(c) for c in self._get_precisions()]
def _set_covars(self, covars):
raise NotImplementedError("""The variational algorithm does
not support setting the covariance parameters.""")
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities: array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
z = np.zeros((X.shape[0], self.n_components))
sd = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dgamma1 = digamma(self.gamma_.T[1]) - sd
dgamma2 = np.zeros(self.n_components)
dgamma2[0] = digamma(self.gamma_[0, 2]) - digamma(self.gamma_[0, 1] +
self.gamma_[0, 2])
for j in range(1, self.n_components):
dgamma2[j] = dgamma2[j - 1] + digamma(self.gamma_[j - 1, 2])
dgamma2[j] -= sd[j - 1]
dgamma = dgamma1 + dgamma2
# Free memory and developers cognitive load:
del dgamma1, dgamma2, sd
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dgamma
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
"""Update the concentration parameters for each cluster"""
sz = np.sum(z, axis=0)
self.gamma_.T[1] = 1. + sz
self.gamma_.T[2].fill(0)
for i in range(self.n_components - 2, -1, -1):
self.gamma_[i, 2] = self.gamma_[i + 1, 2] + sz[i]
self.gamma_.T[2] += self.alpha
def _update_means(self, X, z):
"""Update the variational distributions for the means"""
n_features = X.shape[1]
for k in range(self.n_components):
if self.covariance_type in ['spherical', 'diag']:
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num *= self.precs_[k]
den = 1. + self.precs_[k] * np.sum(z.T[k])
self.means_[k] = num / den
elif self.covariance_type in ['tied', 'full']:
if self.covariance_type == 'tied':
cov = self.precs_
else:
cov = self.precs_[k]
den = np.identity(n_features) + cov * np.sum(z.T[k])
num = np.sum(z.T[k].reshape((-1, 1)) * X, axis=0)
num = np.dot(cov, num)
self.means_[k] = linalg.lstsq(den, num)[0]
def _update_precisions(self, X, z):
"""Update the variational distributions for the precisions"""
n_features = X.shape[1]
if self.covariance_type == 'spherical':
self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
for k in range(self.n_components):
# could be more memory efficient ?
sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
self.scale_[k] = 1.
self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
self.bound_prec_[k] = (
0.5 * n_features * (
digamma(self.dof_[k]) - np.log(self.scale_[k])))
self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T
elif self.covariance_type == 'diag':
for k in range(self.n_components):
self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
sq_diff = (X - self.means_[k]) ** 2 # see comment above
self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
z.T[k], (sq_diff + 1))
self.precs_[k] = self.dof_[k] / self.scale_[k]
self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
- np.log(self.scale_[k]))
self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])
elif self.covariance_type == 'tied':
self.dof_ = 2 + X.shape[0] + n_features
self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
for k in range(self.n_components):
diff = X - self.means_[k]
self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_ = pinvh(self.scale_)
self.precs_ = self.dof_ * self.scale_
self.det_scale_ = linalg.det(self.scale_)
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
sum_resp = np.sum(z.T[k])
self.dof_[k] = 2 + sum_resp + n_features
self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
diff = X - self.means_[k]
self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
self.scale_[k] = pinvh(self.scale_[k])
self.precs_[k] = self.dof_[k] * self.scale_[k]
self.det_scale_[k] = linalg.det(self.scale_[k])
self.bound_prec_[k] = 0.5 * wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
self.scale_[k])
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose > 0:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_.T[1])
print("covariance_type:", self.covariance_type)
def _do_mstep(self, X, z, params):
"""Maximize the variational lower bound
Update each of the parameters to maximize the lower bound."""
self._monitor(X, z, "z")
self._update_concentration(z)
self._monitor(X, z, "gamma")
if 'm' in params:
self._update_means(X, z)
self._monitor(X, z, "mu")
if 'c' in params:
self._update_precisions(X, z)
self._monitor(X, z, "a and b", end=True)
def _initialize_gamma(self):
"Initializes the concentration parameters"
self.gamma_ = self.alpha * np.ones((self.n_components, 3))
def _bound_concentration(self):
"""The variational lower bound for the concentration parameter."""
logprior = gammaln(self.alpha) * self.n_components
logprior += np.sum((self.alpha - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior += np.sum(- gammaln(self.gamma_.T[1] + self.gamma_.T[2]))
logprior += np.sum(gammaln(self.gamma_.T[1]) +
gammaln(self.gamma_.T[2]))
logprior -= np.sum((self.gamma_.T[1] - 1) * (
digamma(self.gamma_.T[1]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
logprior -= np.sum((self.gamma_.T[2] - 1) * (
digamma(self.gamma_.T[2]) - digamma(self.gamma_.T[1] +
self.gamma_.T[2])))
return logprior
def _bound_means(self):
"The variational lower bound for the mean parameters"
logprior = 0.
logprior -= 0.5 * squared_norm(self.means_)
logprior -= 0.5 * self.means_.shape[1] * self.n_components
return logprior
def _bound_precisions(self):
"""Returns the bound term related to precisions"""
logprior = 0.
if self.covariance_type == 'spherical':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_
- self.precs_[:, 0])
elif self.covariance_type == 'diag':
logprior += np.sum(gammaln(self.dof_))
logprior -= np.sum(
(self.dof_ - 1) * digamma(np.maximum(0.5, self.dof_)))
logprior += np.sum(- np.log(self.scale_) + self.dof_ - self.precs_)
elif self.covariance_type == 'tied':
logprior += _bound_wishart(self.dof_, self.scale_, self.det_scale_)
elif self.covariance_type == 'full':
for k in range(self.n_components):
logprior += _bound_wishart(self.dof_[k],
self.scale_[k],
self.det_scale_[k])
return logprior
def _bound_proportions(self, z):
"""Returns the bound term related to proportions"""
dg12 = digamma(self.gamma_.T[1] + self.gamma_.T[2])
dg1 = digamma(self.gamma_.T[1]) - dg12
dg2 = digamma(self.gamma_.T[2]) - dg12
cz = np.cumsum(z[:, ::-1], axis=-1)[:, -2::-1]
logprior = np.sum(cz * dg2[:-1]) + np.sum(z * dg1)
del cz # Save memory
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _logprior(self, z):
logprior = self._bound_concentration()
logprior += self._bound_means()
logprior += self._bound_precisions()
logprior += self._bound_proportions(z)
return logprior
def lower_bound(self, X, z):
"""returns a lower bound on model evidence based on X and membership"""
check_is_fitted(self, 'means_')
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
c = np.sum(z * _bound_state_log_lik(X, self._initial_bound +
self.bound_prec_, self.precs_,
self.means_, self.covariance_type))
return c + self._logprior(z)
def _set_weights(self):
for i in xrange(self.n_components):
self.weights_[i] = self.gamma_[i, 1] / (self.gamma_[i, 1]
+ self.gamma_[i, 2])
self.weights_ /= np.sum(self.weights_)
def _fit(self, X, y=None):
"""Estimate model parameters with the variational
algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating
the object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
self.random_state_ = check_random_state(self.random_state)
# initialization step
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
z = np.ones((n_samples, self.n_components))
z /= self.n_components
self._initial_bound = - 0.5 * n_features * np.log(2 * np.pi)
self._initial_bound -= np.log(2 * np.pi * np.e)
if (self.init_params != '') or not hasattr(self, 'gamma_'):
self._initialize_gamma()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state_).fit(X).cluster_centers_[::-1]
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components, self.n_components)
if 'c' in self.init_params or not hasattr(self, 'precs_'):
if self.covariance_type == 'spherical':
self.dof_ = np.ones(self.n_components)
self.scale_ = np.ones(self.n_components)
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * n_features * (
digamma(self.dof_) - np.log(self.scale_))
elif self.covariance_type == 'diag':
self.dof_ = 1 + 0.5 * n_features
self.dof_ *= np.ones((self.n_components, n_features))
self.scale_ = np.ones((self.n_components, n_features))
self.precs_ = np.ones((self.n_components, n_features))
self.bound_prec_ = 0.5 * (np.sum(digamma(self.dof_) -
np.log(self.scale_), 1))
self.bound_prec_ -= 0.5 * np.sum(self.precs_, 1)
elif self.covariance_type == 'tied':
self.dof_ = 1.
self.scale_ = np.identity(n_features)
self.precs_ = np.identity(n_features)
self.det_scale_ = 1.
self.bound_prec_ = 0.5 * wishart_log_det(
self.dof_, self.scale_, self.det_scale_, n_features)
self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)
elif self.covariance_type == 'full':
self.dof_ = (1 + self.n_components + n_samples)
self.dof_ *= np.ones(self.n_components)
self.scale_ = [2 * np.identity(n_features)
for _ in range(self.n_components)]
self.precs_ = [np.identity(n_features)
for _ in range(self.n_components)]
self.det_scale_ = np.ones(self.n_components)
self.bound_prec_ = np.zeros(self.n_components)
for k in range(self.n_components):
self.bound_prec_[k] = wishart_log_det(
self.dof_[k], self.scale_[k], self.det_scale_[k],
n_features)
self.bound_prec_[k] -= (self.dof_[k] *
np.trace(self.scale_[k]))
self.bound_prec_ *= 0.5
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
for i in range(self.n_iter):
prev_log_likelihood = current_log_likelihood
# Expectation step
curr_logprob, z = self.score_samples(X)
current_log_likelihood = (
curr_logprob.mean() + self._logprior(z) / n_samples)
# Check for convergence.
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if change < self.tol:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, z, self.params)
if self.n_iter == 0:
# Need to make sure that there is a z value to output
# Output zeros because it was just a quick initialization
z = np.zeros((X.shape[0], self.n_components))
self._set_weights()
return z
@deprecated("The `DPGMM` class is not working correctly and it's better "
"to use `sklearn.mixture.BayesianGaussianMixture` class with "
"parameter `weight_concentration_prior_type='dirichlet_process'` "
"instead. DPGMM is deprecated in 0.18 and will be "
"removed in 0.20.")
class DPGMM(_DPGMMBase):
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0, min_covar=None,
n_iter=10, params='wmc', init_params='wmc'):
super(DPGMM, self).__init__(
n_components=n_components, covariance_type=covariance_type,
alpha=alpha, random_state=random_state, tol=tol, verbose=verbose,
min_covar=min_covar, n_iter=n_iter, params=params,
init_params=init_params)
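# Illustrative migration sketch (not part of the original module): the
# deprecation message above points to the replacement estimator, e.g.
# >>> from sklearn.mixture import BayesianGaussianMixture
# >>> bgm = BayesianGaussianMixture(
# ... n_components=10,
# ... weight_concentration_prior_type='dirichlet_process',
# ... weight_concentration_prior=1.0) # plays the role of alpha
# >>> bgm.fit(X) # X: (n_samples, n_features) array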
@deprecated("The `VBGMM` class is not working correctly and it's better "
"to use `sklearn.mixture.BayesianGaussianMixture` class with "
"parameter `weight_concentration_prior_type="
"'dirichlet_distribution'` instead. "
"VBGMM is deprecated in 0.18 and will be removed in 0.20.")
class VBGMM(_DPGMMBase):
"""Variational Inference for the Gaussian Mixture Model
Variational inference for a Gaussian mixture model probability
distribution. This class allows for easy and efficient inference
of an approximate posterior distribution over the parameters of a
Gaussian mixture model with a fixed number of components.
Initialization is with normally-distributed means and identity
covariance, for proper convergence.
Read more in the :ref:`User Guide <vbgmm>`.
Parameters
----------
n_components: int, default 1
Number of mixture components.
covariance_type: string, default 'diag'
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
alpha: float, default 1
Real number representing the concentration parameter of
the dirichlet distribution. Intuitively, the higher the
value of alpha the more likely the variational mixture of
Gaussians model will use all components it can.
tol : float, default 1e-3
Convergence threshold.
n_iter : int, default 10
Maximum number of iterations to perform before convergence.
params : string, default 'wmc'
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars.
init_params : string, default 'wmc'
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default 0
Controls output verbosity.
Attributes
----------
covariance_type : string
String describing the type of covariance parameters used by
the DP-GMM. Must be one of 'spherical', 'tied', 'diag', 'full'.
n_features : int
Dimensionality of the Gaussians.
n_components : int (read-only)
Number of mixture components.
weights_ : array, shape (`n_components`,)
Mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
precs_ : array
Precision (inverse covariance) parameters for each mixture
component. The shape depends on `covariance_type`::
(`n_components`, 'n_features') if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_components`, `n_features`) if 'diag',
(`n_components`, `n_features`, `n_features`) if 'full'
converged_ : bool
True when convergence was reached in fit(), False
otherwise.
See Also
--------
GMM : Finite Gaussian mixture model fit with EM
DPGMM : Infinite Gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
"""
def __init__(self, n_components=1, covariance_type='diag', alpha=1.0,
random_state=None, tol=1e-3, verbose=0,
min_covar=None, n_iter=10, params='wmc', init_params='wmc'):
super(VBGMM, self).__init__(
n_components, covariance_type, random_state=random_state,
tol=tol, verbose=verbose, min_covar=min_covar,
n_iter=n_iter, params=params, init_params=init_params)
self.alpha = alpha
def _fit(self, X, y=None):
"""Estimate model parameters with the variational algorithm.
For a full derivation and description of the algorithm see
doc/modules/dp-derivation.rst
or
http://scikit-learn.org/stable/modules/dp-derivation.html
An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating
the object. Likewise, if you just would like to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
self.alpha_ = float(self.alpha) / self.n_components
return super(VBGMM, self)._fit(X, y)
def score_samples(self, X):
"""Return the likelihood of the data under the model.
Compute the bound on log probability of X under the model
and return the posterior distribution (responsibilities) of
each mixture component for each element of X.
This is done by computing the parameters for the mean-field of
z for each observation.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities: array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'gamma_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
dg = digamma(self.gamma_) - digamma(np.sum(self.gamma_))
if self.covariance_type not in ['full', 'tied', 'diag', 'spherical']:
raise NotImplementedError("This ctype is not implemented: %s"
% self.covariance_type)
p = _bound_state_log_lik(X, self._initial_bound + self.bound_prec_,
self.precs_, self.means_,
self.covariance_type)
z = p + dg
z = log_normalize(z, axis=-1)
bound = np.sum(z * p, axis=-1)
return bound, z
def _update_concentration(self, z):
for i in range(self.n_components):
self.gamma_[i] = self.alpha_ + np.sum(z.T[i])
def _initialize_gamma(self):
self.gamma_ = self.alpha_ * np.ones(self.n_components)
def _bound_proportions(self, z):
logprior = 0.
dg = digamma(self.gamma_)
dg -= digamma(np.sum(self.gamma_))
logprior += np.sum(dg.reshape((-1, 1)) * z.T)
z_non_zeros = z[z > np.finfo(np.float32).eps]
logprior -= np.sum(z_non_zeros * np.log(z_non_zeros))
return logprior
def _bound_concentration(self):
logprior = 0.
logprior = gammaln(np.sum(self.gamma_)) - gammaln(self.n_components
* self.alpha_)
logprior -= np.sum(gammaln(self.gamma_) - gammaln(self.alpha_))
sg = digamma(np.sum(self.gamma_))
logprior += np.sum((self.gamma_ - self.alpha_)
* (digamma(self.gamma_) - sg))
return logprior
def _monitor(self, X, z, n, end=False):
"""Monitor the lower bound during iteration
Debug method to help see exactly when it is failing to converge as
expected.
Note: this is very expensive and should not be used by default."""
if self.verbose > 0:
print("Bound after updating %8s: %f" % (n, self.lower_bound(X, z)))
if end:
print("Cluster proportions:", self.gamma_)
print("covariance_type:", self.covariance_type)
def _set_weights(self):
self.weights_[:] = self.gamma_
self.weights_ /= np.sum(self.weights_)
| bsd-3-clause |
gengliangwang/spark | python/pyspark/pandas/tests/plot/test_frame_plot_plotly.py | 1 | 10017 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from distutils.version import LooseVersion
import pprint
import pandas as pd
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option
from pyspark.testing.pandasutils import (
have_plotly,
plotly_requirement_message,
PandasOnSparkTestCase,
TestUtils,
)
from pyspark.pandas.utils import name_like_string
if have_plotly:
from plotly import express
import plotly.graph_objs as go
@unittest.skipIf(not have_plotly, plotly_requirement_message)
@unittest.skipIf(
LooseVersion(pd.__version__) < "1.0.0",
"pandas<1.0; pandas<1.0 does not support latest plotly and/or 'plotting.backend' option.",
)
class DataFramePlotPlotlyTest(PandasOnSparkTestCase, TestUtils):
@classmethod
def setUpClass(cls):
super().setUpClass()
pd.set_option("plotting.backend", "plotly")
set_option("plotting.backend", "plotly")
set_option("plotting.max_rows", 2000)
set_option("plotting.sample_ratio", None)
@classmethod
def tearDownClass(cls):
pd.reset_option("plotting.backend")
reset_option("plotting.backend")
reset_option("plotting.max_rows")
reset_option("plotting.sample_ratio")
super().tearDownClass()
@property
def pdf1(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50], "b": [2, 3, 4, 5, 7, 9, 10, 15, 34, 45, 49]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10],
)
@property
def psdf1(self):
return ps.from_pandas(self.pdf1)
def test_line_plot(self):
def check_line_plot(pdf, psdf):
self.assertEqual(pdf.plot(kind="line"), psdf.plot(kind="line"))
self.assertEqual(pdf.plot.line(), psdf.plot.line())
pdf1 = self.pdf1
psdf1 = self.psdf1
check_line_plot(pdf1, psdf1)
def test_area_plot(self):
def check_area_plot(pdf, psdf):
self.assertEqual(pdf.plot(kind="area"), psdf.plot(kind="area"))
self.assertEqual(pdf.plot.area(), psdf.plot.area())
pdf = self.pdf1
psdf = self.psdf1
check_area_plot(pdf, psdf)
def test_area_plot_y(self):
def check_area_plot_y(pdf, psdf, y):
self.assertEqual(pdf.plot.area(y=y), psdf.plot.area(y=y))
# test if frame area plot is correct when y is specified
pdf = pd.DataFrame(
{
"sales": [3, 2, 3, 9, 10, 6],
"signups": [5, 5, 6, 12, 14, 13],
"visits": [20, 42, 28, 62, 81, 50],
},
index=pd.date_range(start="2018/01/01", end="2018/07/01", freq="M"),
)
psdf = ps.from_pandas(pdf)
check_area_plot_y(pdf, psdf, y="sales")
def test_barh_plot_with_x_y(self):
def check_barh_plot_with_x_y(pdf, psdf, x, y):
self.assertEqual(pdf.plot(kind="barh", x=x, y=y), psdf.plot(kind="barh", x=x, y=y))
self.assertEqual(pdf.plot.barh(x=x, y=y), psdf.plot.barh(x=x, y=y))
# this is testing plot with specified x and y
pdf1 = pd.DataFrame({"lab": ["A", "B", "C"], "val": [10, 30, 20]})
psdf1 = ps.from_pandas(pdf1)
check_barh_plot_with_x_y(pdf1, psdf1, x="lab", y="val")
def test_barh_plot(self):
def check_barh_plot(pdf, psdf):
self.assertEqual(pdf.plot(kind="barh"), psdf.plot(kind="barh"))
self.assertEqual(pdf.plot.barh(), psdf.plot.barh())
# this is testing when x or y is not assigned
pdf1 = pd.DataFrame({"lab": [20.1, 40.5, 60.6], "val": [10, 30, 20]})
psdf1 = ps.from_pandas(pdf1)
check_barh_plot(pdf1, psdf1)
def test_bar_plot(self):
def check_bar_plot(pdf, psdf):
self.assertEqual(pdf.plot(kind="bar"), psdf.plot(kind="bar"))
self.assertEqual(pdf.plot.bar(), psdf.plot.bar())
pdf1 = self.pdf1
psdf1 = self.psdf1
check_bar_plot(pdf1, psdf1)
def test_bar_with_x_y(self):
# this is testing plot with specified x and y
pdf = pd.DataFrame({"lab": ["A", "B", "C"], "val": [10, 30, 20]})
psdf = ps.from_pandas(pdf)
self.assertEqual(
pdf.plot(kind="bar", x="lab", y="val"), psdf.plot(kind="bar", x="lab", y="val")
)
self.assertEqual(pdf.plot.bar(x="lab", y="val"), psdf.plot.bar(x="lab", y="val"))
def test_scatter_plot(self):
def check_scatter_plot(pdf, psdf, x, y, c):
self.assertEqual(pdf.plot.scatter(x=x, y=y), psdf.plot.scatter(x=x, y=y))
self.assertEqual(
pdf.plot(kind="scatter", x=x, y=y), psdf.plot(kind="scatter", x=x, y=y)
)
# check when keyword c is given as name of a column
self.assertEqual(
pdf.plot.scatter(x=x, y=y, c=c, s=50), psdf.plot.scatter(x=x, y=y, c=c, s=50)
)
# Use pandas scatter plot example
pdf1 = pd.DataFrame(np.random.rand(50, 4), columns=["a", "b", "c", "d"])
psdf1 = ps.from_pandas(pdf1)
check_scatter_plot(pdf1, psdf1, x="a", y="b", c="c")
def test_pie_plot(self):
def check_pie_plot(psdf):
pdf = psdf.to_pandas()
self.assertEqual(
psdf.plot(kind="pie", y=psdf.columns[0]),
express.pie(pdf, values="a", names=pdf.index),
)
self.assertEqual(
psdf.plot(kind="pie", values="a"), express.pie(pdf, values="a"),
)
psdf1 = self.psdf1
check_pie_plot(psdf1)
# TODO: support multi-index columns
# columns = pd.MultiIndex.from_tuples([("x", "y"), ("y", "z")])
# psdf1.columns = columns
# check_pie_plot(psdf1)
# TODO: support multi-index
# psdf1 = ps.DataFrame(
# {
# "a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50],
# "b": [2, 3, 4, 5, 7, 9, 10, 15, 34, 45, 49]
# },
# index=pd.MultiIndex.from_tuples([("x", "y")] * 11),
# )
# check_pie_plot(psdf1)
def test_hist_plot(self):
def check_hist_plot(psdf):
bins = np.array([1.0, 5.9, 10.8, 15.7, 20.6, 25.5, 30.4, 35.3, 40.2, 45.1, 50.0])
data = [
np.array([5.0, 4.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]),
np.array([4.0, 3.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 1.0, 1.0]),
]
prev = bins[0]
text_bins = []
for b in bins[1:]:
text_bins.append("[%s, %s)" % (prev, b))
prev = b
text_bins[-1] = text_bins[-1][:-1] + "]"
bins = 0.5 * (bins[:-1] + bins[1:])
name_a = name_like_string(psdf.columns[0])
name_b = name_like_string(psdf.columns[1])
bars = [
go.Bar(
x=bins,
y=data[0],
name=name_a,
text=text_bins,
hovertemplate=("variable=" + name_a + "<br>value=%{text}<br>count=%{y}"),
),
go.Bar(
x=bins,
y=data[1],
name=name_b,
text=text_bins,
hovertemplate=("variable=" + name_b + "<br>value=%{text}<br>count=%{y}"),
),
]
fig = go.Figure(data=bars, layout=go.Layout(barmode="stack"))
fig["layout"]["xaxis"]["title"] = "value"
fig["layout"]["yaxis"]["title"] = "count"
self.assertEqual(
pprint.pformat(psdf.plot(kind="hist").to_dict()), pprint.pformat(fig.to_dict())
)
psdf1 = self.psdf1
check_hist_plot(psdf1)
columns = pd.MultiIndex.from_tuples([("x", "y"), ("y", "z")])
psdf1.columns = columns
check_hist_plot(psdf1)
def test_kde_plot(self):
psdf = ps.DataFrame({"a": [1, 2, 3, 4, 5], "b": [1, 3, 5, 7, 9], "c": [2, 4, 6, 8, 10]})
pdf = pd.DataFrame(
{
"Density": [
0.03515491,
0.06834979,
0.00663503,
0.02372059,
0.06834979,
0.01806934,
0.01806934,
0.06834979,
0.02372059,
],
"names": ["a", "a", "a", "b", "b", "b", "c", "c", "c"],
"index": [-3.5, 5.5, 14.5, -3.5, 5.5, 14.5, -3.5, 5.5, 14.5],
}
)
actual = psdf.plot.kde(bw_method=5, ind=3)
expected = express.line(pdf, x="index", y="Density", color="names")
expected["layout"]["xaxis"]["title"] = None
self.assertEqual(pprint.pformat(actual.to_dict()), pprint.pformat(expected.to_dict()))
if __name__ == "__main__":
from pyspark.pandas.tests.plot.test_frame_plot_plotly import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
fourpartswater/cuda-convnet2 | shownet.py | 180 | 18206 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def init_data_providers(self):
self.need_gpu = self.op.get_value('show_preds')
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
def init_model_lib(self):
if self.need_gpu:
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
# print self.test_outputs
train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
if self.smooth_test_errors:
test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
numbatches = len(self.train_batch_range)
test_errors = n.row_stack(test_errors)
test_errors = n.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.))
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
# print "plotted cost"
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
MAX_ROWS = 24
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_pixels = filters.shape[1]
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start,filter_end ):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
FILTERS_PER_ROW = 16
filter_start = 0 # First filter to show
if self.show_filters not in self.layers:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[self.show_filters]
filters = layer['weights'][self.input_idx]
# filters = filters - filters.min()
# filters = filters / filters.max()
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
filters = filters.swapaxes(0,2).swapaxes(0,1)
num_filters = layer['modules']
# filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
# num_filters *= layer['modules']
FILTERS_PER_ROW = layer['modulesX']
else:
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
if self.norm_filters:
#print filters.shape
filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
#filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
#filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
#else:
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
epoch, batch, data = self.get_next_batch(train=False) # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
PRED_IDX = 1
label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
if self.only_errors:
preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
#rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
if NUM_IMGS < data[0].shape[1]:
data = [n.require(d[:,rand_idx], requirements='C') for d in data]
# data += [preds]
# Run the model
print [d.shape for d in data], preds.shape
self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
IGPUModel.finish_batch(self)
print preds
data[0] = self.test_data_provider.get_plottable_data(data[0])
if self.save_preds:
if not gfile.Exists(self.save_preds):
gfile.MakeDirs(self.save_preds)
preds_thresh = preds > 0.5 # Binarize predictions
data[0] = data[0] * 255.0
data[0][data[0]<0] = 0
data[0][data[0]>255] = 255
data[0] = n.require(data[0], dtype=n.uint8)
dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
tfo = gfile.GFile(tar_name, "w")
tf = TarFile(fileobj=tfo, mode='w')
for img_idx in xrange(NUM_IMGS):
img = data[0][img_idx,:,:,:]
imsave = Image.fromarray(img)
prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
# gf = gfile.GFile(file_name, "w")
file_string = StringIO()
imsave.save(file_string, "PNG")
tarinf = TarInfo(os.path.join(dir_name, file_name))
tarinf.size = file_string.tell()
file_string.seek(0)
tf.addfile(tarinf, file_string)
tf.close()
tfo.close()
# gf.close()
print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
else:
fig = pl.figure(3, figsize=(12,9))
fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
# what the net got wrong
if NUM_OUTPUTS > 1:
err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
else:
err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
print err_idx
err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
cconv = colors.ColorConverter()
gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
#print data[1]
for row in xrange(NUM_ROWS):
for col in xrange(NUM_COLS):
img_idx = row * NUM_COLS + col
if data[0].shape[0] <= img_idx:
break
pl.subplot(gs[(row * 2) * NUM_COLS + col])
#pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
pl.xticks([])
pl.yticks([])
img = data[0][img_idx,:,:,:]
pl.imshow(img, interpolation='lanczos')
show_title = data[1].shape[0] == 1
true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
#print true_label
#print preds[img_idx,:].shape
#print preds[img_idx,:].max()
true_label_names = [label_names[i] for i in true_label]
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
#print img_labels
axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
height = 0.5
ylocs = n.array(range(NUM_TOP_CLASSES))*height
pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
#pl.title(", ".join(true_labels))
if show_title:
pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
else:
print true_label_names
pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
for line in enumerate(axes.get_yticklines()):
line[1].set_visible(False)
#pl.xticks([width], [''])
#pl.yticks([])
pl.xticks([])
pl.ylim(0, ylocs[-1] + height)
pl.xlim(0, 1)
def start(self):
self.op.print_values()
# print self.show_cost
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if pl:
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
op.delete_option(option)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
op.options['load_file'].default = None
return op
if __name__ == "__main__":
#nr.seed(6)
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
| apache-2.0 |
megahertz0/tusharedemo | kline_tushare.py | 1 | 3639 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 15 17:31:36 2016
@author: megahertz
"""
import matplotlib.pyplot as plt
import matplotlib as mpl
import tushare as ts
import datetime
import time
from matplotlib.dates import DateFormatter, WeekdayLocator, DayLocator, MONDAY,YEARLY
from matplotlib.finance import fetch_historical_yahoo
from matplotlib.lines import Line2D, TICKLEFT, TICKRIGHT
from matplotlib.patches import Rectangle
from matplotlib.dates import date2num
#zhfont = mpl.font_manager.FontProperties(fname='/usr/share/fonts/truetype/droid/DroidSansFallbackFull.ttf')
begin_time = '2015-08-01'
end_time = '2015-11-01'
def _candlestick(ax, df, width=0.2, colorup='k', colordown='r',
alpha=1.0):
"""
Plot the time, open, high, low, close as a vertical line ranging
from low to high. Use a rectangular bar to represent the
open-close span. If close >= open, use colorup to color the bar,
otherwise use colordown
Parameters
----------
ax : `Axes`
an Axes instance to plot to
df : pandas data from tushare
width : float
fraction of a day for the rectangle width
colorup : color
the color of the rectangle where close >= open
colordown : color
the color of the rectangle where close < open
alpha : float
the rectangle alpha level
Returns
-------
ret : tuple
returns (lines, patches) where lines is a list of lines
added and patches is a list of the rectangle patches added
"""
OFFSET = width / 2.0
lines = []
patches = []
for date_string,row in df.iterrows():
date_time = datetime.datetime.strptime(date_string,'%Y-%m-%d')
t = date2num(date_time)
open, high, close, low = row[:4]
if close >= open:
color = colorup
lower = open
height = close - open
else:
color = colordown
lower = close
height = open - close
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color=color,
linewidth=0.5,
antialiased=True,
)
rect = Rectangle(
xy=(t - OFFSET, lower),
width=width,
height=height,
facecolor=color,
edgecolor=color,
)
rect.set_alpha(alpha)
lines.append(vline)
patches.append(rect)
ax.add_line(vline)
ax.add_patch(rect)
ax.autoscale_view()
return lines, patches
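# --- Added usage sketch (not part of the original script) ---
# A minimal, hedged example of driving _candlestick with a small hand-built
# DataFrame. The column order (open, high, close, low) matches the row[:4]
# unpacking above; the dates and prices are made up for illustration, and the
# helper is defined but never called, so the script behaves exactly as before.
def _candlestick_demo():
    import pandas as pd
    demo = pd.DataFrame(
        {'open': [10.0, 10.5], 'high': [10.8, 11.0],
         'close': [10.5, 10.2], 'low': [9.9, 10.1]},
        index=['2015-08-03', '2015-08-04'])
    fig, ax = plt.subplots()
    _candlestick(ax, demo[['open', 'high', 'close', 'low']],
                 width=0.6, colorup='r', colordown='g')
    ax.xaxis_date()
    plt.show()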
def drawPic(df, code, name):
mondays = WeekdayLocator(MONDAY) # major ticks on Mondays
alldays = DayLocator() # minor ticks on every day
#weekFormatter = DateFormatter('%b %d') # e.g. Jan 12
mondayFormatter = DateFormatter('%m-%d-%Y') # e.g. 2-29-2015
dayFormatter = DateFormatter('%d') # e.g. 12
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.2)
ax.xaxis.set_major_locator(mondays)
ax.xaxis.set_minor_locator(alldays)
ax.xaxis.set_major_formatter(mondayFormatter)
_candlestick(ax, df, width=0.6, colorup='r', colordown='g')
ax.xaxis_date()
ax.autoscale_view()
plt.setp(plt.gca().get_xticklabels(), rotation=45, horizontalalignment='right')
ax.grid(True)
plt.title(name + ' ' + code)
plt.show()
def makePicture(code, name):
df = ts.get_hist_data(code, start=begin_time, end=end_time)
df = df.sort_index(0)
# df.plot()
drawPic(df, code, name)
makePicture('600028', '中国石化')
| lgpl-3.0 |
cmoutard/mne-python | examples/stats/plot_cluster_stats_evoked.py | 18 | 2991 | """
=======================================================
Permutation F-test on sensor data with 1D cluster level
=======================================================
This example tests whether the evoked response is significantly different
between conditions. The multiple comparisons problem is addressed
with a cluster-level permutation test.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
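# --- Added illustrative sketch (not part of the original example) ---
# A tiny synthetic-data run of permutation_cluster_test, assuming two
# (n_observations, n_times) arrays with an effect injected in one time
# window. It is wrapped in a function and never called, so the example
# below runs exactly as before.
def _toy_cluster_test():
    import numpy as np
    rng = np.random.RandomState(0)
    cond_a = rng.randn(20, 50)
    cond_b = rng.randn(20, 50)
    cond_b[:, 20:30] += 1.0  # add a difference between conditions
    T_obs, clusters, cluster_p_values, H0 = permutation_cluster_test(
        [cond_a, cond_b], n_permutations=200, threshold=4.0, tail=1)
    return clusters, cluster_p_values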
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id = 1
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.read_events(event_fname)
channel = 'MEG 1332' # include only this channel in analysis
include = [channel]
###############################################################################
# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
exclude='bads')
event_id = 1
reject = dict(grad=4000e-13, eog=150e-6)
epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition1 = epochs1.get_data() # as 3D matrix
event_id = 2
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition2 = epochs2.get_data() # as 3D matrix
condition1 = condition1[:, 0, :] # take only one channel to get a 2D array
condition2 = condition2[:, 0, :] # take only one channel to get a 2D array
###############################################################################
# Compute statistic
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([condition1, condition2], n_permutations=1000,
threshold=threshold, tail=1, n_jobs=2)
###############################################################################
# Plot
times = epochs1.times
plt.close('all')
plt.subplot(211)
plt.title('Channel : ' + channel)
plt.plot(times, condition1.mean(axis=0) - condition2.mean(axis=0),
label="ERF Contrast (Event 1 - Event 2)")
plt.ylabel("MEG (T / m)")
plt.legend()
plt.subplot(212)
for i_c, c in enumerate(clusters):
c = c[0]
if cluster_p_values[i_c] <= 0.05:
h = plt.axvspan(times[c.start], times[c.stop - 1],
color='r', alpha=0.3)
else:
plt.axvspan(times[c.start], times[c.stop - 1], color=(0.3, 0.3, 0.3),
alpha=0.3)
hf = plt.plot(times, T_obs, 'g')
plt.legend((h, ), ('cluster p-value < 0.05', ))
plt.xlabel("time (ms)")
plt.ylabel("f-values")
plt.show()
| bsd-3-clause |
nlhepler/idepi | idepi/feature_extraction/_pairwisesitevectorizer.py | 1 | 3511 |
from numpy import zeros
from sklearn.base import BaseEstimator, TransformerMixin
from idepi.filters import null_filter
from idepi.labeledmsa import LabeledMSA
__all__ = ['PairwiseSiteVectorizer']
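# (Added note.) PairwiseSiteVectorizer learns, from a labeled multiple
# sequence alignment, a vocabulary of (site_i, residue_u, site_j, residue_v)
# features with j at most `radius` columns to the right of i, and transform()
# emits a binary indicator matrix over that vocabulary.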
class PairwiseSiteVectorizer(BaseEstimator, TransformerMixin):
def __init__(self, encoder, filter=null_filter, radius=0):
if not isinstance(radius, int) or radius < 0:
raise ValueError('radius expects a positive integer')
self.__alignment_length = 0
self.encoder = encoder
self.filter = filter
self.radius = radius
self.feature_names_ = []
self.vocabulary_ = {}
def fit(self, alignment):
if not isinstance(alignment, LabeledMSA):
raise ValueError("PairwiseSiteVectorizers require a LabeledMSA")
valid_columns = [
len(self.filter(alignment[:, i])) > 0
for i in range(alignment.get_alignment_length())]
calls = set()
for seq in alignment:
seq_ = str(seq.seq).upper()
for i, ltr1 in enumerate(seq_[:-1]):
if not valid_columns[i]:
continue
a = i + 1
b = i + self.radius + 1
try:
u = self.encoder(ltr1)
for j, ltr2 in enumerate(seq_[a:b], start=a):
if not valid_columns[j]:
continue
try:
v = self.encoder(ltr2)
calls.add((i, u, j, v))
except KeyError:
pass
except KeyError:
pass
column_labels = list(alignment.labels)
feature_names = []
vocab = {}
for i, k in enumerate(sorted(calls)):
vocab[k] = i
idx1, ltr1, idx2, ltr2 = k
feature_names.append('{0:s}{1:s}+{2:s}{3:s}'.format(
column_labels[idx1],
self.encoder[ltr1],
column_labels[idx2],
self.encoder[ltr2]
))
self.__alignment_length = alignment.get_alignment_length()
self.feature_names_ = feature_names
self.vocabulary_ = vocab
return self
def transform(self, alignment):
ncol = alignment.get_alignment_length()
if ncol != self.__alignment_length:
msg = 'alignment length ({0:d}) does not match the learned length ({1:d})'.format(
ncol,
self.__alignment_length
)
raise ValueError(msg)
vocab = self.vocabulary_
data = zeros((len(alignment), len(vocab)), dtype=int)
if len(vocab) == 0:
return data
for row, seq in enumerate(alignment):
seq_ = str(seq.seq).upper()
for i, ltr1 in enumerate(seq_[:-1]):
a = i + 1
b = i + self.radius + 1
try:
u = self.encoder(ltr1)
for j, ltr2 in enumerate(seq_[a:b], start=a):
try:
v = self.encoder(ltr2)
k = (i, u, j, v)
data[row, vocab[k]] = 1
except KeyError:
pass
except KeyError:
pass
return data
def get_feature_names(self):
return self.feature_names_
| gpl-3.0 |
mauzeh/formation-flight | validation/get_max_fuel_burn.py | 1 | 2818 | import math
import lib.sim
from lib.debug import print_dictionary
from lib.geo.util import get_fuel_burned_during_cruise
from lib.geo.util import formationburn
from lib.geo.util import get_weight_ratio
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['font.size'] = 15.
matplotlib.rcParams['font.family'] = 'serif'
matplotlib.rcParams['axes.labelsize'] = 14.
matplotlib.rcParams['xtick.labelsize'] = 14.
matplotlib.rcParams['ytick.labelsize'] = 14.
W_1 = 250000
model = {
'name' : 'B772',
'V' : 500,
'c_T' : .6,
'L_D' : 17,
'W_1' : W_1
}
x = range(1, 11)
y1 = []
y2 = []
for j in x:
vars = {}
vars['formation_size'] = j
vars['origin_to_destination'] = 3000
vars['origin_to_hub'] = 100
vars['hookoff_to_destination'] = 150
vars['hub_to_hookoff'] = (
vars['origin_to_destination'] -
vars['origin_to_hub'] -
vars['hookoff_to_destination']
)
vars['fuel_benchmark'] = 0
vars['fuel_formation'] = 0
model['W_1'] = W_1
vars['fuel_benchmark'] = vars['formation_size'] *\
get_fuel_burned_during_cruise(vars['origin_to_destination'], model)
incurs_benefit = False
for i in range(0, vars['formation_size']):
model['W_1'] = W_1
if incurs_benefit is True:
discount = .25
else:
discount = 0
vars['fuel_formation'] += (
formationburn(
vars['origin_to_hub'],
vars['hub_to_hookoff'],
vars['hookoff_to_destination'],
model, discount
)
)
incurs_benefit = True
vars['fuel_saved'] = vars['fuel_benchmark'] - vars['fuel_formation']
vars['fuel_saved_rel'] = (
vars['fuel_saved'] / vars['fuel_benchmark']
)
y1.append(vars['fuel_saved_rel'])
y2.append((j-1) * .25 / j)
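# (Added note.) Upper bound on the fleet-average discount: the lead aircraft
# saves nothing while each of the other j-1 aircraft saves at most 25%, so
# the maximum average discount is (j-1)*0.25/j.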
print_dictionary(vars)
print r'$n$ & $F_{s, max}$ & $\alpha_{max}$ \\'
for i in x:
print r'%d & %.1f\%% & %.1f\%% \\' % (
i,
100 * y1[i-1],
100 * y2[i-1]
)
plt.plot(x, y2, marker = 'o', markersize = 10, label = r'Maximum Discount $\alpha_{max}$')
plt.plot(x, y1, marker = 'o', markersize = 10, label = r'Maximum Fuel Savings $F_{s,max}$')
plt.title('Upper limits on formation benefits')
plt.xlabel(r'Formation size $N$')
plt.ylabel(r'Benefit')
#plt.yticks([
# 0,.01,.02,.03,.04,.05,.06,.07,.08,.09,.1,.11,.12,.13
#],[
# '0%', '1%', '2%', '3%', '4%', '5%', '6%', '7%', '8%', '9%', '10%', '11%',
# '12%', '13%'
#])
plt.ylim([0, .25])
plt.xlim(1, len(x))
plt.xticks(x)
plt.legend(loc = 'lower right')
#plt.show()
plt.savefig('plots/upper_limits/plot.pdf')
def execute():
pass | mit |
dhruv13J/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing n_components will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
kernc/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too highly correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. Further, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimated correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso, which sets the sparsity of the model,
is set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <gael.varoquaux@inria.fr>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
nikitasingh981/scikit-learn | doc/conf.py | 22 | 9789 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
import sphinx_gallery
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'numpy_ext.numpydoc',
'sphinx.ext.linkcode', 'sphinx.ext.doctest',
'sphinx_gallery.gen_gallery',
'sphinx_issues',
]
# pngmath / imgmath compatibility layer for different sphinx versions
import sphinx
from distutils.version import LooseVersion
if LooseVersion(sphinx.__version__) < LooseVersion('1.4'):
extensions.append('sphinx.ext.pngmath')
else:
extensions.append('sphinx.ext.imgmath')
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2007 - 2017, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
sphinx_gallery_conf = {
'doc_module': 'sklearn',
'reference_url': {
'sklearn': None,
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
'nibabel': 'http://nipy.org/nibabel'}
}
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'sphx_glr_plot_classifier_comparison_001.png': 600,
'sphx_glr_plot_outlier_detection_003.png': 372,
'sphx_glr_plot_gpr_co2_001.png': 350,
'sphx_glr_plot_adaboost_twoclass_001.png': 372,
'sphx_glr_plot_compare_methods_001.png': 349}
def make_carousel_thumbs(app, exception):
"""produces the final resized carousel images"""
if exception is not None:
return
print('Preparing carousel images')
image_dir = os.path.join(app.builder.outdir, '_images')
for glr_plot, max_width in carousel_thumbs.items():
image = os.path.join(image_dir, glr_plot)
if os.path.exists(image):
c_thumb = os.path.join(image_dir, glr_plot[:-4] + '_carousel.png')
sphinx_gallery.gen_rst.scale_image(image, c_thumb, max_width, 190)
# Config for sphinx_issues
issues_uri = 'https://github.com/scikit-learn/scikit-learn/issues/{issue}'
issues_github_path = 'scikit-learn/scikit-learn'
issues_user_uri = 'https://github.com/{user}'
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('build-finished', make_carousel_thumbs)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
alvason/probability-insighter | code/gaussian_random_distribution.py | 1 | 10467 |
# coding: utf-8
# # Probability-insighter
# https://github.com/alvason/probability-insighter
#
# Gaussian random distribution (standard normal distribution)
# In[1]:
'''
author: Alvason Zhenhua Li
date: 03/19/2015
'''
get_ipython().magic('matplotlib inline')
import numpy as np
import matplotlib.pyplot as plt
import os
import alva_machinery_probability as alva
AlvaFontSize = 23
AlvaFigSize = (16, 7)
numberingFig = 0
# for saving figure
saving_dir_path = '/Users/azl/Desktop/GitHub/probability-insighter/figure'
file_name = 'gaussian-distribution'
# plotting
figure_name = '-equation'
file_suffix = '.png'
save_figure = os.path.join(saving_dir_path, file_name + figure_name + file_suffix)
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize=(9, 6))
plt.axis('off')
plt.title(r'$ Gaussian-distribution---probability-density-function $',fontsize = AlvaFontSize)
plt.text(0, 5.0/6, r'$ P(\sigma, \mu|x) = \frac{1}{(2\pi)^{1/2}\sigma} exp[-\frac{1}{2}(\frac{x - \mu}{\sigma})^2] $',
fontsize = 1.2*AlvaFontSize)
plt.text(0, 3.0/6, r'$ 1-- \mu \ is \ the \ mean \ number $',
fontsize = AlvaFontSize)
plt.text(0, 2.0/6, r'$ 2-- \sigma \ is \ the \ standard-deviation \ $', fontsize = AlvaFontSize)
plt.text(0, 1.0/6, r'$ 3-- \ P(\sigma, \mu|x) \ is \ the \ probability $',
fontsize = AlvaFontSize)
plt.savefig(save_figure, dpi = 300)
plt.show()
# In[2]:
def gaussianPMF(total_event, meanP, deviationP):
x_event = np.arange(1, total_event + 1)
constantD = (2*np.pi)**(0.5) * deviationP
constantN = 1.0 / constantD
y_PMF = constantN * np.exp(-(0.5)*((x_event - meanP)/deviationP)**2)
return np.array([x_event, y_PMF])
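# (Added note.) gaussianPMF evaluates the continuous normal density at the
# integer event indices 1..total_event, so its values sum to ~1 only when the
# bulk of the distribution (mu +/- a few sigma) fits inside that range, which
# the parameters chosen below satisfy.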
total_event = int(60)
meanP = total_event/2.0
deviationP = 9
gaussian_PMF = gaussianPMF(total_event, meanP, deviationP)
gaussian_CDF = np.array([gaussian_PMF[0], np.cumsum(gaussian_PMF[1])])
print ('total-probability = {:}'.format(gaussian_PMF[1].sum()))
figure_name = '-distribution-pmf-cdf'
file_suffix = '.png'
save_figure = os.path.join(saving_dir_path, file_name + figure_name + file_suffix)
numberingFig = numberingFig + 1
# plotting1
figure = plt.figure(numberingFig, figsize = AlvaFigSize)
plot1 = figure.add_subplot(1, 2, 1)
plot1.plot(gaussian_PMF[0], gaussian_PMF[1], marker ='o', color = 'green')
plt.title(r'$ Gaussian \ distribution-PDF $', fontsize = AlvaFontSize)
plt.xlabel(r'$ n-event \ with \ (m = {:}, \sigma = {:}) $'.format(meanP, deviationP), fontsize = AlvaFontSize)
plt.ylabel(r'$ P(\sigma, \mu|n) $', fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.6)
plt.yticks(fontsize = AlvaFontSize*0.6)
plt.grid()
# plotting2
plot2 = figure.add_subplot(1, 2, 2)
plot2.plot(gaussian_CDF[0], gaussian_CDF[1], marker ='o', color = 'red')
plt.title(r'$ Gaussian \ distribution-CDF $', fontsize = AlvaFontSize)
plt.xlabel(r'$ n-event \ with \ (m = {:}, \sigma = {:}) $'.format(meanP, deviationP), fontsize = AlvaFontSize)
plt.ylabel(r'$ P(\sigma, \mu|n) $', fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.6)
plt.yticks(fontsize = AlvaFontSize*0.6)
plt.grid()
figure.tight_layout()
plt.show()
plt.savefig(save_figure, dpi = 300, bbox_inches = 'tight')
# In[3]:
def binomialPMF(total_event, p):
x_event = np.arange(1, total_event + 1)
y_PMF = alva.productA(total_event) / (alva.productA(x_event) * alva.productA(total_event - x_event)) * p**x_event * (1 - p)**(total_event - x_event)
return np.array([x_event, y_PMF])
total_event = int(60)
p = 0.5
binomial_PMF = binomialPMF(total_event, p)
print ('total-probability = {:f}'.format(binomial_PMF[1].sum()))
# plotting1
figure = plt.figure(numberingFig, figsize = AlvaFigSize)
plot1 = figure.add_subplot(1, 2, 1)
plot1.plot(binomial_PMF[0], binomial_PMF[1], marker ='o', color = 'green')
plt.title(r'$ Binomial \ distribution-PMF \ (p={:}) $'.format(p), fontsize = AlvaFontSize)
plt.xlabel(r'$ n-event \ within \ total-event \ (N={:}) $'.format(total_event), fontsize = AlvaFontSize)
plt.ylabel(r'$ P(n|N) $', fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.6)
plt.yticks(fontsize = AlvaFontSize*0.6)
plt.grid()
# plotting2
binomial_CDF = np.array([binomial_PMF[0], np.cumsum(binomial_PMF[1])])
plot2 = figure.add_subplot(1, 2, 2)
plot2.plot(binomial_CDF[0], binomial_CDF[1], marker ='o', color = 'red')
plt.title(r'$ Binomial \ distribution-CDF \ (p={:}) $'.format(p), fontsize = AlvaFontSize)
plt.xlabel(r'$ n-event \ within \ total-event \ (N={:}) $'.format(total_event), fontsize = AlvaFontSize)
plt.ylabel(r'$ P(n|N) $', fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.6)
plt.yticks(fontsize = AlvaFontSize*0.6)
plt.grid()
figure.tight_layout()
plt.show()
# In[4]:
raw_data = np.random.standard_normal(size = 3000)
raw_data = np.random.normal(size = 3000)
raw_PMF = alva.AlvaPDF(raw_data, total_level = total_event, empty_leveler_filter = False)
raw_CDF = np.array([raw_PMF[0], np.cumsum(raw_PMF[1])])
# plotting1
figure = plt.figure(numberingFig, figsize = AlvaFigSize)
plot1 = figure.add_subplot(1, 2, 1)
plot1.plot(raw_PMF[0], raw_PMF[1], marker ='o', color = 'green')
plt.title(r'$ Raw \ distribution-PMF $', fontsize = AlvaFontSize)
plt.xlabel(r'$ n-event \ within \ total-event \ (N={:}) $'.format(len(raw_PMF[0])), fontsize = AlvaFontSize)
plt.ylabel(r'$ P(n|N) $', fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.6)
plt.yticks(fontsize = AlvaFontSize*0.6)
plt.grid()
# plotting2
plot2 = figure.add_subplot(1, 2, 2)
plot2.plot(raw_CDF[0], raw_CDF[1], marker ='o', color = 'red')
plt.title(r'$ Raw \ distribution-CDF $', fontsize = AlvaFontSize)
plt.xlabel(r'$ n-event \ within \ total-event \ (N={:}) $'.format(total_event), fontsize = AlvaFontSize)
plt.ylabel(r'$ P(n|N) $', fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.6)
plt.yticks(fontsize = AlvaFontSize*0.6)
plt.grid()
figure.tight_layout()
plt.show()
# In[5]:
### plotting (quantile-quantile)
figure_name = '-distribution-quantile'
file_suffix = '.png'
save_figure = os.path.join(saving_dir_path, file_name + figure_name + file_suffix)
numberingFig = numberingFig + 1
figure = plt.figure(numberingFig, figsize = (9, 6))
window = figure.add_subplot(1, 1, 1)
window.set_title(r'$ Quantile-Quantile $', fontsize = AlvaFontSize)
### boundary line
window.plot(np.arange(0, 1, 0.001), np.arange(0, 1, 0.001), color = 'black', linewidth = 1)
### model[0] ###
window.plot(raw_CDF[1], gaussian_CDF[1], marker = 'o', markersize = 10, color = 'green', alpha = 0.6
, label = '$ gaussian $')
###
window.set_xlabel('$ Empirical \ Quantiles $', fontsize = AlvaFontSize)
window.set_ylabel('$ Gaussian \ Quantiles $', fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.9)
plt.yticks(fontsize = AlvaFontSize*0.9)
window.grid(True)
window.legend(loc = 'upper left', fontsize = AlvaFontSize*0.85)
### model[1] ###
plot2 = window.twinx()
plot2.plot(raw_CDF[1], binomial_CDF[1], marker = '*', markersize = 10, color = 'red', alpha = 0.6
, label = '$ binomial $')
plot2.set_ylabel('$ Binomial \ Quantiles $', fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.9)
plt.yticks(fontsize = AlvaFontSize*0.9)
plot2.grid(True)
plot2.legend(loc = 'lower right', fontsize = AlvaFontSize*0.85)
figure.tight_layout()
plt.savefig(save_figure, dpi = 300, bbox_inches = 'tight')
plt.show()
# In[6]:
'''Gaussian randomness --- Gaussian distribution --- Standard normal distribution'''
total_event = int(1000)
gInput = np.arange(total_event)
randomSeed = np.random.standard_normal(total_event)
sumP = 0
for i in range(total_event):
sumP = sumP + randomSeed[i]
meanP = sumP/(total_event)
sumP = 0
for i in range(total_event):
sumP = sumP + (meanP - randomSeed[i])**2
deviationP = (sumP/total_event)**(1.0/2)
totalLevel = int(total_event/10)
category = alva.AlvaLevel(randomSeed, totalLevel, False)
gLevel = category[0]
numberLevel = category[1]
maxEvent_per_level = alva.AlvaMinMax(numberLevel)[-1]
print ('max-events/level = {:}'.format(maxEvent_per_level))
gaussian_D = maxEvent_per_level * gaussianPMF(len(gLevel), meanP, deviationP)[1]
# plotting
figure_name = ''
file_suffix = '.png'
save_figure = os.path.join(saving_dir_path, file_name + figure_name + file_suffix)
numberingFig = numberingFig + 1
figure = plt.figure(numberingFig, figsize = AlvaFigSize)
plot1 = figure.add_subplot(1, 2, 1)
plot1.plot(gInput, randomSeed, color = 'gray', marker = 'o', label = 'data')
plot1.plot(gInput, alva.AlvaMinMax(randomSeed), color = 'red', marker = 'o', label = 'minMaxListing')
if total_event < 100:
plot1.set_xticks(gInput, minor = True)
plot1.set_yticks(randomSeed, minor = True)
plot1.grid(True, which = 'minor')
else:
plot1.grid(True, which = 'major')
plt.title(r'$ Gaussian \ (mean = {:1.3f},\ deviation = {:1.3f}) $'.format(meanP, deviationP),
fontsize = AlvaFontSize)
plt.xlabel(r'$ event-input $', fontsize = AlvaFontSize)
plt.ylabel(r'$ value-output $', fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.6)
plt.yticks(fontsize = AlvaFontSize*0.6)
plt.legend(loc = (0, -0.2))
plot2 = figure.add_subplot(1, 2, 2)
plot2.plot(numberLevel, gLevel, color = 'red', marker = 'o', label = 'category')
plot2.plot(gaussian_D, gLevel, color = 'blue', marker = 'o', label = 'Gaussian')
plot2.hist(randomSeed, bins = totalLevel, alpha = 0.1, orientation = 'horizontal', rwidth = 0.6)
if total_event < 100:
plot2.set_xticks(numberLevel, minor = True)
plot2.set_yticks(gLevel, minor = True)
plot2.grid(True, which = 'minor')
else:
plot2.grid(True, which = 'major')
plt.title(r'$ Gaussian \ (events = {:},\ levels = {:}) $'.format(total_event, totalLevel)
, fontsize = AlvaFontSize)
plt.xlabel(r'$ events/level $', fontsize = AlvaFontSize)
plt.ylabel(r'$ value-level $', fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.6)
plt.yticks(fontsize = AlvaFontSize*0.6)
plt.legend(loc = (0, -0.2))
figure.tight_layout()
plt.savefig(save_figure, dpi = 300)
plt.show()
# In[8]:
def AlvaIntegrateArea(out_i, min_i, max_i, totalGPoint_i):
spacing_i = np.linspace(min_i, max_i, num = totalGPoint_i, retstep = True)
grid_i = spacing_i[0]
dx = spacing_i[1]
outArea = np.sum(out_i(grid_i[:]))*dx
return (outArea)
def gaussianA(i):
inOut = np.exp(-i**2)
return (inOut)
ggg = AlvaIntegrateArea(gaussianA, -10, 10, 100)
print (ggg)
ppp = (np.pi)**(1.0/2)
print (ppp)
# In[ ]:
| gpl-2.0 |
jreback/pandas | pandas/tests/reshape/merge/test_merge_cross.py | 2 | 2794 | import pytest
from pandas import DataFrame
import pandas._testing as tm
from pandas.core.reshape.merge import MergeError, merge
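# (Added note.) how="cross" forms the cartesian product of the two frames:
# every row of `left` is paired with every row of `right`, so the result has
# len(left) * len(right) rows and no join keys may be passed (see the
# error-reporting tests below).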
@pytest.mark.parametrize(
("input_col", "output_cols"), [("b", ["a", "b"]), ("a", ["a_x", "a_y"])]
)
def test_merge_cross(input_col, output_cols):
# GH#5401
left = DataFrame({"a": [1, 3]})
right = DataFrame({input_col: [3, 4]})
left_copy = left.copy()
right_copy = right.copy()
result = merge(left, right, how="cross")
expected = DataFrame({output_cols[0]: [1, 1, 3, 3], output_cols[1]: [3, 4, 3, 4]})
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(left, left_copy)
tm.assert_frame_equal(right, right_copy)
@pytest.mark.parametrize(
"kwargs",
[
{"left_index": True},
{"right_index": True},
{"on": "a"},
{"left_on": "a"},
{"right_on": "b"},
],
)
def test_merge_cross_error_reporting(kwargs):
# GH#5401
left = DataFrame({"a": [1, 3]})
right = DataFrame({"b": [3, 4]})
msg = (
"Can not pass on, right_on, left_on or set right_index=True or "
"left_index=True"
)
with pytest.raises(MergeError, match=msg):
merge(left, right, how="cross", **kwargs)
def test_merge_cross_mixed_dtypes():
# GH#5401
left = DataFrame(["a", "b", "c"], columns=["A"])
right = DataFrame(range(2), columns=["B"])
result = merge(left, right, how="cross")
expected = DataFrame({"A": ["a", "a", "b", "b", "c", "c"], "B": [0, 1, 0, 1, 0, 1]})
tm.assert_frame_equal(result, expected)
def test_merge_cross_more_than_one_column():
# GH#5401
left = DataFrame({"A": list("ab"), "B": [2, 1]})
right = DataFrame({"C": range(2), "D": range(4, 6)})
result = merge(left, right, how="cross")
expected = DataFrame(
{
"A": ["a", "a", "b", "b"],
"B": [2, 2, 1, 1],
"C": [0, 1, 0, 1],
"D": [4, 5, 4, 5],
}
)
tm.assert_frame_equal(result, expected)
def test_merge_cross_null_values(nulls_fixture):
# GH#5401
left = DataFrame({"a": [1, nulls_fixture]})
right = DataFrame({"b": ["a", "b"], "c": [1.0, 2.0]})
result = merge(left, right, how="cross")
expected = DataFrame(
{
"a": [1, 1, nulls_fixture, nulls_fixture],
"b": ["a", "b", "a", "b"],
"c": [1.0, 2.0, 1.0, 2.0],
}
)
tm.assert_frame_equal(result, expected)
def test_join_cross_error_reporting():
# GH#5401
left = DataFrame({"a": [1, 3]})
right = DataFrame({"a": [3, 4]})
msg = (
"Can not pass on, right_on, left_on or set right_index=True or "
"left_index=True"
)
with pytest.raises(MergeError, match=msg):
left.join(right, how="cross", on="a")
| bsd-3-clause |
hrjn/scikit-learn | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the number of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
michigraber/scikit-learn | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <jakevdp@cs.washington.edu>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
                    np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
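# Illustrative addition (not part of the original example): the bandwidth used
# above was fixed at 0.5; in practice it can be selected by cross-validation,
# e.g. by maximizing the held-out log-likelihood with GridSearchCV.
from sklearn.grid_search import GridSearchCV
grid = GridSearchCV(KernelDensity(kernel='gaussian'),
                    {'bandwidth': np.logspace(-1, 1, 20)}, cv=5)
grid.fit(X)
print("best bandwidth: {0:.3f}".format(grid.best_estimator_.bandwidth))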
| bsd-3-clause |
LIKAIMO/MissionPlanner | Lib/site-packages/scipy/signal/filter_design.py | 53 | 63381 | """Filter design.
"""
import types
import warnings
import numpy
from numpy import atleast_1d, poly, polyval, roots, real, asarray, allclose, \
resize, pi, absolute, logspace, r_, sqrt, tan, log10, arctan, arcsinh, \
cos, exp, cosh, arccosh, ceil, conjugate, zeros, sinh
from numpy import mintypecode
from scipy import special, optimize
from scipy.misc import comb
class BadCoefficients(UserWarning):
pass
abs = absolute
def findfreqs(num, den, N):
ep = atleast_1d(roots(den))+0j
tz = atleast_1d(roots(num))+0j
if len(ep) == 0:
ep = atleast_1d(-1000)+0j
ez = r_['-1',numpy.compress(ep.imag >=0, ep,axis=-1), numpy.compress((abs(tz) < 1e5) & (tz.imag >=0),tz,axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3*abs(ez.real + integ)+1.5*ez.imag))+0.5)
lfreq = numpy.around(numpy.log10(0.1*numpy.min(abs(real(ez+integ))+2*ez.imag))-0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the numerator (b) and denominator (a) of a filter compute its
frequency response::
b[0]*(jw)**(nb-1) + b[1]*(jw)**(nb-2) + ... + b[nb-1]
H(w) = -------------------------------------------------------
a[0]*(jw)**(na-1) + a[1]*(jw)**(na-2) + ... + a[na-1]
Parameters
----------
b : ndarray
Numerator of a linear filter.
a : ndarray
Denominator of a linear filter.
worN : {None, int}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
        integer, then compute at that many frequencies. Otherwise, compute the
response at frequencies given in worN.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The frequencies at which h was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results: this plots the real part of the complex transfer
function, not the magnitude.
"""
if worN is None:
w = findfreqs(b,a,200)
elif isinstance(worN, types.IntType):
N = worN
w = findfreqs(b,a,N)
else:
w = worN
w = atleast_1d(w)
s = 1j*w
h = polyval(b, s) / polyval(a, s)
if not plot is None:
plot(w, h)
return w, h
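def _freqs_example():
    # Illustrative sketch (not part of the original module): evaluate the
    # frequency response of the analog filter H(s) = 1 / (s**2 + s + 1) on a
    # logarithmic frequency grid.
    b, a = [1.0], [1.0, 1.0, 1.0]
    w, h = freqs(b, a, worN=logspace(-1, 1, 200))
    return w, abs(h)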
def freqz(b, a=1, worN=None, whole=0, plot=None):
"""
Compute the frequency response of a digital filter.
Given the numerator ``b`` and denominator ``a`` of a digital filter compute
its frequency response::
        H(exp(jw)) = B(exp(jw)) / A(exp(jw))
                   = (b[0] + b[1]*exp(-jw) + ... + b[m]*exp(-jmw)) /
                     (a[0] + a[1]*exp(-jw) + ... + a[n]*exp(-jnw))
Parameters
----------
b : ndarray
numerator of a linear filter
a : ndarray
denominator of a linear filter
worN : {None, int}, optional
If None, then compute at 512 frequencies around the unit circle.
        If a single integer, then compute at that many frequencies.
Otherwise, compute the response at frequencies given in worN
whole : bool, optional
        Normally, frequencies are computed from 0 to pi (the upper half of
        the unit circle). If `whole` is True, compute frequencies from 0 to 2*pi.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The frequencies at which h was computed.
h : ndarray
The frequency response.
Notes
-----
Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results: this plots the real part of the complex transfer
function, not the magnitude.
Examples
--------
>>> b = firwin(80, 0.5, window=('kaiser', 8))
    >>> w, h = freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
    >>> plt.semilogy(w, np.abs(h), 'b')
>>> plt.ylabel('Amplitude (dB)', color='b')
>>> plt.xlabel('Frequency (rad/sample)')
>>> plt.grid()
>>> plt.legend()
>>> ax2 = ax1.twinx()
    >>> angles = np.unwrap(np.angle(h))
    >>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.show()
"""
b, a = map(atleast_1d, (b,a))
if whole:
lastpoint = 2*pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.arange(0,lastpoint,lastpoint/N)
elif isinstance(worN, types.IntType):
N = worN
w = numpy.arange(0,lastpoint,lastpoint/N)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(-1j*w)
h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
if not plot is None:
plot(w, h)
return w, h
def tf2zpk(b, a):
"""Return zero, pole, gain (z,p,k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : ndarray
Numerator polynomial.
a : ndarray
Denominator polynomial.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
If some values of b are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
"""
b,a = normalize(b,a)
b = (b+0.0) / a[0]
a = (a+0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
def zpk2tf(z, p, k):
"""Return polynomial transfer function representation from zeros
and poles
Parameters
----------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial.
a : ndarray
Denominator polynomial.
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1]+1), temp.dtype.char)
if len(k) == 1:
k = [k[0]]*z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
return b, a
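def _zpk_roundtrip_example():
    # Illustrative sketch (not part of the original module): converting a
    # transfer function to zero-pole-gain form and back recovers the original
    # coefficients.
    b, a = [2.0, 4.0], [1.0, 3.0, 2.0]
    z, p, k = tf2zpk(b, a)
    b2, a2 = zpk2tf(z, p, k)
    return allclose(b, b2) and allclose(a, a2)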
def normalize(b, a):
"""Normalize polynomial representation of a transfer function.
If values of b are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
"""
b,a = map(atleast_1d,(b,a))
if len(a.shape) != 1:
raise ValueError("Denominator polynomial must be rank-1 array.")
if len(b.shape) > 2:
raise ValueError("Numerator polynomial must be rank-1 or rank-2 array.")
if len(b.shape) == 1:
b = asarray([b],b.dtype.char)
while a[0] == 0.0 and len(a) > 1:
a = a[1:]
outb = b * (1.0) / a[0]
outa = a * (1.0) / a[0]
if allclose(outb[:,0], 0, rtol=1e-14):
warnings.warn("Badly conditioned filter coefficients (numerator): the "
"results may be meaningless", BadCoefficients)
while allclose(outb[:,0], 0, rtol=1e-14) and (outb.shape[-1] > 1):
outb = outb[:,1:]
if outb.shape[0] == 1:
outb = outb[0]
return outb, outa
def lp2lp(b, a, wo=1.0):
"""Return a low-pass filter with cutoff frequency `wo`
from a low-pass filter prototype with unity cutoff frequency.
"""
a,b = map(atleast_1d,(a,b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
M = max((d,n))
pwo = pow(wo,numpy.arange(M-1,-1,-1))
start1 = max((n-d,0))
start2 = max((d-n,0))
b = b * pwo[start1]/pwo[start2:]
a = a * pwo[start1]/pwo[start1:]
return normalize(b, a)
def lp2hp(b, a, wo=1.0):
"""Return a high-pass filter with cutoff frequency `wo`
from a low-pass filter prototype with unity cutoff frequency.
"""
a,b = map(atleast_1d,(a,b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
if wo != 1:
pwo = pow(wo,numpy.arange(max((d,n))))
else:
pwo = numpy.ones(max((d,n)),b.dtype.char)
if d >= n:
outa = a[::-1] * pwo
outb = resize(b,(d,))
outb[n:] = 0.0
outb[:n] = b[::-1] * pwo[:n]
else:
outb = b[::-1] * pwo
outa = resize(a,(n,))
outa[d:] = 0.0
outa[:d] = a[::-1] * pwo[:d]
return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
"""Return a band-pass filter with center frequency `wo` and bandwidth `bw`
from a low-pass filter prototype with unity cutoff frequency.
"""
a,b = map(atleast_1d,(a,b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a,b))
ma = max([N,D])
Np = N + ma
Dp = D + ma
bprime = numpy.zeros(Np+1,artype)
aprime = numpy.zeros(Dp+1,artype)
wosq = wo*wo
for j in range(Np+1):
val = 0.0
for i in range(0,N+1):
for k in range(0,i+1):
if ma-i+2*k == j:
val += comb(i,k)*b[N-i]*(wosq)**(i-k) / bw**i
bprime[Np-j] = val
for j in range(Dp+1):
val = 0.0
for i in range(0,D+1):
for k in range(0,i+1):
if ma-i+2*k == j:
val += comb(i,k)*a[D-i]*(wosq)**(i-k) / bw**i
aprime[Dp-j] = val
return normalize(bprime, aprime)
def lp2bs(b, a, wo=1, bw=1):
"""Return a band-stop filter with center frequency `wo` and bandwidth `bw`
from a low-pass filter prototype with unity cutoff frequency.
"""
a,b = map(atleast_1d,(a,b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a,b))
M = max([N,D])
Np = M + M
Dp = M + M
bprime = numpy.zeros(Np+1,artype)
aprime = numpy.zeros(Dp+1,artype)
wosq = wo*wo
for j in range(Np+1):
val = 0.0
for i in range(0,N+1):
for k in range(0,M-i+1):
if i+2*k == j:
val += comb(M-i,k)*b[N-i]*(wosq)**(M-i-k) * bw**i
bprime[Np-j] = val
for j in range(Dp+1):
val = 0.0
for i in range(0,D+1):
for k in range(0,M-i+1):
if i+2*k == j:
val += comb(M-i,k)*a[D-i]*(wosq)**(M-i-k) * bw**i
aprime[Dp-j] = val
return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
"""Return a digital filter from an analog filter using the bilinear transform.
    The bilinear transform substitutes ``2*fs*(z-1) / (z+1)`` for ``s``.
"""
    fs = float(fs)
a,b = map(atleast_1d,(a,b))
D = len(a) - 1
N = len(b) - 1
artype = float
M = max([N,D])
Np = M
Dp = M
bprime = numpy.zeros(Np+1,artype)
aprime = numpy.zeros(Dp+1,artype)
for j in range(Np+1):
val = 0.0
for i in range(N+1):
for k in range(i+1):
for l in range(M-i+1):
if k+l == j:
val += comb(i,k)*comb(M-i,l)*b[N-i]*pow(2*fs,i)*(-1)**k
bprime[j] = real(val)
for j in range(Dp+1):
val = 0.0
for i in range(D+1):
for k in range(i+1):
for l in range(M-i+1):
if k+l == j:
val += comb(i,k)*comb(M-i,l)*a[D-i]*pow(2*fs,i)*(-1)**k
aprime[j] = real(val)
return normalize(bprime, aprime)
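def _bilinear_example():
    # Illustrative sketch (not part of the original module): discretizing the
    # analog integrator H(s) = 1/s at fs = 10 Hz yields the trapezoidal-rule
    # integrator b = [0.05, 0.05], a = [1, -1].
    return bilinear([1.0], [1.0, 0.0], fs=10.0)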
def iirdesign(wp, ws, gpass, gstop, analog=0, ftype='ellip', output='ba'):
"""Complete IIR digital and analog filter design.
Given passband and stopband frequencies and gains construct an analog or
digital IIR filter of minimum order for a given basic type. Return the
output in numerator, denominator ('ba') or pole-zero ('zpk') form.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
ftype : str, optional
The type of IIR filter to design:
- elliptic : 'ellip'
- Butterworth : 'butter',
- Chebyshev I : 'cheby1',
- Chebyshev II: 'cheby2',
- Bessel : 'bessel'
output : ['ba', 'zpk'], optional
Type of output: numerator/denominator ('ba') or pole-zero ('zpk').
Default is 'ba'.
Returns
-------
b, a :
Numerator and denominator of the IIR filter. Only returned if
``output='ba'``.
z, p, k : Zeros, poles, and gain of the IIR filter. Only returned if
``output='zpk'``.
"""
try:
ordfunc = filter_dict[ftype][1]
except KeyError:
raise ValueError("Invalid IIR filter type: %s" % ftype)
except IndexError:
raise ValueError("%s does not have order selection use iirfilter function." % ftype)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
band_type = 2*(len(wp)-1)
band_type +=1
if wp[0] >= ws[0]:
band_type += 1
btype = {1:'lowpass', 2:'highpass', 3:'bandstop', 4:'bandpass'}[band_type]
N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype, ftype=ftype, output=output)
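def _iirdesign_example():
    # Illustrative sketch (not part of the original module): minimum-order
    # elliptic low-pass design with passband edge 0.2 and stopband edge 0.3
    # (normalized), at most 1 dB of passband ripple and at least 40 dB of
    # stopband attenuation.
    return iirdesign(wp=0.2, ws=0.3, gpass=1, gstop=40, ftype='ellip')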
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=0, ftype='butter', output='ba'):
"""IIR digital and analog filter design given order and critical points.
Design an Nth order lowpass digital or analog filter and return the filter
coefficients in (B,A) (numerator, denominator) or (Z,P,K) form.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
rp : float, optional
For Chebyshev and elliptic filters provides the maximum ripple
in the passband.
rs : float, optional
For chebyshev and elliptic filters provides the minimum attenuation in
the stop band.
btype : str, optional
The type of filter (lowpass, highpass, bandpass, bandstop).
Default is bandpass.
analog : int, optional
Non-zero to return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- elliptic : 'ellip'
- Butterworth : 'butter',
- Chebyshev I : 'cheby1',
- Chebyshev II: 'cheby2',
- Bessel : 'bessel'
output : ['ba', 'zpk'], optional
Type of output: numerator/denominator ('ba') or pole-zero ('zpk').
Default is 'ba'.
See Also
--------
    buttord, cheb1ord, cheb2ord, ellipord
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
try:
btype = band_dict[btype]
except KeyError:
raise ValueError("%s is an invalid bandtype for filter." % btype)
try:
typefunc = filter_dict[ftype][0]
except KeyError:
raise ValueError("%s is not a valid basic iir filter." % ftype)
if output not in ['ba', 'zpk']:
raise ValueError("%s is not a valid output form." % output)
#pre-warp frequencies for digital filter design
if not analog:
fs = 2.0
warped = 2*fs*tan(pi*Wn/fs)
else:
warped = Wn
# convert to low-pass prototype
if btype in ['lowpass', 'highpass']:
wo = warped
else:
bw = warped[1] - warped[0]
wo = sqrt(warped[0]*warped[1])
# Get analog lowpass prototype
if typefunc in [buttap, besselap]:
z, p, k = typefunc(N)
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband atteunatuion (rs) must be provided to design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
else: # Elliptic filters
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an elliptic filter.")
z, p, k = typefunc(N, rp, rs)
b, a = zpk2tf(z,p,k)
# transform to lowpass, bandpass, highpass, or bandstop
if btype == 'lowpass':
b, a = lp2lp(b,a,wo=wo)
elif btype == 'highpass':
b, a = lp2hp(b,a,wo=wo)
elif btype == 'bandpass':
b, a = lp2bp(b,a,wo=wo,bw=bw)
else: # 'bandstop'
b, a = lp2bs(b,a,wo=wo,bw=bw)
# Find discrete equivalent if necessary
if not analog:
b, a = bilinear(b, a, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return tf2zpk(b,a)
else:
return b,a
def butter(N, Wn, btype='low', analog=0, output='ba'):
"""Butterworth digital and analog filter design.
Design an Nth order lowpass digital or analog Butterworth filter and return
the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
buttord.
"""
return iirfilter(N, Wn, btype=btype, analog=analog, output=output, ftype='butter')
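def _butter_example():
    # Illustrative sketch (not part of the original module): 4th-order digital
    # Butterworth low-pass filter with the cutoff at 0.2 times the Nyquist
    # frequency, inspected with freqz.
    b, a = butter(4, 0.2)
    w, h = freqz(b, a)
    return w, abs(h)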
def cheby1(N, rp, Wn, btype='low', analog=0, output='ba'):
"""Chebyshev type I digital and analog filter design.
Design an Nth order lowpass digital or analog Chebyshev type I filter and
return the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
cheb1ord.
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog, output=output, ftype='cheby1')
def cheby2(N, rs, Wn, btype='low', analog=0, output='ba'):
"""Chebyshev type I digital and analog filter design.
Design an Nth order lowpass digital or analog Chebyshev type I filter and
return the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
cheb2ord.
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog, output=output, ftype='cheby2')
def ellip(N, rp, rs, Wn, btype='low', analog=0, output='ba'):
"""Elliptic (Cauer) digital and analog filter design.
Design an Nth order lowpass digital or analog elliptic filter and return
the filter coefficients in (B,A) or (Z,P,K) form.
See also
--------
ellipord.
"""
return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog, output=output, ftype='elliptic')
def bessel(N, Wn, btype='low', analog=0, output='ba'):
"""Bessel digital and analog filter design.
Design an Nth order lowpass digital or analog Bessel filter and return the
filter coefficients in (B,A) or (Z,P,K) form.
"""
return iirfilter(N, Wn, btype=btype, analog=analog, output=output, ftype='bessel')
def maxflat():
pass
def yulewalk():
pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
"""Band Stop Objective Function for order minimization.
Returns the non-integer order for an analog band stop filter.
Parameters
----------
wp :
Edge of passband `passb`.
ind : int
Index specifying which `passb` edge to vary (0 or 1).
passb : array_like
Two element sequence of fixed passband edges.
stopb : array_like
Two element sequence of fixed stopband edges.
gstop : float
Amount of attenuation in stopband in dB.
gpass : float
Amount of ripple in the passband in dB.
type : ['butter', 'cheby', 'ellip']
Type of filter.
Returns
-------
n : scalar
Filter order (possibly non-integer).
"""
passbC = passb.copy()
passbC[ind] = wp
nat = stopb*(passbC[0]-passbC[1]) / (stopb**2 - passbC[0]*passbC[1])
nat = min(abs(nat))
if type == 'butter':
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
n = (log10((GSTOP-1.0)/(GPASS-1.0)) / (2*log10(nat)))
elif type == 'cheby':
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
n = arccosh(sqrt((GSTOP-1.0)/(GPASS-1.0))) / arccosh(nat)
elif type == 'ellip':
GSTOP = 10**(0.1*gstop)
GPASS = 10**(0.1*gpass)
arg1 = sqrt( (GPASS-1.0) / (GSTOP-1.0) )
arg0 = 1.0 / nat
d0 = special.ellipk([arg0**2, 1-arg0**2])
d1 = special.ellipk([arg1**2, 1-arg1**2])
n = (d0[0]*d1[1] / (d0[1]*d1[0]))
else:
raise ValueError("Incorrect type: %s" % type)
return n
def buttord(wp, ws, gpass, gstop, analog=0):
"""Butterworth filter order selection.
Return the order of the lowest order digital Butterworth filter that loses
no more than `gpass` dB in the passband and has at least `gstop` dB
attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for a Butterworth filter which meets specs.
wn : ndarray or float
The Butterworth natural frequency (i.e. the "3dB frequency"). Should
be used with `butter` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2*(len(wp)-1)
filter_type +=1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies
if not analog:
passb = tan(wp*pi/2.0)
stopb = tan(ws*pi/2.0)
else:
passb = wp*1.0
stopb = ws*1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
args=(0,passb,stopb,gpass,gstop,'butter'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
args=(1,passb,stopb,gpass,gstop,'butter'),
disp=0)
passb[1] = wp1
nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
elif filter_type == 4: # pass
nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))
nat = min(abs(nat))
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
ord = int(ceil( log10((GSTOP-1.0)/(GPASS-1.0)) / (2*log10(nat))))
    # Find the Butterworth natural frequency W0 (i.e., the "3 dB frequency")
# to give exactly gstop at nat. W0 will be between 1 and nat
try:
W0 = nat / ( ( 10**(0.1*abs(gstop))-1)**(1.0/(2.0*ord)))
except ZeroDivisionError:
W0 = nat
print "Warning, order is zero...check input parametegstop."
# now convert this frequency back from lowpass prototype
# to the original analog filter
if filter_type == 1: # low
WN = W0*passb
elif filter_type == 2: # high
WN = passb / W0
elif filter_type == 3: # stop
WN = numpy.zeros(2,float)
WN[0] = ((passb[1] - passb[0]) + sqrt((passb[1] - passb[0])**2 + \
4*W0**2 * passb[0] * passb[1])) / (2*W0)
WN[1] = ((passb[1] - passb[0]) - sqrt((passb[1] - passb[0])**2 + \
4*W0**2 * passb[0] * passb[1])) / (2*W0)
WN = numpy.sort(abs(WN))
elif filter_type == 4: # pass
W0 = numpy.array([-W0, W0],float)
WN = -W0 * (passb[1]-passb[0]) / 2.0 + sqrt(W0**2 / 4.0 * \
(passb[1]-passb[0])**2 + \
passb[0]*passb[1])
WN = numpy.sort(abs(WN))
else:
raise ValueError("Bad type: %s" % filter_type)
if not analog:
wn = (2.0/pi)*arctan(WN)
else:
wn = WN
if len(wn) == 1:
wn = wn[0]
return ord, wn
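def _buttord_example():
    # Illustrative sketch (not part of the original module): pick the minimum
    # Butterworth order for a low-pass specification (passband edge 0.2,
    # stopband edge 0.3, 3 dB passband loss, 40 dB stopband attenuation) and
    # design the corresponding filter.
    order, wn = buttord(wp=0.2, ws=0.3, gpass=3, gstop=40)
    return butter(order, wn)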
def cheb1ord(wp, ws, gpass, gstop, analog=0):
"""Chebyshev type I filter order selection.
Return the order of the lowest order digital Chebyshev Type I filter that
loses no more than `gpass` dB in the passband and has at least `gstop` dB
attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for a Chebyshev type I filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby1` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2*(len(wp)-1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
    # Pre-warp frequencies
if not analog:
passb = tan(pi*wp/2.)
stopb = tan(pi*ws/2.)
else:
passb = wp*1.0
stopb = ws*1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
args=(0,passb,stopb,gpass,gstop,'cheby'), disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
args=(1,passb,stopb,gpass,gstop,'cheby'), disp=0)
passb[1] = wp1
nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
elif filter_type == 4: # pass
nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))
nat = min(abs(nat))
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP-1.0) / (GPASS-1.0))) / arccosh(nat)))
# Natural frequencies are just the passband edges
if not analog:
wn = (2.0/pi)*arctan(passb)
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb2ord(wp, ws, gpass, gstop, analog=0):
"""Chebyshev type II filter order selection.
Return the order of the lowest order digital Chebyshev Type II filter
that loses no more than gpass dB in the passband and has at least gstop dB
attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
-------
ord : int
The lowest order for a Chebyshev type II filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby2` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2*(len(wp)-1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
    # Pre-warp frequencies
if not analog:
passb = tan(pi*wp/2.0)
stopb = tan(pi*ws/2.0)
else:
passb = wp*1.0
stopb = ws*1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
args=(0,passb,stopb,gpass,gstop,'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
args=(1,passb,stopb,gpass,gstop,'cheby'),
disp=0)
passb[1] = wp1
nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
elif filter_type == 4: # pass
nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))
nat = min(abs(nat))
GSTOP = 10**(0.1*abs(gstop))
GPASS = 10**(0.1*abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP-1.0) / (GPASS-1.0))) / arccosh(nat)))
# Find frequency where analog response is -gpass dB.
# Then convert back from low-pass prototype to the original filter.
new_freq = cosh(1.0/ord * arccosh(sqrt((GSTOP-1.0)/(GPASS-1.0))))
new_freq = 1.0 / new_freq
if filter_type == 1:
nat = passb / new_freq
elif filter_type == 2:
nat = passb * new_freq
elif filter_type == 3:
nat = numpy.zeros(2,float)
nat[0] = new_freq / 2.0 * (passb[0]-passb[1]) + \
sqrt(new_freq**2 * (passb[1]-passb[0])**2 / 4.0 + \
passb[1] * passb[0])
nat[1] = passb[1] * passb[0] / nat[0]
elif filter_type == 4:
nat = numpy.zeros(2,float)
nat[0] = 1.0/(2.0*new_freq) * (passb[0] - passb[1]) + \
sqrt((passb[1]-passb[0])**2 / (4.0*new_freq**2) + \
passb[1] * passb[0])
nat[1] = passb[0] * passb[1] / nat[0]
if not analog:
wn = (2.0/pi)*arctan(nat)
else:
wn = nat
if len(wn) == 1:
wn = wn[0]
return ord, wn
def ellipord(wp, ws, gpass, gstop, analog=0):
"""Elliptic (Cauer) filter order selection.
Return the order of the lowest order digital elliptic filter that loses no
more than gpass dB in the passband and has at least gstop dB attenuation in
the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies, normalized from 0 to 1 (1
corresponds to pi radians / sample). For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : int, optional
Non-zero to design an analog filter (in this case `wp` and `ws` are in
radians / second).
Returns
    -------
ord : int
The lowest order for an Elliptic (Cauer) filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
        `ellip` to give filter results.
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2*(len(wp)-1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
    # Pre-warp frequencies
if analog:
passb = wp*1.0
stopb = ws*1.0
else:
passb = tan(wp*pi/2.0)
stopb = tan(ws*pi/2.0)
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
args=(0,passb,stopb,gpass,gstop,'ellip'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
args=(1,passb,stopb,gpass,gstop,'ellip'),
disp=0)
passb[1] = wp1
nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
elif filter_type == 4: # pass
nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))
nat = min(abs(nat))
GSTOP = 10**(0.1*gstop)
GPASS = 10**(0.1*gpass)
arg1 = sqrt( (GPASS-1.0) / (GSTOP-1.0) )
arg0 = 1.0 / nat
d0 = special.ellipk([arg0**2, 1-arg0**2])
d1 = special.ellipk([arg1**2, 1-arg1**2])
ord = int(ceil(d0[0]*d1[1] / (d0[1]*d1[0])))
if not analog:
wn = arctan(passb)*2.0/pi
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def buttap(N):
"""Return (z,p,k) zero, pole, gain for analog prototype of an Nth
order Butterworth filter."""
z = []
n = numpy.arange(1,N+1)
p = numpy.exp(1j*(2*n-1)/(2.0*N)*pi)*1j
k = 1
return z, p, k
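def _buttap_example():
    # Illustrative sketch (not part of the original module): the Butterworth
    # analog prototype has no zeros, unit gain, and all of its poles on the
    # unit circle in the left half of the s-plane.
    z, p, k = buttap(4)
    return (len(z) == 0 and k == 1
            and allclose(abs(p), 1.0) and (p.real < 0).all())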
def cheb1ap(N, rp):
"""Return (z,p,k) zero, pole, gain for Nth order Chebyshev type I lowpass
analog filter prototype with `rp` decibels of ripple in the passband.
"""
z = []
eps = numpy.sqrt(10**(0.1*rp)-1.0)
n = numpy.arange(1,N+1)
mu = 1.0/N * numpy.log((1.0+numpy.sqrt(1+eps*eps)) / eps)
theta = pi/2.0 * (2*n-1.0)/N
p = -numpy.sinh(mu)*numpy.sin(theta) + 1j*numpy.cosh(mu)*numpy.cos(theta)
k = numpy.prod(-p,axis=0).real
if N % 2 == 0:
k = k / sqrt((1+eps*eps))
return z, p, k
def cheb2ap(N, rs):
"""Return (z,p,k) zero, pole, gain for Nth order Chebyshev type II lowpass
analog filter prototype with `rs` decibels of ripple in the stopband.
"""
de = 1.0/sqrt(10**(0.1*rs)-1)
mu = arcsinh(1.0/de)/N
if N % 2:
m = N - 1
n = numpy.concatenate((numpy.arange(1,N-1,2),numpy.arange(N+2,2*N,2)))
else:
m = N
n = numpy.arange(1,2*N,2)
z = conjugate(1j / cos(n*pi/(2.0*N)))
p = exp(1j*(pi*numpy.arange(1,2*N,2)/(2.0*N) + pi/2.0))
p = sinh(mu) * p.real + 1j*cosh(mu)*p.imag
p = 1.0 / p
k = (numpy.prod(-p,axis=0)/numpy.prod(-z,axis=0)).real
return z, p, k
EPSILON = 2e-16
def vratio(u, ineps, mp):
[s,c,d,phi] = special.ellipj(u,mp)
ret = abs(ineps - s/c)
return ret
def kratio(m, k_ratio):
m = float(m)
if m < 0:
m = 0.0
if m > 1:
m = 1.0
if abs(m) > EPSILON and (abs(m) + EPSILON) < 1:
k = special.ellipk([m,1-m])
r = k[0] / k[1] - k_ratio
elif abs(m) > EPSILON:
r = -k_ratio
else:
r = 1e20
return abs(r)
def ellipap(N, rp, rs):
"""Return (z,p,k) zeros, poles, and gain of an Nth order normalized
prototype elliptic analog lowpass filter with `rp` decibels of ripple in
the passband and a stopband `rs` decibels down.
References
----------
    Lutovac, Tosic, and Evans, "Filter Design for Signal Processing", Chapters 5
and 12.
"""
if N == 1:
p = -sqrt(1.0/(10**(0.1*rp)-1.0))
k = -p
z = []
return z, p, k
eps = numpy.sqrt(10**(0.1*rp)-1)
ck1 = eps / numpy.sqrt(10**(0.1*rs)-1)
ck1p = numpy.sqrt(1-ck1*ck1)
if ck1p == 1:
raise ValueError("Cannot design a filter with given rp and rs specifications.")
wp = 1
val = special.ellipk([ck1*ck1,ck1p*ck1p])
if abs(1-ck1p*ck1p) < EPSILON:
krat = 0
else:
krat = N*val[0] / val[1]
m = optimize.fmin(kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
disp=0)
if m < 0 or m > 1:
m = optimize.fminbound(kratio, 0, 1, args=(krat,), maxfun=250,
maxiter=250, disp=0)
capk = special.ellipk(m)
ws = wp / sqrt(m)
m1 = 1-m
j = numpy.arange(1-N%2,N,2)
jj = len(j)
[s,c,d,phi] = special.ellipj(j*capk/N,m*numpy.ones(jj))
snew = numpy.compress(abs(s) > EPSILON, s,axis=-1)
z = 1.0 / (sqrt(m)*snew)
z = 1j*z
z = numpy.concatenate((z,conjugate(z)))
r = optimize.fmin(vratio, special.ellipk(m), args=(1./eps, ck1p*ck1p),
maxfun=250, maxiter=250, disp=0)
v0 = capk * r / (N*val[0])
[sv,cv,dv,phi] = special.ellipj(v0,1-m)
p = -(c*d*sv*cv + 1j*s*dv) / (1-(d*sv)**2.0)
if N % 2:
newp = numpy.compress(abs(p.imag) > EPSILON*numpy.sqrt(numpy.sum(p*numpy.conjugate(p),axis=0).real), p,axis=-1)
p = numpy.concatenate((p,conjugate(newp)))
else:
p = numpy.concatenate((p,conjugate(p)))
k = (numpy.prod(-p,axis=0) / numpy.prod(-z,axis=0)).real
if N % 2 == 0:
k = k / numpy.sqrt((1+eps*eps))
return z, p, k
def besselap(N):
"""Return (z,p,k) zero, pole, gain for analog prototype of an Nth order
Bessel filter."""
z = []
k = 1
if N == 0:
p = [];
elif N == 1:
p = [-1]
elif N == 2:
p = [-.8660254037844386467637229+.4999999999999999999999996*1j,
-.8660254037844386467637229-.4999999999999999999999996*1j]
elif N == 3:
p = [-.9416000265332067855971980,
-.7456403858480766441810907-.7113666249728352680992154*1j,
-.7456403858480766441810907+.7113666249728352680992154*1j]
elif N == 4:
p = [-.6572111716718829545787781-.8301614350048733772399715*1j,
-.6572111716718829545787788+.8301614350048733772399715*1j,
-.9047587967882449459642637-.2709187330038746636700923*1j,
-.9047587967882449459642624+.2709187330038746636700926*1j]
elif N == 5:
p = [-.9264420773877602247196260,
-.8515536193688395541722677-.4427174639443327209850002*1j,
-.8515536193688395541722677+.4427174639443327209850002*1j,
-.5905759446119191779319432-.9072067564574549539291747*1j,
-.5905759446119191779319432+.9072067564574549539291747*1j]
elif N == 6:
p = [-.9093906830472271808050953-.1856964396793046769246397*1j,
-.9093906830472271808050953+.1856964396793046769246397*1j,
-.7996541858328288520243325-.5621717346937317988594118*1j,
-.7996541858328288520243325+.5621717346937317988594118*1j,
-.5385526816693109683073792-.9616876881954277199245657*1j,
-.5385526816693109683073792+.9616876881954277199245657*1j]
elif N == 7:
p = [-.9194871556490290014311619,
-.8800029341523374639772340-.3216652762307739398381830*1j,
-.8800029341523374639772340+.3216652762307739398381830*1j,
-.7527355434093214462291616-.6504696305522550699212995*1j,
-.7527355434093214462291616+.6504696305522550699212995*1j,
-.4966917256672316755024763-1.002508508454420401230220*1j,
-.4966917256672316755024763+1.002508508454420401230220*1j]
elif N == 8:
p = [-.9096831546652910216327629-.1412437976671422927888150*1j,
-.9096831546652910216327629+.1412437976671422927888150*1j,
-.8473250802359334320103023-.4259017538272934994996429*1j,
-.8473250802359334320103023+.4259017538272934994996429*1j,
-.7111381808485399250796172-.7186517314108401705762571*1j,
-.7111381808485399250796172+.7186517314108401705762571*1j,
-.4621740412532122027072175-1.034388681126901058116589*1j,
-.4621740412532122027072175+1.034388681126901058116589*1j]
elif N == 9:
p = [-.9154957797499037686769223,
-.8911217017079759323183848-.2526580934582164192308115*1j,
-.8911217017079759323183848+.2526580934582164192308115*1j,
-.8148021112269012975514135-.5085815689631499483745341*1j,
-.8148021112269012975514135+.5085815689631499483745341*1j,
-.6743622686854761980403401-.7730546212691183706919682*1j,
-.6743622686854761980403401+.7730546212691183706919682*1j,
-.4331415561553618854685942-1.060073670135929666774323*1j,
-.4331415561553618854685942+1.060073670135929666774323*1j]
elif N == 10:
p = [-.9091347320900502436826431-.1139583137335511169927714*1j,
-.9091347320900502436826431+.1139583137335511169927714*1j,
-.8688459641284764527921864-.3430008233766309973110589*1j,
-.8688459641284764527921864+.3430008233766309973110589*1j,
-.7837694413101441082655890-.5759147538499947070009852*1j,
-.7837694413101441082655890+.5759147538499947070009852*1j,
-.6417513866988316136190854-.8175836167191017226233947*1j,
-.6417513866988316136190854+.8175836167191017226233947*1j,
-.4083220732868861566219785-1.081274842819124562037210*1j,
-.4083220732868861566219785+1.081274842819124562037210*1j]
elif N == 11:
p = [-.9129067244518981934637318,
         -.8963656705721166099815744-.2080480375071031919692341*1j,
-.8963656705721166099815744+.2080480375071031919692341*1j,
-.8453044014712962954184557-.4178696917801248292797448*1j,
-.8453044014712962954184557+.4178696917801248292797448*1j,
-.7546938934722303128102142-.6319150050721846494520941*1j,
-.7546938934722303128102142+.6319150050721846494520941*1j,
-.6126871554915194054182909-.8547813893314764631518509*1j,
-.6126871554915194054182909+.8547813893314764631518509*1j,
-.3868149510055090879155425-1.099117466763120928733632*1j,
-.3868149510055090879155425+1.099117466763120928733632*1j]
elif N == 12:
p = [-.9084478234140682638817772-95506365213450398415258360.0e-27*1j,
-.9084478234140682638817772+95506365213450398415258360.0e-27*1j,
-.8802534342016826507901575-.2871779503524226723615457*1j,
-.8802534342016826507901575+.2871779503524226723615457*1j,
-.8217296939939077285792834-.4810212115100676440620548*1j,
-.8217296939939077285792834+.4810212115100676440620548*1j,
-.7276681615395159454547013-.6792961178764694160048987*1j,
-.7276681615395159454547013+.6792961178764694160048987*1j,
-.5866369321861477207528215-.8863772751320727026622149*1j,
-.5866369321861477207528215+.8863772751320727026622149*1j,
-.3679640085526312839425808-1.114373575641546257595657*1j,
-.3679640085526312839425808+1.114373575641546257595657*1j]
elif N == 13:
p = [-.9110914665984182781070663,
-.8991314665475196220910718-.1768342956161043620980863*1j,
-.8991314665475196220910718+.1768342956161043620980863*1j,
-.8625094198260548711573628-.3547413731172988997754038*1j,
-.8625094198260548711573628+.3547413731172988997754038*1j,
-.7987460692470972510394686-.5350752120696801938272504*1j,
-.7987460692470972510394686+.5350752120696801938272504*1j,
-.7026234675721275653944062-.7199611890171304131266374*1j,
-.7026234675721275653944062+.7199611890171304131266374*1j,
-.5631559842430199266325818-.9135900338325109684927731*1j,
-.5631559842430199266325818+.9135900338325109684927731*1j,
-.3512792323389821669401925-1.127591548317705678613239*1j,
-.3512792323389821669401925+1.127591548317705678613239*1j]
elif N == 14:
p = [-.9077932138396487614720659-82196399419401501888968130.0e-27*1j,
-.9077932138396487614720659+82196399419401501888968130.0e-27*1j,
-.8869506674916445312089167-.2470079178765333183201435*1j,
-.8869506674916445312089167+.2470079178765333183201435*1j,
-.8441199160909851197897667-.4131653825102692595237260*1j,
-.8441199160909851197897667+.4131653825102692595237260*1j,
-.7766591387063623897344648-.5819170677377608590492434*1j,
-.7766591387063623897344648+.5819170677377608590492434*1j,
-.6794256425119233117869491-.7552857305042033418417492*1j,
-.6794256425119233117869491+.7552857305042033418417492*1j,
-.5418766775112297376541293-.9373043683516919569183099*1j,
-.5418766775112297376541293+.9373043683516919569183099*1j,
-.3363868224902037330610040-1.139172297839859991370924*1j,
-.3363868224902037330610040+1.139172297839859991370924*1j]
elif N == 15:
p = [-.9097482363849064167228581,
-.9006981694176978324932918-.1537681197278439351298882*1j,
-.9006981694176978324932918+.1537681197278439351298882*1j,
-.8731264620834984978337843-.3082352470564267657715883*1j,
-.8731264620834984978337843+.3082352470564267657715883*1j,
-.8256631452587146506294553-.4642348752734325631275134*1j,
-.8256631452587146506294553+.4642348752734325631275134*1j,
-.7556027168970728127850416-.6229396358758267198938604*1j,
-.7556027168970728127850416+.6229396358758267198938604*1j,
-.6579196593110998676999362-.7862895503722515897065645*1j,
-.6579196593110998676999362+.7862895503722515897065645*1j,
-.5224954069658330616875186-.9581787261092526478889345*1j,
-.5224954069658330616875186+.9581787261092526478889345*1j,
-.3229963059766444287113517-1.149416154583629539665297*1j,
-.3229963059766444287113517+1.149416154583629539665297*1j]
elif N == 16:
p = [-.9072099595087001356491337-72142113041117326028823950.0e-27*1j,
-.9072099595087001356491337+72142113041117326028823950.0e-27*1j,
-.8911723070323647674780132-.2167089659900576449410059*1j,
-.8911723070323647674780132+.2167089659900576449410059*1j,
-.8584264231521330481755780-.3621697271802065647661080*1j,
-.8584264231521330481755780+.3621697271802065647661080*1j,
-.8074790293236003885306146-.5092933751171800179676218*1j,
-.8074790293236003885306146+.5092933751171800179676218*1j,
-.7356166304713115980927279-.6591950877860393745845254*1j,
-.7356166304713115980927279+.6591950877860393745845254*1j,
-.6379502514039066715773828-.8137453537108761895522580*1j,
-.6379502514039066715773828+.8137453537108761895522580*1j,
-.5047606444424766743309967-.9767137477799090692947061*1j,
-.5047606444424766743309967+.9767137477799090692947061*1j,
-.3108782755645387813283867-1.158552841199330479412225*1j,
-.3108782755645387813283867+1.158552841199330479412225*1j]
elif N == 17:
p = [-.9087141161336397432860029,
-.9016273850787285964692844-.1360267995173024591237303*1j,
-.9016273850787285964692844+.1360267995173024591237303*1j,
-.8801100704438627158492165-.2725347156478803885651973*1j,
-.8801100704438627158492165+.2725347156478803885651973*1j,
-.8433414495836129204455491-.4100759282910021624185986*1j,
-.8433414495836129204455491+.4100759282910021624185986*1j,
-.7897644147799708220288138-.5493724405281088674296232*1j,
-.7897644147799708220288138+.5493724405281088674296232*1j,
-.7166893842372349049842743-.6914936286393609433305754*1j,
-.7166893842372349049842743+.6914936286393609433305754*1j,
-.6193710717342144521602448-.8382497252826992979368621*1j,
-.6193710717342144521602448+.8382497252826992979368621*1j,
-.4884629337672704194973683-.9932971956316781632345466*1j,
-.4884629337672704194973683+.9932971956316781632345466*1j,
-.2998489459990082015466971-1.166761272925668786676672*1j,
-.2998489459990082015466971+1.166761272925668786676672*1j]
elif N == 18:
p = [-.9067004324162775554189031-64279241063930693839360680.0e-27*1j,
-.9067004324162775554189031+64279241063930693839360680.0e-27*1j,
-.8939764278132455733032155-.1930374640894758606940586*1j,
-.8939764278132455733032155+.1930374640894758606940586*1j,
-.8681095503628830078317207-.3224204925163257604931634*1j,
-.8681095503628830078317207+.3224204925163257604931634*1j,
-.8281885016242836608829018-.4529385697815916950149364*1j,
-.8281885016242836608829018+.4529385697815916950149364*1j,
-.7726285030739558780127746-.5852778162086640620016316*1j,
-.7726285030739558780127746+.5852778162086640620016316*1j,
-.6987821445005273020051878-.7204696509726630531663123*1j,
-.6987821445005273020051878+.7204696509726630531663123*1j,
-.6020482668090644386627299-.8602708961893664447167418*1j,
-.6020482668090644386627299+.8602708961893664447167418*1j,
-.4734268069916151511140032-1.008234300314801077034158*1j,
-.4734268069916151511140032+1.008234300314801077034158*1j,
-.2897592029880489845789953-1.174183010600059128532230*1j,
-.2897592029880489845789953+1.174183010600059128532230*1j]
elif N == 19:
p = [-.9078934217899404528985092,
-.9021937639390660668922536-.1219568381872026517578164*1j,
-.9021937639390660668922536+.1219568381872026517578164*1j,
-.8849290585034385274001112-.2442590757549818229026280*1j,
-.8849290585034385274001112+.2442590757549818229026280*1j,
-.8555768765618421591093993-.3672925896399872304734923*1j,
-.8555768765618421591093993+.3672925896399872304734923*1j,
-.8131725551578197705476160-.4915365035562459055630005*1j,
-.8131725551578197705476160+.4915365035562459055630005*1j,
-.7561260971541629355231897-.6176483917970178919174173*1j,
-.7561260971541629355231897+.6176483917970178919174173*1j,
-.6818424412912442033411634-.7466272357947761283262338*1j,
-.6818424412912442033411634+.7466272357947761283262338*1j,
-.5858613321217832644813602-.8801817131014566284786759*1j,
-.5858613321217832644813602+.8801817131014566284786759*1j,
-.4595043449730988600785456-1.021768776912671221830298*1j,
-.4595043449730988600785456+1.021768776912671221830298*1j,
-.2804866851439370027628724-1.180931628453291873626003*1j,
-.2804866851439370027628724+1.180931628453291873626003*1j]
elif N == 20:
p = [-.9062570115576771146523497-57961780277849516990208850.0e-27*1j,
-.9062570115576771146523497+57961780277849516990208850.0e-27*1j,
-.8959150941925768608568248-.1740317175918705058595844*1j,
-.8959150941925768608568248+.1740317175918705058595844*1j,
-.8749560316673332850673214-.2905559296567908031706902*1j,
-.8749560316673332850673214+.2905559296567908031706902*1j,
-.8427907479956670633544106-.4078917326291934082132821*1j,
-.8427907479956670633544106+.4078917326291934082132821*1j,
-.7984251191290606875799876-.5264942388817132427317659*1j,
-.7984251191290606875799876+.5264942388817132427317659*1j,
-.7402780309646768991232610-.6469975237605228320268752*1j,
-.7402780309646768991232610+.6469975237605228320268752*1j,
-.6658120544829934193890626-.7703721701100763015154510*1j,
-.6658120544829934193890626+.7703721701100763015154510*1j,
-.5707026806915714094398061-.8982829066468255593407161*1j,
-.5707026806915714094398061+.8982829066468255593407161*1j,
-.4465700698205149555701841-1.034097702560842962315411*1j,
-.4465700698205149555701841+1.034097702560842962315411*1j,
-.2719299580251652601727704-1.187099379810885886139638*1j,
-.2719299580251652601727704+1.187099379810885886139638*1j]
elif N == 21:
p = [-.9072262653142957028884077,
-.9025428073192696303995083-.1105252572789856480992275*1j,
-.9025428073192696303995083+.1105252572789856480992275*1j,
-.8883808106664449854431605-.2213069215084350419975358*1j,
-.8883808106664449854431605+.2213069215084350419975358*1j,
-.8643915813643204553970169-.3326258512522187083009453*1j,
-.8643915813643204553970169+.3326258512522187083009453*1j,
-.8299435470674444100273463-.4448177739407956609694059*1j,
-.8299435470674444100273463+.4448177739407956609694059*1j,
-.7840287980408341576100581-.5583186348022854707564856*1j,
-.7840287980408341576100581+.5583186348022854707564856*1j,
-.7250839687106612822281339-.6737426063024382240549898*1j,
-.7250839687106612822281339+.6737426063024382240549898*1j,
-.6506315378609463397807996-.7920349342629491368548074*1j,
-.6506315378609463397807996+.7920349342629491368548074*1j,
-.5564766488918562465935297-.9148198405846724121600860*1j,
-.5564766488918562465935297+.9148198405846724121600860*1j,
-.4345168906815271799687308-1.045382255856986531461592*1j,
-.4345168906815271799687308+1.045382255856986531461592*1j,
-.2640041595834031147954813-1.192762031948052470183960*1j,
-.2640041595834031147954813+1.192762031948052470183960*1j]
elif N == 22:
p = [-.9058702269930872551848625-52774908289999045189007100.0e-27*1j,
-.9058702269930872551848625+52774908289999045189007100.0e-27*1j,
-.8972983138153530955952835-.1584351912289865608659759*1j,
-.8972983138153530955952835+.1584351912289865608659759*1j,
-.8799661455640176154025352-.2644363039201535049656450*1j,
-.8799661455640176154025352+.2644363039201535049656450*1j,
-.8534754036851687233084587-.3710389319482319823405321*1j,
-.8534754036851687233084587+.3710389319482319823405321*1j,
-.8171682088462720394344996-.4785619492202780899653575*1j,
-.8171682088462720394344996+.4785619492202780899653575*1j,
-.7700332930556816872932937-.5874255426351153211965601*1j,
-.7700332930556816872932937+.5874255426351153211965601*1j,
-.7105305456418785989070935-.6982266265924524000098548*1j,
-.7105305456418785989070935+.6982266265924524000098548*1j,
-.6362427683267827226840153-.8118875040246347267248508*1j,
-.6362427683267827226840153+.8118875040246347267248508*1j,
-.5430983056306302779658129-.9299947824439872998916657*1j,
-.5430983056306302779658129+.9299947824439872998916657*1j,
-.4232528745642628461715044-1.055755605227545931204656*1j,
-.4232528745642628461715044+1.055755605227545931204656*1j,
-.2566376987939318038016012-1.197982433555213008346532*1j,
-.2566376987939318038016012+1.197982433555213008346532*1j]
elif N == 23:
p = [-.9066732476324988168207439,
-.9027564979912504609412993-.1010534335314045013252480*1j,
-.9027564979912504609412993+.1010534335314045013252480*1j,
-.8909283242471251458653994-.2023024699381223418195228*1j,
-.8909283242471251458653994+.2023024699381223418195228*1j,
-.8709469395587416239596874-.3039581993950041588888925*1j,
-.8709469395587416239596874+.3039581993950041588888925*1j,
-.8423805948021127057054288-.4062657948237602726779246*1j,
-.8423805948021127057054288+.4062657948237602726779246*1j,
-.8045561642053176205623187-.5095305912227258268309528*1j,
-.8045561642053176205623187+.5095305912227258268309528*1j,
-.7564660146829880581478138-.6141594859476032127216463*1j,
-.7564660146829880581478138+.6141594859476032127216463*1j,
-.6965966033912705387505040-.7207341374753046970247055*1j,
-.6965966033912705387505040+.7207341374753046970247055*1j,
-.6225903228771341778273152-.8301558302812980678845563*1j,
-.6225903228771341778273152+.8301558302812980678845563*1j,
-.5304922463810191698502226-.9439760364018300083750242*1j,
-.5304922463810191698502226+.9439760364018300083750242*1j,
-.4126986617510148836149955-1.065328794475513585531053*1j,
-.4126986617510148836149955+1.065328794475513585531053*1j,
-.2497697202208956030229911-1.202813187870697831365338*1j,
-.2497697202208956030229911+1.202813187870697831365338*1j]
elif N == 24:
p = [-.9055312363372773709269407-48440066540478700874836350.0e-27*1j,
-.9055312363372773709269407+48440066540478700874836350.0e-27*1j,
-.8983105104397872954053307-.1454056133873610120105857*1j,
-.8983105104397872954053307+.1454056133873610120105857*1j,
-.8837358034555706623131950-.2426335234401383076544239*1j,
-.8837358034555706623131950+.2426335234401383076544239*1j,
-.8615278304016353651120610-.3403202112618624773397257*1j,
-.8615278304016353651120610+.3403202112618624773397257*1j,
-.8312326466813240652679563-.4386985933597305434577492*1j,
-.8312326466813240652679563+.4386985933597305434577492*1j,
-.7921695462343492518845446-.5380628490968016700338001*1j,
-.7921695462343492518845446+.5380628490968016700338001*1j,
-.7433392285088529449175873-.6388084216222567930378296*1j,
-.7433392285088529449175873+.6388084216222567930378296*1j,
-.6832565803536521302816011-.7415032695091650806797753*1j,
-.6832565803536521302816011+.7415032695091650806797753*1j,
-.6096221567378335562589532-.8470292433077202380020454*1j,
-.6096221567378335562589532+.8470292433077202380020454*1j,
-.5185914574820317343536707-.9569048385259054576937721*1j,
-.5185914574820317343536707+.9569048385259054576937721*1j,
-.4027853855197518014786978-1.074195196518674765143729*1j,
-.4027853855197518014786978+1.074195196518674765143729*1j,
-.2433481337524869675825448-1.207298683731972524975429*1j,
-.2433481337524869675825448+1.207298683731972524975429*1j]
elif N == 25:
p = [-.9062073871811708652496104,
-.9028833390228020537142561-93077131185102967450643820.0e-27*1j,
-.9028833390228020537142561+93077131185102967450643820.0e-27*1j,
-.8928551459883548836774529-.1863068969804300712287138*1j,
-.8928551459883548836774529+.1863068969804300712287138*1j,
-.8759497989677857803656239-.2798521321771408719327250*1j,
-.8759497989677857803656239+.2798521321771408719327250*1j,
-.8518616886554019782346493-.3738977875907595009446142*1j,
-.8518616886554019782346493+.3738977875907595009446142*1j,
-.8201226043936880253962552-.4686668574656966589020580*1j,
-.8201226043936880253962552+.4686668574656966589020580*1j,
-.7800496278186497225905443-.5644441210349710332887354*1j,
-.7800496278186497225905443+.5644441210349710332887354*1j,
-.7306549271849967721596735-.6616149647357748681460822*1j,
-.7306549271849967721596735+.6616149647357748681460822*1j,
-.6704827128029559528610523-.7607348858167839877987008*1j,
-.6704827128029559528610523+.7607348858167839877987008*1j,
-.5972898661335557242320528-.8626676330388028512598538*1j,
-.5972898661335557242320528+.8626676330388028512598538*1j,
-.5073362861078468845461362-.9689006305344868494672405*1j,
-.5073362861078468845461362+.9689006305344868494672405*1j,
-.3934529878191079606023847-1.082433927173831581956863*1j,
-.3934529878191079606023847+1.082433927173831581956863*1j,
-.2373280669322028974199184-1.211476658382565356579418*1j,
-.2373280669322028974199184+1.211476658382565356579418*1j]
else:
raise ValueError("Bessel Filter not supported for order %d" % N)
return z, p, k
filter_dict = {'butter': [buttap,buttord],
'butterworth' : [buttap,buttord],
'cauer' : [ellipap,ellipord],
'elliptic' : [ellipap,ellipord],
'ellip' : [ellipap,ellipord],
'bessel' : [besselap],
'cheby1' : [cheb1ap, cheb1ord],
'chebyshev1' : [cheb1ap, cheb1ord],
'chebyshevi' : [cheb1ap, cheb1ord],
'cheby2' : [cheb2ap, cheb2ord],
'chebyshev2' : [cheb2ap, cheb2ord],
'chebyshevii' : [cheb2ap, cheb2ord]
}
band_dict = {'band':'bandpass',
'bandpass':'bandpass',
'pass' : 'bandpass',
'bp':'bandpass',
'bs':'bandstop',
'bandstop':'bandstop',
'bands' : 'bandstop',
'stop' : 'bandstop',
'l' : 'lowpass',
'low': 'lowpass',
'lowpass' : 'lowpass',
'high' : 'highpass',
'highpass' : 'highpass',
'h' : 'highpass'
}
warnings.simplefilter("always", BadCoefficients)
| gpl-3.0 |
Eric89GXL/scikit-learn | examples/applications/plot_out_of_core_classification.py | 3 | 12406 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the feature space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import sgmllib
import tarfile
import time
import urllib
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
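# A minimal sketch of the out-of-core pattern described in the module
# docstring (illustrative only: the toy strings and labels below are made up
# and independent of the Reuters experiment that follows). Every batch is
# hashed into the same fixed-size feature space and fed to partial_fit.
_sketch_vectorizer = HashingVectorizer(n_features=2 ** 10, non_negative=True)
_sketch_clf = SGDClassifier()
_sketch_X1 = _sketch_vectorizer.transform(["free money offer", "meeting at noon"])
_sketch_clf.partial_fit(_sketch_X1, [1, 0], classes=np.array([0, 1]))
_sketch_X2 = _sketch_vectorizer.transform(["cheap offer now", "quarterly report"])
_sketch_clf.partial_fit(_sketch_X2, [1, 0])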
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(sgmllib.SGMLParser):
"""Utility class to parse a SGML file and yield documents one at a time."""
def __init__(self, verbose=0):
sgmllib.SGMLParser.__init__(self, verbose)
self._reset()
def _reset(self):
self.in_title = 0
self.in_body = 0
self.in_topics = 0
self.in_topic_d = 0
self.title = ""
self.body = ""
self.topics = []
self.topic_d = ""
def parse(self, fd):
self.docs = []
for chunk in fd:
self.feed(chunk)
for doc in self.docs:
yield doc
self.docs = []
self.close()
def handle_data(self, data):
if self.in_body:
self.body += data
elif self.in_title:
self.title += data
elif self.in_topic_d:
self.topic_d += data
def start_reuters(self, attributes):
pass
def end_reuters(self):
self.body = re.sub(r'\s+', r' ', self.body)
self.docs.append({'title': self.title,
'body': self.body,
'topics': self.topics})
self._reset()
def start_title(self, attributes):
self.in_title = 1
def end_title(self):
self.in_title = 0
def start_body(self, attributes):
self.in_body = 1
def end_body(self):
self.in_body = 0
def start_topics(self, attributes):
self.in_topics = 1
def end_topics(self):
self.in_topics = 0
def start_d(self, attributes):
self.in_topic_d = 1
def end_d(self):
self.in_topic_d = 0
self.topics.append(self.topic_d)
self.topic_d = ""
def stream_reuters_documents(data_path=None):
"""Iterate over documents of the Reuters dataset.
The Reuters archive will automatically be downloaded and uncompressed if
the `data_path` directory does not exist.
Documents are represented as dictionaries with 'body' (str),
'title' (str), 'topics' (list(str)) keys.
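A minimal usage sketch (illustrative only; iterating the stream will
trigger the download described above on first use):
>>> doc_iter = stream_reuters_documents()   # doctest: +SKIP
>>> doc = next(doc_iter)                    # doctest: +SKIP
>>> sorted(doc.keys())                      # doctest: +SKIP
['body', 'title', 'topics']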
"""
DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
'reuters21578-mld/reuters21578.tar.gz')
ARCHIVE_FILENAME = 'reuters21578.tar.gz'
if data_path is None:
data_path = os.path.join(get_data_home(), "reuters")
if not os.path.exists(data_path):
"""Download the dataset."""
print("downloading dataset (once and for all) into %s" %
data_path)
os.mkdir(data_path)
def progress(blocknum, bs, size):
total_sz_mb = '%.2f MB' % (size / 1e6)
current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
if _not_in_sphinx():
print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
end='')
archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
urllib.urlretrieve(DOWNLOAD_URL, filename=archive_path,
reporthook=progress)
if _not_in_sphinx():
print('\r', end='')
print("untarring Reuters dataset...")
tarfile.open(archive_path, 'r:gz').extractall(data_path)
print("done.")
parser = ReutersParser()
for filename in glob(os.path.join(data_path, "*.sgm")):
for doc in parser.parse(open(filename)):
yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
non_negative=True)
# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()
# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'
# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
'SGD': SGDClassifier(),
'Perceptron': Perceptron(),
'NB Multinomial': MultinomialNB(alpha=0.01),
'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
"""Extract a minibatch of examples, return a tuple X_text, y.
Note: size is before excluding invalid docs with no topics assigned.
"""
data = [('{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
for doc in itertools.islice(doc_iter, size)
if doc['topics']]
if not len(data):
return np.asarray([], dtype=int), np.asarray([], dtype=int)
X_text, y = zip(*data)
return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
"""Generator of minibatches."""
X_text, y = get_minibatch(doc_iter, minibatch_size)
while len(X_text):
yield X_text, y
X_text, y = get_minibatch(doc_iter, minibatch_size)
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}
# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
X_test_text, y_test = get_minibatch(data_stream, 1000)
X_test = vectorizer.transform(X_test_text)
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
"""Report progress information, return a string."""
duration = time.time() - stats['t0']
s = "%20s classifier : \t" % cls_name
s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
s += "accuracy: %(accuracy).3f " % stats
s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
return s
cls_stats = {}
for cls_name in partial_fit_classifiers:
stats = {'n_train': 0, 'n_train_pos': 0,
'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
cls_stats[cls_name] = stats
get_minibatch(data_stream, n_test_documents)
# Discard test set
# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000
# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batchs of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):
tick = time.time()
X_train = vectorizer.transform(X_train_text)
total_vect_time += time.time() - tick
for cls_name, cls in partial_fit_classifiers.items():
tick = time.time()
# update estimator with examples in the current mini-batch
cls.partial_fit(X_train, y_train, classes=all_classes)
# accumulate test accuracy stats
cls_stats[cls_name]['n_train'] += X_train.shape[0]
cls_stats[cls_name]['n_train_pos'] += sum(y_train)
cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
acc_history = (cls_stats[cls_name]['accuracy'],
cls_stats[cls_name]['n_train'])
cls_stats[cls_name]['accuracy_history'].append(acc_history)
cls_stats[cls_name]['total_fit_time'] += time.time() - tick
run_history = (cls_stats[cls_name]['accuracy'],
total_vect_time + cls_stats[cls_name]['total_fit_time'])
cls_stats[cls_name]['runtime_history'].append(run_history)
if i % 3 == 0:
print(progress(cls_name, cls_stats[cls_name]))
if i % 3 == 0:
print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
"""Plot accuracy as a function of x."""
x = np.array(x)
y = np.array(y)
plt.title('Classification accuracy as a function of %s' % x_legend)
plt.xlabel('%s' % x_legend)
plt.ylabel('Accuracy')
plt.grid(True)
plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Time in Process')
def autolabel(rectangles):
"""attach some text vi autolabel on rectangles."""
for rect in rectangles:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2.,
1.05 * height, '%.4f' % height,
ha='center', va='bottom')
autolabel(rectangles)
plt.show()
| bsd-3-clause |
JonnaStalring/AZOrange | ConfPred/conformal-master/cp/evaluation.py | 1 | 14351 | """Evaluation module contains methods for evaluation of conformal predictors.
Function :py:func:`run` produces Results of an appropriate type by using a Sampler on a given data set
to split it into a training and testing set.
Structure:
- Sampler (sampling methods)
- :py:class:`RandomSampler`
- :py:class:`CrossSampler`
- :py:class:`LOOSampler`
- Results (evaluation results)
- :py:class:`ResultsClass`
- :py:class:`ResultsRegr`
- Evaluation methods
- :py:func:`run`
- :py:func:`run_train_test`
- :py:func:`calibration_plot`
"""
import numpy as np
import matplotlib.pyplot as plt
import time
from Orange.regression import LinearRegressionLearner
from sklearn.cross_validation import KFold
from Orange.classification import LogisticRegressionLearner, NaiveBayesLearner
from Orange.data import Table
from cp.classification import TransductiveClassifier, ConformalClassifier, InductiveClassifier, CrossClassifier
from cp.nonconformity import InverseProbability, AbsError, ProbabilityMargin
from cp.regression import ConformalRegressor, TransductiveRegressor, InductiveRegressor, CrossRegressor
from cp.utils import split_data, shuffle_data
class Sampler:
"""Base class for various data sampling/splitting methods.
Attributes:
data (Table): Data set for sampling.
n (int): Size of the data set.
Examples:
>>> s = CrossSampler(Table('iris'), 4)
>>> for train, test in s.repeat(3):
>>> print(train)
"""
def __init__(self, data):
"""Initialize the data set."""
self.data = data
self.n = len(data)
def __iter__(self):
return self
def __next__(self):
"""Extending samplers should implement the __next__ method to return the selected
and remaining part of the data.
"""
raise NotImplementedError
def repeat(self, rep=1):
"""Repeat sampling several times."""
for r in range(rep):
for train, test in self:
yield train, test
class RandomSampler(Sampler):
"""Randomly samples a subset of data in proportion a:b.
Attributes:
k (float): Size of the selected subset.
Examples:
>>> s = RandomSampler(Table('iris'), 3, 2)
>>> train, test = next(s)
"""
def __init__(self, data, a, b):
"""Initialize the data set and the size of the desired selection."""
super().__init__(data)
self.k = a*self.n//(a+b)
def __iter__(self):
"""Return a special iterator over a single split of data."""
yield next(self)
def __next__(self):
"""Splits the data based on a random permutation."""
perm = np.random.permutation(self.n)
train_ind, test_ind = perm[:self.k], perm[self.k:]
return self.data[train_ind], self.data[test_ind]
class CrossSampler(Sampler):
"""Sample the data in :py:attr:`k` folds. Shuffle the data before determining the folds.
Attributes:
k (int): Number of folds.
Examples:
>>> s = CrossSampler(Table('iris'), 4)
>>> for train, test in s:
>>> print(train)
"""
def __init__(self, data, k):
super().__init__(data)
self.k = k
self.kf = None
def __next__(self):
"""Compute the next fold. Initializes a new k-fold split on each repetition of the entire
sampling procedure.
"""
if self.kf is None:
self.kf = iter(KFold(self.n, n_folds=self.k, shuffle=True))
try:
train_ind, test_ind = next(self.kf)
return self.data[train_ind], self.data[test_ind]
except StopIteration:
self.kf = None
raise StopIteration
class LOOSampler(CrossSampler):
"""Leave-One-Out sampler is a cross sampler with the number of folds equal to the size of the data set.
Examples:
>>> s = LOOSampler(Table('iris'))
>>> for train, test in s:
>>> print(len(test))
"""
def __init__(self, data):
super().__init__(data, len(data))
class Results:
"""Contains results of an evaluation of a conformal predictor
returned by the :py:func:`run` function.
Examples:
>>> cp = CrossClassifier(InverseProbability(LogisticRegressionLearner()), 5)
>>> r = run(cp, 0.1, RandomSampler(Table('iris'), 2, 1))
>>> print(r.accuracy())
"""
def __init__(self):
self.preds = []
self.refs = []
self.tm = 0
def add(self, pred, ref):
"""Add a new predicted and corresponding reference value."""
self.preds.append(pred)
self.refs.append(ref)
def concatenate(self, r):
"""Concatenate another set of results."""
self.preds += r.preds
self.refs += r.refs
self.tm += r.tm
def accuracy(self):
"""Compute the accuracy of the predictor averaging verdicts of individual predictions. This is the fraction
of instances that contain the actual/reference class among the predicted ones for classification and the fraction of
instances that contain the actual value within the predicted range for regression."""
v = [p.verdict(r) for p, r in zip(self.preds, self.refs)]
return np.mean(v)
def time(self):
return self.tm
class ResultsClass(Results):
"""Results of evaluating a conformal classifier. Provides classification specific efficiency measures.
Examples:
>>> cp = CrossClassifier(InverseProbability(LogisticRegressionLearner()), 5)
>>> r = run(cp, 0.1, RandomSampler(Table('iris'), 2, 1))
>>> print(r.singleton_criterion())
"""
def accuracy(self, class_value=None):
"""Compute accuracy for test instances with a given class value. If this parameter is not given,
compute accuracy over all instances, regardless of their class."""
if class_value is None:
return super().accuracy()
else:
v = [p.verdict(r) for p, r in zip(self.preds, self.refs) if r == class_value]
return np.mean(v)
def confidence(self):
"""Average confidence of predictions."""
return np.mean([pred.confidence() for pred in self.preds])
def credibility(self):
"""Average credibility of predictions."""
return np.mean([pred.credibility() for pred in self.preds])
def confusion(self, actual, predicted):
"""Compute the number of singleton predictions of class `predicted` when the actual class is `actual`.
Examples:
Drawing a confusion matrix.
>>> data = Table('iris')
>>> cp = CrossClassifier(InverseProbability(LogisticRegressionLearner()), 3)
>>> r = run(cp, 0.1, RandomSampler(data, 2, 1))
>>> values = data.domain.class_var.values
>>> form = '{: >20}'*(len(values)+1)
>>> print(form.format('actual\\predicted', *values))
>>> for a in values:
>>> c = [r.confusion(a, p) for p in values]
>>> print(('{: >20}'*(len(c)+1)).format(a, *c))
actual\predicted Iris-setosa Iris-versicolor Iris-virginica
Iris-setosa 18 0 0
Iris-versicolor 0 14 4
Iris-virginica 0 0 12
"""
return sum(pred.classes()[0] == predicted and ref == actual
for pred, ref in zip(self.preds, self.refs) if len(pred.classes()) == 1)
def multiple_criterion(self):
"""Number of cases with multiple predicted classes."""
c = [len(pred.classes()) > 1 for pred in self.preds]
return np.mean(c)
def singleton_criterion(self):
"""Number of cases with a single predicted class."""
c = [len(pred.classes()) == 1 for pred in self.preds]
return np.mean(c)
def empty_criterion(self):
"""Number of cases with no predicted classes."""
c = [len(pred.classes()) == 0 for pred in self.preds]
return np.mean(c)
def singleton_correct(self):
"""Fraction of singleton predictions that are correct."""
c = [pred.verdict(ref) for pred, ref in zip(self.preds, self.refs) if len(pred.classes()) == 1]
return np.mean(c)
class ResultsRegr(Results):
"""Results of evaluating a conformal regressor. Provides regression specific efficiency measures.
Examples:
>>> ir = InductiveRegressor(AbsErrorKNN(Euclidean, 10, average=True))
>>> r = run(ir, 0.1, RandomSampler(Table('housing'), 2, 1))
>>> print(r.interdecile_range())
"""
def widths(self):
return [pred.width() for pred in self.preds]
def median_range(self):
"""Median width of predicted ranges."""
return np.median(self.widths())
def mean_range(self):
"""Mean width of predicted ranges."""
return np.mean(self.widths())
def std_dev(self):
"""Standard deviation of widths of predicted ranges."""
return np.std(self.widths())
def interdecile_range(self):
"""Difference between the first and ninth decile of widths of predicted ranges."""
w = self.widths()
return np.percentile(w, 90) - np.percentile(w, 10)
def interdecile_mean(self):
"""Mean width discarding the smallest and largest 10% of widths of predicted ranges."""
w = np.sort(self.widths())  # sort so the trimmed tails are the smallest and largest widths
decile = int(0.1*len(w))
return np.mean(w[decile:len(w)-decile])
def run(cp, eps, sampler, rep=1):
"""Run method is used to repeat an experiment one or more times with different splits of the dataset
into a training and testing set. The splits are defined by the provided sampler. The conformal predictor
itself might further split the testing set internally for its computations (e.g. inductive or cross predictors).
Run the conformal predictor `cp` on the datasets defined by the provided sampler and number of repetitions
and construct the results. Fit the conformal predictor on each training set returned by the sampler and
evaluate it on the corresponding test set.
Inductive conformal predictors use one third of the training set (random subset) for calibration.
For more control over the exact datasets used for training, testing and calibration see :py:func:`run_train_test`.
Returns:
:py:class:`ResultsClass` or :py:class:`ResultsRegr`
Examples:
>>> cp = CrossClassifier(InverseProbability(LogisticRegressionLearner()), 5)
>>> r = run(cp, 0.1, CrossSampler(Table('iris'), 4), rep=3)
>>> print(r.accuracy(), r.empty_criterion())
The above example uses a :py:class:`CrossSampler` to define training and testing datasets. Each fold is used as the test
set and the rest as a training set. The entire process is repeated three times with different fold splits
and results in 3*n predictions, where n is the size of the dataset.
"""
classification = isinstance(cp, ConformalClassifier)
results = ResultsClass() if classification else ResultsRegr()
for train, test in sampler.repeat(rep):
r = run_train_test(cp, eps, train, test)
results.concatenate(r)
return results
def run_train_test(cp, eps, train, test, calibrate=None):
"""Fits the conformal predictor `cp` on the training dataset and evaluates it on the testing set.
Inductive conformal predictors use the provided calibration set or default to extracting one third
of the training set (random subset) for calibration.
Returns:
:py:class:`ResultsClass` or :py:class:`ResultsRegr`
Examples:
>>> tab = Table('iris')
>>> cp = CrossClassifier(InverseProbability(LogisticRegressionLearner()), 4)
>>> r = run_train_test(cp, 0.1, tab[:100], tab[100:])
>>> print(r.accuracy(), r.singleton_criterion())
"""
classification = isinstance(cp, ConformalClassifier)
results = ResultsClass() if classification else ResultsRegr()
start = time.time()
if isinstance(cp, TransductiveClassifier) or isinstance(cp, TransductiveRegressor) or \
isinstance(cp, CrossClassifier) or isinstance(cp, CrossRegressor):
cp.fit(train)
else: # Inductive predictor
if calibrate is None:
train, calibrate = next(RandomSampler(train, 2, 1))
cp.fit(train, calibrate)
for inst in test:
results.add(cp.predict(inst.x, eps), inst.get_class())
finish = time.time()
results.tm = finish-start
return results
def calibration_plot(cp, data, k=11, rep=1, title='calibration plot', fname='cplot.png'):
"""Draw and save a calibration plot by evaluating the conformal predictor (`cp`)
with different significance values `eps` on random train/test splits.
Repeat the experiment `rep` times.
Examples:
>>> cp = CrossClassifier(InverseProbability(LogisticRegressionLearner()), 5)
>>> calibration_plot(cp, Table('iris'))
"""
errs = []
lin = np.linspace(0.0, 1.0, k)
for eps in lin:
r = run(cp, eps, RandomSampler(data, 4, 1), rep)
print(eps, 1-r.accuracy())
errs.append(1 - r.accuracy())
plt.xlabel('significance (eps)')
plt.ylabel('error rate')
plt.title(title)
plt.plot(lin, errs, color='blue')
plt.plot([0,1], [0,1], color='black')
plt.tight_layout()
plt.savefig(fname)
plt.close()
if __name__ == '__main__':
data = Table('auto-mpg')
nc_str = "AbsError(LinearRegressionLearner())"
cp_str = "CrossRegressor(nc, 5)"
nc = eval(nc_str)
cp = eval(cp_str)
calibration_plot(cp, data, rep=5,
title='data = %s\nnc = %s\ncp = %s\n' % (data.name, nc_str, cp_str),
fname='../plots/'+data.name+'.png')
| lgpl-3.0 |
brain-research/mirage-rl-qprop | convert-stdout-to-csv.py | 1 | 1440 | """
Copyright 2018 Google LLC
Use of this source code is governed by an MIT-style
license that can be found in the LICENSE file or at
https://opensource.org/licenses/MIT.
"""
import os
import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import colors as mcolors
import argparse
import glob
import numpy as np
import seaborn as sns
import scipy
from scipy.signal import savgol_filter
# Convert the nasty stdout files to CSV files
def main(fnames):
for i, fname in enumerate(fnames):
with open(fname) as f:
content = f.readlines()
rets, its = [], []
for line in content:
if 'AverageReturn' in line:
rets.append(float(line.split()[-1]))
if 'Iteration' in line:
its.append(int(line.split()[-1]))
with open('{}.csv'.format(fname), 'w+') as f:
f.write('Iteration,AverageReturn\n')
for it, ret in zip(its, rets):
f.write('{},{}\n'.format(it, ret))
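def _demo_convert(path='example_stdout.txt'):
    """Illustrative sketch (not part of the original script): write a tiny
    fake log in the format parsed by main() above and convert it to CSV."""
    with open(path, 'w') as f:
        f.write('Iteration 0\nAverageReturn 10.5\n'
                'Iteration 1\nAverageReturn 12.0\n')
    main([path])  # produces 'example_stdout.txt.csv' with two data rows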
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Plot average rewards of experiments.")
parser.add_argument('--files', help="Pass in regex-style for filenames", required=True)
args = parser.parse_args()
fnames = glob.glob(args.files)
# fnames = [fname for fname in fnames if fname.split('/')[2] != 'test']
print("\n".join(fnames))
main(fnames)
| mit |
davidgbe/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
lxybox1/MissionPlanner | Lib/site-packages/scipy/stats/distributions.py | 53 | 207806 | # Functions to implement several important functions for
# various Continuous and Discrete Probability Distributions
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
import math
import warnings
from copy import copy
from scipy.misc import comb, derivative
from scipy import special
from scipy import optimize
from scipy import integrate
from scipy.special import gammaln as gamln
import inspect
from numpy import alltrue, where, arange, putmask, \
ravel, take, ones, sum, shape, product, repeat, reshape, \
zeros, floor, logical_and, log, sqrt, exp, arctanh, tan, sin, arcsin, \
arctan, tanh, ndarray, cos, cosh, sinh, newaxis, array, log1p, expm1
from numpy import atleast_1d, polyval, ceil, place, extract, \
any, argsort, argmax, vectorize, r_, asarray, nan, inf, pi, isinf, \
power, NINF, empty
import numpy
import numpy as np
import numpy.random as mtrand
from numpy import flatnonzero as nonzero
import vonmises_cython
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n==0):
return 1.0
elif (n==1):
if mu is None:
val = moment_func(1,*args)
else:
val = mu
elif (n==2):
if mu2 is None or mu is None:
val = moment_func(2,*args)
else:
val = mu2 + mu*mu
elif (n==3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3,*args)
else:
mu3 = g1*(mu2**1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu**3 # 3rd non-central moment
elif (n==4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4,*args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*(mu2**1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu**4
else:
val = moment_func(n, *args)
return val
def _skew(data):
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / m2**1.5
def _kurtosis(data):
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
__all__ = [
'rv_continuous',
'ksone', 'kstwobign', 'norm', 'alpha', 'anglit', 'arcsine',
'beta', 'betaprime', 'bradford', 'burr', 'fisk', 'cauchy',
'chi', 'chi2', 'cosine', 'dgamma', 'dweibull', 'erlang',
'expon', 'exponweib', 'exponpow', 'fatiguelife', 'foldcauchy',
'f', 'foldnorm', 'frechet_r', 'weibull_min', 'frechet_l',
'weibull_max', 'genlogistic', 'genpareto', 'genexpon', 'genextreme',
'gamma', 'gengamma', 'genhalflogistic', 'gompertz', 'gumbel_r',
'gumbel_l', 'halfcauchy', 'halflogistic', 'halfnorm', 'hypsecant',
'gausshyper', 'invgamma', 'invnorm', 'invgauss', 'invweibull',
'johnsonsb', 'johnsonsu', 'laplace', 'levy', 'levy_l',
'levy_stable', 'logistic', 'loggamma', 'loglaplace', 'lognorm',
'gilbrat', 'maxwell', 'mielke', 'nakagami', 'ncx2', 'ncf', 't',
'nct', 'pareto', 'lomax', 'powerlaw', 'powerlognorm', 'powernorm',
'rdist', 'rayleigh', 'reciprocal', 'rice', 'recipinvgauss',
'semicircular', 'triang', 'truncexpon', 'truncnorm',
'tukeylambda', 'uniform', 'vonmises', 'wald', 'wrapcauchy',
'entropy', 'rv_discrete',
'binom', 'bernoulli', 'nbinom', 'geom', 'hypergeom', 'logser',
'poisson', 'planck', 'boltzmann', 'randint', 'zipf', 'dlaplace',
'skellam'
]
floatinfo = numpy.finfo(float)
errp = special.errprint
arr = asarray
gam = special.gamma
import types
from scipy.misc import doccer
all = alltrue
sgf = vectorize
try:
from new import instancemethod
except ImportError:
# Python 3
def instancemethod(func, obj, cls):
return types.MethodType(func, obj)
# These are the docstring parts used for substitution in specific
# distribution docstrings.
docheaders = {'methods':"""\nMethods\n-------\n""",
'parameters':"""\nParameters\n---------\n""",
'notes':"""\nNotes\n-----\n""",
'examples':"""\nExamples\n--------\n"""}
_doc_rvs = \
"""rvs(%(shapes)s, loc=0, scale=1, size=1)
Random variates.
"""
_doc_pdf = \
"""pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = \
"""logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = \
"""pmf(x, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = \
"""logpmf(x, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = \
"""cdf(x, %(shapes)s, loc=0, scale=1)
Cumulative density function.
"""
_doc_logcdf = \
"""logcdf(x, %(shapes)s, loc=0, scale=1)
Log of the cumulative density function.
"""
_doc_sf = \
"""sf(x, %(shapes)s, loc=0, scale=1)
Survival function (1-cdf --- sometimes more accurate).
"""
_doc_logsf = \
"""logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
"""
_doc_ppf = \
"""ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of cdf --- percentiles).
"""
_doc_isf = \
"""isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of sf).
"""
_doc_moment = \
"""moment(n, %(shapes)s, loc=0, scale=1)
Non-central moment of order n
"""
_doc_stats = \
"""stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = \
"""entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = \
"""fit(data, %(shapes)s, loc=0, scale=1)
Parameter estimates for generic data.
"""
_doc_expect = \
"""expect(func, %(shapes)s, loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = \
"""expect(func, %(shapes)s, loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = \
"""median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = \
"""mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = \
"""var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = \
"""std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = \
"""interval(alpha, %(shapes)s, loc=0, scale=1)
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
# Note that the two lines for %(shapes) are searched for and replaced in
# rv_continuous and rv_discrete - update there if the exact string changes
_doc_default_callparams = \
"""
Parameters
----------
x : array-like
quantiles
q : array-like
lower or upper tail probability
%(shapes)s : array-like
shape parameters
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments )
moments : str, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis. (default='mv')
"""
_doc_default_longsummary = \
"""Continuous random variables are defined from a standard form and may
require some shape parameters to complete its specification. Any
optional keyword parameters can be passed to the methods of the RV
object as given below:
"""
_doc_default_frozen_note = \
"""
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = \
"""Examples
--------
>>> import matplotlib.pyplot as plt
>>> numargs = %(name)s.numargs
>>> [ %(shapes)s ] = [0.9,] * numargs
>>> rv = %(name)s(%(shapes)s)
Display frozen pdf
>>> x = np.linspace(0, np.minimum(rv.dist.b, 3))
>>> h = plt.plot(x, rv.pdf(x))
Check accuracy of cdf and ppf
>>> prb = %(name)s.cdf(x, %(shapes)s)
>>> h = plt.semilogy(np.abs(x - %(name)s.ppf(prb, %(shapes)s)) + 1e-20)
Random number generation
>>> R = %(name)s.rvs(%(shapes)s, size=100)
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note,
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note])
docdict = {'rvs':_doc_rvs,
'pdf':_doc_pdf,
'logpdf':_doc_logpdf,
'cdf':_doc_cdf,
'logcdf':_doc_logcdf,
'sf':_doc_sf,
'logsf':_doc_logsf,
'ppf':_doc_ppf,
'isf':_doc_isf,
'stats':_doc_stats,
'entropy':_doc_entropy,
'fit':_doc_fit,
'moment':_doc_moment,
'expect':_doc_expect,
'interval':_doc_interval,
'mean':_doc_mean,
'std':_doc_std,
'var':_doc_var,
'median':_doc_median,
'allmethods':_doc_allmethods,
'callparams':_doc_default_callparams,
'longsummary':_doc_default_longsummary,
'frozennote':_doc_default_frozen_note,
'example':_doc_default_example,
'default':_doc_default,
'before_notes':_doc_default_before_notes}
# Reuse common content between continous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'fit', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in
_doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(\
'Continuous', 'Discrete')
_doc_default_frozen_note = \
"""
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
docdict_discrete['example'] = _doc_default_example.replace('[0.9,]',
'Replace with reasonable value')
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
try:
del s
except NameError:
# in Python 3, loop variables are not visible after the loop
pass
def _build_random_array(fun, args, size=None):
# Build an array by applying function fun to
# the arguments in args, creating an array with
# the specified shape.
# Allows an integer shape n as a shorthand for (n,).
if isinstance(size, types.IntType):
size = [size]
if size is not None and len(size) != 0:
n = numpy.multiply.reduce(size)
s = apply(fun, args + (n,))
s.shape = size
return s
else:
n = 1
s = apply(fun, args + (n,))
return s[0]
random = mtrand.random_sample
rand = mtrand.rand
random_integers = mtrand.random_integers
permutation = mtrand.permutation
## Internal class to compute a ppf given a distribution.
## (needs cdf function) and uses brentq from scipy.optimize
## to compute ppf from cdf.
class general_cont_ppf(object):
def __init__(self, dist, xa=-10.0, xb=10.0, xtol=1e-14):
self.dist = dist
self.cdf = eval('%scdf'%dist)
self.xa = xa
self.xb = xb
self.xtol = xtol
self.vecfunc = sgf(self._single_call,otypes='d')
def _tosolve(self, x, q, *args):
return apply(self.cdf, (x, )+args) - q
def _single_call(self, q, *args):
return optimize.brentq(self._tosolve, self.xa, self.xb, args=(q,)+args, xtol=self.xtol)
def __call__(self, q, *args):
return self.vecfunc(q, *args)
# Frozen RV class
class rv_frozen(object):
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
self.dist = dist
def pdf(self, x): #raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None):
kwds = self.kwds.copy()
kwds.update({'size':size})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments':moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self,k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self,k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
## NANs are returned for unsupported parameters.
## location and scale parameters are optional for each distribution.
## The shape parameters are generally required
##
## The loc and scale parameters must be given as keyword parameters.
## These are related to the common symbols in the .lyx file
## skew is third central moment / variance**(1.5)
## kurtosis is fourth central moment / variance**2 - 3
## References::
## Documentation for ranlib, rv2, cdflib and
##
## Eric Weisstein's World of Mathematics http://mathworld.wolfram.com/
## http://mathworld.wolfram.com/topics/StatisticalDistributions.html
##
## Documentation to Regress+ by Michael McLaughlin
##
## Engineering and Statistics Handbook (NIST)
## http://www.itl.nist.gov/div898/handbook/index.htm
##
## Documentation for DATAPLOT from NIST
## http://www.itl.nist.gov/div898/software/dataplot/distribu.htm
##
## Norman Johnson, Samuel Kotz, and N. Balakrishnan "Continuous
## Univariate Distributions", second edition,
## Volumes I and II, Wiley & Sons, 1994.
## Each continuous random variable as the following methods
##
## rvs -- Random Variates (alternatively calling the class could produce these)
## pdf -- PDF
## logpdf -- log PDF (more numerically accurate if possible)
## cdf -- CDF
## logcdf -- log of CDF
## sf -- Survival Function (1-CDF)
## logsf --- log of SF
## ppf -- Percent Point Function (Inverse of CDF)
## isf -- Inverse Survival Function (Inverse of SF)
## stats -- Return mean, variance, (Fisher's) skew, or (Fisher's) kurtosis
## nnlf -- negative log likelihood function (to minimize)
## fit -- Model-fitting
##
## Maybe Later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
## To define a new random variable you subclass the rv_continuous class
## and re-define the
##
## _pdf method which will be given clean arguments (in between a and b)
## and passing the argument check method
##
## If positive argument checking is not correct for your RV
## then you will also need to re-define
## _argcheck
## Correct, but potentially slow defaults exist for the remaining
## methods but for speed and/or accuracy you can over-ride
##
## _cdf, _ppf, _rvs, _isf, _sf
##
## Rarely would you override _isf and _sf but you could for numerical precision.
##
## Statistics are computed using numerical integration by default.
## For speed you can redefine this using
##
## _stats --- take shape parameters and return mu, mu2, g1, g2
## --- If you can't compute one of these return it as None
##
## --- Can also be defined with a keyword argument moments=<str>
## where <str> is a string composed of 'm', 'v', 's',
## and/or 'k'. Only the components appearing in string
## should be computed and returned in the order 'm', 'v',
## 's', or 'k' with missing values returned as None
##
## OR
##
## You can override
##
## _munp -- takes n and shape parameters and returns
## -- the nth non-central moment of the distribution.
##
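## For example, a minimal subclass following the recipe above might look like
## this (illustrative sketch only; `rv_continuous` is defined further below
## in this module, so the snippet is shown here as a comment):
##
## class _demo_expon_gen(rv_continuous):
##     def _pdf(self, x):
##         return exp(-x)
## _demo_expon = _demo_expon_gen(a=0.0, name='_demo_expon')
## _demo_expon.cdf(1.0)    # ~ 1 - exp(-1) ~ 0.632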
def valarray(shape,value=nan,typecode=None):
"""Return an array of all value.
"""
out = reshape(repeat([value],product(shape,axis=0),axis=0),shape)
if typecode is not None:
out = out.astype(typecode)
if not isinstance(out, ndarray):
out = arr(out)
return out
# This should be rewritten
def argsreduce(cond, *args):
"""Return the sequence of ravel(args[i]) where ravel(condition) is
True in 1D.
Examples
--------
>>> import numpy as np
>>> rand = np.random.random_sample
>>> A = rand((4,5))
>>> B = 2
>>> C = rand((1,5))
>>> cond = np.ones(A.shape)
>>> [A1,B1,C1] = argsreduce(cond,A,B,C)
>>> B1.shape
(20,)
>>> cond[2,:] = 0
>>> [A2,B2,C2] = argsreduce(cond,A,B,C)
>>> B2.shape
(15,)
"""
newargs = atleast_1d(*args)
if not isinstance(newargs, list):
newargs = [newargs,]
expand_arr = (cond==cond)
return [extract(cond, arr1 * expand_arr) for arr1 in newargs]
class rv_generic(object):
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def _fix_loc_scale(self, args, loc, scale=1):
N = len(args)
if N > self.numargs:
if N == self.numargs + 1 and loc is None:
# loc is given without keyword
loc = args[-1]
if N == self.numargs + 2 and scale is None:
# loc and scale given without keyword
loc, scale = args[-2:]
args = args[:self.numargs]
if scale is None:
scale = 1.0
if loc is None:
loc = 0.0
return args, loc, scale
def _fix_loc(self, args, loc):
args, loc, scale = self._fix_loc_scale(args, loc)
return args, loc
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self,*args,**kwds):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
defining number of random variates (default=1)
Returns
-------
rvs : array-like
random variates of given `size`
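Examples
--------
A small illustrative sketch (``norm`` is the standard normal instance
defined later in this module):
>>> from scipy.stats import norm
>>> samples = norm.rvs(loc=10.0, scale=2.0, size=5)
>>> samples.shape
(5,)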
"""
kwd_names = ['loc', 'scale', 'size', 'discrete']
loc, scale, size, discrete = map(kwds.get, kwd_names,
[None]*len(kwd_names))
args, loc, scale = self._fix_loc_scale(args, loc, scale)
cond = logical_and(self._argcheck(*args),(scale >= 0))
if not all(cond):
raise ValueError("Domain error in arguments.")
# self._size is total size of all output values
self._size = product(size, axis=0)
if self._size is not None and self._size > 1:
size = numpy.array(size, ndmin=1)
if np.all(scale == 0):
return loc*ones(size, 'd')
vals = self._rvs(*args)
if self._size is not None:
vals = reshape(vals, size)
vals = vals * scale + loc
# Cast to int if discrete
if discrete:
if numpy.isscalar(vals):
vals = int(vals)
else:
vals = vals.astype(int)
return vals
def median(self, *args, **kwds):
"""
Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
median : float
the median of the distribution.
See Also
--------
self.ppf --- inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""
Mean of the distribution
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""
Variance of the distribution
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""
Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""Confidence interval with equal areas around the median
Parameters
----------
alpha : array-like float in [0,1]
Probability that an rv will be drawn from the returned range
arg1, arg2, ... : array-like
The shape parameter(s) for the distribution (see docstring of the instance
object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
a, b: array-like (float)
end-points of range that contain alpha % of the rvs
"""
alpha = arr(alpha)
if any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances from for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default) for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xa : float, optional
Lower bound for fixed point calculation for generic ppf.
xb : float, optional
Upper bound for fixed point calculation for generic ppf.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : object, optional
The value in a result arrays that indicates a value that for which
some argument restriction is violated, default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
Methods
-------
rvs(<shape(s)>, loc=0, scale=1, size=1)
random variates
pdf(x, <shape(s)>, loc=0, scale=1)
probability density function
logpdf(x, <shape(s)>, loc=0, scale=1)
log of the probability density function
cdf(x, <shape(s)>, loc=0, scale=1)
cumulative density function
logcdf(x, <shape(s)>, loc=0, scale=1)
log of the cumulative density function
sf(x, <shape(s)>, loc=0, scale=1)
survival function (1-cdf --- sometimes more accurate)
logsf(x, <shape(s)>, loc=0, scale=1)
log of the survival function
ppf(q, <shape(s)>, loc=0, scale=1)
percent point function (inverse of cdf --- quantiles)
isf(q, <shape(s)>, loc=0, scale=1)
inverse survival function (inverse of sf)
moment(n, <shape(s)>, loc=0, scale=1)
non-central n-th moment of the distribution. May not work for array arguments.
stats(<shape(s)>, loc=0, scale=1, moments='mv')
mean('m'), variance('v'), skew('s'), and/or kurtosis('k')
entropy(<shape(s)>, loc=0, scale=1)
(differential) entropy of the RV.
fit(data, <shape(s)>, loc=0, scale=1)
Parameter estimates for generic data
expect(func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds)
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
median(<shape(s)>, loc=0, scale=1)
Median of the distribution.
mean(<shape(s)>, loc=0, scale=1)
Mean of the distribution.
std(<shape(s)>, loc=0, scale=1)
Standard deviation of the distribution.
var(<shape(s)>, loc=0, scale=1)
Variance of the distribution.
interval(alpha, <shape(s)>, loc=0, scale=1)
Interval that with `alpha` percent probability contains a random
realization of this distribution.
__call__(<shape(s)>, loc=0, scale=1)
Calling a distribution instance creates a frozen RV object with the
same methods but holding the given shape, location, and scale fixed.
See Notes section.
**Parameters for Methods**
x : array-like
quantiles
q : array-like
lower or upper tail probability
<shape(s)> : array-like
shape parameters
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments )
moments : string, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis. (default='mv')
n : int
order of moment to calculate in method moments
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
be useful for cross-checking and for debugging, but might not work in all
cases when directly called.
Notes
-----
**Frozen Distribution**
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
**Subclassing**
New random variables can be defined by subclassing rv_continuous class
and re-defining at least the
_pdf or the _cdf method (normalized to location 0 and scale 1)
which will be given clean arguments (in between a and b) and
passing the argument check method
If positive argument checking is not correct for your RV
then you will also need to re-define ::
_argcheck
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride ::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override _isf, _sf, and _logsf but you could.
Statistics are computed using numerical integration by default.
For speed you can redefine this using
_stats
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument moments=<str>
where <str> is a string composed of 'm', 'v', 's',
and/or 'k'. Only the components appearing in string
should be computed and returned in the order 'm', 'v',
's', or 'k' with missing values returned as None
OR
You can override
_munp
takes n and shape parameters and returns
the nth non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following::
class gaussian_gen(rv_continuous):
"Gaussian distribution"
def _pdf(self, x):
    return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
"""
def __init__(self, momtype=1, a=None, b=None, xa=-10.0, xb=10.0,
xtol=1e-14, badvalue=None, name=None, longname=None,
shapes=None, extradoc=None):
rv_generic.__init__(self)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xa = xa
self.xb = xb
self.xtol = xtol
self._size = 1
self.m = 0.0
self.moment_type = momtype
self.expandarr = 1
if not hasattr(self,'numargs'):
#allows more general subclassing with *args
cdf_signature = inspect.getargspec(self._cdf.im_func)
numargs1 = len(cdf_signature[0]) - 2
pdf_signature = inspect.getargspec(self._pdf.im_func)
numargs2 = len(pdf_signature[0]) - 2
self.numargs = max(numargs1, numargs2)
#nin correction
self.vecfunc = sgf(self._ppf_single_call,otypes='d')
self.vecfunc.nin = self.numargs + 1
self.vecentropy = sgf(self._entropy,otypes='d')
self.vecentropy.nin = self.numargs + 1
self.veccdf = sgf(self._cdf_single_call,otypes='d')
self.veccdf.nin = self.numargs + 1
self.shapes = shapes
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = sgf(self._mom0_sc,otypes='d')
else:
self.generic_moment = sgf(self._mom1_sc,otypes='d')
self.generic_moment.nin = self.numargs+1 # Because of the *args argument
# of _mom0_sc, vectorize cannot count the number of arguments correctly.
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
# generate docstring for subclass instances
if self.__doc__ is None:
self._construct_default_doc(longname=longname, extradoc=extradoc)
else:
self._construct_doc()
## This only works for old-style classes...
# self.__class__.__doc__ = self.__doc__
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s continuous random variable.'%longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc()
def _construct_doc(self):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['callparams', 'default', 'before_notes']:
tempdict[item] = tempdict[item].replace(\
"\n%(shapes)s : array-like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (with and without ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
def _ppf_to_solve(self, x, q,*args):
return self.cdf(x, *args) - q
def _ppf_single_call(self, q, *args):
return optimize.brentq(self._ppf_to_solve, self.xa, self.xb, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x,m,*args):
return x**m * self.pdf(x,*args)
def _mom0_sc(self, m,*args):
return integrate.quad(self._mom_integ0, self.a,
self.b, args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q,m,*args):
return (self.ppf(q,*args))**m
def _mom1_sc(self, m,*args):
return integrate.quad(self._mom_integ1, 0, 1,args=(m,)+args)[0]
## These are the methods you must define (standard form functions)
def _argcheck(self, *args):
# Default check for correct values on args and keywords.
# Returns condition array of 1's where arguments are correct and
# 0's where they are not.
cond = 1
for arg in args:
cond = logical_and(cond,(arr(arg) > 0))
return cond
def _pdf(self,x,*args):
return derivative(self._cdf,x,dx=1e-5,args=args,order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
## (return a 1-d array, using self._size to get the number of samples)
def _rvs(self, *args):
## Use basic inverse cdf algorithm for RV generation as default.
U = mtrand.sample(self._size)
Y = self._ppf(U,*args)
return Y
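## Note: the default _rvs above is inverse-transform sampling: uniform draws U
## are pushed through _ppf.  For instance, the exponential distribution has
## _ppf(U) = -log1p(-U) (see expon_gen below), so uniform samples map directly
## to exponential samples.  Subclasses such as norm_gen override _rvs with a
## faster dedicated generator when one is available.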
def _cdf_single_call(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
def _cdf(self, x, *args):
return self.veccdf(x,*args)
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x,*args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self.vecfunc(q,*args)
def _isf(self, q, *args):
return self._ppf(1.0-q,*args) #use correct _ppf for subclasses
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self,*args, **kwds):
return None, None, None, None
# Non-central moments (moments about zero)
def _munp(self,n,*args):
return self.generic_moment(n,*args)
def pdf(self,x,*args,**kwds):
"""
Probability density function at x of the given RV.
Parameters
----------
x : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
pdf : array-like
Probability density function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = arr((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
putmask(output,(1-cond0)*array(cond1,bool),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output,cond,self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""
Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
logpdf : array-like
Log of the probability density function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = arr((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
putmask(output,(1-cond0)*array(cond1,bool),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output,cond,self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self,x,*args,**kwds):
"""
Cumulative distribution function at x of the given RV.
Parameters
----------
x : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
cdf : array-like
Cumulative distribution function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0)*(cond1==cond1),self.badvalue)
place(output,cond2,1.0)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output,cond,self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self,x,*args,**kwds):
"""
Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
logcdf : array-like
Log of the cumulative distribution function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0)*(cond1==cond1),self.badvalue)
place(output,cond2,0.0)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output,cond,self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self,x,*args,**kwds):
"""
Survival function (1-cdf) at x of the given RV.
Parameters
----------
x : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
sf : array-like
Survival function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0)*(cond1==cond1),self.badvalue)
place(output,cond2,1.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output,cond,self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self,x,*args,**kwds):
"""
Log of the Survival function log(1-cdf) at x of the given RV.
Parameters
----------
x : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
logsf : array-like
Log of the survival function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(arr,(x,loc,scale))
args = tuple(map(arr,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0)*(cond1==cond1),self.badvalue)
place(output,cond2,0.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output,cond,self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self,q,*args,**kwds):
"""
Percent point function (inverse of cdf) at q of the given RV.
Parameters
----------
q : array-like
lower tail probability
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
x : array-like
quantile corresponding to the lower tail probability q.
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
q,loc,scale = map(arr,(q,loc,scale))
args = tuple(map(arr,args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond),value=self.a*scale + loc)
place(output,(1-cond0)+(1-cond1)*(q!=0.0), self.badvalue)
place(output,cond2,self.b*scale + loc)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale,loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output,cond,self._ppf(*goodargs)*scale + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self,q,*args,**kwds):
"""
Inverse survival function at q of the given RV.
Parameters
----------
q : array-like
upper tail probability
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
Returns
-------
x : array-like
quantile corresponding to the upper tail probability q.
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
q,loc,scale = map(arr,(q,loc,scale))
args = tuple(map(arr,args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond),value=self.b)
#output = place(output,(1-cond0)*(cond1==cond1), self.badvalue)
place(output,(1-cond0)*(cond1==cond1)+(1-cond1)*(q!=0.0), self.badvalue)
place(output,cond2,self.a)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale,loc))) #PB replace 1-q by q
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output,cond,self._isf(*goodargs)*scale + loc) #PB use _isf instead of _ppf
if output.ndim == 0:
return output[()]
return output
def stats(self,*args,**kwds):
"""
Some statistics of the given RV
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
moments : string, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default='mv')
Returns
-------
stats : sequence
of requested moments.
"""
loc,scale,moments=map(kwds.get,['loc','scale','moments'])
N = len(args)
if N > self.numargs:
if N == self.numargs + 1 and loc is None:
# loc is given without keyword
loc = args[-1]
if N == self.numargs + 2 and scale is None:
# loc and scale given without keyword
loc, scale = args[-2:]
if N == self.numargs + 3 and moments is None:
# loc, scale, and moments
loc, scale, moments = args[-3:]
args = args[:self.numargs]
if scale is None: scale = 1.0
if loc is None: loc = 0.0
if moments is None: moments = 'mv'
loc,scale = map(arr,(loc,scale))
args = tuple(map(arr,args))
cond = self._argcheck(*args) & (scale > 0) & (loc==loc)
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
mu, mu2, g1, g2 = self._stats(*args,**{'moments':moments})
else:
mu, mu2, g1, g2 = self._stats(*args)
if g1 is None:
mu3 = None
else:
mu3 = g1*np.power(mu2,1.5) #(mu2**1.5) breaks down for nan and inf
default = valarray(shape(cond), self.badvalue)
output = []
# Use only entries that are valid in calculation
if any(cond):
goodargs = argsreduce(cond, *(args+(scale,loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if 'm' in moments:
if mu is None:
mu = self._munp(1.0,*goodargs)
out0 = default.copy()
place(out0,cond,mu*scale+loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
mu2 = mu2p - mu*mu
if np.isinf(mu):
#if mean is inf then var is also inf
mu2 = np.inf
out0 = default.copy()
place(out0,cond,mu2*scale*scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
mu3 = mu3p - 3*mu*mu2 - mu**3
g1 = mu3 / mu2**1.5
out0 = default.copy()
place(out0,cond,g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
if mu3 is None:
mu3p = self._munp(3.0,*goodargs)
mu3 = mu3p - 3*mu*mu2 - mu**3
mu4 = mu4p - 4*mu*mu3 - 6*mu*mu*mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0,cond,g2)
output.append(out0)
else: #no valid args
output = []
for _ in moments:
out0 = default.copy()
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
def moment(self, n, *args, **kwds):
"""
n'th order non-central moment of distribution
Parameters
----------
n: int, n>=1
order of moment
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : float, optional
location parameter (default=0)
scale : float, optional
scale parameter (default=1)
"""
loc = kwds.get('loc', 0)
scale = kwds.get('scale', 1)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0): raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
mdict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args,**mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# so E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n,k)*(S/L)^k E[Y^k],k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n,k,exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
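# Worked instance of the expansion in the comment above: for n = 2 the loop
# plus the final multiplication by loc**n give
#   E[X**2] = loc**2 + 2*loc*scale*E[Y] + scale**2*E[Y**2],
# i.e. the binomial expansion of E[(loc + scale*Y)**2].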
def _nnlf(self, x, *args):
return -sum(self._logpdf(x, *args),axis=0)
def nnlf(self, theta, x):
# - sum (log pdf(x, theta),axis=0)
# where theta are the parameters (including loc and scale)
#
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = arr((x-loc) / scale)
cond0 = (x <= self.a) | (x >= self.b)
if (any(cond0)):
return inf
else:
N = len(x)
return self._nnlf(x, *args) + N*log(scale)
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
return args + self.fit_loc_scale(data, *args)
# Return the (possibly reduced) function to optimize in order to find MLE
# estimates for the .fit method
def _reduce_func(self, args, kwds):
args = list(args)
Nargs = len(args) - 2
fixedn = []
index = range(Nargs) + [-2, -1]
names = ['f%d' % n for n in range(Nargs)] + ['floc', 'fscale']
x0 = args[:]
for n, key in zip(index, names):
if kwds.has_key(key):
fixedn.append(n)
args[n] = kwds[key]
del x0[n]
if len(fixedn) == 0:
func = self.nnlf
restore = None
else:
if len(fixedn) == len(index):
raise ValueError("All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return self.nnlf(newtheta, x)
return x0, func, restore, args
def fit(self, data, *args, **kwds):
"""
Return MLEs for shape, location, and scale parameters from data.
MLE stands for Maximum Likelihood Estimate. Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
such.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in calculating the MLEs
args : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
Starting values for the location and scale parameters; no default.
Special keyword arguments are recognized as holding certain
parameters fixed:
f0...fn : hold respective shape parameters fixed.
floc : hold location parameter fixed to specified value.
fscale : hold scale parameter fixed to specified value.
optimizer : The optimizer to use. The optimizer must take func,
and starting position as the first two arguments,
plus args (for extra arguments to pass to the
function to be optimized) and disp=0 to suppress
output as keyword arguments.
Returns
-------
shape, loc, scale : tuple of floats
MLEs for any shape statistics, followed by those for location and
scale.
"""
Narg = len(args)
if Narg > self.numargs:
raise ValueError("Too many input arguments.")
start = [None]*2
if (Narg < self.numargs) or not (kwds.has_key('loc') and
kwds.has_key('scale')):
start = self._fitstart(data) # get distribution specific starting locations
args += start[Narg:-2]
loc = kwds.get('loc', start[-2])
scale = kwds.get('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds)
optimizer = kwds.get('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
if not callable(optimizer) and isinstance(optimizer, (str, unicode)):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError:
raise ValueError("%s is not a valid optimizer" % optimizer)
vals = optimizer(func,x0,args=(ravel(data),),disp=0)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
return vals
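# Usage sketch (data below is a hypothetical array, not defined here): shape
# parameters can be held fixed through the f0/f1/.../floc/fscale keywords
# documented above, e.g.
#   a, loc, scale = gamma.fit(data, floc=0)   # location pinned at 0
#   loc, scale = norm.fit(data)               # no shape parameters
# A different scipy.optimize routine can be selected with, e.g.,
# optimizer='fmin_powell'.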
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments
"""
mu, mu2 = self.stats(*args,**{'moments':'mv'})
muhat = arr(data).mean()
mu2hat = arr(data).var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
return Lhat, Shat
@np.deprecate
def est_loc_scale(self, data, *args):
"""This function is deprecated, use self.fit_loc_scale(data) instead. """
return self.fit_loc_scale(data, *args)
def freeze(self,*args,**kwds):
return rv_frozen(self,*args,**kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return val*log(val)
entr = -integrate.quad(integ,self.a,self.b)[0]
if not np.isnan(entr):
return entr
else: # try with different limits if integration problems
low,upp = self.ppf([0.001,0.999],*args)
if np.isinf(self.b):
upper = upp
else:
upper = self.b
if np.isinf(self.a):
lower = low
else:
lower = self.a
return -integrate.quad(integ,lower,upper)[0]
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale : array-like, optional
scale parameter (default=1)
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
args = tuple(map(arr,args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc)
output = zeros(shape(cond0),'d')
place(output,(1-cond0),self.badvalue)
goodargs = argsreduce(cond0, *args)
#I don't know when or why vecentropy got broken when numargs == 0
if self.numargs == 0:
place(output,cond0,self._entropy()+log(scale))
else:
place(output,cond0,self.vecentropy(*goodargs)+log(scale))
return output
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""calculate expected value of a function with respect to the distribution
location and scale only tested on a few examples
Parameters
----------
all parameters are keyword parameters
func : function (default: identity mapping)
Function for which integral is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
lb, ub : numbers
lower and upper bound for integration, default is set to the support
of the distribution
conditional : boolean (False)
If true then the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expected value : float
Notes
-----
This function has not been checked for its behavior when the integral is
not finite. The integration behavior is inherited from integrate.quad.
"""
lockwds = {'loc': loc,
'scale':scale}
if func is None:
def fun(x, *args):
return x*self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x)*self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + self.a * scale
if ub is None:
ub = loc + self.b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
return integrate.quad(fun, lb, ub, **kwds)[0] / invfac
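# Illustration: expect integrates func(x)*pdf(x) over the support, so for the
# standard normal (defined below)
#   norm.expect(lambda x: x**2)                        # second moment, 1.0
#   norm.expect(lambda x: x, lb=0, conditional=True)   # E[X | X > 0] = sqrt(2/pi)
# where conditional=True renormalizes by the probability of the interval.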
_EULER = 0.577215664901532860606512090082402431042 # -special.psi(1)
_ZETA3 = 1.202056903159594285399738161511449990765 # special.zeta(3,1) Apery's constant
## Kolmogorov-Smirnov one-sided and two-sided test statistics
class ksone_gen(rv_continuous):
def _cdf(self,x,n):
return 1.0-special.smirnov(n,x)
def _ppf(self,q,n):
return special.smirnovi(n,1.0-q)
ksone = ksone_gen(a=0.0,name='ksone', longname="Kolmogorov-Smirnov "\
"A one-sided test statistic.", shapes="n",
extradoc="""
General Kolmogorov-Smirnov one-sided test.
"""
)
class kstwobign_gen(rv_continuous):
def _cdf(self,x):
return 1.0-special.kolmogorov(x)
def _sf(self,x):
return special.kolmogorov(x)
def _ppf(self,q):
return special.kolmogi(1.0-q)
kstwobign = kstwobign_gen(a=0.0,name='kstwobign', longname='Kolmogorov-Smirnov two-sided (for large N)', extradoc="""
Kolmogorov-Smirnov two-sided test for large N
"""
)
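## Example: kstwobign is the asymptotic (large-N) null distribution of
## sqrt(N)*D_N for the two-sided Kolmogorov-Smirnov statistic, so an
## approximate two-sided p-value for an observed statistic d at sample size n
## is kstwobign.sf(d*sqrt(n)).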
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
_norm_pdf_C = math.sqrt(2*pi)
_norm_pdf_logC = math.log(_norm_pdf_C)
def _norm_pdf(x):
return exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
return special.ndtr(x)
def _norm_logcdf(x):
return log(special.ndtr(x))
def _norm_ppf(q):
return special.ndtri(q)
class norm_gen(rv_continuous):
def _rvs(self):
return mtrand.standard_normal(self._size)
def _pdf(self,x):
return _norm_pdf(x)
def _logpdf(self, x):
return _norm_logpdf(x)
def _cdf(self,x):
return _norm_cdf(x)
def _logcdf(self, x):
return _norm_logcdf(x)
def _sf(self, x):
return _norm_cdf(-x)
def _logsf(self, x):
return _norm_logcdf(-x)
def _ppf(self,q):
return _norm_ppf(q)
def _isf(self,q):
return -_norm_ppf(q)
def _stats(self):
return 0.0, 1.0, 0.0, 0.0
def _entropy(self):
return 0.5*(log(2*pi)+1)
norm = norm_gen(name='norm',longname='A normal',extradoc="""
Normal distribution
The location (loc) keyword specifies the mean.
The scale (scale) keyword specifies the standard deviation.
normal.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
""")
## Alpha distribution
##
class alpha_gen(rv_continuous):
def _pdf(self, x, a):
return 1.0/(x**2)/special.ndtr(a)*_norm_pdf(a-1.0/x)
def _logpdf(self, x, a):
return -2*log(x) + _norm_logpdf(a-1.0/x) - log(special.ndtr(a))
def _cdf(self, x, a):
return special.ndtr(a-1.0/x) / special.ndtr(a)
def _ppf(self, q, a):
return 1.0/arr(a-special.ndtri(q*special.ndtr(a)))
def _stats(self, a):
return [inf]*2 + [nan]*2
alpha = alpha_gen(a=0.0,name='alpha',shapes='a',extradoc="""
Alpha distribution
alpha.pdf(x,a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2)
where Phi(alpha) is the normal CDF, x > 0, and a > 0.
""")
## Anglit distribution
##
class anglit_gen(rv_continuous):
def _pdf(self, x):
return cos(2*x)
def _cdf(self, x):
return sin(x+pi/4)**2.0
def _ppf(self, q):
return (arcsin(sqrt(q))-pi/4)
def _stats(self):
return 0.0, pi*pi/16-0.5, 0.0, -2*(pi**4 - 96)/(pi*pi-8)**2
def _entropy(self):
return 1-log(2)
anglit = anglit_gen(a=-pi/4,b=pi/4,name='anglit', extradoc="""
Anglit distribution
anglit.pdf(x) = sin(2*x+pi/2) = cos(2*x) for -pi/4 <= x <= pi/4
""")
## Arcsine distribution
##
class arcsine_gen(rv_continuous):
def _pdf(self, x):
return 1.0/pi/sqrt(x*(1-x))
def _cdf(self, x):
return 2.0/pi*arcsin(sqrt(x))
def _ppf(self, q):
return sin(pi/2.0*q)**2.0
def _stats(self):
#mup = 0.5, 3.0/8.0, 15.0/48.0, 35.0/128.0
mu = 0.5
mu2 = 1.0/8
g1 = 0
g2 = -3.0/2.0
return mu, mu2, g1, g2
def _entropy(self):
return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0,b=1.0,name='arcsine',extradoc="""
Arcsine distribution
arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
for 0 < x < 1.
""")
## Beta distribution
##
class beta_gen(rv_continuous):
def _rvs(self, a, b):
return mtrand.beta(a,b,self._size)
def _pdf(self, x, a, b):
Px = (1.0-x)**(b-1.0) * x**(a-1.0)
Px /= special.beta(a,b)
return Px
def _logpdf(self, x, a, b):
lPx = (b-1.0)*log(1.0-x) + (a-1.0)*log(x)
lPx -= log(special.beta(a,b))
return lPx
def _cdf(self, x, a, b):
return special.btdtr(a,b,x)
def _ppf(self, q, a, b):
return special.btdtri(a,b,q)
def _stats(self, a, b):
mn = a *1.0 / (a + b)
var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0
g1 = 2.0*(b-a)*sqrt((1.0+a+b)/(a*b)) / (2+a+b)
g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b))
g2 /= a*b*(a+b+2)*(a+b+3)
return mn, var, g1, g2
def _fitstart(self, data):
g1 = _skew(data)
g2 = _kurtosis(data)
def func(x):
a, b = x
sk = 2*(b-a)*sqrt(a + b + 1) / (a + b + 2) / sqrt(a*b)
ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
ku /= a*b*(a+b+2)*(a+b+3)
ku *= 6
return [sk-g1, ku-g2]
a, b = optimize.fsolve(func, (1.0, 1.0))
return super(beta_gen, self)._fitstart(data, args=(a,b))
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is not None and fscale is not None:
# special case
data = (ravel(data)-floc)/fscale
xbar = data.mean()
v = data.var(ddof=0)
fac = xbar*(1-xbar)/v - 1
a = xbar * fac
b = (1-xbar) * fac
return a, b, floc, fscale
else: # do general fit
return super(beta_gen, self).fit(data, *args, **kwds)
beta = beta_gen(a=0.0, b=1.0, name='beta',shapes='a, b',extradoc="""
Beta distribution
beta.pdf(x, a, b) = gamma(a+b)/(gamma(a)*gamma(b)) * x**(a-1) * (1-x)**(b-1)
for 0 < x < 1, a, b > 0.
""")
## Beta Prime
class betaprime_gen(rv_continuous):
def _rvs(self, a, b):
u1 = gamma.rvs(a,size=self._size)
u2 = gamma.rvs(b,size=self._size)
return (u1 / u2)
def _pdf(self, x, a, b):
return 1.0/special.beta(a,b)*x**(a-1.0)/(1+x)**(a+b)
def _logpdf(self, x, a, b):
return (a-1.0)*log(x) - (a+b)*log(1+x) - log(special.beta(a,b))
def _cdf_skip(self, x, a, b):
# remove for now: special.hyp2f1 is incorrect for large a
x = where(x==1.0, 1.0-1e-6,x)
return pow(x,a)*special.hyp2f1(a+b,a,1+a,-x)/a/special.beta(a,b)
def _munp(self, n, a, b):
if (n == 1.0):
return where(b > 1, a/(b-1.0), inf)
elif (n == 2.0):
return where(b > 2, a*(a+1.0)/((b-2.0)*(b-1.0)), inf)
elif (n == 3.0):
return where(b > 3, a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
inf)
elif (n == 4.0):
return where(b > 4,
a*(a+1.0)*(a+2.0)*(a+3.0)/((b-4.0)*(b-3.0) \
*(b-2.0)*(b-1.0)), inf)
else:
raise NotImplementedError
betaprime = betaprime_gen(a=0.0, b=500.0, name='betaprime', shapes='a, b',
extradoc="""
Beta prime distribution
betaprime.pdf(x, a, b) = gamma(a+b)/(gamma(a)*gamma(b))
* x**(a-1) * (1+x)**(-a-b)
for x > 0, a, b > 0.
""")
## Bradford
##
class bradford_gen(rv_continuous):
def _pdf(self, x, c):
return c / (c*x + 1.0) / log(1.0+c)
def _cdf(self, x, c):
return log(1.0+c*x) / log(c+1.0)
def _ppf(self, q, c):
return ((1.0+c)**q-1)/c
def _stats(self, c, moments='mv'):
k = log(1.0+c)
mu = (c-k)/(c*k)
mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
g1 = None
g2 = None
if 's' in moments:
g1 = sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
g1 /= sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
if 'k' in moments:
g2 = c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) \
+ 6*c*k*k*(3*k-14) + 12*k**3
g2 /= 3*c*(c*(k-2)+2*k)**2
return mu, mu2, g1, g2
def _entropy(self, c):
k = log(1+c)
return k/2.0 - log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford', longname="A Bradford",
shapes='c', extradoc="""
Bradford distribution
bradford.pdf(x,c) = c/(k*(1+c*x))
for 0 < x < 1, c > 0 and k = log(1+c).
""")
## Burr
# burr with d=1 is called the fisk distribution
class burr_gen(rv_continuous):
def _pdf(self, x, c, d):
return c*d*(x**(-c-1.0))*((1+x**(-c*1.0))**(-d-1.0))
def _cdf(self, x, c, d):
return (1+x**(-c*1.0))**(-d*1.0)
def _ppf(self, q, c, d):
return (q**(-1.0/d)-1)**(-1.0/c)
def _stats(self, c, d, moments='mv'):
g2c, g2cd = gam(1-2.0/c), gam(2.0/c+d)
g1c, g1cd = gam(1-1.0/c), gam(1.0/c+d)
gd = gam(d)
k = gd*g2c*g2cd - g1c**2 * g1cd**2
mu = g1c*g1cd / gd
mu2 = k / gd**2.0
g1, g2 = None, None
g3c, g3cd = None, None
if 's' in moments:
g3c, g3cd = gam(1-3.0/c), gam(3.0/c+d)
g1 = 2*g1c**3 * g1cd**3 + gd*gd*g3c*g3cd - 3*gd*g2c*g1c*g1cd*g2cd
g1 /= sqrt(k**3)
if 'k' in moments:
if g3c is None:
g3c = gam(1-3.0/c)
if g3cd is None:
g3cd = gam(3.0/c+d)
g4c, g4cd = gam(1-4.0/c), gam(4.0/c+d)
g2 = 6*gd*g2c*g2cd * g1c**2 * g1cd**2 + gd**3 * g4c*g4cd
g2 -= 3*g1c**4 * g1cd**4 -4*gd**2*g3c*g1c*g1cd*g3cd
return mu, mu2, g1, g2
burr = burr_gen(a=0.0, name='burr', longname="Burr",
shapes="c, d", extradoc="""
Burr distribution
burr.pdf(x,c,d) = c*d * x**(-c-1) * (1+x**(-c))**(-d-1)
for x > 0.
""")
# Fisk distribution
# burr is a generalization
class fisk_gen(burr_gen):
def _pdf(self, x, c):
return burr_gen._pdf(self, x, c, 1.0)
def _cdf(self, x, c):
return burr_gen._cdf(self, x, c, 1.0)
def _ppf(self, x, c):
return burr_gen._ppf(self, x, c, 1.0)
def _stats(self, c):
return burr_gen._stats(self, c, 1.0)
def _entropy(self, c):
return 2 - log(c)
fisk = fisk_gen(a=0.0, name='fisk', longname="Fisk",
shapes='c', extradoc="""
Fisk distribution.
Also known as the log-logistic distribution.
Burr distribution with d=1.
"""
)
## Cauchy
# median = loc
class cauchy_gen(rv_continuous):
def _pdf(self, x):
return 1.0/pi/(1.0+x*x)
def _cdf(self, x):
return 0.5 + 1.0/pi*arctan(x)
def _ppf(self, q):
return tan(pi*q-pi/2.0)
def _sf(self, x):
return 0.5 - 1.0/pi*arctan(x)
def _isf(self, q):
return tan(pi/2.0-pi*q)
def _stats(self):
return inf, inf, nan, nan
def _entropy(self):
return log(4*pi)
cauchy = cauchy_gen(name='cauchy',longname='Cauchy',extradoc="""
Cauchy distribution
cauchy.pdf(x) = 1/(pi*(1+x**2))
This is the t distribution with one degree of freedom.
"""
)
## Chi
## (positive square-root of chi-square)
## chi(1, loc, scale) = halfnormal
## chi(2, 0, scale) = Rayleigh
## chi(3, 0, scale) = Maxwell
class chi_gen(rv_continuous):
def _rvs(self, df):
return sqrt(chi2.rvs(df,size=self._size))
def _pdf(self, x, df):
return x**(df-1.)*exp(-x*x*0.5)/(2.0)**(df*0.5-1)/gam(df*0.5)
def _cdf(self, x, df):
return special.gammainc(df*0.5,0.5*x*x)
def _ppf(self, q, df):
return sqrt(2*special.gammaincinv(df*0.5,q))
def _stats(self, df):
mu = sqrt(2)*special.gamma(df/2.0+0.5)/special.gamma(df/2.0)
mu2 = df - mu*mu
g1 = (2*mu**3.0 + mu*(1-2*df))/arr(mu2**1.5)
g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
g2 /= arr(mu2**2.0)
return mu, mu2, g1, g2
chi = chi_gen(a=0.0,name='chi',shapes='df',extradoc="""
Chi distribution
chi.pdf(x,df) = x**(df-1)*exp(-x**2/2)/(2**(df/2-1)*gamma(df/2))
for x > 0.
"""
)
## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2)
class chi2_gen(rv_continuous):
def _rvs(self, df):
return mtrand.chisquare(df,self._size)
def _pdf(self, x, df):
return exp(self._logpdf(x, df))
def _logpdf(self, x, df):
#term1 = (df/2.-1)*log(x)
#term1[(df==2)*(x==0)] = 0
#avoid 0*log(0)==nan
return (df/2.-1)*log(x+1e-300) - x/2. - gamln(df/2.) - (log(2)*df)/2.
## Px = x**(df/2.0-1)*exp(-x/2.0)
## Px /= special.gamma(df/2.0)* 2**(df/2.0)
## return log(Px)
def _cdf(self, x, df):
return special.chdtr(df, x)
def _sf(self, x, df):
return special.chdtrc(df, x)
def _isf(self, p, df):
return special.chdtri(df, p)
def _ppf(self, p, df):
return self._isf(1.0-p, df)
def _stats(self, df):
mu = df
mu2 = 2*df
g1 = 2*sqrt(2.0/df)
g2 = 12.0/df
return mu, mu2, g1, g2
chi2 = chi2_gen(a=0.0,name='chi2',longname='A chi-squared',shapes='df',
extradoc="""
Chi-squared distribution
chi2.pdf(x,df) = 1/(2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
"""
)
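## As noted above, chi2 with df degrees of freedom is a gamma distribution
## with shape df/2 and scale 2, so for any x > 0
##   chi2.pdf(x, df) == gamma.pdf(x, df/2.0, scale=2.0)   (up to rounding)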
## Cosine (Approximation to the Normal)
class cosine_gen(rv_continuous):
def _pdf(self, x):
return 1.0/2/pi*(1+cos(x))
def _cdf(self, x):
return 1.0/2/pi*(pi + x + sin(x))
def _stats(self):
return 0.0, pi*pi/3.0-2.0, 0.0, -6.0*(pi**4-90)/(5.0*(pi*pi-6)**2)
def _entropy(self):
return log(4*pi)-1.0
cosine = cosine_gen(a=-pi,b=pi,name='cosine',extradoc="""
Cosine distribution (approximation to the normal)
cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
for -pi <= x <= pi.
""")
## Double Gamma distribution
class dgamma_gen(rv_continuous):
def _rvs(self, a):
u = random(size=self._size)
return (gamma.rvs(a,size=self._size)*where(u>=0.5,1,-1))
def _pdf(self, x, a):
ax = abs(x)
return 1.0/(2*special.gamma(a))*ax**(a-1.0) * exp(-ax)
def _logpdf(self, x, a):
ax = abs(x)
return (a-1.0)*log(ax) - ax - log(2) - gamln(a)
def _cdf(self, x, a):
fac = 0.5*special.gammainc(a,abs(x))
return where(x>0,0.5+fac,0.5-fac)
def _sf(self, x, a):
fac = 0.5*special.gammainc(a,abs(x))
#return where(x>0,0.5-0.5*fac,0.5+0.5*fac)
return where(x>0,0.5-fac,0.5+fac)
def _ppf(self, q, a):
fac = special.gammainccinv(a,1-abs(2*q-1))
return where(q>0.5, fac, -fac)
def _stats(self, a):
mu2 = a*(a+1.0)
return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma',longname="A double gamma",
shapes='a',extradoc="""
Double gamma distribution
dgamma.pdf(x,a) = 1/(2*gamma(a))*abs(x)**(a-1)*exp(-abs(x))
for a > 0.
"""
)
## Double Weibull distribution
##
class dweibull_gen(rv_continuous):
def _rvs(self, c):
u = random(size=self._size)
return weibull_min.rvs(c, size=self._size)*(where(u>=0.5,1,-1))
def _pdf(self, x, c):
ax = abs(x)
Px = c/2.0*ax**(c-1.0)*exp(-ax**c)
return Px
def _logpdf(self, x, c):
ax = abs(x)
return log(c) - log(2.0) + (c-1.0)*log(ax) - ax**c
def _cdf(self, x, c):
Cx1 = 0.5*exp(-abs(x)**c)
return where(x > 0, 1-Cx1, Cx1)
def _ppf_skip(self, q, c):
fac = where(q<=0.5,2*q,2*q-1)
fac = pow(arr(log(1.0/fac)),1.0/c)
return where(q>0.5,fac,-fac)
def _stats(self, c):
var = gam(1+2.0/c)
return 0.0, var, 0.0, gam(1+4.0/c)/var
dweibull = dweibull_gen(name='dweibull',longname="A double Weibull",
shapes='c',extradoc="""
Double Weibull distribution
dweibull.pdf(x,c) = c/2*abs(x)**(c-1)*exp(-abs(x)**c)
"""
)
## ERLANG
##
## Special case of the Gamma distribution with shape parameter an integer.
##
class erlang_gen(rv_continuous):
def _rvs(self, n):
return gamma.rvs(n,size=self._size)
def _argcheck(self, n):
return (n > 0) & (floor(n)==n)
def _pdf(self, x, n):
Px = (x)**(n-1.0)*exp(-x)/special.gamma(n)
return Px
def _logpdf(self, x, n):
return (n-1.0)*log(x) - x - gamln(n)
def _cdf(self, x, n):
return special.gdtr(1.0,n,x)
def _sf(self, x, n):
return special.gdtrc(1.0,n,x)
def _ppf(self, q, n):
return special.gdtrix(1.0, n, q)
def _stats(self, n):
n = n*1.0
return n, n, 2/sqrt(n), 6/n
def _entropy(self, n):
return special.psi(n)*(1-n) + 1 + gamln(n)
erlang = erlang_gen(a=0.0,name='erlang',longname='An Erlang',
shapes='n',extradoc="""
Erlang distribution (Gamma with integer shape parameter)
"""
)
## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale)
## scale == 1.0 / lambda
class expon_gen(rv_continuous):
def _rvs(self):
return mtrand.standard_exponential(self._size)
def _pdf(self, x):
return exp(-x)
def _logpdf(self, x):
return -x
def _cdf(self, x):
return -expm1(-x)
def _ppf(self, q):
return -log1p(-q)
def _sf(self,x):
return exp(-x)
def _logsf(self, x):
return -x
def _isf(self,q):
return -log(q)
def _stats(self):
return 1.0, 1.0, 2.0, 6.0
def _entropy(self):
return 1.0
expon = expon_gen(a=0.0,name='expon',longname="An exponential",
extradoc="""
Exponential distribution
expon.pdf(x) = exp(-x)
for x >= 0.
scale = 1.0 / lambda
"""
)
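## The rate parametrization lam*exp(-lam*x) is obtained through the scale
## keyword with scale = 1.0/lam, e.g. expon.pdf(x, scale=1.0/lam) for
## hypothetical values x >= 0 and lam > 0.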
## Exponentiated Weibull
class exponweib_gen(rv_continuous):
def _pdf(self, x, a, c):
exc = exp(-x**c)
return a*c*(1-exc)**arr(a-1) * exc * x**(c-1)
def _logpdf(self, x, a, c):
exc = exp(-x**c)
return log(a) + log(c) + (a-1.)*log(1-exc) - x**c + (c-1.0)*log(x)
def _cdf(self, x, a, c):
exm1c = -expm1(-x**c)
return arr((exm1c)**a)
def _ppf(self, q, a, c):
return (-log1p(-q**(1.0/a)))**arr(1.0/c)
exponweib = exponweib_gen(a=0.0,name='exponweib',
longname="An exponentiated Weibull",
shapes="a, c",extradoc="""
Exponentiated Weibull distribution
exponweib.pdf(x,a,c) = a*c*(1-exp(-x**c))**(a-1)*exp(-x**c)*x**(c-1)
for x > 0, a, c > 0.
"""
)
## Exponential Power
class exponpow_gen(rv_continuous):
def _pdf(self, x, b):
xbm1 = arr(x**(b-1.0))
xb = xbm1 * x
return exp(1)*b*xbm1 * exp(xb - exp(xb))
def _logpdf(self, x, b):
xb = x**(b-1.0)*x
return 1 + log(b) + (b-1.0)*log(x) + xb - exp(xb)
def _cdf(self, x, b):
xb = arr(x**b)
return -expm1(-expm1(xb))
def _sf(self, x, b):
xb = arr(x**b)
return exp(-expm1(xb))
def _isf(self, x, b):
return (log1p(-log(x)))**(1./b)
def _ppf(self, q, b):
return pow(log1p(-log1p(-q)), 1.0/b)
exponpow = exponpow_gen(a=0.0,name='exponpow',longname="An exponential power",
shapes='b',extradoc="""
Exponential Power distribution
exponpow.pdf(x,b) = b*x**(b-1) * exp(1+x**b - exp(x**b))
for x >= 0, b > 0.
"""
)
## Fatigue-Life (Birnbaum-Saunders)
class fatiguelife_gen(rv_continuous):
def _rvs(self, c):
z = norm.rvs(size=self._size)
x = 0.5*c*z
x2 = x*x
t = 1.0 + 2*x2 + 2*x*sqrt(1 + x2)
return t
def _pdf(self, x, c):
return (x+1)/arr(2*c*sqrt(2*pi*x**3))*exp(-(x-1)**2/arr((2.0*x*c**2)))
def _logpdf(self, x, c):
return log(x+1) - (x-1)**2 / (2.0*x*c**2) - log(2*c) - 0.5*(log(2*pi) + 3*log(x))
def _cdf(self, x, c):
return special.ndtr(1.0/c*(sqrt(x)-1.0/arr(sqrt(x))))
def _ppf(self, q, c):
tmp = c*special.ndtri(q)
return 0.25*(tmp + sqrt(tmp**2 + 4))**2
def _stats(self, c):
c2 = c*c
mu = c2 / 2.0 + 1
den = 5*c2 + 4
mu2 = c2*den /4.0
g1 = 4*c*sqrt(11*c2+6.0)/den**1.5
g2 = 6*c2*(93*c2+41.0) / den**2.0
return mu, mu2, g1, g2
fatiguelife = fatiguelife_gen(a=0.0,name='fatiguelife',
longname="A fatigue-life (Birnbaum-Sanders)",
shapes='c',extradoc="""
Fatigue-life (Birnbaum-Sanders) distribution
fatiguelife.pdf(x,c) = (x+1)/(2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
for x > 0.
"""
)
## Folded Cauchy
class foldcauchy_gen(rv_continuous):
def _rvs(self, c):
return abs(cauchy.rvs(loc=c,size=self._size))
def _pdf(self, x, c):
return 1.0/pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))
def _cdf(self, x, c):
return 1.0/pi*(arctan(x-c) + arctan(x+c))
def _stats(self, c):
return inf, inf, nan, nan
# setting xb=1000 allows calculating the ppf for q up to 0.9993
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy',xb=1000,
longname = "A folded Cauchy",
shapes='c',extradoc="""
A folded Cauchy distribution
foldcauchy.pdf(x,c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))
for x >= 0.
"""
)
## F
class f_gen(rv_continuous):
def _rvs(self, dfn, dfd):
return mtrand.f(dfn, dfd, self._size)
def _pdf(self, x, dfn, dfd):
# n = arr(1.0*dfn)
# m = arr(1.0*dfd)
# Px = m**(m/2) * n**(n/2) * x**(n/2-1)
# Px /= (m+n*x)**((n+m)/2)*special.beta(n/2,m/2)
return exp(self._logpdf(x, dfn, dfd))
def _logpdf(self, x, dfn, dfd):
n = 1.0*dfn
m = 1.0*dfd
lPx = m/2*log(m) + n/2*log(n) + (n/2-1)*log(x)
lPx -= ((n+m)/2)*log(m+n*x) + special.betaln(n/2,m/2)
return lPx
def _cdf(self, x, dfn, dfd):
return special.fdtr(dfn, dfd, x)
def _sf(self, x, dfn, dfd):
return special.fdtrc(dfn, dfd, x)
def _ppf(self, q, dfn, dfd):
return special.fdtri(dfn, dfd, q)
def _stats(self, dfn, dfd):
v2 = arr(dfd*1.0)
v1 = arr(dfn*1.0)
mu = where (v2 > 2, v2 / arr(v2 - 2), inf)
mu2 = 2*v2*v2*(v2+v1-2)/(v1*(v2-2)**2 * (v2-4))
mu2 = where(v2 > 4, mu2, inf)
g1 = 2*(v2+2*v1-2)/(v2-6)*sqrt((2*v2-4)/(v1*(v2+v1-2)))
g1 = where(v2 > 6, g1, nan)
g2 = 3/(2*v2-16)*(8+g1*g1*(v2-6))
g2 = where(v2 > 8, g2, nan)
return mu, mu2, g1, g2
f = f_gen(a=0.0,name='f',longname='An F',shapes="dfn, dfd",
extradoc="""
F distribution
df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
F.pdf(x,df1,df2) = --------------------------------------------
(df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)
for x > 0.
"""
)
## Folded Normal
## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
## note: the Regress+ docs have the scale parameter correct, but the first
## parameter given there is a shape parameter A = c * scale
## Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous):
def _rvs(self, c):
return abs(norm.rvs(loc=c,size=self._size))
def _pdf(self, x, c):
return sqrt(2.0/pi)*cosh(c*x)*exp(-(x*x+c*c)/2.0)
def _cdf(self, x, c,):
return special.ndtr(x-c) + special.ndtr(x+c) - 1.0
def _stats(self, c):
fac = special.erf(c/sqrt(2))
mu = sqrt(2.0/pi)*exp(-0.5*c*c)+c*fac
mu2 = c*c + 1 - mu*mu
c2 = c*c
g1 = sqrt(2/pi)*exp(-1.5*c2)*(4-pi*exp(c2)*(2*c2+1.0))
g1 += 2*c*fac*(6*exp(-c2) + 3*sqrt(2*pi)*c*exp(-c2/2.0)*fac + \
pi*c*(fac*fac-1))
g1 /= pi*mu2**1.5
g2 = c2*c2+6*c2+3+6*(c2+1)*mu*mu - 3*mu**4
g2 -= 4*exp(-c2/2.0)*mu*(sqrt(2.0/pi)*(c2+2)+c*(c2+3)*exp(c2/2.0)*fac)
g2 /= mu2**2.0
return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0,name='foldnorm',longname='A folded normal',
shapes='c',extradoc="""
Folded normal distribution
foldnormal.pdf(x,c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
for c >= 0.
"""
)
## Extreme Value Type II or Frechet
## (defined in Regress+ documentation as Extreme LB) as
## a limiting value distribution.
##
class frechet_r_gen(rv_continuous):
def _pdf(self, x, c):
return c*pow(x,c-1)*exp(-pow(x,c))
def _logpdf(self, x, c):
return log(c) + (c-1)*log(x) - pow(x,c)
def _cdf(self, x, c):
return -expm1(-pow(x,c))
def _ppf(self, q, c):
return pow(-log1p(-q),1.0/c)
def _munp(self, n, c):
return special.gamma(1.0+n*1.0/c)
def _entropy(self, c):
return -_EULER / c - log(c) + _EULER + 1
frechet_r = frechet_r_gen(a=0.0,name='frechet_r',longname="A Frechet right",
shapes='c',extradoc="""
A Frechet (right) distribution (also called Weibull minimum)
frechet_r.pdf(x,c) = c*x**(c-1)*exp(-x**c)
for x > 0, c > 0.
"""
)
weibull_min = frechet_r_gen(a=0.0,name='weibull_min',
longname="A Weibull minimum",
shapes='c',extradoc="""
A Weibull minimum distribution (also called a Frechet (right) distribution)
weibull_min.pdf(x,c) = c*x**(c-1)*exp(-x**c)
for x > 0, c > 0.
"""
)
class frechet_l_gen(rv_continuous):
def _pdf(self, x, c):
return c*pow(-x,c-1)*exp(-pow(-x,c))
def _cdf(self, x, c):
return exp(-pow(-x,c))
def _ppf(self, q, c):
return -pow(-log(q),1.0/c)
def _munp(self, n, c):
val = special.gamma(1.0+n*1.0/c)
if (int(n) % 2): sgn = -1
else: sgn = 1
return sgn*val
def _entropy(self, c):
return -_EULER / c - log(c) + _EULER + 1
frechet_l = frechet_l_gen(b=0.0,name='frechet_l',longname="A Frechet left",
shapes='c',extradoc="""
A Frechet (left) distribution (also called Weibull maximum)
frechet_l.pdf(x,c) = c * (-x)**(c-1) * exp(-(-x)**c)
for x < 0, c > 0.
"""
)
weibull_max = frechet_l_gen(b=0.0,name='weibull_max',
longname="A Weibull maximum",
shapes='c',extradoc="""
A Weibull maximum distribution (also called a Frechet (left) distribution)
weibull_max.pdf(x,c) = c * (-x)**(c-1) * exp(-(-x)**c)
for x < 0, c > 0.
"""
)
## Generalized Logistic
##
class genlogistic_gen(rv_continuous):
def _pdf(self, x, c):
Px = c*exp(-x)/(1+exp(-x))**(c+1.0)
return Px
def _logpdf(self, x, c):
return log(c) - x - (c+1.0)*log1p(exp(-x))
def _cdf(self, x, c):
Cx = (1+exp(-x))**(-c)
return Cx
def _ppf(self, q, c):
vals = -log(pow(q,-1.0/c)-1)
return vals
def _stats(self, c):
zeta = special.zeta
mu = _EULER + special.psi(c)
mu2 = pi*pi/6.0 + zeta(2,c)
g1 = -2*zeta(3,c) + 2*_ZETA3
g1 /= mu2**1.5
g2 = pi**4/15.0 + 6*zeta(4,c)
g2 /= mu2**2.0
return mu, mu2, g1, g2
genlogistic = genlogistic_gen(name='genlogistic',
longname="A generalized logistic",
shapes='c',extradoc="""
Generalized logistic distribution
genlogistic.pdf(x,c) = c*exp(-x) / (1+exp(-x))**(c+1)
for x > 0, c > 0.
"""
)
## Generalized Pareto
class genpareto_gen(rv_continuous):
def _argcheck(self, c):
c = arr(c)
self.b = where(c < 0, 1.0/abs(c), inf)
return where(c==0, 0, 1)
def _pdf(self, x, c):
Px = pow(1+c*x,arr(-1.0-1.0/c))
return Px
def _logpdf(self, x, c):
return (-1.0-1.0/c) * np.log1p(c*x)
def _cdf(self, x, c):
return 1.0 - pow(1+c*x,arr(-1.0/c))
def _ppf(self, q, c):
vals = 1.0/c * (pow(1-q, -c)-1)
return vals
def _munp(self, n, c):
k = arange(0,n+1)
val = (-1.0/c)**n * sum(comb(n,k)*(-1)**k / (1.0-c*k),axis=0)
return where(c*n < 1, val, inf)
def _entropy(self, c):
if (c > 0):
return 1+c
else:
self.b = -1.0 / c
return rv_continuous._entropy(self, c)
genpareto = genpareto_gen(a=0.0,name='genpareto',
longname="A generalized Pareto",
shapes='c',extradoc="""
Generalized Pareto distribution
genpareto.pdf(x,c) = (1+c*x)**(-1-1/c)
for c != 0, and for x >= 0 for all c, and x < 1/abs(c) for c < 0.
"""
)
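## Note: genpareto._argcheck above sets the upper support bound at call time,
## self.b = 1/abs(c) for c < 0 and inf otherwise, so genpareto.pdf(x, c)
## evaluates to 0 for x beyond 1/abs(c) when c < 0.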
## Generalized Exponential
class genexpon_gen(rv_continuous):
def _pdf(self, x, a, b, c):
return (a+b*(-expm1(-c*x)))*exp((-a-b)*x+b*(-expm1(-c*x))/c)
def _cdf(self, x, a, b, c):
return -expm1((-a-b)*x + b*(-expm1(-c*x))/c)
def _logpdf(self, x, a, b, c):
return np.log(a+b*(-expm1(-c*x))) + (-a-b)*x+b*(-expm1(-c*x))/c
genexpon = genexpon_gen(a=0.0,name='genexpon',
longname='A generalized exponential',
shapes='a, b, c',extradoc="""
Generalized exponential distribution (Ryu 1993)
f(x,a,b,c) = (a+b*(1-exp(-c*x))) * exp(-a*x-b*x+b/c*(1-exp(-c*x)))
for x >= 0, a,b,c > 0.
a, b, c are the first, second and third shape parameters.
References
----------
"The Exponential Distribution: Theory, Methods and Applications",
N. Balakrishnan, Asit P. Basu
"""
)
## Generalized Extreme Value
## c==0 is just the Gumbel distribution.
## This version now accepts c==0 (the special case equivalent to gumbel_r).
# new version by Per Brodtkorb, see ticket:767
# also works for c==0, special case is gumbel_r
# increased precision for small c
class genextreme_gen(rv_continuous):
def _argcheck(self, c):
min = np.minimum
max = np.maximum
sml = floatinfo.machar.xmin
#self.b = where(c > 0, 1.0 / c,inf)
#self.a = where(c < 0, 1.0 / c, -inf)
self.b = where(c > 0, 1.0 / max(c, sml),inf)
self.a = where(c < 0, 1.0 / min(c,-sml), -inf)
return where(abs(c)==inf, 0, 1) #True #(c!=0)
def _pdf(self, x, c):
## ex2 = 1-c*x
## pex2 = pow(ex2,1.0/c)
## p2 = exp(-pex2)*pex2/ex2
## return p2
cx = c*x
logex2 = where((c==0)*(x==x),0.0,log1p(-cx))
logpex2 = where((c==0)*(x==x),-x,logex2/c)
pex2 = exp(logpex2)
# % Handle special cases
logpdf = where((cx==1) | (cx==-inf),-inf,-pex2+logpex2-logex2)
putmask(logpdf,(c==1) & (x==1),0.0) # logpdf(c==1 & x==1) = 0; % 0^0 situation
return exp(logpdf)
def _cdf(self, x, c):
#return exp(-pow(1-c*x,1.0/c))
loglogcdf = where((c==0)*(x==x),-x,log1p(-c*x)/c)
return exp(-exp(loglogcdf))
def _ppf(self, q, c):
#return 1.0/c*(1.-(-log(q))**c)
x = -log(-log(q))
return where((c==0)*(x==x),x,-expm1(-c*x)/c)
def _stats(self,c):
g = lambda n : gam(n*c+1)
g1 = g(1)
g2 = g(2)
g3 = g(3);
g4 = g(4)
g2mg12 = where(abs(c)<1e-7,(c*pi)**2.0/6.0,g2-g1**2.0)
gam2k = where(abs(c)<1e-7,pi**2.0/6.0, expm1(gamln(2.0*c+1.0)-2*gamln(c+1.0))/c**2.0);
eps = 1e-14
gamk = where(abs(c)<eps,-_EULER,expm1(gamln(c+1))/c)
m = where(c<-1.0,nan,-gamk)
v = where(c<-0.5,nan,g1**2.0*gam2k)
#% skewness
sk1 = where(c<-1./3,nan,np.sign(c)*(-g3+(g2+2*g2mg12)*g1)/((g2mg12)**(3./2.)));
sk = where(abs(c)<=eps**0.29,12*sqrt(6)*_ZETA3/pi**3,sk1)
#% The kurtosis is:
ku1 = where(c<-1./4,nan,(g4+(-4*g3+3*(g2+g2mg12)*g1)*g1)/((g2mg12)**2))
ku = where(abs(c)<=(eps)**0.23,12.0/5.0,ku1-3.0)
return m,v,sk,ku
def _munp(self, n, c):
k = arange(0,n+1)
vals = 1.0/c**n * sum(comb(n,k) * (-1)**k * special.gamma(c*k + 1),axis=0)
return where(c*n > -1, vals, inf)
genextreme = genextreme_gen(name='genextreme',
longname="A generalized extreme value",
shapes='c',extradoc="""
Generalized extreme value (see gumbel_r for c=0)
genextreme.pdf(x,c) = exp(-exp(-x))*exp(-x) for c==0
genextreme.pdf(x,c) = exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1)
for x <= 1/c, c > 0
"""
)
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
def _rvs(self, a):
return mtrand.standard_gamma(a, self._size)
def _pdf(self, x, a):
return x**(a-1)*exp(-x)/special.gamma(a)
def _logpdf(self, x, a):
return (a-1)*log(x) - x - gamln(a)
def _cdf(self, x, a):
return special.gammainc(a, x)
def _ppf(self, q, a):
return special.gammaincinv(a,q)
def _stats(self, a):
return a, a, 2.0/sqrt(a), 6.0/a
def _entropy(self, a):
return special.psi(a)*(1-a) + 1 + gamln(a)
def _fitstart(self, data):
a = 4 / _skew(data)**2
return super(gamma_gen, self)._fitstart(data, args=(a,))
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
if floc == 0:
xbar = ravel(data).mean()
logx_bar = ravel(log(data)).mean()
s = log(xbar) - logx_bar
def func(a):
return log(a) - special.digamma(a) - s
aest = (3-s + math.sqrt((s-3)**2 + 24*s)) / (12*s)
xa = aest*(1-0.4)
xb = aest*(1+0.4)
a = optimize.brentq(func, xa, xb, disp=0)
scale = xbar / a
return a, floc, scale
else:
return super(gamma_gen, self).fit(data, *args, **kwds)
gamma = gamma_gen(a=0.0,name='gamma',longname='A gamma',
shapes='a',extradoc="""
Gamma distribution
For a = integer, this is the Erlang distribution, and for a=1 it is the
exponential distribution.
gamma.pdf(x,a) = x**(a-1)*exp(-x)/gamma(a)
for x >= 0, a > 0.
"""
)
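## Example (data is a placeholder array): with the location fixed at zero,
## gamma_gen.fit above solves log(a) - digamma(a) = log(mean(x)) - mean(log(x))
## with brentq instead of running the generic optimizer:
##   a, loc, scale = gamma.fit(data, floc=0)   # loc == 0, scale == mean(x)/a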
# Generalized Gamma
class gengamma_gen(rv_continuous):
def _argcheck(self, a, c):
return (a > 0) & (c != 0)
def _pdf(self, x, a, c):
return abs(c)* exp((c*a-1)*log(x)-x**c- gamln(a))
def _cdf(self, x, a, c):
val = special.gammainc(a,x**c)
cond = c + 0*val
return where(cond>0,val,1-val)
def _ppf(self, q, a, c):
val1 = special.gammaincinv(a,q)
val2 = special.gammaincinv(a,1.0-q)
ic = 1.0/c
cond = c+0*val1
return where(cond > 0,val1**ic,val2**ic)
def _munp(self, n, a, c):
return special.gamma(a+n*1.0/c) / special.gamma(a)
def _entropy(self, a,c):
val = special.psi(a)
return a*(1-val) + 1.0/c*val + gamln(a)-log(abs(c))
gengamma = gengamma_gen(a=0.0, name='gengamma',
longname='A generalized gamma',
shapes="a, c", extradoc="""
Generalized gamma distribution
gengamma.pdf(x,a,c) = abs(c)*x**(c*a-1)*exp(-x**c)/gamma(a)
for x > 0, a > 0, and c != 0.
"""
)
## Generalized Half-Logistic
##
class genhalflogistic_gen(rv_continuous):
def _argcheck(self, c):
self.b = 1.0 / c
return (c > 0)
def _pdf(self, x, c):
limit = 1.0/c
tmp = arr(1-c*x)
tmp0 = tmp**(limit-1)
tmp2 = tmp0*tmp
return 2*tmp0 / (1+tmp2)**2
def _cdf(self, x, c):
limit = 1.0/c
tmp = arr(1-c*x)
tmp2 = tmp**(limit)
return (1.0-tmp2) / (1+tmp2)
def _ppf(self, q, c):
return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
def _entropy(self,c):
return 2 - (2*c+1)*log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic',
longname="A generalized half-logistic",
shapes='c',extradoc="""
Generalized half-logistic
genhalflogistic.pdf(x,c) = 2*(1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
for 0 <= x <= 1/c, and c > 0.
"""
)
## Gompertz (Truncated Gumbel)
## Defined for x>=0
class gompertz_gen(rv_continuous):
def _pdf(self, x, c):
ex = exp(x)
return c*ex*exp(-c*(ex-1))
def _cdf(self, x, c):
return 1.0-exp(-c*(exp(x)-1))
def _ppf(self, q, c):
return log(1-1.0/c*log(1-q))
def _entropy(self, c):
return 1.0 - log(c) - exp(c)*special.expn(1,c)
gompertz = gompertz_gen(a=0.0, name='gompertz',
longname="A Gompertz (truncated Gumbel) distribution",
shapes='c',extradoc="""
Gompertz (truncated Gumbel) distribution
gompertz.pdf(x,c) = c*exp(x) * exp(-c*(exp(x)-1))
for x >= 0, c > 0.
"""
)
## Gumbel, Log-Weibull, Fisher-Tippett, Gompertz
## The left-skewed and right-skewed Gumbel distributions are available
## as gumbel_l and gumbel_r, respectively.
class gumbel_r_gen(rv_continuous):
def _pdf(self, x):
ex = exp(-x)
return ex*exp(-ex)
def _logpdf(self, x):
return -x - exp(-x)
def _cdf(self, x):
return exp(-exp(-x))
def _logcdf(self, x):
return -exp(-x)
def _ppf(self, q):
return -log(-log(q))
def _stats(self):
return _EULER, pi*pi/6.0, \
12*sqrt(6)/pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return 1.0608407169541684911
gumbel_r = gumbel_r_gen(name='gumbel_r',longname="A (right-skewed) Gumbel",
extradoc="""
Right-skewed Gumbel (Log-Weibull, Fisher-Tippett, Gompertz) distribution
gumbel_r.pdf(x) = exp(-(x+exp(-x)))
"""
)
class gumbel_l_gen(rv_continuous):
def _pdf(self, x):
ex = exp(x)
return ex*exp(-ex)
def _logpdf(self, x):
return x - exp(x)
def _cdf(self, x):
return 1.0-exp(-exp(x))
def _ppf(self, q):
return log(-log(1-q))
def _stats(self):
return -_EULER, pi*pi/6.0, \
-12*sqrt(6)/pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return 1.0608407169541684911
gumbel_l = gumbel_l_gen(name='gumbel_l',longname="A left-skewed Gumbel",
extradoc="""
Left-skewed Gumbel distribution
gumbel_l.pdf(x) = exp(x - exp(x))
"""
)
# Half-Cauchy
class halfcauchy_gen(rv_continuous):
def _pdf(self, x):
return 2.0/pi/(1.0+x*x)
def _logpdf(self, x):
return np.log(2.0/pi) - np.log1p(x*x)
def _cdf(self, x):
return 2.0/pi*arctan(x)
def _ppf(self, q):
return tan(pi/2*q)
def _stats(self):
return inf, inf, nan, nan
def _entropy(self):
return log(2*pi)
halfcauchy = halfcauchy_gen(a=0.0,name='halfcauchy',
longname="A Half-Cauchy",extradoc="""
Half-Cauchy distribution
halfcauchy.pdf(x) = 2/(pi*(1+x**2))
for x >= 0.
"""
)
## Half-Logistic
##
class halflogistic_gen(rv_continuous):
def _pdf(self, x):
return 0.5/(cosh(x/2.0))**2.0
def _cdf(self, x):
return tanh(x/2.0)
def _ppf(self, q):
return 2*arctanh(q)
def _munp(self, n):
if n==1: return 2*log(2)
if n==2: return pi*pi/3.0
if n==3: return 9*_ZETA3
if n==4: return 7*pi**4 / 15.0
return 2*(1-pow(2.0,1-n))*special.gamma(n+1)*special.zeta(n,1)
def _entropy(self):
return 2-log(2)
halflogistic = halflogistic_gen(a=0.0, name='halflogistic',
longname="A half-logistic",
extradoc="""
Half-logistic distribution
halflogistic.pdf(x) = 2*exp(-x)/(1+exp(-x))**2 = 1/2*sech(x/2)**2
for x >= 0.
"""
)
## Half-normal = chi(1, loc, scale)
class halfnorm_gen(rv_continuous):
def _rvs(self):
return abs(norm.rvs(size=self._size))
def _pdf(self, x):
return sqrt(2.0/pi)*exp(-x*x/2.0)
def _logpdf(self, x):
return 0.5 * np.log(2.0/pi) - x*x/2.0
def _cdf(self, x):
return special.ndtr(x)*2-1.0
def _ppf(self, q):
return special.ndtri((1+q)/2.0)
def _stats(self):
return sqrt(2.0/pi), 1-2.0/pi, sqrt(2)*(4-pi)/(pi-2)**1.5, \
8*(pi-3)/(pi-2)**2
def _entropy(self):
return 0.5*log(pi/2.0)+0.5
halfnorm = halfnorm_gen(a=0.0, name='halfnorm',
longname="A half-normal",
extradoc="""
Half-normal distribution
halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)
for x > 0.
"""
)
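# --- Illustrative sketch (editor addition, hypothetical helper): the comment
# above notes that the half-normal is chi with df=1; the pdfs should agree.
def _demo_halfnorm_is_chi1(x=0.9):
    return halfnorm.pdf(x), chi.pdf(x, 1)   # expected to match to float precision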
## Hyperbolic Secant
class hypsecant_gen(rv_continuous):
def _pdf(self, x):
return 1.0/(pi*cosh(x))
def _cdf(self, x):
return 2.0/pi*arctan(exp(x))
def _ppf(self, q):
return log(tan(pi*q/2.0))
def _stats(self):
return 0, pi*pi/4, 0, 2
def _entropy(self):
return log(2*pi)
hypsecant = hypsecant_gen(name='hypsecant',longname="A hyperbolic secant",
extradoc="""
Hyperbolic secant distribution
hypsecant.pdf(x) = 1/pi * sech(x)
"""
)
## Gauss Hypergeometric
class gausshyper_gen(rv_continuous):
def _argcheck(self, a, b, c, z):
return (a > 0) & (b > 0) & (c==c) & (z==z)
def _pdf(self, x, a, b, c, z):
Cinv = gam(a)*gam(b)/gam(a+b)*special.hyp2f1(c,a,a+b,-z)
return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
def _munp(self, n, a, b, c, z):
fac = special.beta(n+a,b) / special.beta(a,b)
num = special.hyp2f1(c,a+n,a+b+n,-z)
den = special.hyp2f1(c,a,a+b,-z)
return fac*num / den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper',
longname="A Gauss hypergeometric",
shapes="a, b, c, z",
extradoc="""
Gauss hypergeometric distribution
gausshyper.pdf(x,a,b,c,z) = C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
for 0 <= x <= 1, a > 0, b > 0, and
C = 1/(B(a,b)F[2,1](c,a;a+b;-z))
"""
)
## Inverted Gamma
# special case of generalized gamma with c=-1
#
class invgamma_gen(rv_continuous):
def _pdf(self, x, a):
return exp(self._logpdf(x,a))
def _logpdf(self, x, a):
return (-(a+1)*log(x)-gamln(a) - 1.0/x)
def _cdf(self, x, a):
return 1.0-special.gammainc(a, 1.0/x)
def _ppf(self, q, a):
return 1.0/special.gammaincinv(a,1-q)
def _munp(self, n, a):
return exp(gamln(a-n) - gamln(a))
def _entropy(self, a):
return a - (a+1.0)*special.psi(a) + gamln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma',longname="An inverted gamma",
shapes='a',extradoc="""
Inverted gamma distribution
invgamma.pdf(x,a) = x**(-a-1)/gamma(a) * exp(-1/x)
for x > 0, a > 0.
"""
)
## Inverse Normal Distribution
# scale is gamma from DATAPLOT and B from Regress
_invnorm_msg = \
"""The `invnorm` distribution will be renamed to `invgauss` after scipy 0.9"""
class invnorm_gen(rv_continuous):
def _rvs(self, mu):
warnings.warn(_invnorm_msg, DeprecationWarning)
return mtrand.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
warnings.warn(_invnorm_msg, DeprecationWarning)
return 1.0/sqrt(2*pi*x**3.0)*exp(-1.0/(2*x)*((x-mu)/mu)**2)
def _logpdf(self, x, mu):
warnings.warn(_invnorm_msg, DeprecationWarning)
return -0.5*log(2*pi) - 1.5*log(x) - ((x-mu)/mu)**2/(2*x)
def _cdf(self, x, mu):
warnings.warn(_invnorm_msg, DeprecationWarning)
fac = sqrt(1.0/x)
C1 = norm.cdf(fac*(x-mu)/mu)
C1 += exp(2.0/mu)*norm.cdf(-fac*(x+mu)/mu)
return C1
def _stats(self, mu):
warnings.warn(_invnorm_msg, DeprecationWarning)
return mu, mu**3.0, 3*sqrt(mu), 15*mu
invnorm = invnorm_gen(a=0.0, name='invnorm', longname="An inverse normal",
shapes="mu",extradoc="""
Inverse normal distribution
NOTE: `invnorm` will be renamed to `invgauss` after scipy 0.9
invnorm.pdf(x,mu) = 1/sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
for x > 0.
"""
)
## Inverse Gaussian Distribution (used to be called 'invnorm'
# scale is gamma from DATAPLOT and B from Regress
class invgauss_gen(rv_continuous):
def _rvs(self, mu):
return mtrand.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
return 1.0/sqrt(2*pi*x**3.0)*exp(-1.0/(2*x)*((x-mu)/mu)**2)
def _logpdf(self, x, mu):
return -0.5*log(2*pi) - 1.5*log(x) - ((x-mu)/mu)**2/(2*x)
def _cdf(self, x, mu):
fac = sqrt(1.0/x)
C1 = norm.cdf(fac*(x-mu)/mu)
C1 += exp(2.0/mu)*norm.cdf(-fac*(x+mu)/mu)
return C1
def _stats(self, mu):
return mu, mu**3.0, 3*sqrt(mu), 15*mu
invgauss = invgauss_gen(a=0.0, name='invgauss', longname="An inverse Gaussian",
shapes="mu",extradoc="""
Inverse Gaussian distribution
invgauss.pdf(x,mu) = 1/sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
for x > 0.
"""
)
## Inverted Weibull
class invweibull_gen(rv_continuous):
def _pdf(self, x, c):
xc1 = x**(-c-1.0)
#xc2 = xc1*x
xc2 = x**(-c)
xc2 = exp(-xc2)
return c*xc1*xc2
def _cdf(self, x, c):
xc1 = x**(-c)
return exp(-xc1)
def _ppf(self, q, c):
return pow(-log(q),arr(-1.0/c))
def _entropy(self, c):
return 1+_EULER + _EULER / c - log(c)
invweibull = invweibull_gen(a=0,name='invweibull',
longname="An inverted Weibull",
shapes='c',extradoc="""
Inverted Weibull distribution
invweibull.pdf(x,c) = c*x**(-c-1)*exp(-x**(-c))
for x > 0, c > 0.
"""
)
## Johnson SB
class johnsonsb_gen(rv_continuous):
def _argcheck(self, a, b):
return (b > 0) & (a==a)
def _pdf(self, x, a, b):
trm = norm.pdf(a+b*log(x/(1.0-x)))
return b*1.0/(x*(1-x))*trm
def _cdf(self, x, a, b):
return norm.cdf(a+b*log(x/(1.0-x)))
def _ppf(self, q, a, b):
return 1.0/(1+exp(-1.0/b*(norm.ppf(q)-a)))
johnsonsb = johnsonsb_gen(a=0.0,b=1.0,name='johnsonsb',
longname="A Johnson SB",
shapes="a, b",extradoc="""
Johnson SB distribution
johnsonsb.pdf(x,a,b) = b/(x*(1-x)) * phi(a + b*log(x/(1-x)))
for 0 < x < 1 and a,b > 0, and phi is the normal pdf.
"""
)
## Johnson SU
class johnsonsu_gen(rv_continuous):
def _argcheck(self, a, b):
return (b > 0) & (a==a)
def _pdf(self, x, a, b):
x2 = x*x
trm = norm.pdf(a+b*log(x+sqrt(x2+1)))
return b*1.0/sqrt(x2+1.0)*trm
def _cdf(self, x, a, b):
return norm.cdf(a+b*log(x+sqrt(x*x+1)))
def _ppf(self, q, a, b):
return sinh((norm.ppf(q)-a)/b)
johnsonsu = johnsonsu_gen(name='johnsonsu',longname="A Johnson SU",
shapes="a, b", extradoc="""
Johnson SU distribution
johnsonsu.pdf(x,a,b) = b/sqrt(x**2+1) * phi(a + b*log(x+sqrt(x**2+1)))
for all x, a,b > 0, and phi is the normal pdf.
"""
)
## Laplace Distribution
class laplace_gen(rv_continuous):
def _rvs(self):
return mtrand.laplace(0, 1, size=self._size)
def _pdf(self, x):
return 0.5*exp(-abs(x))
def _cdf(self, x):
return where(x > 0, 1.0-0.5*exp(-x), 0.5*exp(x))
def _ppf(self, q):
return where(q > 0.5, -log(2*(1-q)), log(2*q))
def _stats(self):
return 0, 2, 0, 3
def _entropy(self):
return log(2)+1
laplace = laplace_gen(name='laplace', longname="A Laplace",
extradoc="""
Laplacian distribution
laplace.pdf(x) = 1/2*exp(-abs(x))
"""
)
## Levy Distribution
class levy_gen(rv_continuous):
def _pdf(self, x):
return 1/sqrt(2*pi*x)/x*exp(-1/(2*x))
def _cdf(self, x):
return 2*(1-norm._cdf(1/sqrt(x)))
def _ppf(self, q):
val = norm._ppf(1-q/2.0)
return 1.0/(val*val)
def _stats(self):
return inf, inf, nan, nan
levy = levy_gen(a=0.0,name="levy", longname = "A Levy", extradoc="""
Levy distribution
levy.pdf(x) = 1/(x*sqrt(2*pi*x)) * exp(-1/(2*x))
for x > 0.
This is the same as the Levy-stable distribution with a=1/2 and b=1.
"""
)
## Left-skewed Levy Distribution
class levy_l_gen(rv_continuous):
def _pdf(self, x):
ax = abs(x)
return 1/sqrt(2*pi*ax)/ax*exp(-1/(2*ax))
def _cdf(self, x):
ax = abs(x)
return 2*norm._cdf(1/sqrt(ax))-1
def _ppf(self, q):
val = norm._ppf((q+1.0)/2)
return -1.0/(val*val)
def _stats(self):
return inf, inf, nan, nan
levy_l = levy_l_gen(b=0.0,name="levy_l", longname = "A left-skewed Levy", extradoc="""
Left-skewed Levy distribution
levy_l.pdf(x) = 1/(abs(x)*sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
for x < 0.
This is the same as the Levy-stable distribution with a=1/2 and b=-1.
"""
)
## Levy-stable Distribution (only random variates)
class levy_stable_gen(rv_continuous):
def _rvs(self, alpha, beta):
sz = self._size
TH = uniform.rvs(loc=-pi/2.0,scale=pi,size=sz)
W = expon.rvs(size=sz)
if alpha==1:
return 2/pi*(pi/2+beta*TH)*tan(TH)-beta*log((pi/2*W*cos(TH))/(pi/2+beta*TH))
# else
ialpha = 1.0/alpha
aTH = alpha*TH
if beta==0:
return W/(cos(TH)/tan(aTH)+sin(TH))*((cos(aTH)+sin(aTH)*tan(TH))/W)**ialpha
# else
val0 = beta*tan(pi*alpha/2)
th0 = arctan(val0)/alpha
val3 = W/(cos(TH)/tan(alpha*(th0+TH))+sin(TH))
res3 = val3*((cos(aTH)+sin(aTH)*tan(TH)-val0*(sin(aTH)-cos(aTH)*tan(TH)))/W)**ialpha
return res3
def _argcheck(self, alpha, beta):
if beta == -1:
self.b = 0.0
elif beta == 1:
self.a = 0.0
return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
def _pdf(self, x, alpha, beta):
raise NotImplementedError
levy_stable = levy_stable_gen(name='levy_stable', longname="A Levy-stable",
shapes="alpha, beta", extradoc="""
Levy-stable distribution (only random variates available -- ignore other docs)
"""
)
## Logistic (special case of generalized logistic with c=1)
## Sech-squared
class logistic_gen(rv_continuous):
def _rvs(self):
return mtrand.logistic(size=self._size)
def _pdf(self, x):
ex = exp(-x)
return ex / (1+ex)**2.0
def _cdf(self, x):
return 1.0/(1+exp(-x))
def _ppf(self, q):
return -log(1.0/q-1)
def _stats(self):
return 0, pi*pi/3.0, 0, 6.0/5.0
def _entropy(self):
return 1.0
logistic = logistic_gen(name='logistic', longname="A logistic",
extradoc="""
Logistic distribution
logistic.pdf(x) = exp(-x)/(1+exp(-x))**2
"""
)
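# --- Illustrative sketch (editor addition, hypothetical helper): the comment
# above calls the logistic a special case of the generalized logistic with c=1;
# assuming genlogistic is defined earlier in this module (as in scipy.stats),
# the pdfs should agree.
def _demo_logistic_is_genlogistic_c1(x=0.4):
    return logistic.pdf(x), genlogistic.pdf(x, 1.0)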
## Log Gamma
#
class loggamma_gen(rv_continuous):
def _rvs(self, c):
return log(mtrand.gamma(c, size=self._size))
def _pdf(self, x, c):
return exp(c*x-exp(x)-gamln(c))
def _cdf(self, x, c):
return special.gammainc(c, exp(x))
def _ppf(self, q, c):
return log(special.gammaincinv(c,q))
def _munp(self,n,*args):
# use generic moment calculation using ppf
return self._mom0_sc(n,*args)
loggamma = loggamma_gen(name='loggamma', longname="A log gamma", shapes='c',
extradoc="""
Log gamma distribution
loggamma.pdf(x,c) = exp(c*x-exp(x)) / gamma(c)
for all x, c > 0.
"""
)
## Log-Laplace (Log Double Exponential)
##
class loglaplace_gen(rv_continuous):
def _pdf(self, x, c):
cd2 = c/2.0
c = where(x < 1, c, -c)
return cd2*x**(c-1)
def _cdf(self, x, c):
return where(x < 1, 0.5*x**c, 1-0.5*x**(-c))
def _ppf(self, q, c):
return where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))
def _entropy(self, c):
return log(2.0/c) + 1.0
loglaplace = loglaplace_gen(a=0.0, name='loglaplace',
longname="A log-Laplace",shapes='c',
extradoc="""
Log-Laplace distribution (Log Double Exponential)
loglaplace.pdf(x,c) = c/2*x**(c-1) for 0 < x < 1
= c/2*x**(-c-1) for x >= 1
for c > 0.
"""
)
## Lognormal (Cobb-Douglass)
## The shape parameter s is the standard deviation of the underlying
## normal distribution.
## The mean of the underlying distribution is log(scale).
class lognorm_gen(rv_continuous):
def _rvs(self, s):
return exp(s * norm.rvs(size=self._size))
def _pdf(self, x, s):
Px = exp(-log(x)**2 / (2*s**2))
return Px / (s*x*sqrt(2*pi))
def _cdf(self, x, s):
return norm.cdf(log(x)/s)
def _ppf(self, q, s):
return exp(s*norm._ppf(q))
def _stats(self, s):
p = exp(s*s)
mu = sqrt(p)
mu2 = p*(p-1)
g1 = sqrt((p-1))*(2+p)
g2 = numpy.polyval([1,2,3,0,-6.0],p)
return mu, mu2, g1, g2
def _entropy(self, s):
return 0.5*(1+log(2*pi)+2*log(s))
lognorm = lognorm_gen(a=0.0, name='lognorm',
longname='A lognormal', shapes='s',
extradoc="""
Lognormal distribution
lognorm.pdf(x,s) = 1/(s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
for x > 0, s > 0.
If log x is normally distributed with mean mu and variance sigma**2,
then x is log-normally distributed with shape parameter sigma and scale
parameter exp(mu).
"""
)
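# --- Illustrative sketch (editor addition, hypothetical helper): the docstring
# above says that if log(x) ~ N(mu, sigma**2) then x is lognorm with shape sigma
# and scale exp(mu).  By the change of variables
# pdf_X(x) = pdf_N((log x - mu)/sigma) / (sigma*x), the two values should agree.
def _demo_lognorm_parametrization(x=2.0, mu=0.3, sigma=0.7):
    lhs = lognorm.pdf(x, sigma, scale=exp(mu))
    rhs = norm.pdf((log(x) - mu)/sigma) / (sigma*x)
    return lhs, rhs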
# Gibrat's distribution is just lognormal with s=1
class gilbrat_gen(lognorm_gen):
def _rvs(self):
return lognorm_gen._rvs(self, 1.0)
def _pdf(self, x):
return lognorm_gen._pdf(self, x, 1.0)
def _cdf(self, x):
return lognorm_gen._cdf(self, x, 1.0)
def _ppf(self, q):
return lognorm_gen._ppf(self, q, 1.0)
def _stats(self):
return lognorm_gen._stats(self, 1.0)
def _entropy(self):
return 0.5*log(2*pi) + 0.5
gilbrat = gilbrat_gen(a=0.0, name='gilbrat', longname='A Gilbrat',
extradoc="""
Gilbrat distribution
gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
"""
)
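# --- Illustrative sketch (editor addition, hypothetical helper): gilbrat is the
# lognormal frozen at s=1, as the comment above states; the pdfs should match.
def _demo_gilbrat_is_lognorm_s1(x=2.0):
    return gilbrat.pdf(x), lognorm.pdf(x, 1.0)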
# MAXWELL
class maxwell_gen(rv_continuous):
"""A Maxwell continuous random variable.
%(before_notes)s
Notes
-----
A special case of a `chi` distribution, with ``df = 3``, ``loc = 0.0``,
and given ``scale = 1.0 / sqrt(a)``, where a is the parameter used in
the Mathworld description [1]_.
The probability density function is given by :math:`\sqrt{2/\pi} x^2 \exp(-x^2/2)`
for ``x > 0``.
References
----------
.. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
%(example)s
"""
def _rvs(self):
return chi.rvs(3.0,size=self._size)
def _pdf(self, x):
return sqrt(2.0/pi)*x*x*exp(-x*x/2.0)
def _cdf(self, x):
return special.gammainc(1.5,x*x/2.0)
def _ppf(self, q):
return sqrt(2*special.gammaincinv(1.5,q))
def _stats(self):
val = 3*pi-8
return 2*sqrt(2.0/pi), 3-8/pi, sqrt(2)*(32-10*pi)/val**1.5, \
(-12*pi*pi + 160*pi - 384) / val**2.0
def _entropy(self):
return _EULER + 0.5*log(2*pi)-0.5
maxwell = maxwell_gen(a=0.0, name='maxwell', extradoc="""
Maxwell distribution
maxwell.pdf(x) = sqrt(2/pi) * x**2 * exp(-x**2/2)
for x > 0.
"""
)
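# --- Illustrative sketch (editor addition, hypothetical helper): the docstring
# above describes maxwell as chi with df=3; the pdfs should agree pointwise.
def _demo_maxwell_is_chi3(x=1.5):
    return maxwell.pdf(x), chi.pdf(x, 3)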
# Mielke's Beta-Kappa
class mielke_gen(rv_continuous):
def _pdf(self, x, k, s):
return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s)
def _cdf(self, x, k, s):
return x**k / (1.0+x**s)**(k*1.0/s)
def _ppf(self, q, k, s):
qsk = pow(q,s*1.0/k)
return pow(qsk/(1.0-qsk),1.0/s)
mielke = mielke_gen(a=0.0, name='mielke', longname="A Mielke's Beta-Kappa",
shapes="k, s", extradoc="""
Mielke's Beta-Kappa distribution
mielke.pdf(x,k,s) = k*x**(k-1) / (1+x**s)**(1+k/s)
for x > 0.
"""
)
# Nakagami (cf Chi)
class nakagami_gen(rv_continuous):
def _pdf(self, x, nu):
return 2*nu**nu/gam(nu)*(x**(2*nu-1.0))*exp(-nu*x*x)
def _cdf(self, x, nu):
return special.gammainc(nu,nu*x*x)
def _ppf(self, q, nu):
return sqrt(1.0/nu*special.gammaincinv(nu,q))
def _stats(self, nu):
mu = gam(nu+0.5)/gam(nu)/sqrt(nu)
mu2 = 1.0-mu*mu
g1 = mu*(1-4*nu*mu2)/2.0/nu/mu2**1.5
g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
g2 /= nu*mu2**2.0
return mu, mu2, g1, g2
nakagami = nakagami_gen(a=0.0, name="nakagami", longname="A Nakagami",
shapes='nu', extradoc="""
Nakagami distribution
nakagami.pdf(x,nu) = 2*nu**nu/gamma(nu) * x**(2*nu-1) * exp(-nu*x**2)
for x > 0, nu > 0.
"""
)
# Non-central chi-squared
# nc is lambda of definition, df is nu
class ncx2_gen(rv_continuous):
def _rvs(self, df, nc):
return mtrand.noncentral_chisquare(df,nc,self._size)
def _pdf(self, x, df, nc):
a = arr(df/2.0)
Px = exp(-nc/2.0)*special.hyp0f1(a,nc*x/4.0)
Px *= exp(-x/2.0)*x**(a-1) / arr(2**a * special.gamma(a))
return Px
def _cdf(self, x, df, nc):
return special.chndtr(x,df,nc)
def _ppf(self, q, df, nc):
return special.chndtrix(q,df,nc)
def _stats(self, df, nc):
val = df + 2.0*nc
return df + nc, 2*val, sqrt(8)*(val+nc)/val**1.5, \
12.0*(val+2*nc)/val**2.0
ncx2 = ncx2_gen(a=0.0, name='ncx2', longname="A non-central chi-squared",
shapes="df, nc", extradoc="""
Non-central chi-squared distribution
ncx2.pdf(x,df,nc) = exp(-(nc+df)/2)*1/2*(x/nc)**((df-2)/4)
* I[(df-2)/2](sqrt(nc*x))
for x > 0.
"""
)
# Non-central F
class ncf_gen(rv_continuous):
def _rvs(self, dfn, dfd, nc):
return mtrand.noncentral_f(dfn,dfd,nc,self._size)
def _pdf_skip(self, x, dfn, dfd, nc):
n1,n2 = dfn, dfd
term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + gamln(n1/2.)+gamln(1+n2/2.)
term -= gamln((n1+n2)/2.0)
Px = exp(term)
Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
Px *= (n2+n1*x)**(-(n1+n2)/2)
Px *= special.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)),n2/2,n1/2-1)
Px /= special.beta(n1/2,n2/2)
#this function does not have a return
# drop it for now, the generic function seems to work ok
def _cdf(self, x, dfn, dfd, nc):
return special.ncfdtr(dfn,dfd,nc,x)
def _ppf(self, q, dfn, dfd, nc):
return special.ncfdtri(dfn, dfd, nc, q)
def _munp(self, n, dfn, dfd, nc):
val = (dfn *1.0/dfd)**n
term = gamln(n+0.5*dfn) + gamln(0.5*dfd-n) - gamln(dfd*0.5)
val *= exp(-nc / 2.0+term)
val *= special.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
return val
def _stats(self, dfn, dfd, nc):
mu = where(dfd <= 2, inf, dfd / (dfd-2.0)*(1+nc*1.0/dfn))
mu2 = where(dfd <=4, inf, 2*(dfd*1.0/dfn)**2.0 * \
((dfn+nc/2.0)**2.0 + (dfn+nc)*(dfd-2.0)) / \
((dfd-2.0)**2.0 * (dfd-4.0)))
return mu, mu2, None, None
ncf = ncf_gen(a=0.0, name='ncf', longname="A non-central F distribution",
shapes="dfn, dfd, nc", extradoc="""
Non-central F distribution
ncf.pdf(x,df1,df2,nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2)))
* df1**(df1/2) * df2**(df2/2) * x**(df1/2-1)
* (df2+df1*x)**(-(df1+df2)/2)
* gamma(df1/2)*gamma(1+df2/2)
* L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2)))
/ (B(v1/2, v2/2) * gamma((v1+v2)/2))
for df1, df2, nc > 0.
"""
)
## Student t distribution
class t_gen(rv_continuous):
def _rvs(self, df):
return mtrand.standard_t(df, size=self._size)
#Y = f.rvs(df, df, size=self._size)
#sY = sqrt(Y)
#return 0.5*sqrt(df)*(sY-1.0/sY)
def _pdf(self, x, df):
r = arr(df*1.0)
Px = exp(gamln((r+1)/2)-gamln(r/2))
Px /= sqrt(r*pi)*(1+(x**2)/r)**((r+1)/2)
return Px
def _logpdf(self, x, df):
r = df*1.0
lPx = gamln((r+1)/2)-gamln(r/2)
lPx -= 0.5*log(r*pi) + (r+1)/2*log(1+(x**2)/r)
return lPx
def _cdf(self, x, df):
return special.stdtr(df, x)
def _sf(self, x, df):
return special.stdtr(df, -x)
def _ppf(self, q, df):
return special.stdtrit(df, q)
def _isf(self, q, df):
return -special.stdtrit(df, q)
def _stats(self, df):
mu2 = where(df > 2, df / (df-2.0), inf)
g1 = where(df > 3, 0.0, nan)
g2 = where(df > 4, 6.0/(df-4.0), nan)
return 0, mu2, g1, g2
t = t_gen(name='t',longname="Student's T",
shapes="df", extradoc="""
Student's T distribution
gamma((df+1)/2)
t.pdf(x,df) = -----------------------------------------------
sqrt(pi*df)*gamma(df/2)*(1+x**2/df)**((df+1)/2)
for df > 0.
"""
)
## Non-central T distribution
class nct_gen(rv_continuous):
def _rvs(self, df, nc):
return norm.rvs(loc=nc,size=self._size)*sqrt(df) / sqrt(chi2.rvs(df,size=self._size))
def _pdf(self, x, df, nc):
n = df*1.0
nc = nc*1.0
x2 = x*x
ncx2 = nc*nc*x2
fac1 = n + x2
trm1 = n/2.*log(n) + gamln(n+1)
trm1 -= n*log(2)+nc*nc/2.+(n/2.)*log(fac1)+gamln(n/2.)
Px = exp(trm1)
valF = ncx2 / (2*fac1)
trm1 = sqrt(2)*nc*x*special.hyp1f1(n/2+1,1.5,valF)
trm1 /= arr(fac1*special.gamma((n+1)/2))
trm2 = special.hyp1f1((n+1)/2,0.5,valF)
trm2 /= arr(sqrt(fac1)*special.gamma(n/2+1))
Px *= trm1+trm2
return Px
def _cdf(self, x, df, nc):
return special.nctdtr(df, nc, x)
def _ppf(self, q, df, nc):
return special.nctdtrit(df, nc, q)
def _stats(self, df, nc, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
val1 = gam((df-1.0)/2.0)
val2 = gam(df/2.0)
if 'm' in moments:
mu = nc*sqrt(df/2.0)*val1/val2
if 'v' in moments:
var = (nc*nc+1.0)*df/(df-2.0)
var -= nc*nc*df* val1**2 / 2.0 / val2**2
mu2 = var
if 's' in moments:
g1n = 2*nc*sqrt(df)*val1*((nc*nc*(2*df-7)-3)*val2**2 \
-nc*nc*(df-2)*(df-3)*val1**2)
g1d = (df-3)*sqrt(2*df*(nc*nc+1)/(df-2) - \
nc*nc*df*(val1/val2)**2) * val2 * \
(nc*nc*(df-2)*val1**2 - \
2*(nc*nc+1)*val2**2)
g1 = g1n/g1d
if 'k' in moments:
g2n = 2*(-3*nc**4*(df-2)**2 *(df-3) *(df-4)*val1**4 + \
2**(6-2*df) * nc*nc*(df-2)*(df-4)* \
(nc*nc*(2*df-7)-3)*pi* gam(df+1)**2 - \
4*(nc**4*(df-5)-6*nc*nc-3)*(df-3)*val2**4)
g2d = (df-3)*(df-4)*(nc*nc*(df-2)*val1**2 - \
2*(nc*nc+1)*val2)**2
g2 = g2n / g2d
return mu, mu2, g1, g2
nct = nct_gen(name="nct", longname="A Noncentral T",
shapes="df, nc", extradoc="""
Non-central Student T distribution
df**(df/2) * gamma(df+1)
nct.pdf(x,df,nc) = --------------------------------------------------
2**df*exp(nc**2/2)*(df+x**2)**(df/2) * gamma(df/2)
for df > 0, nc > 0.
"""
)
# Pareto
class pareto_gen(rv_continuous):
def _pdf(self, x, b):
return b * x**(-b-1)
def _cdf(self, x, b):
return 1 - x**(-b)
def _ppf(self, q, b):
return pow(1-q, -1.0/b)
def _stats(self, b, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
if 'm' in moments:
mask = b > 1
bt = extract(mask,b)
mu = valarray(shape(b),value=inf)
mu = place(mu, mask, bt / (bt-1.0))
if 'v' in moments:
mask = b > 2
bt = extract( mask,b)
mu2 = valarray(shape(b), value=inf)
mu2 = place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
if 's' in moments:
mask = b > 3
bt = extract( mask,b)
g1 = valarray(shape(b), value=nan)
vals = 2*(bt+1.0)*sqrt(bt-2.0)/((bt-3.0)*sqrt(bt))
g1 = place(g1, mask, vals)
if 'k' in moments:
mask = b > 4
bt = extract( mask,b)
g2 = valarray(shape(b), value=nan)
vals = 6.0*polyval([1.0,1.0,-6,-2],bt)/ \
polyval([1.0,-7.0,12.0,0.0],bt)
g2 = place(g2, mask, vals)
return mu, mu2, g1, g2
def _entropy(self, c):
return 1 + 1.0/c - log(c)
pareto = pareto_gen(a=1.0, name="pareto", longname="A Pareto",
shapes="b", extradoc="""
Pareto distribution
pareto.pdf(x,b) = b/x**(b+1)
for x >= 1, b > 0.
"""
)
# LOMAX (Pareto of the second kind.)
# Special case of Pareto of the first kind (location=-1.0)
class lomax_gen(rv_continuous):
def _pdf(self, x, c):
return c*1.0/(1.0+x)**(c+1.0)
def _logpdf(self, x, c):
return log(c) - (c+1)*log(1+x)
def _cdf(self, x, c):
return 1.0-1.0/(1.0+x)**c
def _sf(self, x, c):
return 1.0/(1.0+x)**c
def _logsf(self, x, c):
return -c*log(1+x)
def _ppf(self, q, c):
return pow(1.0-q,-1.0/c)-1
def _stats(self, c):
mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
return mu, mu2, g1, g2
def _entropy(self, c):
return 1+1.0/c-log(c)
lomax = lomax_gen(a=0.0, name="lomax",
longname="A Lomax (Pareto of the second kind)",
shapes="c", extradoc="""
Lomax (Pareto of the second kind) distribution
lomax.pdf(x,c) = c / (1+x)**(c+1)
for x >= 0, c > 0.
"""
)
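# --- Illustrative sketch (editor addition, hypothetical helper): per the comment
# above, lomax is the Pareto of the first kind shifted by loc=-1.
def _demo_lomax_is_shifted_pareto(x=1.5, c=2.5):
    return lomax.pdf(x, c), pareto.pdf(x, c, loc=-1.0)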
## Power-function distribution
## Special case of beta dist. with d =1.0
class powerlaw_gen(rv_continuous):
def _pdf(self, x, a):
return a*x**(a-1.0)
def _logpdf(self, x, a):
return log(a) + (a-1)*log(x)
def _cdf(self, x, a):
return x**(a*1.0)
def _logcdf(self, x, a):
return a*log(x)
def _ppf(self, q, a):
return pow(q, 1.0/a)
def _stats(self, a):
return a/(a+1.0), a*(a+2.0)/(a+1.0)**2, \
2*(1.0-a)*sqrt((a+2.0)/(a*(a+3.0))), \
6*polyval([1,-1,-6,2],a)/(a*(a+3.0)*(a+4))
def _entropy(self, a):
return 1 - 1.0/a - log(a)
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw",
longname="A power-function",
shapes="a", extradoc="""
Power-function distribution
powerlaw.pdf(x,a) = a*x**(a-1)
for 0 <= x <= 1, a > 0.
"""
)
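# --- Illustrative sketch (editor addition, hypothetical helper): the comment
# above calls powerlaw a special case of the beta distribution with its second
# shape parameter equal to 1; assuming beta is defined earlier in this module
# (as in scipy.stats), the pdfs should agree.
def _demo_powerlaw_is_beta(x=0.4, a=3.0):
    return powerlaw.pdf(x, a), beta.pdf(x, a, 1.0)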
# Power log normal
class powerlognorm_gen(rv_continuous):
def _pdf(self, x, c, s):
return c/(x*s)*norm.pdf(log(x)/s)*pow(norm.cdf(-log(x)/s),c*1.0-1.0)
def _cdf(self, x, c, s):
return 1.0 - pow(norm.cdf(-log(x)/s),c*1.0)
def _ppf(self, q, c, s):
return exp(-s*norm.ppf(pow(1.0-q,1.0/c)))
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm",
longname="A power log-normal",
shapes="c, s", extradoc="""
Power log-normal distribution
powerlognorm.pdf(x,c,s) = c/(x*s) * phi(log(x)/s) * (Phi(-log(x)/s))**(c-1)
where phi is the normal pdf, and Phi is the normal cdf, and x > 0, s,c > 0.
"""
)
# Power Normal
class powernorm_gen(rv_continuous):
def _pdf(self, x, c):
return c*_norm_pdf(x)* \
(_norm_cdf(-x)**(c-1.0))
def _logpdf(self, x, c):
return log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)
def _cdf(self, x, c):
return 1.0-_norm_cdf(-x)**(c*1.0)
def _ppf(self, q, c):
return -norm.ppf(pow(1.0-q,1.0/c))
powernorm = powernorm_gen(name='powernorm', longname="A power normal",
shapes="c", extradoc="""
Power normal distribution
powernorm.pdf(x,c) = c * phi(x)*(Phi(-x))**(c-1)
where phi is the normal pdf, and Phi is the normal cdf, and x > 0, c > 0.
"""
)
# R-distribution ( a general-purpose distribution with a
# variety of shapes.
# FIXME: PPF does not work.
class rdist_gen(rv_continuous):
def _pdf(self, x, c):
return np.power((1.0-x*x),c/2.0-1) / special.beta(0.5,c/2.0)
def _cdf_skip(self, x, c):
#error inspecial.hyp2f1 for some values see tickets 758, 759
return 0.5 + x/special.beta(0.5,c/2.0)* \
special.hyp2f1(0.5,1.0-c/2.0,1.5,x*x)
def _munp(self, n, c):
return (1-(n % 2))*special.beta((n+1.0)/2,c/2.0)
rdist = rdist_gen(a=-1.0,b=1.0, name="rdist", longname="An R-distributed",
shapes="c", extradoc="""
R-distribution
rdist.pdf(x,c) = (1-x**2)**(c/2-1) / B(1/2, c/2)
for -1 <= x <= 1, c > 0.
"""
)
# Rayleigh distribution (this is chi with df=2 and loc=0.0)
# scale is the mode.
class rayleigh_gen(rv_continuous):
def _rvs(self):
return chi.rvs(2,size=self._size)
def _pdf(self, r):
return r*exp(-r*r/2.0)
def _cdf(self, r):
return 1.0-exp(-r*r/2.0)
def _ppf(self, q):
return sqrt(-2*log(1-q))
def _stats(self):
val = 4-pi
return np.sqrt(pi/2), val/2, 2*(pi-3)*sqrt(pi)/val**1.5, \
6*pi/val-16/val**2
def _entropy(self):
return _EULER/2.0 + 1 - 0.5*log(2)
rayleigh = rayleigh_gen(a=0.0, name="rayleigh",
longname="A Rayleigh",
extradoc="""
Rayleigh distribution
rayleigh.pdf(r) = r * exp(-r**2/2)
for r >= 0.
"""
)
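# --- Illustrative sketch (editor addition, hypothetical helper): the comment
# above notes that rayleigh is chi with df=2; the pdfs should agree.
def _demo_rayleigh_is_chi_df2(x=1.2):
    return rayleigh.pdf(x), chi.pdf(x, 2)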
# Reciprocal Distribution
class reciprocal_gen(rv_continuous):
def _argcheck(self, a, b):
self.a = a
self.b = b
self.d = log(b*1.0 / a)
return (a > 0) & (b > 0) & (b > a)
def _pdf(self, x, a, b):
# argcheck should be called before _pdf
return 1.0/(x*self.d)
def _logpdf(self, x, a, b):
return -log(x) - log(self.d)
def _cdf(self, x, a, b):
return (log(x)-log(a)) / self.d
def _ppf(self, q, a, b):
return a*pow(b*1.0/a,q)
def _munp(self, n, a, b):
return 1.0/self.d / n * (pow(b*1.0,n) - pow(a*1.0,n))
def _entropy(self,a,b):
return 0.5*log(a*b)+log(log(b/a))
reciprocal = reciprocal_gen(name="reciprocal",
longname="A reciprocal",
shapes="a, b", extradoc="""
Reciprocal distribution
reciprocal.pdf(x,a,b) = 1/(x*log(b/a))
for a <= x <= b, a,b > 0.
"""
)
# Rice distribution
# FIXME: PPF does not work.
class rice_gen(rv_continuous):
def _pdf(self, x, b):
return x*exp(-(x*x+b*b)/2.0)*special.i0(x*b)
def _logpdf(self, x, b):
return log(x) - (x*x + b*b)/2.0 + log(special.i0(x*b))
def _munp(self, n, b):
nd2 = n/2.0
n1 = 1+nd2
b2 = b*b/2.0
return 2.0**(nd2)*exp(-b2)*special.gamma(n1) * \
special.hyp1f1(n1,1,b2)
rice = rice_gen(a=0.0, name="rice", longname="A Rice",
shapes="b", extradoc="""
Rician distribution
rice.pdf(x,b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
for x > 0, b > 0.
"""
)
# Reciprocal Inverse Gaussian
# FIXME: PPF does not work.
class recipinvgauss_gen(rv_continuous):
def _rvs(self, mu): #added, taken from invgauss
return 1.0/mtrand.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
return 1.0/sqrt(2*pi*x)*exp(-(1-mu*x)**2.0 / (2*x*mu**2.0))
def _logpdf(self, x, mu):
return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*log(2*pi*x)
def _cdf(self, x, mu):
trm1 = 1.0/mu - x
trm2 = 1.0/mu + x
isqx = 1.0/sqrt(x)
return 1.0-_norm_cdf(isqx*trm1)-exp(2.0/mu)*_norm_cdf(-isqx*trm2)
# xb=50 or something large is necessary for stats to converge without exception
recipinvgauss = recipinvgauss_gen(a=0.0, xb=50, name='recipinvgauss',
longname="A reciprocal inverse Gaussian",
shapes="mu", extradoc="""
Reciprocal inverse Gaussian
recipinvgauss.pdf(x, mu) = 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
for x >= 0.
"""
)
# Semicircular
class semicircular_gen(rv_continuous):
def _pdf(self, x):
return 2.0/pi*sqrt(1-x*x)
def _cdf(self, x):
return 0.5+1.0/pi*(x*sqrt(1-x*x) + arcsin(x))
def _stats(self):
return 0, 0.25, 0, -1.0
def _entropy(self):
return 0.64472988584940017414
semicircular = semicircular_gen(a=-1.0,b=1.0, name="semicircular",
longname="A semicircular",
extradoc="""
Semicircular distribution
semicircular.pdf(x) = 2/pi * sqrt(1-x**2)
for -1 <= x <= 1.
"""
)
# Triangular
# up-sloping line from loc to (loc + c*scale) and then downsloping line from
# loc + c*scale to loc + scale
# _trstr = "Left must be <= mode which must be <= right with left < right"
class triang_gen(rv_continuous):
def _rvs(self, c):
return mtrand.triangular(0, c, 1, self._size)
def _argcheck(self, c):
return (c >= 0) & (c <= 1)
def _pdf(self, x, c):
return where(x < c, 2*x/c, 2*(1-x)/(1-c))
def _cdf(self, x, c):
return where(x < c, x*x/c, (x*x-2*x+c)/(c-1))
def _ppf(self, q, c):
return where(q < c, sqrt(c*q), 1-sqrt((1-c)*(1-q)))
def _stats(self, c):
return (c+1.0)/3.0, (1.0-c+c*c)/18, sqrt(2)*(2*c-1)*(c+1)*(c-2) / \
(5*(1.0-c+c*c)**1.5), -3.0/5.0
def _entropy(self,c):
return 0.5-log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang", longname="A Triangular",
shapes="c", extradoc="""
Triangular distribution
up-sloping line from loc to (loc + c*scale) and then downsloping
for (loc + c*scale) to (loc+scale).
- standard form is in the range [0,1] with c the mode.
- location parameter shifts the start to loc
- scale changes the width from 1 to scale
"""
)
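# --- Illustrative sketch (editor addition, hypothetical helper): the extradoc
# above describes the loc/scale form; the mode sits at loc + c*scale and the
# density there equals 2/scale.
def _demo_triang_parametrization(c=0.3, loc=2.0, scale=4.0):
    mode = loc + c*scale
    return mode, triang.pdf(mode, c, loc=loc, scale=scale)   # (3.2, 0.5) here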
# Truncated Exponential
class truncexpon_gen(rv_continuous):
def _argcheck(self, b):
self.b = b
return (b > 0)
def _pdf(self, x, b):
return exp(-x)/(1-exp(-b))
def _logpdf(self, x, b):
return -x - log(1-exp(-b))
def _cdf(self, x, b):
return (1.0-exp(-x))/(1-exp(-b))
def _ppf(self, q, b):
return -log(1-q+q*exp(-b))
def _munp(self, n, b):
#wrong answer with formula, same as in continuous.pdf
#return gam(n+1)-special.gammainc(1+n,b)
if n == 1:
return (1-(b+1)*exp(-b))/(-expm1(-b))
elif n == 2:
return 2*(1-0.5*(b*b+2*b+2)*exp(-b))/(-expm1(-b))
else:
#return generic for higher moments
#return rv_continuous._mom1_sc(self,n, b)
return self._mom1_sc(n, b)
def _entropy(self, b):
eB = exp(b)
return log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
truncexpon = truncexpon_gen(a=0.0, name='truncexpon',
longname="A truncated exponential",
shapes="b", extradoc="""
Truncated exponential distribution
truncexpon.pdf(x,b) = exp(-x)/(1-exp(-b))
for 0 < x < b.
"""
)
# Truncated Normal
class truncnorm_gen(rv_continuous):
def _argcheck(self, a, b):
self.a = a
self.b = b
self._nb = _norm_cdf(b)
self._na = _norm_cdf(a)
self._delta = self._nb - self._na
self._logdelta = log(self._delta)
return (a != b)
# All of these assume that _argcheck is called first
# and no other thread calls _pdf before.
def _pdf(self, x, a, b):
return _norm_pdf(x) / self._delta
def _logpdf(self, x, a, b):
return _norm_logpdf(x) - self._logdelta
def _cdf(self, x, a, b):
return (_norm_cdf(x) - self._na) / self._delta
def _ppf(self, q, a, b):
return norm._ppf(q*self._nb + self._na*(1.0-q))
def _stats(self, a, b):
nA, nB = self._na, self._nb
d = nB - nA
pA, pB = _norm_pdf(a), _norm_pdf(b)
mu = (pA - pB) / d #correction sign
mu2 = 1 + (a*pA - b*pB) / d - mu*mu
return mu, mu2, None, None
truncnorm = truncnorm_gen(name='truncnorm', longname="A truncated normal",
shapes="a, b", extradoc="""
Truncated Normal distribution.
The standard form of this distribution is a standard normal truncated to the
range [a,b] --- notice that a and b are defined over the domain
of the standard normal. To convert clip values for a specific mean and
standard deviation use a,b = (myclip_a-my_mean)/my_std, (myclip_b-my_mean)/my_std
"""
)
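# --- Illustrative sketch (editor addition, hypothetical helper): the extradoc
# above explains how to clip a normal with mean my_mean and std my_std to
# [myclip_a, myclip_b]; this helper performs that conversion and evaluates the pdf.
def _demo_truncnorm_clip(myclip_a=-1.0, myclip_b=3.0, my_mean=1.0, my_std=2.0, x=1.5):
    a, b = (myclip_a - my_mean)/my_std, (myclip_b - my_mean)/my_std
    return truncnorm.pdf(x, a, b, loc=my_mean, scale=my_std)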
# Tukey-Lambda
# A flexible distribution ranging from Cauchy (lam=-1)
# to logistic (lam=0.0)
# to approx Normal (lam=0.14)
# to u-shape (lam = 0.5)
# to Uniform from -1 to 1 (lam = 1)
# FIXME: RVS does not work.
class tukeylambda_gen(rv_continuous):
def _argcheck(self, lam):
# lam in RR.
return np.ones(np.shape(lam), dtype=bool)
def _pdf(self, x, lam):
Fx = arr(special.tklmbda(x,lam))
Px = Fx**(lam-1.0) + (arr(1-Fx))**(lam-1.0)
Px = 1.0/arr(Px)
return where((lam <= 0) | (abs(x) < 1.0/arr(lam)), Px, 0.0)
def _cdf(self, x, lam):
return special.tklmbda(x, lam)
def _ppf(self, q, lam):
q = q*1.0
vals1 = (q**lam - (1-q)**lam)/lam
vals2 = log(q/(1-q))
return where((lam == 0)&(q==q), vals2, vals1)
def _stats(self, lam):
mu2 = 2*gam(lam+1.5)-lam*pow(4,-lam)*sqrt(pi)*gam(lam)*(1-2*lam)
mu2 /= lam*lam*(1+2*lam)*gam(1+1.5)
mu4 = 3*gam(lam)*gam(lam+0.5)*pow(2,-2*lam) / lam**3 / gam(2*lam+1.5)
mu4 += 2.0/lam**4 / (1+4*lam)
mu4 -= 2*sqrt(3)*gam(lam)*pow(2,-6*lam)*pow(3,3*lam) * \
gam(lam+1.0/3)*gam(lam+2.0/3) / (lam**3.0 * gam(2*lam+1.5) * \
gam(lam+0.5))
g2 = mu4 / mu2 / mu2 - 3.0
return 0, mu2, 0, g2
def _entropy(self, lam):
def integ(p):
return log(pow(p,lam-1)+pow(1-p,lam-1))
return integrate.quad(integ,0,1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda', longname="A Tukey-Lambda",
shapes="lam", extradoc="""
Tukey-Lambda distribution
A flexible distribution ranging from Cauchy (lam=-1)
to logistic (lam=0.0)
to approx Normal (lam=0.14)
to u-shape (lam = 0.5)
to Uniform from -1 to 1 (lam = 1)
"""
)
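# --- Illustrative sketch (editor addition, hypothetical helper): two of the
# limiting cases listed above -- lam=0 should reproduce the logistic cdf and
# lam=1 the uniform(-1, 1) quantile 2*q - 1.
def _demo_tukeylambda_limits(x=0.7, q=0.25):
    return tukeylambda.cdf(x, 0.0), logistic.cdf(x), tukeylambda.ppf(q, 1.0), 2*q - 1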
# Uniform
# loc to loc + scale
class uniform_gen(rv_continuous):
def _rvs(self):
return mtrand.uniform(0.0,1.0,self._size)
def _pdf(self, x):
return 1.0*(x==x)
def _cdf(self, x):
return x
def _ppf(self, q):
return q
def _stats(self):
return 0.5, 1.0/12, 0, -1.2
def _entropy(self):
return 0.0
uniform = uniform_gen(a=0.0,b=1.0, name='uniform', longname="A uniform",
extradoc="""
Uniform distribution
constant between loc and loc+scale
"""
)
# Von-Mises
# if x is not in range or loc is not in range it assumes they are angles
# and converts them to [-pi, pi] equivalents.
eps = numpy.finfo(float).eps
class vonmises_gen(rv_continuous):
def _rvs(self, b):
return mtrand.vonmises(0.0, b, size=self._size)
def _pdf(self, x, b):
return exp(b*cos(x)) / (2*pi*special.i0(b))
def _cdf(self, x, b):
return vonmises_cython.von_mises_cdf(b,x)
def _stats_skip(self, b):
return 0, None, 0, None
vonmises = vonmises_gen(name='vonmises', longname="A Von Mises",
shapes="b", extradoc="""
Von Mises distribution
if x is not in range or loc is not in range it assumes they are angles
and converts them to [-pi, pi] equivalents.
vonmises.pdf(x,b) = exp(b*cos(x)) / (2*pi*I[0](b))
for -pi <= x <= pi, b > 0.
"""
)
## Wald distribution (Inverse Normal with shape parameter mu=1.0)
class wald_gen(invgauss_gen):
"""A Wald continuous random variable.
%(before_notes)s
Notes
-----
The probability density function, `pdf`, is defined by
``1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))``, for ``x > 0``.
%(example)s
"""
def _rvs(self):
return mtrand.wald(1.0, 1.0, size=self._size)
def _pdf(self, x):
return invgauss._pdf(x, 1.0)
def _logpdf(self, x):
return invgauss._logpdf(x, 1.0)
def _cdf(self, x):
return invgauss._cdf(x, 1.0)
def _stats(self):
return 1.0, 1.0, 3.0, 15.0
wald = wald_gen(a=0.0, name="wald", extradoc="""
Wald distribution
wald.pdf(x) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
for x > 0.
"""
)
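# --- Illustrative sketch (editor addition, hypothetical helper): as stated
# above, wald is the inverse Gaussian with mu fixed at 1; the pdfs should match.
def _demo_wald_is_invgauss_mu1(x=1.3):
    return wald.pdf(x), invgauss.pdf(x, 1.0)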
## Weibull
## See Frechet
# Wrapped Cauchy
class wrapcauchy_gen(rv_continuous):
def _argcheck(self, c):
return (c > 0) & (c < 1)
def _pdf(self, x, c):
return (1.0-c*c)/(2*pi*(1+c*c-2*c*cos(x)))
def _cdf(self, x, c):
output = 0.0*x
val = (1.0+c)/(1.0-c)
c1 = x<pi
c2 = 1-c1
xp = extract( c1,x)
#valp = extract(c1,val)
xn = extract( c2,x)
#valn = extract(c2,val)
if (any(xn)):
valn = extract(c2, np.ones_like(x)*val)
xn = 2*pi - xn
yn = tan(xn/2.0)
on = 1.0-1.0/pi*arctan(valn*yn)
output = place(output, c2, on)
if (any(xp)):
valp = extract(c1, np.ones_like(x)*val)
yp = tan(xp/2.0)
op = 1.0/pi*arctan(valp*yp)
output = place(output, c1, op)
return output
def _ppf(self, q, c):
val = (1.0-c)/(1.0+c)
rcq = 2*arctan(val*tan(pi*q))
rcmq = 2*pi-2*arctan(val*tan(pi*(1-q)))
return where(q < 1.0/2, rcq, rcmq)
def _entropy(self, c):
return log(2*pi*(1-c*c))
wrapcauchy = wrapcauchy_gen(a=0.0,b=2*pi, name='wrapcauchy',
longname="A wrapped Cauchy",
shapes="c", extradoc="""
Wrapped Cauchy distribution
wrapcauchy.pdf(x,c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
for 0 <= x <= 2*pi, 0 < c < 1.
"""
)
### DISCRETE DISTRIBUTIONS
###
def entropy(pk,qk=None):
"""S = entropy(pk,qk=None)
calculate the entropy of a distribution given the p_k values
S = -sum(pk * log(pk), axis=0)
If qk is not None, then compute a relative entropy
S = sum(pk * log(pk / qk), axis=0)
Routine will normalize pk and qk if they don't sum to 1
"""
pk = arr(pk)
pk = 1.0* pk / sum(pk,axis=0)
if qk is None:
vec = where(pk == 0, 0.0, pk*log(pk))
else:
qk = arr(qk)
if len(qk) != len(pk):
raise ValueError("qk and pk must have same length.")
qk = 1.0*qk / sum(qk,axis=0)
# If qk is zero anywhere, then unless pk is zero at those places
# too, the relative entropy is infinite.
if any(take(pk,nonzero(qk==0.0),axis=0)!=0.0, 0):
return inf
vec = where (pk == 0, 0.0, -pk*log(pk / qk))
return -sum(vec,axis=0)
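# --- Illustrative sketch (editor addition, hypothetical helper): for a fair coin
# entropy([0.5, 0.5]) should return log(2) ~= 0.6931, and the relative entropy of
# a distribution against itself should be 0.
def _demo_entropy_examples():
    return entropy([0.5, 0.5]), entropy([0.3, 0.7], [0.3, 0.7])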
## Handlers for generic case where xk and pk are given
def _drv_pmf(self, xk, *args):
try:
return self.P[xk]
except KeyError:
return 0.0
def _drv_cdf(self, xk, *args):
indx = argmax((self.xk>xk),axis=-1)-1
return self.F[self.xk[indx]]
def _drv_ppf(self, q, *args):
indx = argmax((self.qvals>=q),axis=-1)
return self.Finv[self.qvals[indx]]
def _drv_nonzero(self, k, *args):
return 1
def _drv_moment(self, n, *args):
n = arr(n)
return sum(self.xk**n[newaxis,...] * self.pk, axis=0)
def _drv_moment_gen(self, t, *args):
t = arr(t)
return sum(exp(self.xk * t[newaxis,...]) * self.pk, axis=0)
def _drv2_moment(self, n, *args):
'''non-central moment of discrete distribution'''
#many changes, originally not even a return
tot = 0.0
diff = 1e100
#pos = self.a
pos = max(0.0, 1.0*self.a)
count = 0
#handle cases with infinite support
ulimit = max(1000, (min(self.b,1000) + max(self.a,-1000))/2.0 )
llimit = min(-1000, (min(self.b,1000) + max(self.a,-1000))/2.0 )
while (pos <= self.b) and ((pos <= ulimit) or \
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos,*args)
# use pmf because _pmf does not check support in randint
# and there might be problems ? with correct self.a, self.b at this stage
tot += diff
pos += self.inc
count += 1
if self.a < 0: #handle case when self.a = -inf
diff = 1e100
pos = -self.inc
while (pos >= self.a) and ((pos >= llimit) or \
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos,*args)
#using pmf instead of _pmf, see above
tot += diff
pos -= self.inc
count += 1
return tot
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
b = self.invcdf_b
a = self.invcdf_a
if isinf(b): # Be sure ending point is > q
b = max(100*q,10)
while 1:
if b >= self.b: qb = 1.0; break
qb = self._cdf(b,*args)
if (qb < q): b += 10
else: break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = min(-100*q,-10)
while 1:
if a <= self.a: qb = 0.0; break
qa = self._cdf(a,*args)
if (qa > q): a -= 10
else: break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b == a+1:
#testcase: return wrong number at lower index
#python -c "from scipy.stats import zipf;print zipf.ppf(0.01,2)" wrong
#python -c "from scipy.stats import zipf;print zipf.ppf([0.01,0.61,0.77,0.83],2)"
#python -c "from scipy.stats import logser;print logser.ppf([0.1,0.66, 0.86,0.93],0.6)"
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
a = c
qa = qc
elif (qc > q):
b = c
qb = qc
else:
return c
def reverse_dict(dict):
newdict = {}
sorted_keys = copy(dict.keys())
sorted_keys.sort()
for key in sorted_keys[::-1]:
newdict[dict[key]] = key
return newdict
def make_dict(keys, values):
d = {}
for key, value in zip(keys, values):
d[key] = value
return d
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class from which specific distribution classes
and instances for discrete random variables are constructed. It can also be
used to construct an arbitrary distribution defined by a list of support
points and the corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments
values : tuple of two array_like
(xk, pk) where xk are points (integers) with positive probability pk
with sum(pk) = 1
inc : integer
increment for the support of the distribution, default: 1
other values have not been tested
badvalue : object, optional
The value in (masked) arrays that indicates a value that should be
ignored.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the first two arguments for all
its methods.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
Methods
-------
generic.rvs(<shape(s)>, loc=0, size=1)
random variates
generic.pmf(x, <shape(s)>, loc=0)
probability mass function
generic.logpmf(x, <shape(s)>, loc=0)
log of the probability mass function
generic.cdf(x, <shape(s)>, loc=0)
cumulative distribution function
generic.logcdf(x, <shape(s)>, loc=0)
log of the cumulative distribution function
generic.sf(x, <shape(s)>, loc=0)
survival function (1-cdf --- sometimes more accurate)
generic.logsf(x, <shape(s)>, loc=0, scale=1)
log of the survival function
generic.ppf(q, <shape(s)>, loc=0)
percent point function (inverse of cdf --- percentiles)
generic.isf(q, <shape(s)>, loc=0)
inverse survival function (inverse of sf)
generic.moment(n, <shape(s)>, loc=0)
non-central n-th moment of the distribution. May not work for array arguments.
generic.stats(<shape(s)>, loc=0, moments='mv')
mean('m', axis=0), variance('v'), skew('s'), and/or kurtosis('k')
generic.entropy(<shape(s)>, loc=0)
entropy of the RV
generic.fit(data, <shape(s)>, loc=0)
Parameter estimates for generic data
generic.expect(func=None, args=(), loc=0, lb=None, ub=None, conditional=False)
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
generic.median(<shape(s)>, loc=0)
Median of the distribution.
generic.mean(<shape(s)>, loc=0)
Mean of the distribution.
generic.std(<shape(s)>, loc=0)
Standard deviation of the distribution.
generic.var(<shape(s)>, loc=0)
Variance of the distribution.
generic.interval(alpha, <shape(s)>, loc=0)
Interval that with `alpha` percent probability contains a random
realization of this distribution.
generic(<shape(s)>, loc=0)
calling a distribution instance returns a frozen distribution
Notes
-----
Alternatively, the object may be called (as a function) to fix
the shape and location parameters returning a
"frozen" discrete RV object:
myrv = generic(<shape(s)>, loc=0)
- frozen RV object with the same methods but holding the given shape
and location fixed.
You can construct an arbitrary discrete rv where P{X=xk} = pk
by passing to the rv_discrete initialization method (through the
values=keyword) a tuple of sequences (xk, pk) which describes only those
values of X (xk) that occur with nonzero probability (pk).
To create a new discrete distribution, we would do the following::
class poisson_gen(rv_discrete):
#"Poisson distribution"
def _pmf(self, k, mu):
...
and create an instance
poisson = poisson_gen(name="poisson", shapes="mu", longname='A Poisson')
The docstring can be created from a template.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> numargs = generic.numargs
>>> [ <shape(s)> ] = ['Replace with reasonable value', ]*numargs
Display frozen pmf:
>>> rv = generic(<shape(s)>)
>>> x = np.arange(0, np.min(rv.dist.b, 3)+1)
>>> h = plt.plot(x, rv.pmf(x))
Check accuracy of cdf and ppf:
>>> prb = generic.cdf(x, <shape(s)>)
>>> h = plt.semilogy(np.abs(x-generic.ppf(prb, <shape(s)>))+1e-20)
Random number generation:
>>> R = generic.rvs(<shape(s)>, size=100)
Custom made discrete distribution:
>>> vals = [arange(7), (0.1, 0.2, 0.3, 0.1, 0.1, 0.1, 0.1)]
>>> custm = rv_discrete(name='custm', values=vals)
>>> h = plt.plot(vals[0], custm.pmf(vals[0]))
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8,values=None,inc=1,longname=None,
shapes=None, extradoc=None):
super(rv_generic,self).__init__()
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.a = a
self.b = b
self.invcdf_a = a # what's the difference to self.a, .b
self.invcdf_b = b
self.name = name
self.moment_tol = moment_tol
self.inc = inc
self._cdfvec = sgf(self._cdfsingle,otypes='d')
self.return_integers = 1
self.vecentropy = vectorize(self._entropy)
self.shapes = shapes
self.extradoc = extradoc
if values is not None:
self.xk, self.pk = values
self.return_integers = 0
indx = argsort(ravel(self.xk))
self.xk = take(ravel(self.xk),indx, 0)
self.pk = take(ravel(self.pk),indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.P = make_dict(self.xk, self.pk)
self.qvals = numpy.cumsum(self.pk,axis=0)
self.F = make_dict(self.xk, self.qvals)
self.Finv = reverse_dict(self.F)
self._ppf = instancemethod(sgf(_drv_ppf,otypes='d'),
self, rv_discrete)
self._pmf = instancemethod(sgf(_drv_pmf,otypes='d'),
self, rv_discrete)
self._cdf = instancemethod(sgf(_drv_cdf,otypes='d'),
self, rv_discrete)
self._nonzero = instancemethod(_drv_nonzero, self, rv_discrete)
self.generic_moment = instancemethod(_drv_moment,
self, rv_discrete)
self.moment_gen = instancemethod(_drv_moment_gen,
self, rv_discrete)
self.numargs=0
else:
cdf_signature = inspect.getargspec(self._cdf.im_func)
numargs1 = len(cdf_signature[0]) - 2
pmf_signature = inspect.getargspec(self._pmf.im_func)
numargs2 = len(pmf_signature[0]) - 2
self.numargs = max(numargs1, numargs2)
#nin correction needs to be after we know numargs
#correct nin for generic moment vectorization
self.vec_generic_moment = sgf(_drv2_moment, otypes='d')
self.vec_generic_moment.nin = self.numargs + 2
self.generic_moment = instancemethod(self.vec_generic_moment,
self, rv_discrete)
#correct nin for ppf vectorization
_vppf = sgf(_drv2_ppfsingle,otypes='d')
_vppf.nin = self.numargs + 2 # +1 is for self
self._vecppf = instancemethod(_vppf,
self, rv_discrete)
#now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
# generate docstring for subclass instances
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if self.__doc__ is None:
self._construct_default_doc(longname=longname, extradoc=extradoc)
else:
self._construct_doc()
## This only works for old-style classes...
# self.__class__.__doc__ = self.__doc__
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the rv_discrete template."""
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s discrete random variable.'%longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc()
def _construct_doc(self):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict_discrete.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['callparams', 'default', 'before_notes']:
tempdict[item] = tempdict[item].replace(\
"\n%(shapes)s : array-like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (w w/o ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
def _rvs(self, *args):
return self._ppf(mtrand.random_sample(self._size),*args)
def _nonzero(self, k, *args):
return floor(k)==k
def _argcheck(self, *args):
cond = 1
for arg in args:
cond &= (arg > 0)
return cond
def _pmf(self, k, *args):
return self._cdf(k,*args) - self._cdf(k-1,*args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdfsingle(self, k, *args):
m = arange(int(self.a),k+1)
return sum(self._pmf(m,*args),axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k,*args)
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x,*args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._vecppf(q, *args)
def _isf(self, q, *args):
return self._ppf(1-q,*args)
def _stats(self, *args):
return None, None, None, None
def _munp(self, n, *args):
return self.generic_moment(n, *args)
def rvs(self, *args, **kwargs):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
size : int or tuple of ints, optional
defining number of random variates (default=1)
Returns
-------
rvs : array-like
random variates of given `size`
"""
kwargs['discrete'] = True
return super(rv_discrete, self).rvs(*args, **kwargs)
def pmf(self, k,*args, **kwds):
"""
Probability mass function at k of the given RV.
Parameters
----------
k : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
pmf : array-like
Probability mass function evaluated at k
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k,*args)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
output = place(output,cond,self._pmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logpmf(self, k,*args, **kwds):
"""
Log of the probability mass function at k of the given RV.
Parameters
----------
k : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
logpmf : array-like
Log of the probability mass function evaluated at k
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k,*args)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
output = place(output,cond,self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""
Cumulative distribution function at k of the given RV
Parameters
----------
k : array-like, int
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
cdf : array-like
Cumulative distribution function evaluated at k
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
output = place(output,cond2*(cond0==cond0), 1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
output = place(output,cond,self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""
Log of the cumulative distribution function at k of the given RV
Parameters
----------
k : array-like, int
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
logcdf : array-like
Log of the cumulative distribution function evaluated at k
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
output = place(output,cond2*(cond0==cond0), 0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
output = place(output,cond,self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self,k,*args,**kwds):
"""
Survival function (1-cdf) at k of the given RV
Parameters
----------
k : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
sf : array-like
Survival function evaluated at k
"""
loc= kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond),'d')
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
output = place(output,cond2,1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
output = place(output,cond,self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self,k,*args,**kwds):
"""
Log of the survival function (1-cdf) at k of the given RV
Parameters
----------
k : array-like
quantiles
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
sf : array-like
Survival function evaluated at k
"""
loc= kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(arr,(k,loc))
args = tuple(map(arr,args))
k = arr(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
output = place(output,(1-cond0)*(cond1==cond1),self.badvalue)
output = place(output,cond2,0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
output = place(output,cond,self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self,q,*args,**kwds):
"""
Percent point function (inverse of cdf) at q of the given RV
Parameters
----------
q : array-like
lower tail probability
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
scale: array-like, optional
scale parameter (default=1)
Returns
-------
k : array-like
quantile corresponding to the lower tail probability, q.
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
q,loc = map(arr,(q,loc))
args = tuple(map(arr,args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond),value=self.badvalue,typecode='d')
#output type 'd' to handle nan and inf
output = place(output,(q==0)*(cond==cond), self.a-1)
output = place(output,cond2,self.b)
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
output = place(output,cond,self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self,q,*args,**kwds):
"""
Inverse survival function (1-sf) at q of the given RV
Parameters
----------
q : array-like
upper tail probability
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
Returns
-------
k : array-like
quantile corresponding to the upper tail probability, q.
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
q,loc = map(arr,(q,loc))
args = tuple(map(arr,args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
#old:
## output = valarray(shape(cond),value=self.b,typecode='d')
## #typecode 'd' to handle nin and inf
## output = place(output,(1-cond0)*(cond1==cond1), self.badvalue)
## output = place(output,cond2,self.a-1)
#same problem as with ppf
# copied from ppf and changed
output = valarray(shape(cond),value=self.badvalue,typecode='d')
#output type 'd' to handle nan and inf
output = place(output,(q==0)*(cond==cond), self.b)
output = place(output,cond2,self.a-1)
# call place only if at least 1 valid argument
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
output = place(output,cond,self._isf(*goodargs) + loc) #PB same as ticket 766
if output.ndim == 0:
return output[()]
return output
def stats(self, *args, **kwds):
"""
Some statistics of the given discrete RV
Parameters
----------
arg1, arg2, arg3,... : array-like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array-like, optional
location parameter (default=0)
moments : string, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default='mv')
Returns
-------
stats : sequence
of requested moments.
"""
loc,moments=map(kwds.get,['loc','moments'])
N = len(args)
if N > self.numargs:
if N == self.numargs + 1 and loc is None: # loc is given without keyword
loc = args[-1]
if N == self.numargs + 2 and moments is None: # loc, scale, and moments
loc, moments = args[-2:]
args = args[:self.numargs]
if loc is None: loc = 0.0
if moments is None: moments = 'mv'
loc = arr(loc)
args = tuple(map(arr,args))
cond = self._argcheck(*args) & (loc==loc)
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
mu, mu2, g1, g2 = self._stats(*args,**{'moments':moments})
else:
mu, mu2, g1, g2 = self._stats(*args)
if g1 is None:
mu3 = None
else:
mu3 = g1*(mu2**1.5)
default = valarray(shape(cond), self.badvalue)
output = []
# Use only entries that are valid in calculation
goodargs = argsreduce(cond, *(args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
if 'm' in moments:
if mu is None:
mu = self._munp(1.0,*goodargs)
out0 = default.copy()
out0 = place(out0,cond,mu+loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
mu2 = mu2p - mu*mu
out0 = default.copy()
out0 = place(out0,cond,mu2)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
mu3 = mu3p - 3*mu*mu2 - mu**3
g1 = mu3 / mu2**1.5
out0 = default.copy()
out0 = place(out0,cond,g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
if mu3 is None:
mu3p = self._munp(3.0,*goodargs)
mu3 = mu3p - 3*mu*mu2 - mu**3
mu4 = mu4p - 4*mu*mu3 - 6*mu*mu*mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
out0 = place(out0,cond,g2)
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
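# Illustrative usage of the 'moments' switch handled above (hypothetical
# sketch, assuming the bernoulli instance defined later in this module;
# values approximate):
#
#     >>> bernoulli.stats(0.25, moments='mvsk')  # -> 0.25, 0.1875, ~1.1547, ~-0.667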
def moment(self, n, *args, **kwds): # Non-central moments in standard form.
"""
n'th non-central moment of the distribution
Parameters
----------
n: int, n>=1
order of moment
arg1, arg2, arg3,...: float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : float, optional
location parameter (default=0)
scale : float, optional
scale parameter (default=1)
"""
loc = kwds.get('loc', 0)
scale = kwds.get('scale', 1)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0): raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
mdict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args,**mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# so E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n,k)*(S/L)^k E[Y^k],k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n,k,exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
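# Worked instance of the loc/scale expansion used above (illustrative): for
# n = 2 and X = L + S*Y,
#     E[X**2] = L**2 * (1 + 2*(S/L)*E[Y] + (S/L)**2 * E[Y**2])
#             = L**2 + 2*L*S*E[Y] + S**2 * E[Y**2],
# which is exactly the sum over k = 0..n-1 plus the final fac**n term.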
def freeze(self, *args, **kwds):
return rv_frozen(self, *args, **kwds)
def _entropy(self, *args):
if hasattr(self,'pk'):
return entropy(self.pk)
else:
mu = int(self.stats(*args, **{'moments':'m'}))
val = self.pmf(mu,*args)
if (val==0.0): ent = 0.0
else: ent = -val*log(val)
k = 1
term = 1.0
while (abs(term) > eps):
val = self.pmf(mu+k,*args)
if val == 0.0: term = 0.0
else: term = -val * log(val)
val = self.pmf(mu-k,*args)
if val != 0.0: term -= val*log(val)
k += 1
ent += term
return ent
def entropy(self, *args, **kwds):
loc= kwds.get('loc')
args, loc = self._fix_loc(args, loc)
loc = arr(loc)
args = map(arr,args)
cond0 = self._argcheck(*args) & (loc==loc)
output = zeros(shape(cond0),'d')
output = place(output,(1-cond0),self.badvalue)
goodargs = argsreduce(cond0, *args)
output = place(output,cond0,self.vecentropy(*goodargs))
return output
def __call__(self, *args, **kwds):
return self.freeze(*args,**kwds)
def expect(self, func=None, args=(), loc=0, lb=None, ub=None, conditional=False):
"""calculate expected value of a function with respect to the distribution
for discrete distribution
Parameters
----------
func : function (default: identity mapping)
Function for which sum is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
optional keyword parameters
lb, ub : numbers
lower and upper bound for integration, default is set to the support
of the distribution, lb and ub are inclusive (lb<=k<=ub)
conditional : boolean (False)
If true then the expectation is corrected by the conditional
probability of the integration interval. The return value is the
expectation of the function, conditional on being in the given
interval (k such that lb<=k<=ub).
Returns
-------
expected value : float
Notes
-----
* function is not vectorized
* accuracy: uses self.moment_tol as stopping criterion;
for a heavy-tailed distribution, e.g. zipf(4), the accuracy for
mean and variance in the example is only 1e-5,
increasing precision (moment_tol) makes zipf very slow
* suppnmin=100 internal parameter for minimum number of points to evaluate
could be added as keyword parameter, to evaluate functions with
non-monotonic shapes, points include integers in (-suppnmin, suppnmin)
* uses maxcount=1000 limits the number of points that are evaluated
to break loop for infinite sums
(a maximum of suppnmin+1000 positive plus suppnmin+1000 negative integers
are evaluated)
"""
#moment_tol = 1e-12 # increase compared to self.moment_tol,
# too slow for only small gain in precision for zipf
#avoid endless loop with unbound integral, eg. var of zipf(2)
maxcount = 1000
suppnmin = 100 #minimum number of points to evaluate (+ and -)
if func is None:
def fun(x):
#loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
#loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
# used pmf because _pmf does not check support in randint
# and there might be problems(?) with correct self.a, self.b at this stage
# maybe not anymore, seems to work now with _pmf
self._argcheck(*args) # (re)generate scalar self.a and self.b
if lb is None:
lb = (self.a)
else:
lb = lb - loc #convert bound for standardized distribution
if ub is None:
ub = (self.b)
else:
ub = ub - loc #convert bound for standardized distribution
if conditional:
if np.isposinf(ub)[()]:
#work around bug: stats.poisson.sf(stats.poisson.b, 2) is nan
invfac = 1 - self.cdf(lb-1,*args)
else:
invfac = 1 - self.cdf(lb-1,*args) - self.sf(ub,*args)
else:
invfac = 1.0
tot = 0.0
low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args)
low = max(min(-suppnmin, low), lb)
upp = min(max(suppnmin, upp), ub)
supp = np.arange(low, upp+1, self.inc) #check limits
#print 'low, upp', low, upp
tot = np.sum(fun(supp))
diff = 1e100
pos = upp + self.inc
count = 0
#handle cases with infinite support
while (pos <= ub) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos += self.inc
count += 1
if self.a < 0: #handle case when self.a = -inf
diff = 1e100
pos = low - self.inc
while (pos >= lb) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos -= self.inc
count += 1
if count > maxcount:
# fixme: replace with proper warning
print('sum did not converge')
return tot/invfac
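# Illustrative sketch of the truncated summation performed by expect() above
# (hypothetical usage via the poisson instance defined later in this module;
# values approximate):
#
#     >>> poisson.expect(lambda k: k, args=(3.0,))              # ~3.0 (mean)
#     >>> poisson.expect(lambda k: k*k, args=(3.0,)) - 3.0**2   # ~3.0 (variance)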
# Binomial
class binom_gen(rv_discrete):
def _rvs(self, n, pr):
return mtrand.binomial(n,pr,self._size)
def _argcheck(self, n, pr):
self.b = n
return (n>=0) & (pr >= 0) & (pr <= 1)
def _logpmf(self, x, n, pr):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) +
gamln(n-k+1)))
return combiln + k*np.log(pr) + (n-k)*np.log(1-pr)
def _pmf(self, x, n, pr):
return exp(self._logpmf(x, n, pr))
def _cdf(self, x, n, pr):
k = floor(x)
vals = special.bdtr(k,n,pr)
return vals
def _sf(self, x, n, pr):
k = floor(x)
return special.bdtrc(k,n,pr)
def _ppf(self, q, n, pr):
vals = ceil(special.bdtrik(q,n,pr))
vals1 = vals-1
temp = special.bdtr(vals1,n,pr)
return where(temp >= q, vals1, vals)
def _stats(self, n, pr):
q = 1.0-pr
mu = n * pr
var = n * pr * q
g1 = (q-pr) / sqrt(n*pr*q)
g2 = (1.0-6*pr*q)/(n*pr*q)
return mu, var, g1, g2
def _entropy(self, n, pr):
k = r_[0:n+1]
vals = self._pmf(k,n,pr)
lvals = where(vals==0,0.0,log(vals))
return -sum(vals*lvals,axis=0)
binom = binom_gen(name='binom',shapes="n, pr",extradoc="""
Binomial distribution
Counts the number of successes in *n* independent
trials when the probability of success each time is *pr*.
binom.pmf(k,n,p) = choose(n,k)*p**k*(1-p)**(n-k)
for k in {0,1,...,n}
""")
# Bernoulli distribution
class bernoulli_gen(binom_gen):
def _rvs(self, pr):
return binom_gen._rvs(self, 1, pr)
def _argcheck(self, pr):
return (pr >=0 ) & (pr <= 1)
def _logpmf(self, x, pr):
return binom._logpmf(x, 1, pr)
def _pmf(self, x, pr):
return binom._pmf(x, 1, pr)
def _cdf(self, x, pr):
return binom._cdf(x, 1, pr)
def _sf(self, x, pr):
return binom._sf(x, 1, pr)
def _ppf(self, q, pr):
return binom._ppf(q, 1, pr)
def _stats(self, pr):
return binom._stats(1, pr)
def _entropy(self, pr):
return -pr*log(pr)-(1-pr)*log(1-pr)
bernoulli = bernoulli_gen(b=1,name='bernoulli',shapes="pr",extradoc="""
Bernoulli distribution
1 if binary experiment succeeds, 0 otherwise. Experiment
succeeds with probability *pr*.
bernoulli.pmf(k,p) = 1-p if k = 0
= p if k = 1
for k = 0,1
"""
)
# Negative binomial
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
Probability mass function, given by
``comb(k+n-1, n-1) * p**n * (1-p)**k`` for ``k >= 0``.
%(example)s
"""
def _rvs(self, n, pr):
return mtrand.negative_binomial(n, pr, self._size)
def _argcheck(self, n, pr):
return (n >= 0) & (pr >= 0) & (pr <= 1)
def _pmf(self, x, n, pr):
coeff = exp(gamln(n+x) - gamln(x+1) - gamln(n))
return coeff * power(pr,n) * power(1-pr,x)
def _logpmf(self, x, n, pr):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(pr) + x*log(1-pr)
def _cdf(self, x, n, pr):
k = floor(x)
return special.betainc(n, k+1, pr)
def _sf_skip(self, x, n, pr):
#skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k,n,pr)
def _ppf(self, q, n, pr):
vals = ceil(special.nbdtrik(q,n,pr))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1,n,pr)
return where(temp >= q, vals1, vals)
def _stats(self, n, pr):
Q = 1.0 / pr
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom', shapes="n, pr", extradoc="""
Negative binomial distribution
nbinom.pmf(k,n,p) = choose(k+n-1,n-1) * p**n * (1-p)**k
for k >= 0.
"""
)
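# Illustrative check of the negative binomial pmf above (hedged sketch using
# the module's comb(); values approximate): for n=2, pr=0.5, k=1,
#
#     >>> nbinom.pmf(1, 2, 0.5)                         # -> 0.25
#     >>> comb(1 + 2 - 1, 2 - 1) * 0.5**2 * (1 - 0.5)   # same value from the formula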
## Geometric distribution
class geom_gen(rv_discrete):
def _rvs(self, pr):
return mtrand.geometric(pr,size=self._size)
def _argcheck(self, pr):
return (pr<=1) & (pr >= 0)
def _pmf(self, k, pr):
return (1-pr)**(k-1) * pr
def _logpmf(self, k, pr):
return (k-1)*log(1-pr) + log(pr)
def _cdf(self, x, pr):
k = floor(x)
return (1.0-(1.0-pr)**k)
def _sf(self, x, pr):
k = floor(x)
return (1.0-pr)**k
def _ppf(self, q, pr):
vals = ceil(log(1.0-q)/log(1-pr))
temp = 1.0-(1.0-pr)**(vals-1)
return where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, pr):
mu = 1.0/pr
qr = 1.0-pr
var = qr / pr / pr
g1 = (2.0-pr) / sqrt(qr)
g2 = numpy.polyval([1,-6,6],pr)/(1.0-pr)
return mu, var, g1, g2
geom = geom_gen(a=1,name='geom', longname="A geometric",
shapes="pr", extradoc="""
Geometric distribution
geom.pmf(k,p) = (1-p)**(k-1)*p
for k >= 1
"""
)
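# Quick consistency sketch for the geometric pmf above (illustrative; values
# approximate):
#
#     >>> geom.pmf(3, 0.25)              # (1-p)**(k-1) * p -> 0.140625
#     >>> (1 - 0.25)**(3 - 1) * 0.25     # same value from the formula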
## Hypergeometric distribution
class hypergeom_gen(rv_discrete):
def _rvs(self, M, n, N):
return mtrand.hypergeometric(n,M-n,N,size=self._size)
def _argcheck(self, M, n, N):
cond = rv_discrete._argcheck(self,M,n,N)
cond &= (n <= M) & (N <= M)
self.a = N-(M-n)
self.b = min(n,N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
#same as the following but numerically more precise
#return comb(good,k) * comb(bad,N-k) / comb(tot,N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
tot, good = M, n
n = good*1.0
m = (tot-good)*1.0
N = N*1.0
tot = m+n
p = n/tot
mu = N*p
var = m*n*N*(tot-N)*1.0/(tot*tot*(tot-1))
g1 = (m - n)*(tot-2*N) / (tot-2.0)*sqrt((tot-1.0)/(m*n*N*(tot-N)))
m2, m3, m4, m5 = m**2, m**3, m**4, m**5
n2, n3, n4, n5 = n**2, n**3, n**4, n**5
g2 = m3 - m5 + n*(3*m2-6*m3+m4) + 3*m*n2 - 12*m2*n2 + 8*m3*n2 + n3 \
- 6*m*n3 + 8*m2*n3 + m*n4 - n5 - 6*m3*N + 6*m4*N + 18*m2*n*N \
- 6*m3*n*N + 18*m*n2*N - 24*m2*n2*N - 6*n3*N - 6*m*n3*N \
+ 6*n4*N + N*N*(6*m2 - 6*m3 - 24*m*n + 12*m2*n + 6*n2 + \
12*m*n2 - 6*n3)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = r_[N-(M-n):min(n,N)+1]
vals = self.pmf(k,M,n,N)
lvals = where(vals==0.0,0.0,log(vals))
return -sum(vals*lvals,axis=0)
hypergeom = hypergeom_gen(name='hypergeom',longname="A hypergeometric",
shapes="M, n, N", extradoc="""
Hypergeometric distribution
Models drawing objects from a bin.
M is the total number of objects, n is the number of Type I objects.
The RV counts the number of Type I objects in a sample of size N drawn
without replacement from the population.
hypergeom.pmf(k, M, n, N) = choose(n,k)*choose(M-n,N-k)/choose(M,N)
for N - (M-n) <= k <= min(n,N)
"""
)
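# Illustrative sketch matching the hypergeometric pmf above (using the
# module's comb(); values approximate): drawing N=10 from M=20 objects of
# which n=7 are Type I,
#
#     >>> hypergeom.pmf(3, 20, 7, 10)                    # -> ~0.325
#     >>> comb(7, 3) * comb(13, 7) / float(comb(20, 10)) # same value from the formula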
## Logarithmic (Log-Series), (Series) distribution
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
def _rvs(self, pr):
# looks wrong for pr>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return mtrand.logseries(pr,size=self._size)
def _argcheck(self, pr):
return (pr > 0) & (pr < 1)
def _pmf(self, k, pr):
return -pr**k * 1.0 / k / log(1-pr)
def _stats(self, pr):
r = log(1-pr)
mu = pr / (pr - 1.0) / r
mu2p = -pr / r / (pr-1.0)**2
var = mu2p - mu*mu
mu3p = -pr / r * (1.0+pr) / (1.0-pr)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / var**1.5
mu4p = -pr / r * (1.0/(pr-1)**2 - 6*pr/(pr-1)**3 + \
6*pr*pr / (pr-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1,name='logser', longname='A logarithmic',
shapes='pr', extradoc="""
Logarithmic (Log-Series, Series) distribution
logser.pmf(k,p) = - p**k / (k*log(1-p))
for k >= 1
"""
)
## Poisson distribution
class poisson_gen(rv_discrete):
def _rvs(self, mu):
return mtrand.poisson(mu, self._size)
def _pmf(self, k, mu):
Pk = k*log(mu)-gamln(k+1) - mu
return exp(Pk)
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k,mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k,mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q,mu))
vals1 = vals-1
temp = special.pdtr(vals1,mu)
return where((temp >= q), vals1, vals)
def _stats(self, mu):
var = mu
g1 = 1.0/arr(sqrt(mu))
g2 = 1.0 / arr(mu)
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson',
shapes="mu", extradoc="""
Poisson distribution
poisson.pmf(k, mu) = exp(-mu) * mu**k / k!
for k >= 0
"""
)
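# Usage sketch for the Poisson pmf documented above (illustrative; exp comes
# from the module's numpy imports; values approximate):
#
#     >>> poisson.pmf(2, 3.0)            # exp(-mu) * mu**k / k! -> ~0.224
#     >>> exp(-3.0) * 3.0**2 / 2.0       # same value from the formula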
## (Planck) Discrete Exponential
class planck_gen(rv_discrete):
def _argcheck(self, lambda_):
if (lambda_ > 0):
self.a = 0
self.b = inf
return 1
elif (lambda_ < 0):
self.a = -inf
self.b = 0
return 1
return 0 # lambda_ = 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck',longname='A discrete exponential ',
shapes="lamda",
extradoc="""
Planck (Discrete Exponential)
planck.pmf(k,b) = (1-exp(-b))*exp(-b*k)
for k*b >= 0
"""
)
class boltzmann_gen(rv_discrete):
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',longname='A truncated discrete exponential ',
shapes="lamda, N",
extradoc="""
Boltzmann (Truncated Discrete Exponential)
boltzmann.pmf(k,b,N) = (1-exp(-b))*exp(-b*k)/(1-exp(-b*N))
for k=0,..,N-1
"""
)
## Discrete Uniform
class randint_gen(rv_discrete):
def _argcheck(self, min, max):
self.a = min
self.b = max-1
return (max > min)
def _pmf(self, k, min, max):
fact = 1.0 / (max - min)
return fact
def _cdf(self, x, min, max):
k = floor(x)
return (k-min+1)*1.0/(max-min)
def _ppf(self, q, min, max):
vals = ceil(q*(max-min)+min)-1
vals1 = (vals-1).clip(min, max)
temp = self._cdf(vals1, min, max)
return where(temp >= q, vals1, vals)
def _stats(self, min, max):
m2, m1 = arr(max), arr(min)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d-1)*(d+1.0)/12.0
g1 = 0.0
g2 = -6.0/5.0*(d*d+1.0)/(d-1.0)*(d+1.0)
return mu, var, g1, g2
def _rvs(self, min, max=None):
"""An array of *size* random integers >= min and < max.
If max is None, then range is >=0 and < min
"""
return mtrand.randint(min, max, self._size)
def _entropy(self, min, max):
return log(max-min)
randint = randint_gen(name='randint',longname='A discrete uniform '\
'(random integer)', shapes="min, max",
extradoc="""
Discrete Uniform
Random integers >=min and <max.
randint.pmf(k,min, max) = 1/(max-min)
for min <= k < max.
"""
)
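# Illustrative sketch for the discrete uniform defined above: with min=2,
# max=7 the support is {2, ..., 6},
#
#     >>> randint.pmf(4, 2, 7)           # 1/(max-min) = 1/5 -> 0.2
#     >>> randint.cdf(4, 2, 7)           # (k-min+1)/(max-min) = 3/5 -> 0.6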
# Zipf distribution
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
def _rvs(self, a):
return mtrand.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / arr(special.zeta(a,1) * k**a)
return Pk
def _munp(self, n, a):
return special.zeta(a-n,1) / special.zeta(a,1)
def _stats(self, a):
sv = errp(0)
fac = arr(special.zeta(a,1))
mu = special.zeta(a-1.0,1)/fac
mu2p = special.zeta(a-2.0,1)/fac
var = mu2p - mu*mu
mu3p = special.zeta(a-3.0,1)/fac
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / arr(var**1.5)
mu4p = special.zeta(a-4.0,1)/fac
sv = errp(sv)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / arr(var**2) - 3.0
return mu, var, g1, g2
zipf = zipf_gen(a=1,name='zipf', longname='A Zipf',
shapes="a", extradoc="""
Zipf distribution
zipf.pmf(k,a) = 1/(zeta(a)*k**a)
for k >= 1
"""
)
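# Sketch of the Zipf pmf above (illustrative; zeta(2) = pi**2/6, pi assumed
# from the module's numpy imports; values approximate):
#
#     >>> zipf.pmf(1, 2.0)               # 1/(zeta(2) * 1**2) -> ~0.6079
#     >>> 6.0 / pi**2                    # same value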
# Discrete Laplacian
class dlaplace_gen(rv_discrete):
def _pmf(self, k, a):
return tanh(a/2.0)*exp(-a*abs(k))
def _cdf(self, x, a):
k = floor(x)
ind = (k >= 0)
const = exp(a)+1
return where(ind, 1.0-exp(-a*k)/const, exp(a*(k+1))/const)
def _ppf(self, q, a):
const = 1.0/(1+exp(-a))
cons2 = 1+exp(a)
ind = q < const
vals = ceil(where(ind, log(q*cons2)/a-1, -log((1-q)*cons2)/a))
vals1 = (vals-1)
temp = self._cdf(vals1, a)
return where(temp >= q, vals1, vals)
def _stats_skip(self, a):
# variance mu2 does not agree with sample variance,
# nor with direct calculation using pmf
# remove for now because generic calculation works
# except it does not show nice zeros for mean and skew(?)
ea = exp(-a)
e2a = exp(-2*a)
e3a = exp(-3*a)
e4a = exp(-4*a)
mu2 = 2* (e2a + ea) / (1-ea)**3.0
mu4 = 2* (e4a + 11*e3a + 11*e2a + ea) / (1-ea)**5.0
return 0.0, mu2, 0.0, mu4 / mu2**2.0 - 3
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-inf,
name='dlaplace', longname='A discrete Laplacian',
shapes="a", extradoc="""
Discrete Laplacian distribution.
dlaplace.pmf(k,a) = tanh(a/2) * exp(-a*abs(k))
for a > 0.
"""
)
class skellam_gen(rv_discrete):
def _rvs(self, mu1, mu2):
n = self._size
return np.random.poisson(mu1, n)-np.random.poisson(mu2, n)
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0, ncx2.pdf(2*mu2, 2*(1-x), 2*mu1)*2,
ncx2.pdf(2*mu1, 2*(x+1), 2*mu2)*2)
#ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = np.floor(x)
px = np.where(x < 0, ncx2.cdf(2*mu2, -2*x, 2*mu1),
1-ncx2.cdf(2*mu1, 2*(x+1), 2*mu2))
return px
# enable later
## def _cf(self, w, mu1, mu2):
## # characteristic function
## poisscf = poisson._cf
## return poisscf(w, mu1) * poisscf(-w, mu2)
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / np.sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam',
shapes="mu1,mu2", extradoc="""
Skellam distribution
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, k1-k2 follows a Skellam distribution with
parameters mu1 = lam1 - rho*sqrt(lam1*lam2) and
mu2 = lam2 - rho*sqrt(lam1*lam2), where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then rho = 0.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
"""
)
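# Illustrative sketch for the Skellam distribution above (values approximate):
# for independent Poissons with rates mu1=3 and mu2=2, the difference has
# mean mu1-mu2 = 1 and variance mu1+mu2 = 5, matching _stats:
#
#     >>> skellam.stats(3.0, 2.0)        # -> mean ~1.0, variance ~5.0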
| gpl-3.0 |
IndraVikas/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 167 | 1659 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
Fireblend/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows fitting multiple regression problems
jointly, enforcing the selected features to be the same across
tasks. This example simulates sequential measurements: each task
is a time instant, and the relevant features vary in amplitude
over time while remaining the same. The multi-task lasso imposes that
features that are selected at one time point are selected for all time
points. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
eladnoor/equilibrator-api | pathways_cmd.py | 1 | 1031 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 1 13:24:15 2017
@author: noore
"""
import argparse
import logging
from equilibrator_api import Pathway
from matplotlib.backends.backend_pdf import PdfPages
import pandas as pd
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Calculate the Max-min Driving Force (MDF) of a pathway.')
parser.add_argument(
'infile', type=argparse.FileType(),
help='path to input file containing reactions')
parser.add_argument(
'outfile', type=str,
help='path to output PDF file')
logging.getLogger().setLevel(logging.WARNING)
args = parser.parse_args()
pp = Pathway.from_sbtab(args.infile)
output_pdf = PdfPages(args.outfile)
mdf_res = pp.calc_mdf()
output_pdf.savefig(mdf_res.conc_plot)
output_pdf.savefig(mdf_res.mdf_plot)
output_pdf.close()
rxn_df = pd.DataFrame(mdf_res.report_reactions)
cpd_df = pd.DataFrame(mdf_res.report_compounds)
| mit |
costypetrisor/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 254 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/lib/mpl_examples/user_interfaces/embedding_in_wx5.py | 12 | 1586 | # Used to guarantee to use at least Wx2.8
import wxversion
wxversion.ensureMinimal('2.8')
import wx
import wx.aui
import matplotlib as mpl
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as Canvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2Wx as Toolbar
class Plot(wx.Panel):
def __init__(self, parent, id = -1, dpi = None, **kwargs):
wx.Panel.__init__(self, parent, id=id, **kwargs)
self.figure = mpl.figure.Figure(dpi=dpi, figsize=(2,2))
self.canvas = Canvas(self, -1, self.figure)
self.toolbar = Toolbar(self.canvas)
self.toolbar.Realize()
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.canvas,1,wx.EXPAND)
sizer.Add(self.toolbar, 0 , wx.LEFT | wx.EXPAND)
self.SetSizer(sizer)
class PlotNotebook(wx.Panel):
def __init__(self, parent, id = -1):
wx.Panel.__init__(self, parent, id=id)
self.nb = wx.aui.AuiNotebook(self)
sizer = wx.BoxSizer()
sizer.Add(self.nb, 1, wx.EXPAND)
self.SetSizer(sizer)
def add(self,name="plot"):
page = Plot(self.nb)
self.nb.AddPage(page,name)
return page.figure
def demo():
app = wx.PySimpleApp()
frame = wx.Frame(None,-1,'Plotter')
plotter = PlotNotebook(frame)
axes1 = plotter.add('figure 1').gca()
axes1.plot([1,2,3],[2,1,4])
axes2 = plotter.add('figure 2').gca()
axes2.plot([1,2,3,4,5],[2,1,4,2,3])
#axes1.figure.canvas.draw()
#axes2.figure.canvas.draw()
frame.Show()
app.MainLoop()
if __name__ == "__main__": demo()
| mit |
mikebenfield/scikit-learn | sklearn/linear_model/tests/test_huber.py | 54 | 7619 | # Authors: Manoj Kumar mks542@nyu.edu
# License: BSD 3 clause
import numpy as np
from scipy import optimize, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_false
from sklearn.datasets import make_regression
from sklearn.linear_model import (
HuberRegressor, LinearRegression, SGDRegressor, Ridge)
from sklearn.linear_model.huber import _huber_loss_and_gradient
def make_regression_with_outliers(n_samples=50, n_features=20):
rng = np.random.RandomState(0)
# Generate data with outliers by replacing 10% of the samples with noise.
X, y = make_regression(
n_samples=n_samples, n_features=n_features,
random_state=0, noise=0.05)
# Replace 10% of the sample with noise.
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
return X, y
def test_huber_equals_lr_for_high_epsilon():
# Test that HuberRegressor matches LinearRegression for large epsilon
X, y = make_regression_with_outliers()
lr = LinearRegression(fit_intercept=True)
lr.fit(X, y)
huber = HuberRegressor(fit_intercept=True, epsilon=1e3, alpha=0.0)
huber.fit(X, y)
assert_almost_equal(huber.coef_, lr.coef_, 3)
assert_almost_equal(huber.intercept_, lr.intercept_, 2)
def test_huber_gradient():
# Test that the gradient calculated by _huber_loss_and_gradient is correct
rng = np.random.RandomState(1)
X, y = make_regression_with_outliers()
sample_weight = rng.randint(1, 3, (y.shape[0]))
loss_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[0]
grad_func = lambda x, *args: _huber_loss_and_gradient(x, *args)[1]
# Check using optimize.check_grad that the gradients are equal.
for _ in range(5):
# Check for both fit_intercept and otherwise.
for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
w = rng.randn(n_features)
w[-1] = np.abs(w[-1])
grad_same = optimize.check_grad(
loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight)
assert_almost_equal(grad_same, 1e-6, 4)
def test_huber_sample_weights():
# Test sample_weights implementation in HuberRegressor
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True)
huber.fit(X, y)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
# Rescale coefs before comparing with assert_array_almost_equal to make sure
# that the number of decimal places used is somewhat insensitive to the
# amplitude of the coefficients and therefore to the scale of the data
# and the regularization parameter
scale = max(np.mean(np.abs(huber.coef_)),
np.mean(np.abs(huber.intercept_)))
huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale,
huber_intercept / scale)
X, y = make_regression_with_outliers(n_samples=5, n_features=20)
X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
huber.fit(X_new, y_new)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
sample_weight = np.ones(X.shape[0])
sample_weight[1] = 3
sample_weight[3] = 2
huber.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale,
huber_intercept / scale)
# Test sparse implementation with sample weights.
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True)
huber_sparse.fit(X_csr, y, sample_weight=sample_weight)
assert_array_almost_equal(huber_sparse.coef_ / scale,
huber_coef / scale)
def test_huber_sparse():
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.1)
huber.fit(X, y)
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(fit_intercept=True, alpha=0.1)
huber_sparse.fit(X_csr, y)
assert_array_almost_equal(huber_sparse.coef_, huber.coef_)
assert_array_equal(huber.outliers_, huber_sparse.outliers_)
def test_huber_scaling_invariant():
"""Test that outliers filtering is scaling independent."""
rng = np.random.RandomState(0)
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100)
huber.fit(X, y)
n_outliers_mask_1 = huber.outliers_
assert_false(np.all(n_outliers_mask_1))
huber.fit(X, 2. * y)
n_outliers_mask_2 = huber.outliers_
assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
huber.fit(2. * X, 2. * y)
n_outliers_mask_3 = huber.outliers_
assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
def test_huber_and_sgd_same_results():
"""Test they should converge to same coefficients for same parameters"""
X, y = make_regression_with_outliers(n_samples=10, n_features=2)
# Fit once to find out the scale parameter. Scale down X and y by scale
# so that the scale parameter is optimized to 1.0
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100,
epsilon=1.35)
huber.fit(X, y)
X_scale = X / huber.scale_
y_scale = y / huber.scale_
huber.fit(X_scale, y_scale)
assert_almost_equal(huber.scale_, 1.0, 3)
sgdreg = SGDRegressor(
alpha=0.0, loss="huber", shuffle=True, random_state=0, n_iter=10000,
fit_intercept=False, epsilon=1.35)
sgdreg.fit(X_scale, y_scale)
assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1)
def test_huber_warm_start():
X, y = make_regression_with_outliers()
huber_warm = HuberRegressor(
fit_intercept=True, alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
huber_warm.fit(X, y)
huber_warm_coef = huber_warm.coef_.copy()
huber_warm.fit(X, y)
# SciPy performs the tol check after doing the coef updates, so
# these would be almost same but not equal.
assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
# No n_iter_ in old SciPy (<=0.9)
if huber_warm.n_iter_ is not None:
assert_equal(0, huber_warm.n_iter_)
def test_huber_better_r2_score():
# Test that Huber gives a better R^2 score than Ridge on the non-outliers
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=True, alpha=0.01, max_iter=100)
huber.fit(X, y)
linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
huber_score = huber.score(X[mask], y[mask])
huber_outlier_score = huber.score(X[~mask], y[~mask])
# The Ridge regressor should be influenced by the outliers and hence
# give a worse score on the non-outliers as compared to the huber regressor.
ridge = Ridge(fit_intercept=True, alpha=0.01)
ridge.fit(X, y)
ridge_score = ridge.score(X[mask], y[mask])
ridge_outlier_score = ridge.score(X[~mask], y[~mask])
assert_greater(huber_score, ridge_score)
# The huber model should also fit poorly on the outliers.
assert_greater(ridge_outlier_score, huber_outlier_score)
| bsd-3-clause |
anntzer/scikit-learn | examples/calibration/plot_calibration_curve.py | 24 | 5902 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives a
measure of confidence in the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: the calibration curve has a sigmoid shape, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.model_selection import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1.)
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(max_iter=10000), "SVC", 2)
plt.show()
| bsd-3-clause |
Winand/pandas | pandas/core/dtypes/api.py | 16 | 2399 | # flake8: noqa
import sys
from .common import (pandas_dtype,
is_dtype_equal,
is_extension_type,
# categorical
is_categorical,
is_categorical_dtype,
# interval
is_interval,
is_interval_dtype,
# datetimelike
is_datetimetz,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_datetime64_any_dtype,
is_datetime64_ns_dtype,
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_period,
is_period_dtype,
# string-like
is_string_dtype,
is_object_dtype,
# sparse
is_sparse,
# numeric types
is_scalar,
is_sparse,
is_bool,
is_integer,
is_float,
is_complex,
is_number,
is_integer_dtype,
is_int64_dtype,
is_numeric_dtype,
is_float_dtype,
is_bool_dtype,
is_complex_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
# like
is_re,
is_re_compilable,
is_dict_like,
is_iterator,
is_file_like,
is_list_like,
is_hashable,
is_named_tuple)
# deprecated
m = sys.modules['pandas.core.dtypes.api']
for t in ['is_any_int_dtype', 'is_floating_dtype', 'is_sequence']:
def outer(t=t):
def wrapper(arr_or_dtype):
import warnings
import pandas
warnings.warn("{t} is deprecated and will be "
"removed in a future version".format(t=t),
FutureWarning, stacklevel=3)
return getattr(pandas.core.dtypes.common, t)(arr_or_dtype)
return wrapper
setattr(m, t, outer(t))
del sys, m, t, outer
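# Illustrative sketch of using one of the deprecated shims created above
# (hypothetical usage; the wrapper emits a FutureWarning and forwards to
# pandas.core.dtypes.common):
#
#     >>> import warnings
#     >>> with warnings.catch_warnings(record=True):
#     ...     warnings.simplefilter("always")
#     ...     is_sequence([1, 2, 3])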
| bsd-3-clause |
wangyum/mxnet | example/kaggle-ndsb1/submission_dsb.py | 52 | 5048 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import pandas as pd
import os
import time as time
## Receives an array with probabilities for each class (columns) X images in test set (as listed in test.lst) and formats in Kaggle submission format, saves and compresses in submission_path
def gen_sub(predictions,test_lst_path="test.lst",submission_path="submission.csv"):
## append time to avoid overwriting previous submissions
## submission_path=time.strftime("%Y%m%d%H%M%S_")+submission_path
### Make submission
## check sampleSubmission.csv from kaggle website to view submission format
header = "acantharia_protist_big_center,acantharia_protist_halo,acantharia_protist,amphipods,appendicularian_fritillaridae,appendicularian_s_shape,appendicularian_slight_curve,appendicularian_straight,artifacts_edge,artifacts,chaetognath_non_sagitta,chaetognath_other,chaetognath_sagitta,chordate_type1,copepod_calanoid_eggs,copepod_calanoid_eucalanus,copepod_calanoid_flatheads,copepod_calanoid_frillyAntennae,copepod_calanoid_large_side_antennatucked,copepod_calanoid_large,copepod_calanoid_octomoms,copepod_calanoid_small_longantennae,copepod_calanoid,copepod_cyclopoid_copilia,copepod_cyclopoid_oithona_eggs,copepod_cyclopoid_oithona,copepod_other,crustacean_other,ctenophore_cestid,ctenophore_cydippid_no_tentacles,ctenophore_cydippid_tentacles,ctenophore_lobate,decapods,detritus_blob,detritus_filamentous,detritus_other,diatom_chain_string,diatom_chain_tube,echinoderm_larva_pluteus_brittlestar,echinoderm_larva_pluteus_early,echinoderm_larva_pluteus_typeC,echinoderm_larva_pluteus_urchin,echinoderm_larva_seastar_bipinnaria,echinoderm_larva_seastar_brachiolaria,echinoderm_seacucumber_auricularia_larva,echinopluteus,ephyra,euphausiids_young,euphausiids,fecal_pellet,fish_larvae_deep_body,fish_larvae_leptocephali,fish_larvae_medium_body,fish_larvae_myctophids,fish_larvae_thin_body,fish_larvae_very_thin_body,heteropod,hydromedusae_aglaura,hydromedusae_bell_and_tentacles,hydromedusae_h15,hydromedusae_haliscera_small_sideview,hydromedusae_haliscera,hydromedusae_liriope,hydromedusae_narco_dark,hydromedusae_narco_young,hydromedusae_narcomedusae,hydromedusae_other,hydromedusae_partial_dark,hydromedusae_shapeA_sideview_small,hydromedusae_shapeA,hydromedusae_shapeB,hydromedusae_sideview_big,hydromedusae_solmaris,hydromedusae_solmundella,hydromedusae_typeD_bell_and_tentacles,hydromedusae_typeD,hydromedusae_typeE,hydromedusae_typeF,invertebrate_larvae_other_A,invertebrate_larvae_other_B,jellies_tentacles,polychaete,protist_dark_center,protist_fuzzy_olive,protist_noctiluca,protist_other,protist_star,pteropod_butterfly,pteropod_theco_dev_seq,pteropod_triangle,radiolarian_chain,radiolarian_colony,shrimp_caridean,shrimp_sergestidae,shrimp_zoea,shrimp-like_other,siphonophore_calycophoran_abylidae,siphonophore_calycophoran_rocketship_adult,siphonophore_calycophoran_rocketship_young,siphonophore_calycophoran_sphaeronectes_stem,siphonophore_calycophoran_sphaeronectes_young,siphonophore_calycophoran_sphaeronectes,siphonophore_other_parts,siphonophore_partial,siphonophore_physonect_young,siphonophore_physonect,stomatopod,tornaria_acorn_worm_larvae,trichodesmium_bowtie,trichodesmium_multiple,trichodesmium_puff,trichodesmium_tuft,trochophore_larvae,tunicate_doliolid_nurse,tunicate_doliolid,tunicate_partial,tunicate_salp_chains,tunicate_salp,unknown_blobs_and_smudges,unknown_sticks,unknown_unclassified".split(',')
# read first line to know the number of columns and column to use
img_lst = pd.read_csv(test_lst_path,sep="/",header=None, nrows=1)
columns = img_lst.columns.tolist() # get the columns
cols_to_use = columns[len(columns)-1] # drop the last one
cols_to_use= map(int, str(cols_to_use)) ## convert scalar to list
img_lst= pd.read_csv(test_lst_path,sep="/",header=None, usecols=cols_to_use) ## reads lst, use / as sep to get last column with filenames
img_lst=img_lst.values.T.tolist()
df = pd.DataFrame(predictions,columns = header, index=img_lst)
df.index.name = 'image'
print("Saving csv to %s" % submission_path)
df.to_csv(submission_path)
print("Compress with gzip")
os.system("gzip -f %s" % submission_path)
print(" stored in %s.gz" % submission_path)
| apache-2.0 |
markovg/nest-simulator | pynest/examples/intrinsic_currents_spiking.py | 13 | 5954 | # -*- coding: utf-8 -*-
#
# intrinsic_currents_spiking.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
#
'''
Intrinsic currents spiking
--------------------------
This example illustrates a neuron receiving spiking input through
several different receptors (AMPA, NMDA, GABA_A, GABA_B), provoking
spike output. The model, `ht_neuron`, also has intrinsic currents
(I_NaP, I_KNa, I_T, and I_h). It is a slightly simplified implementation of
neuron model proposed in Hill and Tononi (2005) **Modeling Sleep and
Wakefulness in the Thalamocortical System** *J Neurophysiol* 93:1671
http://dx.doi.org/10.1152/jn.00915.2004.
The neuron is bombarded with spike trains from four
Poisson generators, which are connected to the AMPA,
NMDA, GABA_A, and GABA_B receptors, respectively.
See also: intrinsic_currents_subthreshold.py
'''
'''
We import all necessary modules for simulation, analysis and
plotting.
'''
import nest
import numpy as np
import matplotlib.pyplot as plt
'''
Additionally, we set the verbosity using `set_verbosity` to
suppress info messages. We also reset the kernel to be sure to start
with a clean NEST.
'''
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
'''
We define the simulation parameters:
- The rate of the input spike trains
- The weights of the different receptors (names must match receptor types)
- The time to simulate
Note that all parameter values should be doubles, since NEST expects doubles.
'''
rate_in = 100.
w_recep = {'AMPA': 30., 'NMDA': 30., 'GABA_A': 5., 'GABA_B': 10.}
t_sim = 250.
num_recep = len(w_recep)
'''
We create
- one neuron instance
- one Poisson generator instance for each synapse type
- one multimeter to record from the neuron:
- membrane potential
- threshold potential
- synaptic conductances
- intrinsic currents
See `intrinsic_currents_subthreshold.py` for more details on
`multimeter` configuration.
'''
nrn = nest.Create('ht_neuron')
p_gens = nest.Create('poisson_generator', 4,
params={'rate': rate_in})
mm = nest.Create('multimeter',
params={'interval': 0.1,
'record_from': ['V_m', 'theta',
'g_AMPA', 'g_NMDA',
'g_GABA_A', 'g_GABA_B',
'I_NaP', 'I_KNa', 'I_T', 'I_h']})
'''
We now connect each Poisson generator with the neuron through a
different receptor type.
First, we need to obtain the numerical codes for the receptor types
from the model. The `receptor_types` entry of the default dictionary
for the `ht_neuron` model is a dictionary mapping receptor names to
codes.
In the loop, we use Python's tuple unpacking mechanism to unpack
dictionary entries from our w_recep dictionary.
Note that we need to pack the ``pg`` variable into a list before
passing it to `Connect`, because iterating over the `p_gens` list
makes `pg` a "naked" GID.
'''
receptors = nest.GetDefaults('ht_neuron')['receptor_types']
for pg, (rec_name, rec_wgt) in zip(p_gens, w_recep.items()):
nest.Connect([pg], nrn, syn_spec={'receptor_type': receptors[rec_name],
'weight': rec_wgt})
'''
We then connnect the multimeter. Note that the multimeter is
connected to the neuron, not the other way around.
'''
nest.Connect(mm, nrn)
'''
We are now ready to simulate.
'''
nest.Simulate(t_sim)
'''
We now fetch the data recorded by the multimeter. The data are
returned as a dictionary with entry ``'times'`` containing timestamps
for all recorded data, plus one entry per recorded quantity.
All data is contained in the ``'events'`` entry of the status dictionary
returned by the multimeter. Because all NEST functions return arrays,
we need to pick out element ``0`` from the result of `GetStatus`.
'''
data = nest.GetStatus(mm)[0]['events']
t = data['times']
'''
The following function turns a name such as I_NaP into proper TeX code
$I_{\mathrm{NaP}}$ for a pretty label.
'''
def texify_name(name):
return r'${}_{{\mathrm{{{}}}}}$'.format(*name.split('_'))
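'''
As a quick illustration (added here for clarity, not part of the original
example), the helper turns recordable names into TeX strings like these:
'''
# texify_name('I_NaP')  -> '$I_{\mathrm{NaP}}$'
# texify_name('g_AMPA') -> '$g_{\mathrm{AMPA}}$'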
'''
The next step is to plot the results. We create a new figure, and
add one subplot each for membrane and threshold potential,
synaptic conductances, and intrinsic currents.
'''
fig = plt.figure()
Vax = fig.add_subplot(311)
Vax.plot(t, data['V_m'], 'b', lw=2, label=r'$V_m$')
Vax.plot(t, data['theta'], 'g', lw=2, label=r'$\Theta$')
Vax.set_ylabel('Potential [mV]')
try:
Vax.legend(fontsize='small')
except TypeError:
Vax.legend() # work-around for older Matplotlib versions
Vax.set_title('ht_neuron driven by Poisson processes')
Gax = fig.add_subplot(312)
for gname in ('g_AMPA', 'g_NMDA', 'g_GABA_A', 'g_GABA_B'):
Gax.plot(t, data[gname], lw=2, label=texify_name(gname))
try:
Gax.legend(fontsize='small')
except TypeError:
Gax.legend() # work-around for older Matplotlib versions
Gax.set_ylabel('Conductance [nS]')
Iax = fig.add_subplot(313)
for iname, color in (('I_h', 'maroon'), ('I_T', 'orange'),
('I_NaP', 'crimson'), ('I_KNa', 'aqua')):
Iax.plot(t, data[iname], color=color, lw=2, label=texify_name(iname))
try:
Iax.legend(fontsize='small')
except TypeError:
Iax.legend() # work-around for older Matplotlib versions
Iax.set_ylabel('Current [pA]')
Iax.set_xlabel('Time [ms]')
| gpl-2.0 |
gomezstevena/x-wind | src/trajectory.py | 1 | 1976 | # import matplotlib
# matplotlib.use('Agg')
from numpy import *
from scipy.interpolate import PiecewisePolynomial
class Trajectory:
def __init__(self, t, u, ddt=None):
assert t.ndim == 1 and t.size == u.shape[0]
self.shape = u.shape[1:]
data = u.reshape([t.size, 1, -1])
if ddt is not None:
dudt = zeros(u.shape)
for i in range(t.size):
dudt[i] = ddt(u[i])
data = hstack([data, dudt.reshape([t.size, 1, -1])])
self.history = PiecewisePolynomial(t, data)
def __call__(self, t):
shape = array(t).shape
u = self.history(t)
return u.reshape(shape + self.shape)
@property
def tlim(self):
t = self.history.xi
return array([min(t), max(t)])
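# A minimal usage sketch (illustrative only; the variable names below are
# made up and assume a one-dimensional state per time step):
#
#   times = linspace(0., 1., 11)
#   states = sin(times).reshape(-1, 1)   # shape (n_times, n_dof)
#   traj = Trajectory(times, states)
#   traj(0.35)                           # interpolated state at t = 0.35
#   traj.tlim                            # -> array([0., 1.])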
if __name__ == '__main__':
Mach = 0.3
Re = 10000
HiRes = 1.
from navierstokes import *
soln = []
for i in range(1000):
fname = 'data/navierstokesStep{0:06d}.npz'.format(i)
if os.path.exists(fname):
z = load(fname)
soln.append(z['soln'])
else: break
geom, v, t, b = z['geom'], z['v'], z['t'], z['b']
soln = array(soln).squeeze()
    print('loading complete')
solver = NavierStokes(v, t, b, Mach, Re, HiRes)
traj0 = Trajectory(arange(soln.shape[0]), soln)
# traj1 = Trajectory(arange(soln.shape[0]), soln, ddt=solver.ddt)
t = linspace(traj0.tlim[0], traj0.tlim[1] * 0.1, 200)
it = solver.mesh.a.argsort()[:1]
plot(t, (traj0(t)[:,it,:] / solver.Wref).reshape([t.size, -1]))
# plot(t, (traj1(t)[:,it,:] / solver.Wref).reshape([t.size, -1]))
'''
for i in range(soln.shape[0]):
clf()
solver.mesh.plotTriScalar(soln[i,:,2])
plot(solver.mesh.xt()[it,0], solver.mesh.xt()[it,1], 'ok')
# solver.mesh.plotMesh(alpha=0.2)
axis([-2,4,-2.5,2.5])
# axis([-.2,.3,-.15,.15])
savefig('fig/trajectory_{0:06d}.png'.format(i))
'''
| gpl-3.0 |
louispotok/pandas | pandas/core/indexes/numeric.py | 4 | 14410 | import numpy as np
from pandas._libs import (index as libindex,
join as libjoin)
from pandas.core.dtypes.common import (
is_dtype_equal,
pandas_dtype,
needs_i8_conversion,
is_integer_dtype,
is_bool,
is_bool_dtype,
is_scalar)
from pandas import compat
from pandas.core import algorithms
import pandas.core.common as com
from pandas.core.indexes.base import (
Index, InvalidIndexError, _index_shared_docs)
from pandas.util._decorators import Appender, cache_readonly
import pandas.core.dtypes.concat as _concat
import pandas.core.indexes.base as ibase
_num_index_shared_docs = dict()
class NumericIndex(Index):
"""
Provide numeric type operations
This is an abstract class
"""
_is_numeric_dtype = True
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False):
if fastpath:
return cls._simple_new(data, name=name)
# is_scalar, generators handled in coerce_to_ndarray
data = cls._coerce_to_ndarray(data)
if issubclass(data.dtype.type, compat.string_types):
cls._string_data_error(data)
if copy or not is_dtype_equal(data.dtype, cls._default_dtype):
subarr = np.array(data, dtype=cls._default_dtype, copy=copy)
cls._assert_safe_casting(data, subarr)
else:
subarr = data
if name is None and hasattr(data, 'name'):
name = data.name
return cls._simple_new(subarr, name=name)
@Appender(_index_shared_docs['_maybe_cast_slice_bound'])
def _maybe_cast_slice_bound(self, label, side, kind):
assert kind in ['ix', 'loc', 'getitem', None]
# we will try to coerce to integers
return self._maybe_cast_indexer(label)
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is not None and not self._can_hold_na:
# Ensure we are not returning an Int64Index with float data:
return self._shallow_copy_with_infer(values=values, **kwargs)
return (super(NumericIndex, self)._shallow_copy(values=values,
**kwargs))
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
if is_bool(value) or is_bool_dtype(value):
# force conversion to object
# so we don't lose the bools
raise TypeError
return value
def _convert_tolerance(self, tolerance, target):
tolerance = np.asarray(tolerance)
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError('list-like tolerance size must match '
'target index size')
if not np.issubdtype(tolerance.dtype, np.number):
if tolerance.ndim > 0:
raise ValueError(('tolerance argument for %s must contain '
'numeric elements if it is list type') %
(type(self).__name__,))
else:
raise ValueError(('tolerance argument for %s must be numeric '
'if it is a scalar: %r') %
(type(self).__name__, tolerance))
return tolerance
@classmethod
def _assert_safe_casting(cls, data, subarr):
"""
Subclasses need to override this only if the process of casting data
from some accepted dtype to the internal dtype(s) bears the risk of
truncation (e.g. float to int).
"""
pass
def _concat_same_dtype(self, indexes, name):
return _concat._concat_index_same_dtype(indexes).rename(name)
@property
def is_all_dates(self):
"""
Checks that all the labels are datetime objects
"""
return False
_num_index_shared_docs['class_descr'] = """
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects. %(klass)s is a special case
of `Index` with purely %(ltype)s labels. %(extra)s
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: %(dtype)s)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
Attributes
----------
None
Methods
-------
None
Notes
-----
An Index instance can **only** contain hashable objects.
See also
--------
Index : The base pandas Index type
"""
_int64_descr_args = dict(
klass='Int64Index',
ltype='integer',
dtype='int64',
extra=''
)
class Int64Index(NumericIndex):
__doc__ = _num_index_shared_docs['class_descr'] % _int64_descr_args
_typ = 'int64index'
_left_indexer_unique = libjoin.left_join_indexer_unique_int64
_left_indexer = libjoin.left_join_indexer_int64
_inner_indexer = libjoin.inner_join_indexer_int64
_outer_indexer = libjoin.outer_join_indexer_int64
_can_hold_na = False
_engine_type = libindex.Int64Engine
_default_dtype = np.int64
@property
def inferred_type(self):
"""Always 'integer' for ``Int64Index``"""
return 'integer'
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('i8')
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# don't coerce ilocs to integers
if kind != 'iloc':
key = self._maybe_cast_indexer(key)
return (super(Int64Index, self)
._convert_scalar_indexer(key, kind=kind))
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Int64Index(joined, name=name)
@classmethod
def _assert_safe_casting(cls, data, subarr):
"""
Ensure incoming data can be represented as ints.
"""
if not issubclass(data.dtype.type, np.signedinteger):
if not np.array_equal(data, subarr):
raise TypeError('Unsafe NumPy casting, you must '
'explicitly cast')
Int64Index._add_numeric_methods()
Int64Index._add_logical_methods()
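# Illustrative usage (a sketch, not part of this module): an Int64Index is
# what ``pd.Index`` typically infers for plain integer data, e.g.
#
#   >>> import pandas as pd
#   >>> pd.Index([1, 2, 3])
#   Int64Index([1, 2, 3], dtype='int64')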
_uint64_descr_args = dict(
klass='UInt64Index',
ltype='unsigned integer',
dtype='uint64',
extra=''
)
class UInt64Index(NumericIndex):
__doc__ = _num_index_shared_docs['class_descr'] % _uint64_descr_args
_typ = 'uint64index'
_left_indexer_unique = libjoin.left_join_indexer_unique_uint64
_left_indexer = libjoin.left_join_indexer_uint64
_inner_indexer = libjoin.inner_join_indexer_uint64
_outer_indexer = libjoin.outer_join_indexer_uint64
_can_hold_na = False
_engine_type = libindex.UInt64Engine
_default_dtype = np.uint64
@property
def inferred_type(self):
"""Always 'integer' for ``UInt64Index``"""
return 'integer'
@property
def asi8(self):
# do not cache or you'll create a memory leak
return self.values.view('u8')
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# don't coerce ilocs to integers
if kind != 'iloc':
key = self._maybe_cast_indexer(key)
return (super(UInt64Index, self)
._convert_scalar_indexer(key, kind=kind))
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
# Cast the indexer to uint64 if possible so
# that the values returned from indexing are
# also uint64.
keyarr = com._asarray_tuplesafe(keyarr)
if is_integer_dtype(keyarr):
return com._asarray_tuplesafe(keyarr, dtype=np.uint64)
return keyarr
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
# Cast the indexer to uint64 if possible so
# that the values returned from indexing are
# also uint64.
if keyarr.is_integer():
return keyarr.astype(np.uint64)
return keyarr
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return UInt64Index(joined, name=name)
@classmethod
def _assert_safe_casting(cls, data, subarr):
"""
Ensure incoming data can be represented as uints.
"""
if not issubclass(data.dtype.type, np.unsignedinteger):
if not np.array_equal(data, subarr):
raise TypeError('Unsafe NumPy casting, you must '
'explicitly cast')
UInt64Index._add_numeric_methods()
UInt64Index._add_logical_methods()
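# Illustrative usage (a sketch, not part of this module): integer values
# beyond the int64 range are expected to fall back to an unsigned index, e.g.
#
#   >>> import pandas as pd
#   >>> pd.Index([1, 2**63])
#   UInt64Index([1, 9223372036854775808], dtype='uint64')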
_float64_descr_args = dict(
klass='Float64Index',
dtype='float64',
ltype='float',
extra=''
)
class Float64Index(NumericIndex):
__doc__ = _num_index_shared_docs['class_descr'] % _float64_descr_args
_typ = 'float64index'
_engine_type = libindex.Float64Engine
_left_indexer_unique = libjoin.left_join_indexer_unique_float64
_left_indexer = libjoin.left_join_indexer_float64
_inner_indexer = libjoin.inner_join_indexer_float64
_outer_indexer = libjoin.outer_join_indexer_float64
_default_dtype = np.float64
@property
def inferred_type(self):
"""Always 'floating' for ``Float64Index``"""
return 'floating'
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if needs_i8_conversion(dtype):
msg = ('Cannot convert Float64Index to dtype {dtype}; integer '
'values are required for conversion').format(dtype=dtype)
raise TypeError(msg)
elif is_integer_dtype(dtype) and self.hasnans:
# GH 13149
raise ValueError('Cannot convert NA to integer')
return super(Float64Index, self).astype(dtype, copy=copy)
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
return key
@Appender(_index_shared_docs['_convert_slice_indexer'])
def _convert_slice_indexer(self, key, kind=None):
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
if kind == 'iloc':
return super(Float64Index, self)._convert_slice_indexer(key,
kind=kind)
# translate to locations
return self.slice_indexer(key.start, key.stop, key.step, kind=kind)
def _format_native_types(self, na_rep='', float_format=None, decimal='.',
quoting=None, **kwargs):
from pandas.io.formats.format import FloatArrayFormatter
formatter = FloatArrayFormatter(self.values, na_rep=na_rep,
float_format=float_format,
decimal=decimal, quoting=quoting,
fixed_width=False)
return formatter.get_result_as_array()
def get_value(self, series, key):
""" we always want to get an index value, never a value """
if not is_scalar(key):
raise InvalidIndexError
k = com._values_from_object(key)
loc = self.get_loc(k)
new_values = com._values_from_object(series)[loc]
return new_values
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self is other:
return True
if not isinstance(other, Index):
return False
# need to compare nans locations and make sure that they are the same
# since nans don't compare equal this is a bit tricky
try:
if not isinstance(other, Float64Index):
other = self._constructor(other)
if (not is_dtype_equal(self.dtype, other.dtype) or
self.shape != other.shape):
return False
left, right = self._ndarray_values, other._ndarray_values
return ((left == right) | (self._isnan & other._isnan)).all()
except (TypeError, ValueError):
return False
def __contains__(self, other):
if super(Float64Index, self).__contains__(other):
return True
try:
# if other is a sequence this throws a ValueError
return np.isnan(other) and self.hasnans
except ValueError:
try:
return len(other) <= 1 and ibase._try_get_item(other) in self
except TypeError:
pass
except TypeError:
pass
return False
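    # Behaviour sketch (illustrative, not part of this module): NaN membership
    # is resolved via ``hasnans`` rather than element equality, e.g.
    #
    #   >>> import numpy as np, pandas as pd
    #   >>> np.nan in pd.Float64Index([1.0, np.nan])
    #   True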
@Appender(_index_shared_docs['get_loc'])
def get_loc(self, key, method=None, tolerance=None):
try:
if np.all(np.isnan(key)):
nan_idxs = self._nan_idxs
try:
return nan_idxs.item()
except (ValueError, IndexError):
# should only need to catch ValueError here but on numpy
# 1.7 .item() can raise IndexError when NaNs are present
if not len(nan_idxs):
raise KeyError(key)
return nan_idxs
except (TypeError, NotImplementedError):
pass
return super(Float64Index, self).get_loc(key, method=method,
tolerance=tolerance)
@cache_readonly
def is_unique(self):
return super(Float64Index, self).is_unique and self._nan_idxs.size < 2
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
if level is not None:
self._validate_index_level(level)
return algorithms.isin(np.array(self), values)
Float64Index._add_numeric_methods()
Float64Index._add_logical_methods_disabled()
| bsd-3-clause |
mdhaber/scipy | scipy/signal/ltisys.py | 12 | 128865 | """
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
# Apr 2011: Jeffrey Armstrong <jeff@approximatrix.com>
# Added dlsim, dstep, dimpulse, cont2discrete
# Aug 2013: Juan Luis Cano
# Rewrote abcd_normalize.
# Jan 2015: Irvin Probst irvin DOT probst AT ensta-bretagne DOT fr
# Added pole placement
# Mar 2015: Clancy Rowley
# Rewrote lsim
# May 2015: Felix Berkenkamp
# Split lti class into subclasses
# Merged discrete systems and added dlti
import warnings
# np.linalg.qr fails on some tests with LinAlgError: zgeqrf returns -7
# use scipy's qr until this is solved
from scipy.linalg import qr as s_qr
from scipy import integrate, interpolate, linalg
from scipy.interpolate import interp1d
from .filter_design import (tf2zpk, zpk2tf, normalize, freqs, freqz, freqs_zpk,
freqz_zpk)
from .lti_conversion import (tf2ss, abcd_normalize, ss2tf, zpk2ss, ss2zpk,
cont2discrete)
import numpy
import numpy as np
from numpy import (real, atleast_1d, atleast_2d, squeeze, asarray, zeros,
dot, transpose, ones, zeros_like, linspace, nan_to_num)
import copy
__all__ = ['lti', 'dlti', 'TransferFunction', 'ZerosPolesGain', 'StateSpace',
'lsim', 'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode',
'freqresp', 'place_poles', 'dlsim', 'dstep', 'dimpulse',
'dfreqresp', 'dbode']
class LinearTimeInvariant:
def __new__(cls, *system, **kwargs):
"""Create a new object, don't allow direct instances."""
if cls is LinearTimeInvariant:
raise NotImplementedError('The LinearTimeInvariant class is not '
'meant to be used directly, use `lti` '
'or `dlti` instead.')
return super(LinearTimeInvariant, cls).__new__(cls)
def __init__(self):
"""
Initialize the `lti` baseclass.
The heavy lifting is done by the subclasses.
"""
super().__init__()
self.inputs = None
self.outputs = None
self._dt = None
@property
def dt(self):
"""Return the sampling time of the system, `None` for `lti` systems."""
return self._dt
@property
def _dt_dict(self):
if self.dt is None:
return {}
else:
return {'dt': self.dt}
@property
def zeros(self):
"""Zeros of the system."""
return self.to_zpk().zeros
@property
def poles(self):
"""Poles of the system."""
return self.to_zpk().poles
def _as_ss(self):
"""Convert to `StateSpace` system, without copying.
Returns
-------
sys: StateSpace
The `StateSpace` system. If the class is already an instance of
`StateSpace` then this instance is returned.
"""
if isinstance(self, StateSpace):
return self
else:
return self.to_ss()
def _as_zpk(self):
"""Convert to `ZerosPolesGain` system, without copying.
Returns
-------
sys: ZerosPolesGain
The `ZerosPolesGain` system. If the class is already an instance of
`ZerosPolesGain` then this instance is returned.
"""
if isinstance(self, ZerosPolesGain):
return self
else:
return self.to_zpk()
def _as_tf(self):
"""Convert to `TransferFunction` system, without copying.
Returns
-------
        sys: TransferFunction
The `TransferFunction` system. If the class is already an instance of
`TransferFunction` then this instance is returned.
"""
if isinstance(self, TransferFunction):
return self
else:
return self.to_tf()
class lti(LinearTimeInvariant):
r"""
Continuous-time linear time invariant system base class.
Parameters
----------
*system : arguments
The `lti` class can be instantiated with either 2, 3 or 4 arguments.
The following gives the number of arguments and the corresponding
continuous-time subclass that is created:
* 2: `TransferFunction`: (numerator, denominator)
* 3: `ZerosPolesGain`: (zeros, poles, gain)
* 4: `StateSpace`: (A, B, C, D)
Each argument can be an array or a sequence.
See Also
--------
ZerosPolesGain, StateSpace, TransferFunction, dlti
Notes
-----
`lti` instances do not exist directly. Instead, `lti` creates an instance
of one of its subclasses: `StateSpace`, `TransferFunction` or
`ZerosPolesGain`.
If (numerator, denominator) is passed in for ``*system``, coefficients for
both the numerator and denominator should be specified in descending
exponent order (e.g., ``s^2 + 3s + 5`` would be represented as ``[1, 3,
5]``).
Changing the value of properties that are not directly part of the current
system representation (such as the `zeros` of a `StateSpace` system) is
very inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> signal.lti(1, 2, 3, 4)
StateSpaceContinuous(
array([[1]]),
array([[2]]),
array([[3]]),
array([[4]]),
dt: None
)
Construct the transfer function
:math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`:
>>> signal.lti([1, 2], [3, 4], 5)
ZerosPolesGainContinuous(
array([1, 2]),
array([3, 4]),
5,
dt: None
)
Construct the transfer function :math:`H(s) = \frac{3s + 4}{1s + 2}`:
>>> signal.lti([3, 4], [1, 2])
TransferFunctionContinuous(
array([3., 4.]),
array([1., 2.]),
dt: None
)
"""
def __new__(cls, *system):
"""Create an instance of the appropriate subclass."""
if cls is lti:
N = len(system)
if N == 2:
return TransferFunctionContinuous.__new__(
TransferFunctionContinuous, *system)
elif N == 3:
return ZerosPolesGainContinuous.__new__(
ZerosPolesGainContinuous, *system)
elif N == 4:
return StateSpaceContinuous.__new__(StateSpaceContinuous,
*system)
else:
raise ValueError("`system` needs to be an instance of `lti` "
"or have 2, 3 or 4 arguments.")
# __new__ was called from a subclass, let it call its own functions
return super(lti, cls).__new__(cls)
def __init__(self, *system):
"""
Initialize the `lti` baseclass.
The heavy lifting is done by the subclasses.
"""
super().__init__(*system)
def impulse(self, X0=None, T=None, N=None):
"""
Return the impulse response of a continuous-time system.
See `impulse` for details.
"""
return impulse(self, X0=X0, T=T, N=N)
def step(self, X0=None, T=None, N=None):
"""
Return the step response of a continuous-time system.
See `step` for details.
"""
return step(self, X0=X0, T=T, N=N)
def output(self, U, T, X0=None):
"""
Return the response of a continuous-time system to input `U`.
See `lsim` for details.
"""
return lsim(self, U, T, X0=X0)
def bode(self, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a continuous-time system.
Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
[dB] and phase [deg]. See `bode` for details.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sys = signal.TransferFunction([1], [1, 1])
>>> w, mag, phase = sys.bode()
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
return bode(self, w=w, n=n)
def freqresp(self, w=None, n=10000):
"""
Calculate the frequency response of a continuous-time system.
Returns a 2-tuple containing arrays of frequencies [rad/s] and
complex magnitude.
See `freqresp` for details.
"""
return freqresp(self, w=w, n=n)
def to_discrete(self, dt, method='zoh', alpha=None):
"""Return a discretized version of the current system.
Parameters: See `cont2discrete` for details.
Returns
-------
sys: instance of `dlti`
"""
raise NotImplementedError('to_discrete is not implemented for this '
'system class.')
class dlti(LinearTimeInvariant):
r"""
Discrete-time linear time invariant system base class.
Parameters
----------
*system: arguments
The `dlti` class can be instantiated with either 2, 3 or 4 arguments.
The following gives the number of arguments and the corresponding
discrete-time subclass that is created:
* 2: `TransferFunction`: (numerator, denominator)
* 3: `ZerosPolesGain`: (zeros, poles, gain)
* 4: `StateSpace`: (A, B, C, D)
Each argument can be an array or a sequence.
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to ``True``
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
ZerosPolesGain, StateSpace, TransferFunction, lti
Notes
-----
`dlti` instances do not exist directly. Instead, `dlti` creates an instance
of one of its subclasses: `StateSpace`, `TransferFunction` or
`ZerosPolesGain`.
Changing the value of properties that are not directly part of the current
system representation (such as the `zeros` of a `StateSpace` system) is
very inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
If (numerator, denominator) is passed in for ``*system``, coefficients for
both the numerator and denominator should be specified in descending
exponent order (e.g., ``z^2 + 3z + 5`` would be represented as ``[1, 3,
5]``).
.. versionadded:: 0.18.0
Examples
--------
>>> from scipy import signal
>>> signal.dlti(1, 2, 3, 4)
StateSpaceDiscrete(
array([[1]]),
array([[2]]),
array([[3]]),
array([[4]]),
dt: True
)
>>> signal.dlti(1, 2, 3, 4, dt=0.1)
StateSpaceDiscrete(
array([[1]]),
array([[2]]),
array([[3]]),
array([[4]]),
dt: 0.1
)
Construct the transfer function
:math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time
of 0.1 seconds:
>>> signal.dlti([1, 2], [3, 4], 5, dt=0.1)
ZerosPolesGainDiscrete(
array([1, 2]),
array([3, 4]),
5,
dt: 0.1
)
Construct the transfer function :math:`H(z) = \frac{3z + 4}{1z + 2}` with
a sampling time of 0.1 seconds:
>>> signal.dlti([3, 4], [1, 2], dt=0.1)
TransferFunctionDiscrete(
array([3., 4.]),
array([1., 2.]),
dt: 0.1
)
"""
def __new__(cls, *system, **kwargs):
"""Create an instance of the appropriate subclass."""
if cls is dlti:
N = len(system)
if N == 2:
return TransferFunctionDiscrete.__new__(
TransferFunctionDiscrete, *system, **kwargs)
elif N == 3:
return ZerosPolesGainDiscrete.__new__(ZerosPolesGainDiscrete,
*system, **kwargs)
elif N == 4:
return StateSpaceDiscrete.__new__(StateSpaceDiscrete, *system,
**kwargs)
else:
raise ValueError("`system` needs to be an instance of `dlti` "
"or have 2, 3 or 4 arguments.")
# __new__ was called from a subclass, let it call its own functions
return super(dlti, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""
Initialize the `lti` baseclass.
The heavy lifting is done by the subclasses.
"""
dt = kwargs.pop('dt', True)
super().__init__(*system, **kwargs)
self.dt = dt
@property
def dt(self):
"""Return the sampling time of the system."""
return self._dt
@dt.setter
def dt(self, dt):
self._dt = dt
def impulse(self, x0=None, t=None, n=None):
"""
Return the impulse response of the discrete-time `dlti` system.
See `dimpulse` for details.
"""
return dimpulse(self, x0=x0, t=t, n=n)
def step(self, x0=None, t=None, n=None):
"""
Return the step response of the discrete-time `dlti` system.
See `dstep` for details.
"""
return dstep(self, x0=x0, t=t, n=n)
def output(self, u, t, x0=None):
"""
Return the response of the discrete-time system to input `u`.
See `dlsim` for details.
"""
return dlsim(self, u, t, x0=x0)
def bode(self, w=None, n=100):
r"""
Calculate Bode magnitude and phase data of a discrete-time system.
Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
[dB] and phase [deg]. See `dbode` for details.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Construct the transfer function :math:`H(z) = \frac{1}{z^2 + 2z + 3}`
with sampling time 0.5s:
>>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.5)
Equivalent: signal.dbode(sys)
>>> w, mag, phase = sys.bode()
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
return dbode(self, w=w, n=n)
def freqresp(self, w=None, n=10000, whole=False):
"""
Calculate the frequency response of a discrete-time system.
Returns a 2-tuple containing arrays of frequencies [rad/s] and
complex magnitude.
See `dfreqresp` for details.
"""
return dfreqresp(self, w=w, n=n, whole=whole)
class TransferFunction(LinearTimeInvariant):
r"""Linear Time Invariant system class in transfer function form.
Represents the system as the continuous-time transfer function
:math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j` or the
discrete-time transfer function
    :math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
:math:`b` are elements of the numerator `num`, :math:`a` are elements of
the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
`TransferFunction` systems inherit additional
functionality from the `lti`, respectively the `dlti` classes, depending on
which system representation is used.
Parameters
----------
*system: arguments
The `TransferFunction` class can be instantiated with 1 or 2
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 2: array_like: (numerator, denominator)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `None`
(continuous-time). Must be specified as a keyword argument, for
example, ``dt=0.1``.
See Also
--------
ZerosPolesGain, StateSpace, lti, dlti
tf2ss, tf2zpk, tf2sos
Notes
-----
Changing the value of properties that are not part of the
`TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
If (numerator, denominator) is passed in for ``*system``, coefficients
for both the numerator and denominator should be specified in descending
exponent order (e.g. ``s^2 + 3s + 5`` or ``z^2 + 3z + 5`` would be
represented as ``[1, 3, 5]``)
Examples
--------
Construct the transfer function
:math:`H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}`:
>>> from scipy import signal
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
>>> signal.TransferFunction(num, den)
TransferFunctionContinuous(
array([1., 3., 3.]),
array([1., 2., 1.]),
dt: None
)
Construct the transfer function
:math:`H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}` with a sampling time of
0.1 seconds:
>>> signal.TransferFunction(num, den, dt=0.1)
TransferFunctionDiscrete(
array([1., 3., 3.]),
array([1., 2., 1.]),
dt: 0.1
)
"""
def __new__(cls, *system, **kwargs):
"""Handle object conversion if input is an instance of lti."""
if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
return system[0].to_tf()
# Choose whether to inherit from `lti` or from `dlti`
if cls is TransferFunction:
if kwargs.get('dt') is None:
return TransferFunctionContinuous.__new__(
TransferFunctionContinuous,
*system,
**kwargs)
else:
return TransferFunctionDiscrete.__new__(
TransferFunctionDiscrete,
*system,
**kwargs)
# No special conversion needed
return super(TransferFunction, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""Initialize the state space LTI system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], LinearTimeInvariant):
return
# Remove system arguments, not needed by parents anymore
super().__init__(**kwargs)
self._num = None
self._den = None
self.num, self.den = normalize(*system)
def __repr__(self):
"""Return representation of the system's transfer function"""
return '{0}(\n{1},\n{2},\ndt: {3}\n)'.format(
self.__class__.__name__,
repr(self.num),
repr(self.den),
repr(self.dt),
)
@property
def num(self):
"""Numerator of the `TransferFunction` system."""
return self._num
@num.setter
def num(self, num):
self._num = atleast_1d(num)
# Update dimensions
if len(self.num.shape) > 1:
self.outputs, self.inputs = self.num.shape
else:
self.outputs = 1
self.inputs = 1
@property
def den(self):
"""Denominator of the `TransferFunction` system."""
return self._den
@den.setter
def den(self, den):
self._den = atleast_1d(den)
def _copy(self, system):
"""
Copy the parameters of another `TransferFunction` object
Parameters
----------
system : `TransferFunction`
The `StateSpace` system that is to be copied
"""
self.num = system.num
self.den = system.den
def to_tf(self):
"""
Return a copy of the current `TransferFunction` system.
Returns
-------
sys : instance of `TransferFunction`
The current system (copy)
"""
return copy.deepcopy(self)
def to_zpk(self):
"""
Convert system representation to `ZerosPolesGain`.
Returns
-------
sys : instance of `ZerosPolesGain`
Zeros, poles, gain representation of the current system
"""
return ZerosPolesGain(*tf2zpk(self.num, self.den),
**self._dt_dict)
def to_ss(self):
"""
Convert system representation to `StateSpace`.
Returns
-------
sys : instance of `StateSpace`
State space model of the current system
"""
return StateSpace(*tf2ss(self.num, self.den),
**self._dt_dict)
@staticmethod
def _z_to_zinv(num, den):
"""Change a transfer function from the variable `z` to `z**-1`.
Parameters
----------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of descending degree of 'z'.
That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``.
Returns
-------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of ascending degree of 'z**-1'.
That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``.
"""
diff = len(num) - len(den)
if diff > 0:
den = np.hstack((np.zeros(diff), den))
elif diff < 0:
num = np.hstack((np.zeros(-diff), num))
return num, den
@staticmethod
def _zinv_to_z(num, den):
"""Change a transfer function from the variable `z` to `z**-1`.
Parameters
----------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of ascending degree of 'z**-1'.
That is, ``5 + 3 z**-1 + 2 z**-2`` is presented as ``[5, 3, 2]``.
Returns
-------
num, den: 1d array_like
Sequences representing the coefficients of the numerator and
denominator polynomials, in order of descending degree of 'z'.
That is, ``5z**2 + 3z + 2`` is presented as ``[5, 3, 2]``.
"""
diff = len(num) - len(den)
if diff > 0:
den = np.hstack((den, np.zeros(diff)))
elif diff < 0:
num = np.hstack((num, np.zeros(-diff)))
return num, den
class TransferFunctionContinuous(TransferFunction, lti):
r"""
Continuous-time Linear Time Invariant system in transfer function form.
Represents the system as the transfer function
:math:`H(s)=\sum_{i=0}^N b[N-i] s^i / \sum_{j=0}^M a[M-j] s^j`, where
:math:`b` are elements of the numerator `num`, :math:`a` are elements of
the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
Continuous-time `TransferFunction` systems inherit additional
functionality from the `lti` class.
Parameters
----------
*system: arguments
The `TransferFunction` class can be instantiated with 1 or 2
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 2: array_like: (numerator, denominator)
See Also
--------
ZerosPolesGain, StateSpace, lti
tf2ss, tf2zpk, tf2sos
Notes
-----
Changing the value of properties that are not part of the
`TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
If (numerator, denominator) is passed in for ``*system``, coefficients
for both the numerator and denominator should be specified in descending
exponent order (e.g. ``s^2 + 3s + 5`` would be represented as
``[1, 3, 5]``)
Examples
--------
Construct the transfer function
:math:`H(s) = \frac{s^2 + 3s + 3}{s^2 + 2s + 1}`:
>>> from scipy import signal
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
>>> signal.TransferFunction(num, den)
TransferFunctionContinuous(
array([ 1., 3., 3.]),
array([ 1., 2., 1.]),
dt: None
)
"""
def to_discrete(self, dt, method='zoh', alpha=None):
"""
Returns the discretized `TransferFunction` system.
Parameters: See `cont2discrete` for details.
Returns
-------
sys: instance of `dlti` and `StateSpace`
"""
return TransferFunction(*cont2discrete((self.num, self.den),
dt,
method=method,
alpha=alpha)[:-1],
dt=dt)
class TransferFunctionDiscrete(TransferFunction, dlti):
r"""
Discrete-time Linear Time Invariant system in transfer function form.
Represents the system as the transfer function
:math:`H(z)=\sum_{i=0}^N b[N-i] z^i / \sum_{j=0}^M a[M-j] z^j`, where
:math:`b` are elements of the numerator `num`, :math:`a` are elements of
the denominator `den`, and ``N == len(b) - 1``, ``M == len(a) - 1``.
Discrete-time `TransferFunction` systems inherit additional functionality
from the `dlti` class.
Parameters
----------
*system: arguments
The `TransferFunction` class can be instantiated with 1 or 2
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 2: array_like: (numerator, denominator)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `True`
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
ZerosPolesGain, StateSpace, dlti
tf2ss, tf2zpk, tf2sos
Notes
-----
Changing the value of properties that are not part of the
`TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies.
If (numerator, denominator) is passed in for ``*system``, coefficients
for both the numerator and denominator should be specified in descending
exponent order (e.g., ``z^2 + 3z + 5`` would be represented as
``[1, 3, 5]``).
Examples
--------
Construct the transfer function
:math:`H(z) = \frac{z^2 + 3z + 3}{z^2 + 2z + 1}` with a sampling time of
0.5 seconds:
>>> from scipy import signal
>>> num = [1, 3, 3]
>>> den = [1, 2, 1]
>>> signal.TransferFunction(num, den, 0.5)
TransferFunctionDiscrete(
array([ 1., 3., 3.]),
array([ 1., 2., 1.]),
dt: 0.5
)
"""
pass
class ZerosPolesGain(LinearTimeInvariant):
r"""
Linear Time Invariant system class in zeros, poles, gain form.
Represents the system as the continuous- or discrete-time transfer function
:math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
`ZerosPolesGain` systems inherit additional functionality from the `lti`,
respectively the `dlti` classes, depending on which system representation
is used.
Parameters
----------
*system : arguments
The `ZerosPolesGain` class can be instantiated with 1 or 3
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 3: array_like: (zeros, poles, gain)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `None`
(continuous-time). Must be specified as a keyword argument, for
example, ``dt=0.1``.
See Also
--------
TransferFunction, StateSpace, lti, dlti
zpk2ss, zpk2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
Examples
--------
Construct the transfer function
:math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`:
>>> from scipy import signal
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
ZerosPolesGainContinuous(
array([1, 2]),
array([3, 4]),
5,
dt: None
)
Construct the transfer function
:math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time
of 0.1 seconds:
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1)
ZerosPolesGainDiscrete(
array([1, 2]),
array([3, 4]),
5,
dt: 0.1
)
"""
def __new__(cls, *system, **kwargs):
"""Handle object conversion if input is an instance of `lti`"""
if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
return system[0].to_zpk()
# Choose whether to inherit from `lti` or from `dlti`
if cls is ZerosPolesGain:
if kwargs.get('dt') is None:
return ZerosPolesGainContinuous.__new__(
ZerosPolesGainContinuous,
*system,
**kwargs)
else:
return ZerosPolesGainDiscrete.__new__(
ZerosPolesGainDiscrete,
*system,
**kwargs
)
# No special conversion needed
return super(ZerosPolesGain, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""Initialize the zeros, poles, gain system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], LinearTimeInvariant):
return
super().__init__(**kwargs)
self._zeros = None
self._poles = None
self._gain = None
self.zeros, self.poles, self.gain = system
def __repr__(self):
"""Return representation of the `ZerosPolesGain` system."""
return '{0}(\n{1},\n{2},\n{3},\ndt: {4}\n)'.format(
self.__class__.__name__,
repr(self.zeros),
repr(self.poles),
repr(self.gain),
repr(self.dt),
)
@property
def zeros(self):
"""Zeros of the `ZerosPolesGain` system."""
return self._zeros
@zeros.setter
def zeros(self, zeros):
self._zeros = atleast_1d(zeros)
# Update dimensions
if len(self.zeros.shape) > 1:
self.outputs, self.inputs = self.zeros.shape
else:
self.outputs = 1
self.inputs = 1
@property
def poles(self):
"""Poles of the `ZerosPolesGain` system."""
return self._poles
@poles.setter
def poles(self, poles):
self._poles = atleast_1d(poles)
@property
def gain(self):
"""Gain of the `ZerosPolesGain` system."""
return self._gain
@gain.setter
def gain(self, gain):
self._gain = gain
def _copy(self, system):
"""
Copy the parameters of another `ZerosPolesGain` system.
Parameters
----------
system : instance of `ZerosPolesGain`
The zeros, poles gain system that is to be copied
"""
self.poles = system.poles
self.zeros = system.zeros
self.gain = system.gain
def to_tf(self):
"""
Convert system representation to `TransferFunction`.
Returns
-------
sys : instance of `TransferFunction`
Transfer function of the current system
"""
return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain),
**self._dt_dict)
def to_zpk(self):
"""
Return a copy of the current 'ZerosPolesGain' system.
Returns
-------
sys : instance of `ZerosPolesGain`
The current system (copy)
"""
return copy.deepcopy(self)
def to_ss(self):
"""
Convert system representation to `StateSpace`.
Returns
-------
sys : instance of `StateSpace`
State space model of the current system
"""
return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain),
**self._dt_dict)
class ZerosPolesGainContinuous(ZerosPolesGain, lti):
r"""
Continuous-time Linear Time Invariant system in zeros, poles, gain form.
Represents the system as the continuous time transfer function
:math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
Continuous-time `ZerosPolesGain` systems inherit additional functionality
from the `lti` class.
Parameters
----------
*system : arguments
The `ZerosPolesGain` class can be instantiated with 1 or 3
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 3: array_like: (zeros, poles, gain)
See Also
--------
TransferFunction, StateSpace, lti
zpk2ss, zpk2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
Examples
--------
Construct the transfer function
:math:`H(s)=\frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`:
>>> from scipy import signal
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
ZerosPolesGainContinuous(
array([1, 2]),
array([3, 4]),
5,
dt: None
)
"""
def to_discrete(self, dt, method='zoh', alpha=None):
"""
Returns the discretized `ZerosPolesGain` system.
Parameters: See `cont2discrete` for details.
Returns
-------
sys: instance of `dlti` and `ZerosPolesGain`
"""
return ZerosPolesGain(
*cont2discrete((self.zeros, self.poles, self.gain),
dt,
method=method,
alpha=alpha)[:-1],
dt=dt)
class ZerosPolesGainDiscrete(ZerosPolesGain, dlti):
r"""
Discrete-time Linear Time Invariant system in zeros, poles, gain form.
Represents the system as the discrete-time transfer function
:math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.
Discrete-time `ZerosPolesGain` systems inherit additional functionality
from the `dlti` class.
Parameters
----------
*system : arguments
The `ZerosPolesGain` class can be instantiated with 1 or 3
arguments. The following gives the number of input arguments and their
interpretation:
* 1: `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 3: array_like: (zeros, poles, gain)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `True`
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
TransferFunction, StateSpace, dlti
zpk2ss, zpk2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
state-space matrices) is very inefficient and may lead to numerical
inaccuracies. It is better to convert to the specific system
representation first. For example, call ``sys = sys.to_ss()`` before
accessing/changing the A, B, C, D system matrices.
Examples
--------
Construct the transfer function
:math:`H(s) = \frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}`:
>>> from scipy import signal
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5)
ZerosPolesGainContinuous(
array([1, 2]),
array([3, 4]),
5,
dt: None
)
Construct the transfer function
    :math:`H(z) = \frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time
of 0.1 seconds:
>>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1)
ZerosPolesGainDiscrete(
array([1, 2]),
array([3, 4]),
5,
dt: 0.1
)
"""
pass
def _atleast_2d_or_none(arg):
if arg is not None:
return atleast_2d(arg)
class StateSpace(LinearTimeInvariant):
r"""
Linear Time Invariant system in state-space form.
Represents the system as the continuous-time, first order differential
equation :math:`\dot{x} = A x + B u` or the discrete-time difference
equation :math:`x[k+1] = A x[k] + B u[k]`. `StateSpace` systems
inherit additional functionality from the `lti`, respectively the `dlti`
classes, depending on which system representation is used.
Parameters
----------
*system: arguments
The `StateSpace` class can be instantiated with 1 or 4 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` or `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 4: array_like: (A, B, C, D)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `None`
(continuous-time). Must be specified as a keyword argument, for
example, ``dt=0.1``.
See Also
--------
TransferFunction, ZerosPolesGain, lti, dlti
ss2zpk, ss2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`StateSpace` system representation (such as `zeros` or `poles`) is very
inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> a = np.array([[0, 1], [0, 0]])
>>> b = np.array([[0], [1]])
>>> c = np.array([[1, 0]])
>>> d = np.array([[0]])
>>> sys = signal.StateSpace(a, b, c, d)
>>> print(sys)
StateSpaceContinuous(
array([[0, 1],
[0, 0]]),
array([[0],
[1]]),
array([[1, 0]]),
array([[0]]),
dt: None
)
>>> sys.to_discrete(0.1)
StateSpaceDiscrete(
array([[1. , 0.1],
[0. , 1. ]]),
array([[0.005],
[0.1 ]]),
array([[1, 0]]),
array([[0]]),
dt: 0.1
)
>>> a = np.array([[1, 0.1], [0, 1]])
>>> b = np.array([[0.005], [0.1]])
>>> signal.StateSpace(a, b, c, d, dt=0.1)
StateSpaceDiscrete(
array([[1. , 0.1],
[0. , 1. ]]),
array([[0.005],
[0.1 ]]),
array([[1, 0]]),
array([[0]]),
dt: 0.1
)
"""
# Override NumPy binary operations and ufuncs
__array_priority__ = 100.0
__array_ufunc__ = None
def __new__(cls, *system, **kwargs):
"""Create new StateSpace object and settle inheritance."""
# Handle object conversion if input is an instance of `lti`
if len(system) == 1 and isinstance(system[0], LinearTimeInvariant):
return system[0].to_ss()
# Choose whether to inherit from `lti` or from `dlti`
if cls is StateSpace:
if kwargs.get('dt') is None:
return StateSpaceContinuous.__new__(StateSpaceContinuous,
*system, **kwargs)
else:
return StateSpaceDiscrete.__new__(StateSpaceDiscrete,
*system, **kwargs)
# No special conversion needed
return super(StateSpace, cls).__new__(cls)
def __init__(self, *system, **kwargs):
"""Initialize the state space lti/dlti system."""
# Conversion of lti instances is handled in __new__
if isinstance(system[0], LinearTimeInvariant):
return
# Remove system arguments, not needed by parents anymore
super().__init__(**kwargs)
self._A = None
self._B = None
self._C = None
self._D = None
self.A, self.B, self.C, self.D = abcd_normalize(*system)
def __repr__(self):
"""Return representation of the `StateSpace` system."""
return '{0}(\n{1},\n{2},\n{3},\n{4},\ndt: {5}\n)'.format(
self.__class__.__name__,
repr(self.A),
repr(self.B),
repr(self.C),
repr(self.D),
repr(self.dt),
)
def _check_binop_other(self, other):
return isinstance(other, (StateSpace, np.ndarray, float, complex,
np.number, int))
def __mul__(self, other):
"""
Post-multiply another system or a scalar
Handles multiplication of systems in the sense of a frequency domain
multiplication. That means, given two systems E1(s) and E2(s), their
multiplication, H(s) = E1(s) * E2(s), means that applying H(s) to U(s)
is equivalent to first applying E2(s), and then E1(s).
Notes
-----
For SISO systems the order of system application does not matter.
However, for MIMO systems, where the two systems are matrices, the
order above ensures standard Matrix multiplication rules apply.
"""
if not self._check_binop_other(other):
return NotImplemented
if isinstance(other, StateSpace):
# Disallow mix of discrete and continuous systems.
if type(other) is not type(self):
return NotImplemented
if self.dt != other.dt:
raise TypeError('Cannot multiply systems with different `dt`.')
n1 = self.A.shape[0]
n2 = other.A.shape[0]
# Interconnection of systems
# x1' = A1 x1 + B1 u1
# y1 = C1 x1 + D1 u1
# x2' = A2 x2 + B2 y1
# y2 = C2 x2 + D2 y1
#
# Plugging in with u1 = y2 yields
# [x1'] [A1 B1*C2 ] [x1] [B1*D2]
# [x2'] = [0 A2 ] [x2] + [B2 ] u2
# [x1]
# y2 = [C1 D1*C2] [x2] + D1*D2 u2
a = np.vstack((np.hstack((self.A, np.dot(self.B, other.C))),
np.hstack((zeros((n2, n1)), other.A))))
b = np.vstack((np.dot(self.B, other.D), other.B))
c = np.hstack((self.C, np.dot(self.D, other.C)))
d = np.dot(self.D, other.D)
else:
# Assume that other is a scalar / matrix
# For post multiplication the input gets scaled
a = self.A
b = np.dot(self.B, other)
c = self.C
d = np.dot(self.D, other)
common_dtype = np.find_common_type((a.dtype, b.dtype, c.dtype, d.dtype), ())
return StateSpace(np.asarray(a, dtype=common_dtype),
np.asarray(b, dtype=common_dtype),
np.asarray(c, dtype=common_dtype),
np.asarray(d, dtype=common_dtype),
**self._dt_dict)
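    # Illustrative sketch (not part of the original module): cascading two
    # integrators H(s) = 1/s via ``*`` yields a double integrator, so the
    # product has both poles at the origin, e.g.
    #
    #   >>> integ = StateSpace([[0.]], [[1.]], [[1.]], [[0.]])  # H(s) = 1/s
    #   >>> (integ * integ).poles
    #   array([0., 0.])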
def __rmul__(self, other):
"""Pre-multiply a scalar or matrix (but not StateSpace)"""
if not self._check_binop_other(other) or isinstance(other, StateSpace):
return NotImplemented
# For pre-multiplication only the output gets scaled
a = self.A
b = self.B
c = np.dot(other, self.C)
d = np.dot(other, self.D)
common_dtype = np.find_common_type((a.dtype, b.dtype, c.dtype, d.dtype), ())
return StateSpace(np.asarray(a, dtype=common_dtype),
np.asarray(b, dtype=common_dtype),
np.asarray(c, dtype=common_dtype),
np.asarray(d, dtype=common_dtype),
**self._dt_dict)
def __neg__(self):
"""Negate the system (equivalent to pre-multiplying by -1)."""
return StateSpace(self.A, self.B, -self.C, -self.D, **self._dt_dict)
def __add__(self, other):
"""
Adds two systems in the sense of frequency domain addition.
"""
if not self._check_binop_other(other):
return NotImplemented
if isinstance(other, StateSpace):
# Disallow mix of discrete and continuous systems.
if type(other) is not type(self):
raise TypeError('Cannot add {} and {}'.format(type(self),
type(other)))
if self.dt != other.dt:
raise TypeError('Cannot add systems with different `dt`.')
# Interconnection of systems
# x1' = A1 x1 + B1 u
# y1 = C1 x1 + D1 u
# x2' = A2 x2 + B2 u
# y2 = C2 x2 + D2 u
# y = y1 + y2
#
# Plugging in yields
# [x1'] [A1 0 ] [x1] [B1]
# [x2'] = [0 A2] [x2] + [B2] u
# [x1]
# y = [C1 C2] [x2] + [D1 + D2] u
a = linalg.block_diag(self.A, other.A)
b = np.vstack((self.B, other.B))
c = np.hstack((self.C, other.C))
d = self.D + other.D
else:
other = np.atleast_2d(other)
if self.D.shape == other.shape:
# A scalar/matrix is really just a static system (A=0, B=0, C=0)
a = self.A
b = self.B
c = self.C
d = self.D + other
else:
raise ValueError("Cannot add systems with incompatible "
"dimensions ({} and {})"
.format(self.D.shape, other.shape))
common_dtype = np.find_common_type((a.dtype, b.dtype, c.dtype, d.dtype), ())
return StateSpace(np.asarray(a, dtype=common_dtype),
np.asarray(b, dtype=common_dtype),
np.asarray(c, dtype=common_dtype),
np.asarray(d, dtype=common_dtype),
**self._dt_dict)
def __sub__(self, other):
if not self._check_binop_other(other):
return NotImplemented
return self.__add__(-other)
def __radd__(self, other):
if not self._check_binop_other(other):
return NotImplemented
return self.__add__(other)
def __rsub__(self, other):
if not self._check_binop_other(other):
return NotImplemented
return (-self).__add__(other)
def __truediv__(self, other):
"""
Divide by a scalar
"""
# Division by non-StateSpace scalars
if not self._check_binop_other(other) or isinstance(other, StateSpace):
return NotImplemented
if isinstance(other, np.ndarray) and other.ndim > 0:
# It's ambiguous what this means, so disallow it
raise ValueError("Cannot divide StateSpace by non-scalar numpy arrays")
return self.__mul__(1/other)
@property
def A(self):
"""State matrix of the `StateSpace` system."""
return self._A
@A.setter
def A(self, A):
self._A = _atleast_2d_or_none(A)
@property
def B(self):
"""Input matrix of the `StateSpace` system."""
return self._B
@B.setter
def B(self, B):
self._B = _atleast_2d_or_none(B)
self.inputs = self.B.shape[-1]
@property
def C(self):
"""Output matrix of the `StateSpace` system."""
return self._C
@C.setter
def C(self, C):
self._C = _atleast_2d_or_none(C)
self.outputs = self.C.shape[0]
@property
def D(self):
"""Feedthrough matrix of the `StateSpace` system."""
return self._D
@D.setter
def D(self, D):
self._D = _atleast_2d_or_none(D)
def _copy(self, system):
"""
Copy the parameters of another `StateSpace` system.
Parameters
----------
system : instance of `StateSpace`
The state-space system that is to be copied
"""
self.A = system.A
self.B = system.B
self.C = system.C
self.D = system.D
def to_tf(self, **kwargs):
"""
Convert system representation to `TransferFunction`.
Parameters
----------
kwargs : dict, optional
Additional keywords passed to `ss2zpk`
Returns
-------
sys : instance of `TransferFunction`
Transfer function of the current system
"""
return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D,
**kwargs), **self._dt_dict)
def to_zpk(self, **kwargs):
"""
Convert system representation to `ZerosPolesGain`.
Parameters
----------
kwargs : dict, optional
Additional keywords passed to `ss2zpk`
Returns
-------
sys : instance of `ZerosPolesGain`
Zeros, poles, gain representation of the current system
"""
return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D,
**kwargs), **self._dt_dict)
def to_ss(self):
"""
Return a copy of the current `StateSpace` system.
Returns
-------
sys : instance of `StateSpace`
The current system (copy)
"""
return copy.deepcopy(self)
class StateSpaceContinuous(StateSpace, lti):
r"""
Continuous-time Linear Time Invariant system in state-space form.
Represents the system as the continuous-time, first order differential
equation :math:`\dot{x} = A x + B u`.
Continuous-time `StateSpace` systems inherit additional functionality
from the `lti` class.
Parameters
----------
*system: arguments
    The `StateSpace` class can be instantiated with 1 or 4 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `lti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 4: array_like: (A, B, C, D)
See Also
--------
TransferFunction, ZerosPolesGain, lti
ss2zpk, ss2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`StateSpace` system representation (such as `zeros` or `poles`) is very
inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> a = np.array([[0, 1], [0, 0]])
>>> b = np.array([[0], [1]])
>>> c = np.array([[1, 0]])
>>> d = np.array([[0]])
>>> sys = signal.StateSpace(a, b, c, d)
>>> print(sys)
StateSpaceContinuous(
array([[0, 1],
[0, 0]]),
array([[0],
[1]]),
array([[1, 0]]),
array([[0]]),
dt: None
)
"""
def to_discrete(self, dt, method='zoh', alpha=None):
"""
Returns the discretized `StateSpace` system.
Parameters: See `cont2discrete` for details.
Returns
-------
sys: instance of `dlti` and `StateSpace`
"""
return StateSpace(*cont2discrete((self.A, self.B, self.C, self.D),
dt,
method=method,
alpha=alpha)[:-1],
dt=dt)
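# Editor's note: illustrative sketch only (hypothetical helper, not scipy API).
# It discretises a continuous double integrator with `to_discrete` using a 0.1 s
# zero-order-hold step; the numbers are placeholders chosen for illustration.
def _example_to_discrete():
    sys_c = StateSpace([[0.0, 1.0], [0.0, 0.0]], [[0.0], [1.0]],
                       [[1.0, 0.0]], [[0.0]])
    sys_d = sys_c.to_discrete(0.1, method='zoh')
    return sys_d.A, sys_d.B, sys_d.dt   # discrete-time matrices and sampling time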
class StateSpaceDiscrete(StateSpace, dlti):
r"""
Discrete-time Linear Time Invariant system in state-space form.
Represents the system as the discrete-time difference equation
:math:`x[k+1] = A x[k] + B u[k]`.
`StateSpace` systems inherit additional functionality from the `dlti`
class.
Parameters
----------
*system: arguments
        The `StateSpace` class can be instantiated with 1 or 4 arguments.
The following gives the number of input arguments and their
interpretation:
* 1: `dlti` system: (`StateSpace`, `TransferFunction` or
`ZerosPolesGain`)
* 4: array_like: (A, B, C, D)
dt: float, optional
Sampling time [s] of the discrete-time systems. Defaults to `True`
(unspecified sampling time). Must be specified as a keyword argument,
for example, ``dt=0.1``.
See Also
--------
TransferFunction, ZerosPolesGain, dlti
ss2zpk, ss2tf, zpk2sos
Notes
-----
Changing the value of properties that are not part of the
`StateSpace` system representation (such as `zeros` or `poles`) is very
inefficient and may lead to numerical inaccuracies. It is better to
convert to the specific system representation first. For example, call
``sys = sys.to_zpk()`` before accessing/changing the zeros, poles or gain.
Examples
--------
>>> from scipy import signal
>>> a = np.array([[1, 0.1], [0, 1]])
>>> b = np.array([[0.005], [0.1]])
>>> c = np.array([[1, 0]])
>>> d = np.array([[0]])
>>> signal.StateSpace(a, b, c, d, dt=0.1)
StateSpaceDiscrete(
array([[ 1. , 0.1],
[ 0. , 1. ]]),
array([[ 0.005],
[ 0.1 ]]),
array([[1, 0]]),
array([[0]]),
dt: 0.1
)
"""
pass
def lsim2(system, U=None, T=None, X0=None, **kwargs):
"""
Simulate output of a continuous-time linear system, by using
the ODE solver `scipy.integrate.odeint`.
Parameters
----------
system : an instance of the `lti` class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `lti`)
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like (1D or 2D), optional
An input array describing the input at each time T. Linear
interpolation is used between given times. If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U is not given, the input is assumed
to be zero.
    T : array_like (1D), optional
The time steps at which the input is defined and at which the
output is desired. The default is 101 evenly spaced points on
the interval [0,10.0].
X0 : array_like (1D), optional
The initial condition of the state vector. If `X0` is not
given, the initial conditions are assumed to be 0.
kwargs : dict
Additional keyword arguments are passed on to the function
`odeint`. See the notes below for more details.
Returns
-------
T : 1D ndarray
The time values for the output.
yout : ndarray
The response of the system.
xout : ndarray
The time-evolution of the state-vector.
Notes
-----
This function uses `scipy.integrate.odeint` to solve the
system's differential equations. Additional keyword arguments
given to `lsim2` are passed on to `odeint`. See the documentation
for `scipy.integrate.odeint` for the full list of arguments.
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
See Also
--------
lsim
Examples
--------
We'll use `lsim2` to simulate an analog Bessel filter applied to
a signal.
>>> from scipy.signal import bessel, lsim2
>>> import matplotlib.pyplot as plt
Create a low-pass Bessel filter with a cutoff of 12 Hz.
>>> b, a = bessel(N=5, Wn=2*np.pi*12, btype='lowpass', analog=True)
Generate data to which the filter is applied.
>>> t = np.linspace(0, 1.25, 500, endpoint=False)
The input signal is the sum of three sinusoidal curves, with
frequencies 4 Hz, 40 Hz, and 80 Hz. The filter should mostly
eliminate the 40 Hz and 80 Hz components, leaving just the 4 Hz signal.
>>> u = (np.cos(2*np.pi*4*t) + 0.6*np.sin(2*np.pi*40*t) +
... 0.5*np.cos(2*np.pi*80*t))
Simulate the filter with `lsim2`.
>>> tout, yout, xout = lsim2((b, a), U=u, T=t)
Plot the result.
>>> plt.plot(t, u, 'r', alpha=0.5, linewidth=1, label='input')
>>> plt.plot(tout, yout, 'k', linewidth=1.5, label='output')
>>> plt.legend(loc='best', shadow=True, framealpha=1)
>>> plt.grid(alpha=0.3)
>>> plt.xlabel('t')
>>> plt.show()
In a second example, we simulate a double integrator ``y'' = u``, with
a constant input ``u = 1``. We'll use the state space representation
of the integrator.
>>> from scipy.signal import lti
>>> A = np.array([[0, 1], [0, 0]])
>>> B = np.array([[0], [1]])
>>> C = np.array([[1, 0]])
>>> D = 0
>>> system = lti(A, B, C, D)
`t` and `u` define the time and input signal for the system to
be simulated.
>>> t = np.linspace(0, 5, num=50)
>>> u = np.ones_like(t)
Compute the simulation, and then plot `y`. As expected, the plot shows
the curve ``y = 0.5*t**2``.
>>> tout, y, x = lsim2(system, u, t)
>>> plt.plot(t, y)
>>> plt.grid(alpha=0.3)
>>> plt.xlabel('t')
>>> plt.show()
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('lsim2 can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
if X0 is None:
X0 = zeros(sys.B.shape[0], sys.A.dtype)
if T is None:
# XXX T should really be a required argument, but U was
# changed from a required positional argument to a keyword,
# and T is after U in the argument list. So we either: change
# the API and move T in front of U; check here for T being
# None and raise an exception; or assign a default value to T
# here. This code implements the latter.
T = linspace(0, 10.0, 101)
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
if U is not None:
U = atleast_1d(U)
if len(U.shape) == 1:
U = U.reshape(-1, 1)
sU = U.shape
if sU[0] != len(T):
raise ValueError("U must have the same number of rows "
"as elements in T.")
if sU[1] != sys.inputs:
raise ValueError("The number of inputs in U (%d) is not "
"compatible with the number of system "
"inputs (%d)" % (sU[1], sys.inputs))
# Create a callable that uses linear interpolation to
# calculate the input at any time.
ufunc = interpolate.interp1d(T, U, kind='linear',
axis=0, bounds_error=False)
def fprime(x, t, sys, ufunc):
"""The vector field of the linear system."""
return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t]))))
xout = integrate.odeint(fprime, X0, T, args=(sys, ufunc), **kwargs)
yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U))
else:
def fprime(x, t, sys):
"""The vector field of the linear system."""
return dot(sys.A, x)
xout = integrate.odeint(fprime, X0, T, args=(sys,), **kwargs)
yout = dot(sys.C, transpose(xout))
return T, squeeze(transpose(yout)), xout
def _cast_to_array_dtype(in1, in2):
"""Cast array to dtype of other array, while avoiding ComplexWarning.
Those can be raised when casting complex to real.
"""
if numpy.issubdtype(in2.dtype, numpy.float64):
# dtype to cast to is not complex, so use .real
in1 = in1.real.astype(in2.dtype)
else:
in1 = in1.astype(in2.dtype)
return in1
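# Editor's note: a tiny illustrative sketch (hypothetical helper name) of what
# `_cast_to_array_dtype` is for: casting a complex intermediate result back to
# the dtype of a real reference array without raising a ComplexWarning.
def _example_cast_to_array_dtype():
    z = np.array([1.0 + 0.0j, 2.0 + 0.0j])   # complex intermediate result
    ref = np.array([0.0, 0.0])                # float64 reference array
    return _cast_to_array_dtype(z, ref)       # -> array([1., 2.]) with float64 dtype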
def lsim(system, U, T, X0=None, interp=True):
"""
Simulate output of a continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `lti`)
* 2: (num, den)
* 3: (zeros, poles, gain)
* 4: (A, B, C, D)
U : array_like
An input array describing the input at each time `T`
(interpolation is assumed between given times). If there are
multiple inputs, then each column of the rank-2 array
represents an input. If U = 0 or None, a zero input is used.
T : array_like
The time steps at which the input is defined and at which the
output is desired. Must be nonnegative, increasing, and equally spaced.
X0 : array_like, optional
The initial conditions on the state vector (zero by default).
interp : bool, optional
Whether to use linear (True, the default) or zero-order-hold (False)
interpolation for the input array.
Returns
-------
T : 1D ndarray
Time values for the output.
yout : 1D ndarray
System response.
xout : ndarray
Time evolution of the state vector.
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
Examples
--------
We'll use `lsim` to simulate an analog Bessel filter applied to
a signal.
>>> from scipy.signal import bessel, lsim
>>> import matplotlib.pyplot as plt
Create a low-pass Bessel filter with a cutoff of 12 Hz.
>>> b, a = bessel(N=5, Wn=2*np.pi*12, btype='lowpass', analog=True)
Generate data to which the filter is applied.
>>> t = np.linspace(0, 1.25, 500, endpoint=False)
The input signal is the sum of three sinusoidal curves, with
frequencies 4 Hz, 40 Hz, and 80 Hz. The filter should mostly
eliminate the 40 Hz and 80 Hz components, leaving just the 4 Hz signal.
>>> u = (np.cos(2*np.pi*4*t) + 0.6*np.sin(2*np.pi*40*t) +
... 0.5*np.cos(2*np.pi*80*t))
Simulate the filter with `lsim`.
>>> tout, yout, xout = lsim((b, a), U=u, T=t)
Plot the result.
>>> plt.plot(t, u, 'r', alpha=0.5, linewidth=1, label='input')
>>> plt.plot(tout, yout, 'k', linewidth=1.5, label='output')
>>> plt.legend(loc='best', shadow=True, framealpha=1)
>>> plt.grid(alpha=0.3)
>>> plt.xlabel('t')
>>> plt.show()
In a second example, we simulate a double integrator ``y'' = u``, with
a constant input ``u = 1``. We'll use the state space representation
of the integrator.
>>> from scipy.signal import lti
>>> A = np.array([[0.0, 1.0], [0.0, 0.0]])
>>> B = np.array([[0.0], [1.0]])
>>> C = np.array([[1.0, 0.0]])
>>> D = 0.0
>>> system = lti(A, B, C, D)
`t` and `u` define the time and input signal for the system to
be simulated.
>>> t = np.linspace(0, 5, num=50)
>>> u = np.ones_like(t)
Compute the simulation, and then plot `y`. As expected, the plot shows
the curve ``y = 0.5*t**2``.
>>> tout, y, x = lsim(system, u, t)
>>> plt.plot(t, y)
>>> plt.grid(alpha=0.3)
>>> plt.xlabel('t')
>>> plt.show()
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('lsim can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
T = atleast_1d(T)
if len(T.shape) != 1:
raise ValueError("T must be a rank-1 array.")
A, B, C, D = map(np.asarray, (sys.A, sys.B, sys.C, sys.D))
n_states = A.shape[0]
n_inputs = B.shape[1]
n_steps = T.size
if X0 is None:
X0 = zeros(n_states, sys.A.dtype)
xout = np.empty((n_steps, n_states), sys.A.dtype)
if T[0] == 0:
xout[0] = X0
elif T[0] > 0:
# step forward to initial time, with zero input
xout[0] = dot(X0, linalg.expm(transpose(A) * T[0]))
else:
raise ValueError("Initial time must be nonnegative")
no_input = (U is None or
(isinstance(U, (int, float)) and U == 0.) or
not np.any(U))
if n_steps == 1:
yout = squeeze(dot(xout, transpose(C)))
if not no_input:
yout += squeeze(dot(U, transpose(D)))
return T, squeeze(yout), squeeze(xout)
dt = T[1] - T[0]
if not np.allclose((T[1:] - T[:-1]) / dt, 1.0):
warnings.warn("Non-uniform timesteps are deprecated. Results may be "
"slow and/or inaccurate.", DeprecationWarning)
return lsim2(system, U, T, X0)
if no_input:
# Zero input: just use matrix exponential
# take transpose because state is a row vector
expAT_dt = linalg.expm(transpose(A) * dt)
for i in range(1, n_steps):
xout[i] = dot(xout[i-1], expAT_dt)
yout = squeeze(dot(xout, transpose(C)))
return T, squeeze(yout), squeeze(xout)
# Nonzero input
U = atleast_1d(U)
if U.ndim == 1:
U = U[:, np.newaxis]
if U.shape[0] != n_steps:
raise ValueError("U must have the same number of rows "
"as elements in T.")
if U.shape[1] != n_inputs:
raise ValueError("System does not define that many inputs.")
if not interp:
# Zero-order hold
# Algorithm: to integrate from time 0 to time dt, we solve
# xdot = A x + B u, x(0) = x0
# udot = 0, u(0) = u0.
#
# Solution is
# [ x(dt) ] [ A*dt B*dt ] [ x0 ]
# [ u(dt) ] = exp [ 0 0 ] [ u0 ]
M = np.vstack([np.hstack([A * dt, B * dt]),
np.zeros((n_inputs, n_states + n_inputs))])
# transpose everything because the state and input are row vectors
expMT = linalg.expm(transpose(M))
Ad = expMT[:n_states, :n_states]
Bd = expMT[n_states:, :n_states]
for i in range(1, n_steps):
xout[i] = dot(xout[i-1], Ad) + dot(U[i-1], Bd)
else:
# Linear interpolation between steps
# Algorithm: to integrate from time 0 to time dt, with linear
# interpolation between inputs u(0) = u0 and u(dt) = u1, we solve
# xdot = A x + B u, x(0) = x0
# udot = (u1 - u0) / dt, u(0) = u0.
#
# Solution is
# [ x(dt) ] [ A*dt B*dt 0 ] [ x0 ]
# [ u(dt) ] = exp [ 0 0 I ] [ u0 ]
# [u1 - u0] [ 0 0 0 ] [u1 - u0]
M = np.vstack([np.hstack([A * dt, B * dt,
np.zeros((n_states, n_inputs))]),
np.hstack([np.zeros((n_inputs, n_states + n_inputs)),
np.identity(n_inputs)]),
np.zeros((n_inputs, n_states + 2 * n_inputs))])
expMT = linalg.expm(transpose(M))
Ad = expMT[:n_states, :n_states]
Bd1 = expMT[n_states+n_inputs:, :n_states]
Bd0 = expMT[n_states:n_states + n_inputs, :n_states] - Bd1
for i in range(1, n_steps):
xout[i] = (dot(xout[i-1], Ad) + dot(U[i-1], Bd0) + dot(U[i], Bd1))
yout = (squeeze(dot(xout, transpose(C))) + squeeze(dot(U, transpose(D))))
return T, squeeze(yout), squeeze(xout)
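# Editor's note: an illustrative sketch (hypothetical helper, not scipy API) of
# the zero-order-hold construction used in `lsim` above: exponentiating the
# augmented matrix [[A*dt, B*dt], [0, 0]] yields the discrete-time blocks
# Ad = expm(A*dt) and Bd = integral of expm(A*s)*B ds over [0, dt].  `lsim`
# itself works with the transposed form because it stores the state as a row
# vector; the matrices below are placeholders.
def _example_zoh_blocks():
    A = np.array([[0.0, 1.0], [0.0, -2.0]])
    B = np.array([[0.0], [1.0]])
    dt = 0.1
    n_states, n_inputs = A.shape[0], B.shape[1]
    M = np.vstack([np.hstack([A * dt, B * dt]),
                   np.zeros((n_inputs, n_states + n_inputs))])
    expM = linalg.expm(M)
    Ad = expM[:n_states, :n_states]   # equals linalg.expm(A * dt)
    Bd = expM[:n_states, n_states:]   # discrete input matrix under a held input
    return Ad, Bd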
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : array_like
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval.
# TODO: This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7 * tc, n)
return t
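# Editor's note: a hedged illustrative sketch (hypothetical helper name) of the
# heuristic above: the time grid spans roughly seven time constants of the
# slowest pole.  With poles at -0.5 and -4 the slowest time constant is 2 s, so
# the grid runs from 0 to 14 s.
def _example_default_response_times():
    A = np.array([[-0.5, 0.0], [0.0, -4.0]])
    t = _default_response_times(A, n=50)
    return t[0], t[-1]   # (0.0, 14.0)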
def impulse(system, X0=None, T=None, N=None):
"""Impulse response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector. Defaults to zero.
T : array_like, optional
Time points. Computed if not given.
N : int, optional
The number of time points to compute (if `T` is not given).
Returns
-------
T : ndarray
A 1-D array of time points.
yout : ndarray
A 1-D array containing the impulse response of the system (except for
singularities at zero).
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
Examples
--------
Compute the impulse response of a second order system with a repeated
root: ``x''(t) + 2*x'(t) + x(t) = u(t)``
>>> from scipy import signal
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = signal.impulse(system)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('impulse can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
if X0 is None:
X = squeeze(sys.B)
else:
X = squeeze(sys.B + X0)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
_, h, _ = lsim(sys, 0., T, X, interp=False)
return T, h
def impulse2(system, X0=None, T=None, N=None, **kwargs):
"""
Impulse response of a single-input, continuous-time linear system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : 1-D array_like, optional
The initial condition of the state vector. Default: 0 (the
zero vector).
T : 1-D array_like, optional
The time steps at which the input is defined and at which the
output is desired. If `T` is not given, the function will
generate a set of time samples automatically.
N : int, optional
Number of time points to compute. Default: 100.
kwargs : various types
Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`; see the latter's documentation for
information about these arguments.
Returns
-------
T : ndarray
The time values for the output.
yout : ndarray
The output response of the system.
See Also
--------
impulse, lsim2, scipy.integrate.odeint
Notes
-----
The solution is generated by calling `scipy.signal.lsim2`, which uses
the differential equation solver `scipy.integrate.odeint`.
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.8.0
Examples
--------
Compute the impulse response of a second order system with a repeated
root: ``x''(t) + 2*x'(t) + x(t) = u(t)``
>>> from scipy import signal
>>> system = ([1.0], [1.0, 2.0, 1.0])
>>> t, y = signal.impulse2(system)
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, y)
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('impulse2 can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
B = sys.B
if B.shape[-1] != 1:
raise ValueError("impulse2() requires a single-input system.")
B = B.squeeze()
if X0 is None:
X0 = zeros_like(B)
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
# Move the impulse in the input to the initial conditions, and then
# solve using lsim2().
ic = B + X0
Tr, Yr, Xr = lsim2(sys, T=T, X0=ic, **kwargs)
return Tr, Yr
def step(system, X0=None, T=None, N=None):
"""Step response of continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int, optional
Number of time points to compute if `T` is not given.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step2
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> lti = signal.lti([1.0], [1.0, 1.0])
>>> t, y = signal.step(lti)
>>> plt.plot(t, y)
>>> plt.xlabel('Time [s]')
>>> plt.ylabel('Amplitude')
>>> plt.title('Step response for 1. Order Lowpass')
>>> plt.grid()
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('step can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim(sys, U, T, X0=X0, interp=False)
return vals[0], vals[1]
def step2(system, X0=None, T=None, N=None, **kwargs):
"""Step response of continuous-time system.
This function is functionally the same as `scipy.signal.step`, but
it uses the function `scipy.signal.lsim2` to compute the step
response.
Parameters
----------
system : an instance of the LTI class or a tuple of array_like
describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
X0 : array_like, optional
Initial state-vector (default is zero).
T : array_like, optional
Time points (computed if not given).
N : int, optional
Number of time points to compute if `T` is not given.
kwargs : various types
        Additional keyword arguments are passed on to the function
`scipy.signal.lsim2`, which in turn passes them on to
`scipy.integrate.odeint`. See the documentation for
`scipy.integrate.odeint` for information about these arguments.
Returns
-------
T : 1D ndarray
Output time points.
yout : 1D ndarray
Step response of system.
See also
--------
scipy.signal.step
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.8.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> lti = signal.lti([1.0], [1.0, 1.0])
>>> t, y = signal.step2(lti)
>>> plt.plot(t, y)
>>> plt.xlabel('Time [s]')
>>> plt.ylabel('Amplitude')
>>> plt.title('Step response for 1. Order Lowpass')
>>> plt.grid()
"""
if isinstance(system, lti):
sys = system._as_ss()
elif isinstance(system, dlti):
raise AttributeError('step2 can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_ss()
if N is None:
N = 100
if T is None:
T = _default_response_times(sys.A, N)
else:
T = asarray(T)
U = ones(T.shape, sys.A.dtype)
vals = lsim2(sys, U, T, X0=X0, **kwargs)
return vals[0], vals[1]
def bode(system, w=None, n=100):
"""
Calculate Bode magnitude and phase data of a continuous-time system.
Parameters
----------
system : an instance of the LTI class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
w : array_like, optional
Array of frequencies (in rad/s). Magnitude and phase data is calculated
        for every value in this array. If not given, a reasonable set will be
calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
mag : 1D ndarray
Magnitude array [dB]
phase : 1D ndarray
Phase array [deg]
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> sys = signal.TransferFunction([1], [1, 1])
>>> w, mag, phase = signal.bode(sys)
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
w, y = freqresp(system, w=w, n=n)
mag = 20.0 * numpy.log10(abs(y))
phase = numpy.unwrap(numpy.arctan2(y.imag, y.real)) * 180.0 / numpy.pi
return w, mag, phase
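# Editor's note: a hand-checked illustration (hypothetical helper, not scipy
# API) of the conversion done in `bode` above: for H(s) = 1/(s + 1) at
# w = 1 rad/s, |H| = 1/sqrt(2), i.e. about -3.01 dB, and the phase is -45 deg.
def _example_bode_point():
    w = np.array([1.0])
    _, mag, phase = bode(([1.0], [1.0, 1.0]), w=w)
    return mag[0], phase[0]   # approximately (-3.01, -45.0)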
def freqresp(system, w=None, n=10000):
r"""Calculate the frequency response of a continuous-time system.
Parameters
----------
system : an instance of the `lti` class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1 (instance of `lti`)
* 2 (num, den)
* 3 (zeros, poles, gain)
* 4 (A, B, C, D)
w : array_like, optional
Array of frequencies (in rad/s). Magnitude and phase data is
calculated for every value in this array. If not given, a reasonable
set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/s]
H : 1D ndarray
Array of complex magnitude values
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``).
Examples
--------
Generating the Nyquist plot of a transfer function
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Construct the transfer function :math:`H(s) = \frac{5}{(s-1)^3}`:
>>> s1 = signal.ZerosPolesGain([], [1, 1, 1], [5])
>>> w, H = signal.freqresp(s1)
>>> plt.figure()
>>> plt.plot(H.real, H.imag, "b")
>>> plt.plot(H.real, -H.imag, "r")
>>> plt.show()
"""
if isinstance(system, lti):
if isinstance(system, (TransferFunction, ZerosPolesGain)):
sys = system
else:
sys = system._as_zpk()
elif isinstance(system, dlti):
raise AttributeError('freqresp can only be used with continuous-time '
'systems.')
else:
sys = lti(*system)._as_zpk()
if sys.inputs != 1 or sys.outputs != 1:
raise ValueError("freqresp() requires a SISO (single input, single "
"output) system.")
if w is not None:
worN = w
else:
worN = n
if isinstance(sys, TransferFunction):
# In the call to freqs(), sys.num.ravel() is used because there are
# cases where sys.num is a 2-D array with a single row.
w, h = freqs(sys.num.ravel(), sys.den, worN=worN)
elif isinstance(sys, ZerosPolesGain):
w, h = freqs_zpk(sys.zeros, sys.poles, sys.gain, worN=worN)
return w, h
# This class will be used by place_poles to return its results
# see https://code.activestate.com/recipes/52308/
class Bunch:
def __init__(self, **kwds):
self.__dict__.update(kwds)
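# Editor's note: a minimal illustration (hypothetical helper name) of the Bunch
# container used by `place_poles`: keyword arguments simply become attributes.
def _example_bunch():
    result = Bunch(gain_matrix=np.eye(2), nb_iter=3)
    return result.gain_matrix, result.nb_iter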
def _valid_inputs(A, B, poles, method, rtol, maxiter):
"""
    Check that the poles come in complex conjugate pairs,
    that the shapes of A, B and poles are compatible,
    and that the chosen method is compatible with the provided poles.
    Return the update method to use and the ordered poles.
"""
poles = np.asarray(poles)
if poles.ndim > 1:
raise ValueError("Poles must be a 1D array like.")
    # Will raise ValueError if poles do not come in complex conjugate pairs
poles = _order_complex_poles(poles)
if A.ndim > 2:
raise ValueError("A must be a 2D array/matrix.")
if B.ndim > 2:
raise ValueError("B must be a 2D array/matrix")
if A.shape[0] != A.shape[1]:
raise ValueError("A must be square")
if len(poles) > A.shape[0]:
raise ValueError("maximum number of poles is %d but you asked for %d" %
(A.shape[0], len(poles)))
if len(poles) < A.shape[0]:
raise ValueError("number of poles is %d but you should provide %d" %
(len(poles), A.shape[0]))
r = np.linalg.matrix_rank(B)
for p in poles:
if sum(p == poles) > r:
raise ValueError("at least one of the requested pole is repeated "
"more than rank(B) times")
# Choose update method
update_loop = _YT_loop
if method not in ('KNV0','YT'):
raise ValueError("The method keyword must be one of 'YT' or 'KNV0'")
if method == "KNV0":
update_loop = _KNV0_loop
if not all(np.isreal(poles)):
raise ValueError("Complex poles are not supported by KNV0")
if maxiter < 1:
raise ValueError("maxiter must be at least equal to 1")
# We do not check rtol <= 0 as the user can use a negative rtol to
# force maxiter iterations
if rtol > 1:
raise ValueError("rtol can not be greater than 1")
return update_loop, poles
def _order_complex_poles(poles):
"""
    Check we have complex conjugate pairs and reorder P according to YT, i.e.
real_poles, complex_i, conjugate complex_i, ....
The lexicographic sort on the complex poles is added to help the user to
compare sets of poles.
"""
ordered_poles = np.sort(poles[np.isreal(poles)])
im_poles = []
for p in np.sort(poles[np.imag(poles) < 0]):
if np.conj(p) in poles:
im_poles.extend((p, np.conj(p)))
ordered_poles = np.hstack((ordered_poles, im_poles))
if poles.shape[0] != len(ordered_poles):
raise ValueError("Complex poles must come with their conjugates")
return ordered_poles
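# Editor's note: a hedged illustration (hypothetical helper name) of the
# ordering enforced by `_order_complex_poles`: real poles first in increasing
# order, then each conjugate pair with the negative-imaginary member first.
def _example_order_complex_poles():
    poles = np.array([-2.0 + 1.0j, -3.0, -1.0, -2.0 - 1.0j])
    return _order_complex_poles(poles)
    # -> array([-3.+0.j, -1.+0.j, -2.-1.j, -2.+1.j])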
def _KNV0(B, ker_pole, transfer_matrix, j, poles):
"""
Algorithm "KNV0" Kautsky et Al. Robust pole
assignment in linear state feedback, Int journal of Control
1985, vol 41 p 1129->1155
https://la.epfl.ch/files/content/sites/la/files/
users/105941/public/KautskyNicholsDooren
"""
    # Remove xj from the basis
transfer_matrix_not_j = np.delete(transfer_matrix, j, axis=1)
# If we QR this matrix in full mode Q=Q0|Q1
    # then Q1 will be a single column orthogonal to
# Q0, that's what we are looking for !
# After merge of gh-4249 great speed improvements could be achieved
# using QR updates instead of full QR in the line below
# To debug with numpy qr uncomment the line below
# Q, R = np.linalg.qr(transfer_matrix_not_j, mode="complete")
Q, R = s_qr(transfer_matrix_not_j, mode="full")
mat_ker_pj = np.dot(ker_pole[j], ker_pole[j].T)
yj = np.dot(mat_ker_pj, Q[:, -1])
# If Q[:, -1] is "almost" orthogonal to ker_pole[j] its
# projection into ker_pole[j] will yield a vector
# close to 0. As we are looking for a vector in ker_pole[j]
# simply stick with transfer_matrix[:, j] (unless someone provides me with
# a better choice ?)
if not np.allclose(yj, 0):
xj = yj/np.linalg.norm(yj)
transfer_matrix[:, j] = xj
# KNV does not support complex poles, using YT technique the two lines
# below seem to work 9 out of 10 times but it is not reliable enough:
# transfer_matrix[:, j]=real(xj)
# transfer_matrix[:, j+1]=imag(xj)
# Add this at the beginning of this function if you wish to test
# complex support:
# if ~np.isreal(P[j]) and (j>=B.shape[0]-1 or P[j]!=np.conj(P[j+1])):
# return
        # Problems arise when imag(xj) is close to 0; I have no idea how to fix this
def _YT_real(ker_pole, Q, transfer_matrix, i, j):
"""
Applies algorithm from YT section 6.1 page 19 related to real pairs
"""
# step 1 page 19
u = Q[:, -2, np.newaxis]
v = Q[:, -1, np.newaxis]
# step 2 page 19
m = np.dot(np.dot(ker_pole[i].T, np.dot(u, v.T) -
np.dot(v, u.T)), ker_pole[j])
# step 3 page 19
um, sm, vm = np.linalg.svd(m)
    # mu1, mu2: first two columns of U => first two rows of U.T
mu1, mu2 = um.T[:2, :, np.newaxis]
    # vm is V.T; with numpy we want the first two rows of V.T
nu1, nu2 = vm[:2, :, np.newaxis]
# what follows is a rough python translation of the formulas
# in section 6.2 page 20 (step 4)
transfer_matrix_j_mo_transfer_matrix_j = np.vstack((
transfer_matrix[:, i, np.newaxis],
transfer_matrix[:, j, np.newaxis]))
if not np.allclose(sm[0], sm[1]):
ker_pole_imo_mu1 = np.dot(ker_pole[i], mu1)
ker_pole_i_nu1 = np.dot(ker_pole[j], nu1)
ker_pole_mu_nu = np.vstack((ker_pole_imo_mu1, ker_pole_i_nu1))
else:
ker_pole_ij = np.vstack((
np.hstack((ker_pole[i],
np.zeros(ker_pole[i].shape))),
np.hstack((np.zeros(ker_pole[j].shape),
ker_pole[j]))
))
mu_nu_matrix = np.vstack(
(np.hstack((mu1, mu2)), np.hstack((nu1, nu2)))
)
ker_pole_mu_nu = np.dot(ker_pole_ij, mu_nu_matrix)
transfer_matrix_ij = np.dot(np.dot(ker_pole_mu_nu, ker_pole_mu_nu.T),
transfer_matrix_j_mo_transfer_matrix_j)
if not np.allclose(transfer_matrix_ij, 0):
transfer_matrix_ij = (np.sqrt(2)*transfer_matrix_ij /
np.linalg.norm(transfer_matrix_ij))
transfer_matrix[:, i] = transfer_matrix_ij[
:transfer_matrix[:, i].shape[0], 0
]
transfer_matrix[:, j] = transfer_matrix_ij[
transfer_matrix[:, i].shape[0]:, 0
]
else:
# As in knv0 if transfer_matrix_j_mo_transfer_matrix_j is orthogonal to
        # Vect{ker_pole_mu_nu} assign transfer_matrix_i/transfer_matrix_j to
# ker_pole_mu_nu and iterate. As we are looking for a vector in
# Vect{Matker_pole_MU_NU} (see section 6.1 page 19) this might help
# (that's a guess, not a claim !)
transfer_matrix[:, i] = ker_pole_mu_nu[
:transfer_matrix[:, i].shape[0], 0
]
transfer_matrix[:, j] = ker_pole_mu_nu[
transfer_matrix[:, i].shape[0]:, 0
]
def _YT_complex(ker_pole, Q, transfer_matrix, i, j):
"""
Applies algorithm from YT section 6.2 page 20 related to complex pairs
"""
# step 1 page 20
ur = np.sqrt(2)*Q[:, -2, np.newaxis]
ui = np.sqrt(2)*Q[:, -1, np.newaxis]
u = ur + 1j*ui
# step 2 page 20
ker_pole_ij = ker_pole[i]
m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) -
np.dot(np.conj(u), u.T)), ker_pole_ij)
# step 3 page 20
e_val, e_vec = np.linalg.eig(m)
    # sort eigenvalues according to their modulus
e_val_idx = np.argsort(np.abs(e_val))
mu1 = e_vec[:, e_val_idx[-1], np.newaxis]
mu2 = e_vec[:, e_val_idx[-2], np.newaxis]
# what follows is a rough python translation of the formulas
# in section 6.2 page 20 (step 4)
# remember transfer_matrix_i has been split as
# transfer_matrix[i]=real(transfer_matrix_i) and
# transfer_matrix[j]=imag(transfer_matrix_i)
transfer_matrix_j_mo_transfer_matrix_j = (
transfer_matrix[:, i, np.newaxis] +
1j*transfer_matrix[:, j, np.newaxis]
)
if not np.allclose(np.abs(e_val[e_val_idx[-1]]),
np.abs(e_val[e_val_idx[-2]])):
ker_pole_mu = np.dot(ker_pole_ij, mu1)
else:
mu1_mu2_matrix = np.hstack((mu1, mu2))
ker_pole_mu = np.dot(ker_pole_ij, mu1_mu2_matrix)
transfer_matrix_i_j = np.dot(np.dot(ker_pole_mu, np.conj(ker_pole_mu.T)),
transfer_matrix_j_mo_transfer_matrix_j)
if not np.allclose(transfer_matrix_i_j, 0):
transfer_matrix_i_j = (transfer_matrix_i_j /
np.linalg.norm(transfer_matrix_i_j))
transfer_matrix[:, i] = np.real(transfer_matrix_i_j[:, 0])
transfer_matrix[:, j] = np.imag(transfer_matrix_i_j[:, 0])
else:
# same idea as in YT_real
transfer_matrix[:, i] = np.real(ker_pole_mu[:, 0])
transfer_matrix[:, j] = np.imag(ker_pole_mu[:, 0])
def _YT_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
"""
Algorithm "YT" Tits, Yang. Globally Convergent
Algorithms for Robust Pole Assignment by State Feedback
https://hdl.handle.net/1903/5598
    The poles P have to be sorted according to section 6.2 page 20
"""
# The IEEE edition of the YT paper gives useful information on the
# optimal update order for the real poles in order to minimize the number
# of times we have to loop over all poles, see page 1442
nb_real = poles[np.isreal(poles)].shape[0]
# hnb => Half Nb Real
hnb = nb_real // 2
    # Stick to the indices in the paper and then subtract one to get numpy
    # array indices; it is a bit easier to link the code to the paper this way
    # even if it is not very clean. The paper is unclear about what should be
    # done when there is only one real pole => using KNV0 on this real pole
    # seems to work.
if nb_real > 0:
#update the biggest real pole with the smallest one
update_order = [[nb_real], [1]]
else:
update_order = [[],[]]
r_comp = np.arange(nb_real+1, len(poles)+1, 2)
# step 1.a
r_p = np.arange(1, hnb+nb_real % 2)
update_order[0].extend(2*r_p)
update_order[1].extend(2*r_p+1)
# step 1.b
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 1.c
r_p = np.arange(1, hnb+1)
update_order[0].extend(2*r_p-1)
update_order[1].extend(2*r_p)
# step 1.d
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 2.a
r_j = np.arange(2, hnb+nb_real % 2)
for j in r_j:
for i in range(1, hnb+1):
update_order[0].append(i)
update_order[1].append(i+j)
# step 2.b
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 2.c
r_j = np.arange(2, hnb+nb_real % 2)
for j in r_j:
for i in range(hnb+1, nb_real+1):
idx_1 = i+j
if idx_1 > nb_real:
idx_1 = i+j-nb_real
update_order[0].append(i)
update_order[1].append(idx_1)
# step 2.d
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
# step 3.a
for i in range(1, hnb+1):
update_order[0].append(i)
update_order[1].append(i+hnb)
# step 3.b
if hnb == 0 and np.isreal(poles[0]):
update_order[0].append(1)
update_order[1].append(1)
update_order[0].extend(r_comp)
update_order[1].extend(r_comp+1)
update_order = np.array(update_order).T-1
stop = False
nb_try = 0
while nb_try < maxiter and not stop:
det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
for i, j in update_order:
if i == j:
assert i == 0, "i!=0 for KNV call in YT"
assert np.isreal(poles[i]), "calling KNV on a complex pole"
_KNV0(B, ker_pole, transfer_matrix, i, poles)
else:
transfer_matrix_not_i_j = np.delete(transfer_matrix, (i, j),
axis=1)
# after merge of gh-4249 great speed improvements could be
# achieved using QR updates instead of full QR in the line below
#to debug with numpy qr uncomment the line below
#Q, _ = np.linalg.qr(transfer_matrix_not_i_j, mode="complete")
Q, _ = s_qr(transfer_matrix_not_i_j, mode="full")
if np.isreal(poles[i]):
assert np.isreal(poles[j]), "mixing real and complex " + \
"in YT_real" + str(poles)
_YT_real(ker_pole, Q, transfer_matrix, i, j)
else:
assert ~np.isreal(poles[i]), "mixing real and complex " + \
"in YT_real" + str(poles)
_YT_complex(ker_pole, Q, transfer_matrix, i, j)
det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
np.abs(np.linalg.det(transfer_matrix))))
cur_rtol = np.abs(
(det_transfer_matrix -
det_transfer_matrixb) /
det_transfer_matrix)
if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
# Convergence test from YT page 21
stop = True
nb_try += 1
return stop, cur_rtol, nb_try
def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
"""
Loop over all poles one by one and apply KNV method 0 algorithm
"""
# This method is useful only because we need to be able to call
# _KNV0 from YT without looping over all poles, otherwise it would
# have been fine to mix _KNV0_loop and _KNV0 in a single function
stop = False
nb_try = 0
while nb_try < maxiter and not stop:
det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
for j in range(B.shape[0]):
_KNV0(B, ker_pole, transfer_matrix, j, poles)
det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
np.abs(np.linalg.det(transfer_matrix))))
cur_rtol = np.abs((det_transfer_matrix - det_transfer_matrixb) /
det_transfer_matrix)
if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
# Convergence test from YT page 21
stop = True
nb_try += 1
return stop, cur_rtol, nb_try
def place_poles(A, B, poles, method="YT", rtol=1e-3, maxiter=30):
"""
    Compute K such that the eigenvalues of ``A - dot(B, K)`` are ``poles``.
    K is the gain matrix such that the plant described by the linear system
    ``AX+BU`` will have its closed-loop poles, i.e. the eigenvalues of
    ``A - B*K``, as close as possible to those asked for in poles.
SISO, MISO and MIMO systems are supported.
Parameters
----------
A, B : ndarray
State-space representation of linear system ``AX + BU``.
poles : array_like
Desired real poles and/or complex conjugates poles.
Complex poles are only supported with ``method="YT"`` (default).
method: {'YT', 'KNV0'}, optional
Which method to choose to find the gain matrix K. One of:
- 'YT': Yang Tits
- 'KNV0': Kautsky, Nichols, Van Dooren update method 0
See References and Notes for details on the algorithms.
rtol: float, optional
After each iteration the determinant of the eigenvectors of
``A - B*K`` is compared to its previous value, when the relative
error between these two values becomes lower than `rtol` the algorithm
stops. Default is 1e-3.
maxiter: int, optional
Maximum number of iterations to compute the gain matrix.
Default is 30.
Returns
-------
full_state_feedback : Bunch object
full_state_feedback is composed of:
gain_matrix : 1-D ndarray
                The closed loop matrix K such that the eigenvalues of ``A-BK``
are as close as possible to the requested poles.
computed_poles : 1-D ndarray
The poles corresponding to ``A-BK`` sorted as first the real
                poles in increasing order, then the complex conjugates in
lexicographic order.
requested_poles : 1-D ndarray
The poles the algorithm was asked to place sorted as above,
they may differ from what was achieved.
X : 2-D ndarray
                The transfer matrix such that ``X * diag(poles) = (A - B*K)*X``
(see Notes)
rtol : float
The relative tolerance achieved on ``det(X)`` (see Notes).
`rtol` will be NaN if it is possible to solve the system
``diag(poles) = (A - B*K)``, or 0 when the optimization
                algorithms can't do anything, i.e. when ``B.shape[1] == 1``.
nb_iter : int
The number of iterations performed before converging.
`nb_iter` will be NaN if it is possible to solve the system
``diag(poles) = (A - B*K)``, or 0 when the optimization
                algorithms can't do anything, i.e. when ``B.shape[1] == 1``.
Notes
-----
The Tits and Yang (YT), [2]_ paper is an update of the original Kautsky et
al. (KNV) paper [1]_. KNV relies on rank-1 updates to find the transfer
matrix X such that ``X * diag(poles) = (A - B*K)*X``, whereas YT uses
rank-2 updates. This yields on average more robust solutions (see [2]_
    pp 21-22); furthermore, the YT algorithm supports complex poles whereas KNV
does not in its original version. Only update method 0 proposed by KNV has
been implemented here, hence the name ``'KNV0'``.
KNV extended to complex poles is used in Matlab's ``place`` function, YT is
distributed under a non-free licence by Slicot under the name ``robpole``.
It is unclear and undocumented how KNV0 has been extended to complex poles
    (Tits and Yang claim on page 14 of their paper that their method cannot be
used to extend KNV to complex poles), therefore only YT supports them in
this implementation.
As the solution to the problem of pole placement is not unique for MIMO
systems, both methods start with a tentative transfer matrix which is
    altered in various ways to increase its determinant. Both methods have been
proven to converge to a stable solution, however depending on the way the
initial transfer matrix is chosen they will converge to different
solutions and therefore there is absolutely no guarantee that using
``'KNV0'`` will yield results similar to Matlab's or any other
implementation of these algorithms.
Using the default method ``'YT'`` should be fine in most cases; ``'KNV0'``
is only provided because it is needed by ``'YT'`` in some specific cases.
Furthermore ``'YT'`` gives on average more robust results than ``'KNV0'``
when ``abs(det(X))`` is used as a robustness indicator.
[2]_ is available as a technical report on the following URL:
https://hdl.handle.net/1903/5598
References
----------
.. [1] J. Kautsky, N.K. Nichols and P. van Dooren, "Robust pole assignment
in linear state feedback", International Journal of Control, Vol. 41
pp. 1129-1155, 1985.
.. [2] A.L. Tits and Y. Yang, "Globally convergent algorithms for robust
pole assignment by state feedback", IEEE Transactions on Automatic
Control, Vol. 41, pp. 1432-1452, 1996.
Examples
--------
A simple example demonstrating real pole placement using both KNV and YT
algorithms. This is example number 1 from section 4 of the reference KNV
publication ([1]_):
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> A = np.array([[ 1.380, -0.2077, 6.715, -5.676 ],
... [-0.5814, -4.290, 0, 0.6750 ],
... [ 1.067, 4.273, -6.654, 5.893 ],
... [ 0.0480, 4.273, 1.343, -2.104 ]])
>>> B = np.array([[ 0, 5.679 ],
... [ 1.136, 1.136 ],
... [ 0, 0, ],
... [-3.146, 0 ]])
>>> P = np.array([-0.2, -0.5, -5.0566, -8.6659])
Now compute K with KNV method 0, with the default YT method and with the YT
method while forcing 100 iterations of the algorithm and print some results
after each call.
>>> fsf1 = signal.place_poles(A, B, P, method='KNV0')
>>> fsf1.gain_matrix
array([[ 0.20071427, -0.96665799, 0.24066128, -0.10279785],
[ 0.50587268, 0.57779091, 0.51795763, -0.41991442]])
>>> fsf2 = signal.place_poles(A, B, P) # uses YT method
>>> fsf2.computed_poles
array([-8.6659, -5.0566, -0.5 , -0.2 ])
>>> fsf3 = signal.place_poles(A, B, P, rtol=-1, maxiter=100)
>>> fsf3.X
array([[ 0.52072442+0.j, -0.08409372+0.j, -0.56847937+0.j, 0.74823657+0.j],
[-0.04977751+0.j, -0.80872954+0.j, 0.13566234+0.j, -0.29322906+0.j],
[-0.82266932+0.j, -0.19168026+0.j, -0.56348322+0.j, -0.43815060+0.j],
[ 0.22267347+0.j, 0.54967577+0.j, -0.58387806+0.j, -0.40271926+0.j]])
The absolute value of the determinant of X is a good indicator to check the
robustness of the results, both ``'KNV0'`` and ``'YT'`` aim at maximizing
it. Below a comparison of the robustness of the results above:
>>> abs(np.linalg.det(fsf1.X)) < abs(np.linalg.det(fsf2.X))
True
>>> abs(np.linalg.det(fsf2.X)) < abs(np.linalg.det(fsf3.X))
True
Now a simple example for complex poles:
>>> A = np.array([[ 0, 7/3., 0, 0 ],
... [ 0, 0, 0, 7/9. ],
... [ 0, 0, 0, 0 ],
... [ 0, 0, 0, 0 ]])
>>> B = np.array([[ 0, 0 ],
... [ 0, 0 ],
... [ 1, 0 ],
... [ 0, 1 ]])
>>> P = np.array([-3, -1, -2-1j, -2+1j]) / 3.
>>> fsf = signal.place_poles(A, B, P, method='YT')
We can plot the desired and computed poles in the complex plane:
>>> t = np.linspace(0, 2*np.pi, 401)
>>> plt.plot(np.cos(t), np.sin(t), 'k--') # unit circle
>>> plt.plot(fsf.requested_poles.real, fsf.requested_poles.imag,
... 'wo', label='Desired')
>>> plt.plot(fsf.computed_poles.real, fsf.computed_poles.imag, 'bx',
... label='Placed')
>>> plt.grid()
>>> plt.axis('image')
>>> plt.axis([-1.1, 1.1, -1.1, 1.1])
>>> plt.legend(bbox_to_anchor=(1.05, 1), loc=2, numpoints=1)
"""
    # All input checking lives in _valid_inputs; keeping it there avoids noise here
update_loop, poles = _valid_inputs(A, B, poles, method, rtol, maxiter)
# The current value of the relative tolerance we achieved
cur_rtol = 0
# The number of iterations needed before converging
nb_iter = 0
# Step A: QR decomposition of B page 1132 KN
# to debug with numpy qr uncomment the line below
# u, z = np.linalg.qr(B, mode="complete")
u, z = s_qr(B, mode="full")
rankB = np.linalg.matrix_rank(B)
u0 = u[:, :rankB]
u1 = u[:, rankB:]
z = z[:rankB, :]
# If we can use the identity matrix as X the solution is obvious
if B.shape[0] == rankB:
# if B is square and full rank there is only one solution
# such as (A+BK)=inv(X)*diag(P)*X with X=eye(A.shape[0])
# i.e K=inv(B)*(diag(P)-A)
# if B has as many lines as its rank (but not square) there are many
# solutions and we can choose one using least squares
# => use lstsq in both cases.
# In both cases the transfer matrix X will be eye(A.shape[0]) and I
# can hardly think of a better one so there is nothing to optimize
#
# for complex poles we use the following trick
#
        # |a -b| has eigenvalues a+bi and a-bi
# |b a|
#
# |a+bi 0| has the obvious eigenvalues a+bi and a-bi
# |0 a-bi|
#
        # so solving the first one in R gives the solution
# for the second one in C
diag_poles = np.zeros(A.shape)
idx = 0
while idx < poles.shape[0]:
p = poles[idx]
diag_poles[idx, idx] = np.real(p)
if ~np.isreal(p):
diag_poles[idx, idx+1] = -np.imag(p)
diag_poles[idx+1, idx+1] = np.real(p)
diag_poles[idx+1, idx] = np.imag(p)
idx += 1 # skip next one
idx += 1
gain_matrix = np.linalg.lstsq(B, diag_poles-A, rcond=-1)[0]
transfer_matrix = np.eye(A.shape[0])
cur_rtol = np.nan
nb_iter = np.nan
else:
# step A (p1144 KNV) and beginning of step F: decompose
# dot(U1.T, A-P[i]*I).T and build our set of transfer_matrix vectors
# in the same loop
ker_pole = []
# flag to skip the conjugate of a complex pole
skip_conjugate = False
# select orthonormal base ker_pole for each Pole and vectors for
# transfer_matrix
for j in range(B.shape[0]):
if skip_conjugate:
skip_conjugate = False
continue
pole_space_j = np.dot(u1.T, A-poles[j]*np.eye(B.shape[0])).T
# after QR Q=Q0|Q1
# only Q0 is used to reconstruct the qr'ed (dot Q, R) matrix.
# Q1 is orthogonnal to Q0 and will be multiplied by the zeros in
# R when using mode "complete". In default mode Q1 and the zeros
# in R are not computed
# To debug with numpy qr uncomment the line below
# Q, _ = np.linalg.qr(pole_space_j, mode="complete")
Q, _ = s_qr(pole_space_j, mode="full")
ker_pole_j = Q[:, pole_space_j.shape[1]:]
            # We want to select one vector in ker_pole_j to build the transfer
            # matrix. However, qr sometimes returns vectors with zeros in the
            # same row for each pole, and this yields very long convergence
            # times.
            # Other times it returns a set of vectors, one with zero imaginary
            # part and one (or several) with nonzero imaginary parts. After
            # trying many ways to select the best possible one (e.g. ditch
            # vectors with zero imaginary part for complex poles), I ended up
            # summing all vectors in ker_pole_j; this solves 100% of the
            # problems and is a valid choice for transfer_matrix.
            # For complex poles we are then sure to have a nonzero imaginary
            # part, and the problem of rows full of zeros in transfer_matrix
            # is solved too: when one vector from ker_pole_j has a zero, the
            # other one(s) (when ker_pole_j.shape[1] > 1) won't have a zero
            # there.
transfer_matrix_j = np.sum(ker_pole_j, axis=1)[:, np.newaxis]
transfer_matrix_j = (transfer_matrix_j /
np.linalg.norm(transfer_matrix_j))
if ~np.isreal(poles[j]): # complex pole
transfer_matrix_j = np.hstack([np.real(transfer_matrix_j),
np.imag(transfer_matrix_j)])
ker_pole.extend([ker_pole_j, ker_pole_j])
# Skip next pole as it is the conjugate
skip_conjugate = True
else: # real pole, nothing to do
ker_pole.append(ker_pole_j)
if j == 0:
transfer_matrix = transfer_matrix_j
else:
transfer_matrix = np.hstack((transfer_matrix, transfer_matrix_j))
if rankB > 1: # otherwise there is nothing we can optimize
stop, cur_rtol, nb_iter = update_loop(ker_pole, transfer_matrix,
poles, B, maxiter, rtol)
if not stop and rtol > 0:
# if rtol<=0 the user has probably done that on purpose,
# don't annoy him
err_msg = (
"Convergence was not reached after maxiter iterations.\n"
"You asked for a relative tolerance of %f we got %f" %
(rtol, cur_rtol)
)
warnings.warn(err_msg)
# reconstruct transfer_matrix to match complex conjugate pairs,
        # i.e. transfer_matrix_j/transfer_matrix_j+1 are
# Re(Complex_pole), Im(Complex_pole) now and will be Re-Im/Re+Im after
transfer_matrix = transfer_matrix.astype(complex)
idx = 0
while idx < poles.shape[0]-1:
if ~np.isreal(poles[idx]):
rel = transfer_matrix[:, idx].copy()
img = transfer_matrix[:, idx+1]
# rel will be an array referencing a column of transfer_matrix
                # if we don't copy() it, rel will change after the next line
                # and the line after will not yield the correct value
transfer_matrix[:, idx] = rel-1j*img
transfer_matrix[:, idx+1] = rel+1j*img
idx += 1 # skip next one
idx += 1
try:
m = np.linalg.solve(transfer_matrix.T, np.dot(np.diag(poles),
transfer_matrix.T)).T
gain_matrix = np.linalg.solve(z, np.dot(u0.T, m-A))
except np.linalg.LinAlgError as e:
raise ValueError("The poles you've chosen can't be placed. "
"Check the controllability matrix and try "
"another set of poles") from e
# Beware: Kautsky solves A+BK but the usual form is A-BK
gain_matrix = -gain_matrix
    # K still contains complex values with ~0j imaginary parts; get rid of them
gain_matrix = np.real(gain_matrix)
full_state_feedback = Bunch()
full_state_feedback.gain_matrix = gain_matrix
full_state_feedback.computed_poles = _order_complex_poles(
np.linalg.eig(A - np.dot(B, gain_matrix))[0]
)
full_state_feedback.requested_poles = poles
full_state_feedback.X = transfer_matrix
full_state_feedback.rtol = cur_rtol
full_state_feedback.nb_iter = nb_iter
return full_state_feedback
def dlsim(system, u, t=None, x0=None):
"""
Simulate output of a discrete-time linear system.
Parameters
----------
system : tuple of array_like or instance of `dlti`
A tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `dlti`)
* 3: (num, den, dt)
* 4: (zeros, poles, gain, dt)
* 5: (A, B, C, D, dt)
u : array_like
An input array describing the input at each time `t` (interpolation is
assumed between given times). If there are multiple inputs, then each
column of the rank-2 array represents an input.
t : array_like, optional
The time steps at which the input is defined. If `t` is given, it
must be the same length as `u`, and the final value in `t` determines
the number of steps returned in the output.
x0 : array_like, optional
The initial conditions on the state vector (zero by default).
Returns
-------
tout : ndarray
Time values for the output, as a 1-D array.
yout : ndarray
System response, as a 1-D array.
xout : ndarray, optional
Time-evolution of the state-vector. Only generated if the input is a
`StateSpace` system.
See Also
--------
lsim, dstep, dimpulse, cont2discrete
Examples
--------
A simple integrator transfer function with a discrete time step of 1.0
could be implemented as:
>>> from scipy import signal
>>> tf = ([1.0,], [1.0, -1.0], 1.0)
>>> t_in = [0.0, 1.0, 2.0, 3.0]
>>> u = np.asarray([0.0, 0.0, 1.0, 1.0])
>>> t_out, y = signal.dlsim(tf, u, t=t_in)
>>> y.T
array([[ 0., 0., 0., 1.]])
"""
# Convert system to dlti-StateSpace
if isinstance(system, lti):
raise AttributeError('dlsim can only be used with discrete-time dlti '
'systems.')
elif not isinstance(system, dlti):
system = dlti(*system[:-1], dt=system[-1])
# Condition needed to ensure output remains compatible
is_ss_input = isinstance(system, StateSpace)
system = system._as_ss()
u = np.atleast_1d(u)
if u.ndim == 1:
u = np.atleast_2d(u).T
if t is None:
out_samples = len(u)
stoptime = (out_samples - 1) * system.dt
else:
stoptime = t[-1]
out_samples = int(np.floor(stoptime / system.dt)) + 1
# Pre-build output arrays
xout = np.zeros((out_samples, system.A.shape[0]))
yout = np.zeros((out_samples, system.C.shape[0]))
tout = np.linspace(0.0, stoptime, num=out_samples)
# Check initial condition
if x0 is None:
xout[0, :] = np.zeros((system.A.shape[1],))
else:
xout[0, :] = np.asarray(x0)
# Pre-interpolate inputs into the desired time steps
if t is None:
u_dt = u
else:
if len(u.shape) == 1:
u = u[:, np.newaxis]
u_dt_interp = interp1d(t, u.transpose(), copy=False, bounds_error=True)
u_dt = u_dt_interp(tout).transpose()
# Simulate the system
for i in range(0, out_samples - 1):
xout[i+1, :] = (np.dot(system.A, xout[i, :]) +
np.dot(system.B, u_dt[i, :]))
yout[i, :] = (np.dot(system.C, xout[i, :]) +
np.dot(system.D, u_dt[i, :]))
# Last point
yout[out_samples-1, :] = (np.dot(system.C, xout[out_samples-1, :]) +
np.dot(system.D, u_dt[out_samples-1, :]))
if is_ss_input:
return tout, yout, xout
else:
return tout, yout
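# Editor's note: a hedged illustrative sketch (hypothetical helper, not scipy
# API): when `dlsim` is given a discrete `StateSpace` system rather than a
# tuple, it also returns the state trajectory `xout`, as documented above.  The
# scalar accumulator below is a made-up example system.
def _example_dlsim_statespace():
    sys_d = StateSpace([[1.0]], [[1.0]], [[1.0]], [[0.0]], dt=1.0)
    u = np.ones(4)
    tout, yout, xout = dlsim(sys_d, u)   # x[k+1] = x[k] + u[k]
    return tout, yout.ravel(), xout.ravel()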
def dimpulse(system, x0=None, t=None, n=None):
"""
Impulse response of discrete-time system.
Parameters
----------
system : tuple of array_like or instance of `dlti`
A tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `dlti`)
* 3: (num, den, dt)
* 4: (zeros, poles, gain, dt)
* 5: (A, B, C, D, dt)
x0 : array_like, optional
Initial state-vector. Defaults to zero.
t : array_like, optional
Time points. Computed if not given.
n : int, optional
The number of time points to compute (if `t` is not given).
Returns
-------
tout : ndarray
Time values for the output, as a 1-D array.
yout : tuple of ndarray
Impulse response of system. Each element of the tuple represents
the output of the system based on an impulse in each input.
See Also
--------
impulse, dstep, dlsim, cont2discrete
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> butter = signal.dlti(*signal.butter(3, 0.5))
>>> t, y = signal.dimpulse(butter, n=25)
>>> plt.step(t, np.squeeze(y))
>>> plt.grid()
>>> plt.xlabel('n [samples]')
>>> plt.ylabel('Amplitude')
"""
# Convert system to dlti-StateSpace
if isinstance(system, dlti):
system = system._as_ss()
elif isinstance(system, lti):
raise AttributeError('dimpulse can only be used with discrete-time '
'dlti systems.')
else:
system = dlti(*system[:-1], dt=system[-1])._as_ss()
# Default to 100 samples if unspecified
if n is None:
n = 100
# If time is not specified, use the number of samples
# and system dt
if t is None:
t = np.linspace(0, n * system.dt, n, endpoint=False)
else:
t = np.asarray(t)
# For each input, implement a step change
yout = None
for i in range(0, system.inputs):
u = np.zeros((t.shape[0], system.inputs))
u[0, i] = 1.0
one_output = dlsim(system, u, t=t, x0=x0)
if yout is None:
yout = (one_output[1],)
else:
yout = yout + (one_output[1],)
tout = one_output[0]
return tout, yout
def dstep(system, x0=None, t=None, n=None):
"""
Step response of discrete-time system.
Parameters
----------
system : tuple of array_like
A tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
* 1: (instance of `dlti`)
* 3: (num, den, dt)
* 4: (zeros, poles, gain, dt)
* 5: (A, B, C, D, dt)
x0 : array_like, optional
Initial state-vector. Defaults to zero.
t : array_like, optional
Time points. Computed if not given.
n : int, optional
The number of time points to compute (if `t` is not given).
Returns
-------
tout : ndarray
Output time points, as a 1-D array.
yout : tuple of ndarray
Step response of system. Each element of the tuple represents
the output of the system based on a step response to each input.
See Also
--------
step, dimpulse, dlsim, cont2discrete
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> butter = signal.dlti(*signal.butter(3, 0.5))
>>> t, y = signal.dstep(butter, n=25)
>>> plt.step(t, np.squeeze(y))
>>> plt.grid()
>>> plt.xlabel('n [samples]')
>>> plt.ylabel('Amplitude')
"""
# Convert system to dlti-StateSpace
if isinstance(system, dlti):
system = system._as_ss()
elif isinstance(system, lti):
raise AttributeError('dstep can only be used with discrete-time dlti '
'systems.')
else:
system = dlti(*system[:-1], dt=system[-1])._as_ss()
# Default to 100 samples if unspecified
if n is None:
n = 100
# If time is not specified, use the number of samples
# and system dt
if t is None:
t = np.linspace(0, n * system.dt, n, endpoint=False)
else:
t = np.asarray(t)
# For each input, implement a step change
yout = None
for i in range(0, system.inputs):
u = np.zeros((t.shape[0], system.inputs))
u[:, i] = np.ones((t.shape[0],))
one_output = dlsim(system, u, t=t, x0=x0)
if yout is None:
yout = (one_output[1],)
else:
yout = yout + (one_output[1],)
tout = one_output[0]
return tout, yout
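# --- Illustrative sketch (not part of the original scipy source) ----------
# For a discrete LTI system started at rest, the step response is the
# running sum of the impulse response.  A small numerical check of that
# identity, using an arbitrary Butterworth filter purely as an example.
def _dstep_cumsum_sketch():
    import numpy as np
    from scipy import signal
    sys = signal.dlti(*signal.butter(3, 0.5))
    n = 20
    _, (y_step,) = dstep(sys, n=n)
    _, (y_imp,) = dimpulse(sys, n=n)
    return np.allclose(np.squeeze(y_step), np.cumsum(np.squeeze(y_imp)))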
def dfreqresp(system, w=None, n=10000, whole=False):
r"""
Calculate the frequency response of a discrete-time system.
Parameters
----------
system : an instance of the `dlti` class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
            * 1 (instance of `dlti`)
            * 3 (numerator, denominator, dt)
            * 4 (zeros, poles, gain, dt)
            * 5 (A, B, C, D, dt)
w : array_like, optional
Array of frequencies (in radians/sample). Magnitude and phase data is
        calculated for every value in this array. If not given, a reasonable
set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
whole : bool, optional
Normally, if 'w' is not given, frequencies are computed from 0 to the
Nyquist frequency, pi radians/sample (upper-half of unit-circle). If
`whole` is True, compute frequencies from 0 to 2*pi radians/sample.
Returns
-------
w : 1D ndarray
Frequency array [radians/sample]
H : 1D ndarray
Array of complex magnitude values
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.18.0
Examples
--------
Generating the Nyquist plot of a transfer function
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Construct the transfer function
:math:`H(z) = \frac{1}{z^2 + 2z + 3}` with a sampling time of 0.05
seconds:
>>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05)
>>> w, H = signal.dfreqresp(sys)
>>> plt.figure()
>>> plt.plot(H.real, H.imag, "b")
>>> plt.plot(H.real, -H.imag, "r")
>>> plt.show()
"""
if not isinstance(system, dlti):
if isinstance(system, lti):
raise AttributeError('dfreqresp can only be used with '
'discrete-time systems.')
system = dlti(*system[:-1], dt=system[-1])
if isinstance(system, StateSpace):
# No SS->ZPK code exists right now, just SS->TF->ZPK
system = system._as_tf()
if not isinstance(system, (TransferFunction, ZerosPolesGain)):
raise ValueError('Unknown system type')
if system.inputs != 1 or system.outputs != 1:
raise ValueError("dfreqresp requires a SISO (single input, single "
"output) system.")
if w is not None:
worN = w
else:
worN = n
if isinstance(system, TransferFunction):
# Convert numerator and denominator from polynomials in the variable
# 'z' to polynomials in the variable 'z^-1', as freqz expects.
num, den = TransferFunction._z_to_zinv(system.num.ravel(), system.den)
w, h = freqz(num, den, worN=worN, whole=whole)
elif isinstance(system, ZerosPolesGain):
w, h = freqz_zpk(system.zeros, system.poles, system.gain, worN=worN,
whole=whole)
return w, h
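# --- Illustrative sketch (not part of the original scipy source) ----------
# dfreqresp evaluates the transfer function on the unit circle z = exp(j*w).
# The check below compares one frequency point against direct evaluation of
# H(z) = 1 / (z**2 + 2*z + 3); the coefficients are illustrative only.
def _dfreqresp_direct_eval_sketch():
    import numpy as np
    sys = TransferFunction([1], [1, 2, 3], dt=0.05)
    w = np.array([0.3])                    # radians/sample
    _, h = dfreqresp(sys, w=w)
    z = np.exp(1j * w[0])
    h_direct = 1.0 / (z ** 2 + 2 * z + 3)
    return np.allclose(h[0], h_direct)     # expected: True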
def dbode(system, w=None, n=100):
r"""
Calculate Bode magnitude and phase data of a discrete-time system.
Parameters
----------
    system : an instance of the `dlti` class or a tuple describing the system.
The following gives the number of elements in the tuple and
the interpretation:
            * 1 (instance of `dlti`)
            * 3 (num, den, dt)
            * 4 (zeros, poles, gain, dt)
            * 5 (A, B, C, D, dt)
w : array_like, optional
Array of frequencies (in radians/sample). Magnitude and phase data is
        calculated for every value in this array. If not given, a reasonable
set will be calculated.
n : int, optional
Number of frequency points to compute if `w` is not given. The `n`
frequencies are logarithmically spaced in an interval chosen to
include the influence of the poles and zeros of the system.
Returns
-------
w : 1D ndarray
Frequency array [rad/time_unit]
mag : 1D ndarray
Magnitude array [dB]
phase : 1D ndarray
Phase array [deg]
Notes
-----
If (num, den) is passed in for ``system``, coefficients for both the
numerator and denominator should be specified in descending exponent
order (e.g. ``z^2 + 3z + 5`` would be represented as ``[1, 3, 5]``).
.. versionadded:: 0.18.0
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Construct the transfer function :math:`H(z) = \frac{1}{z^2 + 2z + 3}` with
a sampling time of 0.05 seconds:
>>> sys = signal.TransferFunction([1], [1, 2, 3], dt=0.05)
Equivalent: sys.bode()
>>> w, mag, phase = signal.dbode(sys)
>>> plt.figure()
>>> plt.semilogx(w, mag) # Bode magnitude plot
>>> plt.figure()
>>> plt.semilogx(w, phase) # Bode phase plot
>>> plt.show()
"""
w, y = dfreqresp(system, w=w, n=n)
if isinstance(system, dlti):
dt = system.dt
else:
dt = system[-1]
mag = 20.0 * numpy.log10(abs(y))
phase = numpy.rad2deg(numpy.unwrap(numpy.angle(y)))
return w / dt, mag, phase
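# --- Illustrative sketch (not part of the original scipy source) ----------
# dbode converts the complex response into magnitude in dB and unwrapped
# phase in degrees, and rescales the frequency axis by 1/dt.  A one-value
# numeric illustration of that conversion:
def _dbode_conversion_sketch():
    import numpy as np
    h = np.array([0.5 + 0.5j])
    mag = 20.0 * np.log10(np.abs(h))            # -> about -3.01 dB
    phase = np.rad2deg(np.unwrap(np.angle(h)))  # -> 45.0 degrees
    return mag[0], phase[0]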
| bsd-3-clause |
Cadair/ginga | ginga/web/pgw/Plot.py | 3 | 4306 | #
# Plot.py -- Plotting widget canvas wrapper.
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from io import BytesIO
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from ginga.web.pgw import Widgets
class PlotWidget(Widgets.Canvas):
"""
This class implements the server-side backend of the surface for a
web-based plot viewer. It uses a web socket to connect to an HTML5
canvas with javascript callbacks in a web browser on the client.
The viewer is created separately on the backend and connects to this
surface via the set_viewer() method.
"""
def __init__(self, plot, width=500, height=500):
super(PlotWidget, self).__init__(width=width, height=height)
self.widget = FigureCanvas(plot.get_figure())
self.logger = plot.logger
self._configured = False
self.refresh_delay = 0.010
self.set_plot(plot)
def set_plot(self, plot):
self.logger.debug("set_plot called")
self.plot = plot
self._dispatch_event_table = {
"activate": self.ignore_event,
"setbounds": self.map_event_cb,
"mousedown": self.ignore_event,
"mouseup": self.ignore_event,
"mousemove": self.ignore_event,
"mouseout": self.ignore_event,
"mouseover": self.ignore_event,
"mousewheel": self.ignore_event,
"wheel": self.ignore_event,
"click": self.ignore_event,
"dblclick": self.ignore_event,
"keydown": self.ignore_event,
"keyup": self.ignore_event,
"keypress": self.ignore_event,
"resize": self.resize_event,
"focus": self.ignore_event,
"focusout": self.ignore_event,
"blur": self.ignore_event,
"drop": self.ignore_event,
"paste": self.ignore_event,
# Hammer.js events
"pinch": self.ignore_event,
"pinchstart": self.ignore_event,
"pinchend": self.ignore_event,
"rotate": self.ignore_event,
"rotatestart": self.ignore_event,
"rotateend": self.ignore_event,
"tap": self.ignore_event,
"pan": self.ignore_event,
"panstart": self.ignore_event,
"panend": self.ignore_event,
"swipe": self.ignore_event,
}
self.plot.add_callback('draw-canvas', self.draw_cb)
self.add_timer('refresh', self.refresh_cb)
def get_plot(self):
return self.plot
def ignore_event(self, event):
pass
def refresh_cb(self):
app = self.get_app()
app.do_operation('refresh_canvas', id=self.id)
self.reset_timer('refresh', self.refresh_delay)
def get_rgb_buffer(self, plot):
buf = BytesIO()
fig = plot.get_figure()
fig.canvas.print_figure(buf, format='png')
wd, ht = self.width, self.height
return (wd, ht, buf.getvalue())
def draw_cb(self, plot):
self.logger.debug("getting RGB buffer")
wd, ht, buf = self.get_rgb_buffer(plot)
#self.logger.debug("clear_rect")
#self.clear_rect(0, 0, wd, ht)
self.logger.debug("drawing %dx%d image" % (wd, ht))
self.draw_image(buf, 0, 0, wd, ht)
self.reset_timer('refresh', self.refresh_delay)
def configure_window(self, wd, ht):
self.logger.debug("canvas resized to %dx%d" % (wd, ht))
fig = self.plot.get_figure()
fig.set_size_inches(float(wd) / fig.dpi, float(ht) / fig.dpi)
def map_event_cb(self, event):
wd, ht = event.width, event.height
self.configure_window(wd, ht)
self.plot.draw()
def resize_event(self, event):
wd, ht = event.x, event.y
self.configure_window(wd, ht)
self.plot.draw()
def _cb_redirect(self, event):
method = self._dispatch_event_table[event.type]
try:
method(event)
except Exception as e:
self.logger.error("error redirecting '%s' event: %s" % (
event.type, str(e)))
# TODO: dump traceback to debug log
#END
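# --- Illustrative sketch (not part of the original ginga source) ----------
# get_rgb_buffer() above renders the matplotlib figure to PNG bytes with the
# Agg backend.  The same idea in standalone form, with no ginga widget or
# web socket involved; the figure contents are made up for the example.
def _figure_to_png_bytes_sketch():
    from io import BytesIO
    from matplotlib.figure import Figure
    from matplotlib.backends.backend_agg import FigureCanvasAgg
    fig = Figure(figsize=(2, 2))
    FigureCanvasAgg(fig)                  # attach an Agg canvas to the figure
    ax = fig.add_subplot(1, 1, 1)
    ax.plot([0, 1, 2], [0, 1, 4])
    buf = BytesIO()
    fig.canvas.print_figure(buf, format='png')
    return buf.getvalue()                 # raw PNG bytes, as in get_rgb_buffer()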
| bsd-3-clause |
bharatsingh430/py-R-FCN-multiGPU | caffe/examples/web_demo/app.py | 41 | 7793 | import os
import time
import cPickle
import datetime
import logging
import flask
import werkzeug
import optparse
import tornado.wsgi
import tornado.httpserver
import numpy as np
import pandas as pd
from PIL import Image
import cStringIO as StringIO
import urllib
import exifutil
import caffe
REPO_DIRNAME = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + '/../..')
UPLOAD_FOLDER = '/tmp/caffe_demos_uploads'
ALLOWED_IMAGE_EXTENSIONS = set(['png', 'bmp', 'jpg', 'jpe', 'jpeg', 'gif'])
# Obtain the flask app object
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('index.html', has_result=False)
@app.route('/classify_url', methods=['GET'])
def classify_url():
imageurl = flask.request.args.get('imageurl', '')
try:
string_buffer = StringIO.StringIO(
urllib.urlopen(imageurl).read())
image = caffe.io.load_image(string_buffer)
except Exception as err:
# For any exception we encounter in reading the image, we will just
# not continue.
logging.info('URL Image open error: %s', err)
return flask.render_template(
'index.html', has_result=True,
result=(False, 'Cannot open image from URL.')
)
logging.info('Image: %s', imageurl)
result = app.clf.classify_image(image)
return flask.render_template(
'index.html', has_result=True, result=result, imagesrc=imageurl)
@app.route('/classify_upload', methods=['POST'])
def classify_upload():
try:
# We will save the file to disk for possible data collection.
imagefile = flask.request.files['imagefile']
filename_ = str(datetime.datetime.now()).replace(' ', '_') + \
werkzeug.secure_filename(imagefile.filename)
filename = os.path.join(UPLOAD_FOLDER, filename_)
imagefile.save(filename)
logging.info('Saving to %s.', filename)
image = exifutil.open_oriented_im(filename)
except Exception as err:
logging.info('Uploaded image open error: %s', err)
return flask.render_template(
'index.html', has_result=True,
result=(False, 'Cannot open uploaded image.')
)
result = app.clf.classify_image(image)
return flask.render_template(
'index.html', has_result=True, result=result,
imagesrc=embed_image_html(image)
)
def embed_image_html(image):
"""Creates an image embedded in HTML base64 format."""
image_pil = Image.fromarray((255 * image).astype('uint8'))
image_pil = image_pil.resize((256, 256))
string_buf = StringIO.StringIO()
image_pil.save(string_buf, format='png')
data = string_buf.getvalue().encode('base64').replace('\n', '')
return 'data:image/png;base64,' + data
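# --- Illustrative sketch (not part of the original demo) ------------------
# embed_image_html() above uses the Python 2-only str.encode('base64') codec.
# A hypothetical Python 3-compatible equivalent of the same idea (this helper
# is not referenced anywhere else in the app):
def embed_image_html_py3(image):
    """Creates an image embedded in HTML base64 format (Python 3 sketch)."""
    import base64
    from io import BytesIO
    image_pil = Image.fromarray((255 * image).astype('uint8'))
    image_pil = image_pil.resize((256, 256))
    buf = BytesIO()
    image_pil.save(buf, format='png')
    data = base64.b64encode(buf.getvalue()).decode('ascii')
    return 'data:image/png;base64,' + data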
def allowed_file(filename):
return (
'.' in filename and
filename.rsplit('.', 1)[1] in ALLOWED_IMAGE_EXTENSIONS
)
class ImagenetClassifier(object):
default_args = {
'model_def_file': (
'{}/models/bvlc_reference_caffenet/deploy.prototxt'.format(REPO_DIRNAME)),
'pretrained_model_file': (
'{}/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'.format(REPO_DIRNAME)),
'mean_file': (
'{}/python/caffe/imagenet/ilsvrc_2012_mean.npy'.format(REPO_DIRNAME)),
'class_labels_file': (
'{}/data/ilsvrc12/synset_words.txt'.format(REPO_DIRNAME)),
'bet_file': (
'{}/data/ilsvrc12/imagenet.bet.pickle'.format(REPO_DIRNAME)),
}
for key, val in default_args.iteritems():
if not os.path.exists(val):
raise Exception(
"File for {} is missing. Should be at: {}".format(key, val))
default_args['image_dim'] = 256
default_args['raw_scale'] = 255.
def __init__(self, model_def_file, pretrained_model_file, mean_file,
raw_scale, class_labels_file, bet_file, image_dim, gpu_mode):
logging.info('Loading net and associated files...')
if gpu_mode:
caffe.set_mode_gpu()
else:
caffe.set_mode_cpu()
self.net = caffe.Classifier(
model_def_file, pretrained_model_file,
image_dims=(image_dim, image_dim), raw_scale=raw_scale,
mean=np.load(mean_file).mean(1).mean(1), channel_swap=(2, 1, 0)
)
with open(class_labels_file) as f:
labels_df = pd.DataFrame([
{
'synset_id': l.strip().split(' ')[0],
'name': ' '.join(l.strip().split(' ')[1:]).split(',')[0]
}
for l in f.readlines()
])
self.labels = labels_df.sort('synset_id')['name'].values
self.bet = cPickle.load(open(bet_file))
# A bias to prefer children nodes in single-chain paths
# I am setting the value to 0.1 as a quick, simple model.
# We could use better psychological models here...
self.bet['infogain'] -= np.array(self.bet['preferences']) * 0.1
def classify_image(self, image):
try:
starttime = time.time()
scores = self.net.predict([image], oversample=True).flatten()
endtime = time.time()
indices = (-scores).argsort()[:5]
predictions = self.labels[indices]
# In addition to the prediction text, we will also produce
# the length for the progress bar visualization.
meta = [
(p, '%.5f' % scores[i])
for i, p in zip(indices, predictions)
]
logging.info('result: %s', str(meta))
# Compute expected information gain
expected_infogain = np.dot(
self.bet['probmat'], scores[self.bet['idmapping']])
expected_infogain *= self.bet['infogain']
# sort the scores
infogain_sort = expected_infogain.argsort()[::-1]
bet_result = [(self.bet['words'][v], '%.5f' % expected_infogain[v])
for v in infogain_sort[:5]]
logging.info('bet result: %s', str(bet_result))
return (True, meta, bet_result, '%.3f' % (endtime - starttime))
except Exception as err:
logging.info('Classification error: %s', err)
return (False, 'Something went wrong when classifying the '
'image. Maybe try another one?')
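# --- Illustrative sketch (not part of the original demo) ------------------
# The "bet" re-ranking in classify_image() computes an expected information
# gain per node of the label hierarchy: probmat maps leaf scores to node
# probabilities, which are then weighted by each node's infogain.  A toy
# version with made-up shapes (2 nodes, 3 leaves):
def _expected_infogain_sketch():
    scores = np.array([0.7, 0.2, 0.1])        # leaf class probabilities
    idmapping = np.array([0, 1, 2])           # leaf -> column reordering
    probmat = np.array([[1.0, 1.0, 0.0],      # node 0 covers leaves 0 and 1
                        [0.0, 0.0, 1.0]])     # node 1 covers leaf 2
    infogain = np.array([0.5, 1.5])           # per-node information gain
    return np.dot(probmat, scores[idmapping]) * infogain   # -> [0.45, 0.15]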
def start_tornado(app, port=5000):
http_server = tornado.httpserver.HTTPServer(
tornado.wsgi.WSGIContainer(app))
http_server.listen(port)
print("Tornado server starting on port {}".format(port))
tornado.ioloop.IOLoop.instance().start()
def start_from_terminal(app):
"""
Parse command line options and start the server.
"""
parser = optparse.OptionParser()
parser.add_option(
'-d', '--debug',
help="enable debug mode",
action="store_true", default=False)
parser.add_option(
'-p', '--port',
help="which port to serve content on",
type='int', default=5000)
parser.add_option(
'-g', '--gpu',
help="use gpu mode",
action='store_true', default=False)
opts, args = parser.parse_args()
ImagenetClassifier.default_args.update({'gpu_mode': opts.gpu})
# Initialize classifier + warm start by forward for allocation
app.clf = ImagenetClassifier(**ImagenetClassifier.default_args)
app.clf.net.forward()
if opts.debug:
app.run(debug=True, host='0.0.0.0', port=opts.port)
else:
start_tornado(app, opts.port)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
if not os.path.exists(UPLOAD_FOLDER):
os.makedirs(UPLOAD_FOLDER)
start_from_terminal(app)
| mit |
amolkahat/pandas | pandas/tests/indexes/datetimes/test_formats.py | 5 | 8703 | from datetime import datetime
from pandas import DatetimeIndex, Series
import numpy as np
import dateutil.tz
import pytz
import pytest
import pandas.util.testing as tm
import pandas as pd
def test_to_native_types():
index = DatetimeIndex(freq='1D', periods=3, start='2017-01-01')
# First, with no arguments.
expected = np.array(['2017-01-01', '2017-01-02',
'2017-01-03'], dtype=object)
result = index.to_native_types()
tm.assert_numpy_array_equal(result, expected)
# No NaN values, so na_rep has no effect
result = index.to_native_types(na_rep='pandas')
tm.assert_numpy_array_equal(result, expected)
# Make sure slicing works
expected = np.array(['2017-01-01', '2017-01-03'], dtype=object)
result = index.to_native_types([0, 2])
tm.assert_numpy_array_equal(result, expected)
# Make sure date formatting works
expected = np.array(['01-2017-01', '01-2017-02',
'01-2017-03'], dtype=object)
result = index.to_native_types(date_format='%m-%Y-%d')
tm.assert_numpy_array_equal(result, expected)
# NULL object handling should work
index = DatetimeIndex(['2017-01-01', pd.NaT, '2017-01-03'])
expected = np.array(['2017-01-01', 'NaT', '2017-01-03'], dtype=object)
result = index.to_native_types()
tm.assert_numpy_array_equal(result, expected)
expected = np.array(['2017-01-01', 'pandas',
'2017-01-03'], dtype=object)
result = index.to_native_types(na_rep='pandas')
tm.assert_numpy_array_equal(result, expected)
class TestDatetimeIndexRendering(object):
def test_dti_repr_short(self):
dr = pd.date_range(start='1/1/2012', periods=1)
repr(dr)
dr = pd.date_range(start='1/1/2012', periods=2)
repr(dr)
dr = pd.date_range(start='1/1/2012', periods=3)
repr(dr)
@pytest.mark.parametrize('method', ['__repr__', '__unicode__', '__str__'])
def test_dti_representation(self, method):
idxs = []
idxs.append(DatetimeIndex([], freq='D'))
idxs.append(DatetimeIndex(['2011-01-01'], freq='D'))
idxs.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idxs.append(DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq='D'))
idxs.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'
], freq='H', tz='Asia/Tokyo'))
idxs.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='US/Eastern'))
idxs.append(DatetimeIndex(
['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT], tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', "
"freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
"dtype='datetime64[ns]', freq='D')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+09:00', "
"'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"
", dtype='datetime64[ns, Asia/Tokyo]', freq='H')")
exp.append("DatetimeIndex(['2011-01-01 09:00:00-05:00', "
"'2011-01-01 10:00:00-05:00', 'NaT'], "
"dtype='datetime64[ns, US/Eastern]', freq=None)")
exp.append("DatetimeIndex(['2011-01-01 09:00:00+00:00', "
"'2011-01-01 10:00:00+00:00', 'NaT'], "
"dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idxs, exp):
result = getattr(indx, method)()
assert result == expected
def test_dti_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = ("0 2011-01-01\n"
"dtype: datetime64[ns]")
exp3 = ("0 2011-01-01\n"
"1 2011-01-02\n"
"dtype: datetime64[ns]")
exp4 = ("0 2011-01-01\n"
"1 2011-01-02\n"
"2 2011-01-03\n"
"dtype: datetime64[ns]")
exp5 = ("0 2011-01-01 09:00:00+09:00\n"
"1 2011-01-01 10:00:00+09:00\n"
"2 2011-01-01 11:00:00+09:00\n"
"dtype: datetime64[ns, Asia/Tokyo]")
exp6 = ("0 2011-01-01 09:00:00-05:00\n"
"1 2011-01-01 10:00:00-05:00\n"
"2 NaT\n"
"dtype: datetime64[ns, US/Eastern]")
exp7 = ("0 2011-01-01 09:00:00\n"
"1 2011-01-02 10:15:00\n"
"dtype: datetime64[ns]")
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4,
idx5, idx6, idx7],
[exp1, exp2, exp3, exp4,
exp5, exp6, exp7]):
result = repr(Series(idx))
assert result == expected
def test_dti_summary(self):
# GH#9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(
['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = ("DatetimeIndex: 0 entries\n"
"Freq: D")
exp2 = ("DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01\n"
"Freq: D")
exp3 = ("DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02\n"
"Freq: D")
exp4 = ("DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03\n"
"Freq: D")
exp5 = ("DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "
"to 2011-01-01 11:00:00+09:00\n"
"Freq: H")
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx._summary()
assert result == expected
def test_dti_business_repr(self):
# only really care that it works
repr(pd.bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1)))
def test_dti_business_summary(self):
rng = pd.bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1))
rng._summary()
rng[2:2]._summary()
def test_dti_business_summary_pytz(self):
pd.bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc)._summary()
def test_dti_business_summary_dateutil(self):
pd.bdate_range('1/1/2005', '1/1/2009',
tz=dateutil.tz.tzutc())._summary()
def test_dti_custom_business_repr(self):
# only really care that it works
repr(pd.bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1),
freq='C'))
def test_dti_custom_business_summary(self):
rng = pd.bdate_range(datetime(2009, 1, 1), datetime(2010, 1, 1),
freq='C')
rng._summary()
rng[2:2]._summary()
def test_dti_custom_business_summary_pytz(self):
pd.bdate_range('1/1/2005', '1/1/2009', freq='C',
tz=pytz.utc)._summary()
def test_dti_custom_business_summary_dateutil(self):
pd.bdate_range('1/1/2005', '1/1/2009', freq='C',
tz=dateutil.tz.tzutc())._summary()
| bsd-3-clause |
lenovor/scikit-learn | sklearn/tests/test_metaestimators.py | 226 | 4954 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
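# --- Illustrative sketch (not part of the original test module) -----------
# The `hides` decorator above makes a method disappear on demand: exposing it
# through a property that raises AttributeError makes hasattr() return False,
# which is exactly what the delegation checks rely on.  A minimal standalone
# version of the trick:
def _hidden_method_sketch():
    class Toy(object):
        @property
        def transform(self):
            raise AttributeError("'transform' is hidden on this instance")
    return hasattr(Toy(), 'transform')    # expected: False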
| bsd-3-clause |
draekko/androguard | elsim/elsim/elsim.py | 37 | 16175 | # This file is part of Elsim
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Elsim is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Elsim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Elsim. If not, see <http://www.gnu.org/licenses/>.
import logging
ELSIM_VERSION = 0.2
log_elsim = logging.getLogger("elsim")
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
log_elsim.addHandler(console_handler)
log_runtime = logging.getLogger("elsim.runtime") # logs at runtime
log_interactive = logging.getLogger("elsim.interactive") # logs in interactive functions
log_loading = logging.getLogger("elsim.loading") # logs when loading
def set_debug():
log_elsim.setLevel( logging.DEBUG )
def get_debug():
return log_elsim.getEffectiveLevel() == logging.DEBUG
def warning(x):
log_runtime.warning(x)
def error(x):
log_runtime.error(x)
raise()
def debug(x):
log_runtime.debug(x)
from similarity.similarity import *
FILTER_ELEMENT_METH = "FILTER_ELEMENT_METH"
FILTER_CHECKSUM_METH = "FILTER_CHECKSUM_METH" # function to checksum an element
FILTER_SIM_METH = "FILTER_SIM_METH" # function to calculate the similarity between two elements
FILTER_SORT_METH = "FILTER_SORT_METH" # function to sort all similar elements
FILTER_SORT_VALUE = "FILTER_SORT_VALUE" # value which used in the sort method to eliminate not interesting comparisons
FILTER_SKIPPED_METH = "FILTER_SKIPPED_METH" # object to skip elements
FILTER_SIM_VALUE_METH = "FILTER_SIM_VALUE_METH" # function to modify values of the similarity
BASE = "base"
ELEMENTS = "elements"
HASHSUM = "hashsum"
SIMILAR_ELEMENTS = "similar_elements"
HASHSUM_SIMILAR_ELEMENTS = "hash_similar_elements"
NEW_ELEMENTS = "newelements"
HASHSUM_NEW_ELEMENTS = "hash_new_elements"
DELETED_ELEMENTS = "deletedelements"
IDENTICAL_ELEMENTS = "identicalelements"
INTERNAL_IDENTICAL_ELEMENTS = "internal identical elements"
SKIPPED_ELEMENTS = "skippedelements"
SIMILARITY_ELEMENTS = "similarity_elements"
SIMILARITY_SORT_ELEMENTS = "similarity_sort_elements"
class ElsimNeighbors(object):
def __init__(self, x, ys):
import numpy as np
from sklearn.neighbors import NearestNeighbors
#print x, ys
CI = np.array( [x.checksum.get_signature_entropy(), x.checksum.get_entropy()] )
#print CI, x.get_info()
#print
for i in ys:
CI = np.vstack( (CI, [i.checksum.get_signature_entropy(), i.checksum.get_entropy()]) )
#idx = 0
#for i in np.array(CI)[1:]:
# print idx+1, i, ys[idx].get_info()
# idx += 1
self.neigh = NearestNeighbors(2, 0.4)
self.neigh.fit(np.array(CI))
#print self.neigh.kneighbors( CI[0], len(CI) )
self.CI = CI
self.ys = ys
def cmp_elements(self):
z = self.neigh.kneighbors( self.CI[0], 5 )
l = []
cmp_values = z[0][0]
cmp_elements = z[1][0]
idx = 1
for i in cmp_elements[1:]:
#if cmp_values[idx] > 1.0:
# break
#print i, cmp_values[idx], self.ys[ i - 1 ].get_info()
l.append( self.ys[ i - 1 ] )
idx += 1
return l
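# --- Illustrative sketch (not part of the original elsim source) ----------
# ElsimNeighbors above indexes every element by the two-dimensional feature
# (signature entropy, raw entropy) and asks scikit-learn for the nearest
# candidates.  The same query on made-up feature vectors:
def _nearest_entropy_sketch():
    import numpy as np
    from sklearn.neighbors import NearestNeighbors
    features = np.array([[0.91, 0.52],    # element under comparison (row 0)
                         [0.90, 0.50],    # candidate A
                         [0.10, 0.95],    # candidate B
                         [0.88, 0.55]])   # candidate C
    nn = NearestNeighbors(n_neighbors=3).fit(features)
    dist, ind = nn.kneighbors(features[:1])
    return ind[0]                          # -> [0, 1, 3]: itself, then A and C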
def split_elements(el, els):
e1 = {}
for i in els:
e1[ i ] = el.get_associated_element( i )
return e1
####
# elements : entropy raw, hash, signature
#
# set elements : hash
# hash table elements : hash --> element
class Elsim(object):
def __init__(self, e1, e2, F, T=None, C=None, libnative=True, libpath="elsim/elsim/similarity/libsimilarity/libsimilarity.so"):
self.e1 = e1
self.e2 = e2
self.F = F
self.compressor = SNAPPY_COMPRESS
set_debug()
if T != None:
self.F[ FILTER_SORT_VALUE ] = T
if isinstance(libnative, str):
libpath = libnative
libnative = True
self.sim = SIMILARITY( libpath, libnative )
if C != None:
if C in H_COMPRESSOR:
self.compressor = H_COMPRESSOR[ C ]
self.sim.set_compress_type( self.compressor )
else:
self.sim.set_compress_type( self.compressor )
self.filters = {}
self._init_filters()
self._init_index_elements()
self._init_similarity()
self._init_sort_elements()
self._init_new_elements()
def _init_filters(self):
self.filters = {}
self.filters[ BASE ] = {}
self.filters[ BASE ].update( self.F )
self.filters[ ELEMENTS ] = {}
self.filters[ HASHSUM ] = {}
self.filters[ IDENTICAL_ELEMENTS ] = set()
self.filters[ SIMILAR_ELEMENTS ] = []
self.filters[ HASHSUM_SIMILAR_ELEMENTS ] = []
self.filters[ NEW_ELEMENTS ] = set()
self.filters[ HASHSUM_NEW_ELEMENTS ] = []
self.filters[ DELETED_ELEMENTS ] = []
self.filters[ SKIPPED_ELEMENTS ] = []
self.filters[ ELEMENTS ][ self.e1 ] = []
self.filters[ HASHSUM ][ self.e1 ] = []
self.filters[ ELEMENTS ][ self.e2 ] = []
self.filters[ HASHSUM ][ self.e2 ] = []
self.filters[ SIMILARITY_ELEMENTS ] = {}
self.filters[ SIMILARITY_SORT_ELEMENTS ] = {}
self.set_els = {}
self.ref_set_els = {}
self.ref_set_ident = {}
def _init_index_elements(self):
self.__init_index_elements( self.e1, 1 )
self.__init_index_elements( self.e2 )
def __init_index_elements(self, ce, init=0):
self.set_els[ ce ] = set()
self.ref_set_els[ ce ] = {}
self.ref_set_ident[ce] = {}
for ae in ce.get_elements():
e = self.filters[BASE][FILTER_ELEMENT_METH]( ae, ce )
if self.filters[BASE][FILTER_SKIPPED_METH].skip( e ):
self.filters[ SKIPPED_ELEMENTS ].append( e )
continue
self.filters[ ELEMENTS ][ ce ].append( e )
fm = self.filters[ BASE ][ FILTER_CHECKSUM_METH ]( e, self.sim )
e.set_checksum( fm )
sha256 = e.getsha256()
self.filters[ HASHSUM ][ ce ].append( sha256 )
if sha256 not in self.set_els[ ce ]:
self.set_els[ ce ].add( sha256 )
self.ref_set_els[ ce ][ sha256 ] = e
self.ref_set_ident[ce][sha256] = []
self.ref_set_ident[ce][sha256].append(e)
def _init_similarity(self):
intersection_elements = self.set_els[ self.e2 ].intersection( self.set_els[ self.e1 ] )
difference_elements = self.set_els[ self.e2 ].difference( intersection_elements )
self.filters[IDENTICAL_ELEMENTS].update([ self.ref_set_els[ self.e1 ][ i ] for i in intersection_elements ])
available_e2_elements = [ self.ref_set_els[ self.e2 ][ i ] for i in difference_elements ]
# Check if some elements in the first file has been modified
for j in self.filters[ELEMENTS][self.e1]:
self.filters[ SIMILARITY_ELEMENTS ][ j ] = {}
#debug("SIM FOR %s" % (j.get_info()))
if j.getsha256() not in self.filters[HASHSUM][self.e2]:
#eln = ElsimNeighbors( j, available_e2_elements )
#for k in eln.cmp_elements():
for k in available_e2_elements:
#debug("%s" % k.get_info())
self.filters[SIMILARITY_ELEMENTS][ j ][ k ] = self.filters[BASE][FILTER_SIM_METH]( self.sim, j, k )
if j.getsha256() not in self.filters[HASHSUM_SIMILAR_ELEMENTS]:
self.filters[SIMILAR_ELEMENTS].append(j)
self.filters[HASHSUM_SIMILAR_ELEMENTS].append( j.getsha256() )
def _init_sort_elements(self):
deleted_elements = []
for j in self.filters[SIMILAR_ELEMENTS]:
#debug("SORT FOR %s" % (j.get_info()))
sort_h = self.filters[BASE][FILTER_SORT_METH]( j, self.filters[SIMILARITY_ELEMENTS][ j ], self.filters[BASE][FILTER_SORT_VALUE] )
self.filters[SIMILARITY_SORT_ELEMENTS][ j ] = set( i[0] for i in sort_h )
ret = True
if sort_h == []:
ret = False
if ret == False:
deleted_elements.append( j )
for j in deleted_elements:
self.filters[ DELETED_ELEMENTS ].append( j )
self.filters[ SIMILAR_ELEMENTS ].remove( j )
def __checksort(self, x, y):
return y in self.filters[SIMILARITY_SORT_ELEMENTS][ x ]
def _init_new_elements(self):
# Check if some elements in the second file are totally new !
for j in self.filters[ELEMENTS][self.e2]:
# new elements can't be in similar elements
if j not in self.filters[SIMILAR_ELEMENTS]:
# new elements hashes can't be in first file
if j.getsha256() not in self.filters[HASHSUM][self.e1]:
ok = True
# new elements can't be compared to another one
for diff_element in self.filters[SIMILAR_ELEMENTS]:
if self.__checksort( diff_element, j ):
ok = False
break
if ok:
if j.getsha256() not in self.filters[HASHSUM_NEW_ELEMENTS]:
self.filters[NEW_ELEMENTS].add( j )
self.filters[HASHSUM_NEW_ELEMENTS].append( j.getsha256() )
def get_similar_elements(self):
""" Return the similar elements
@rtype : a list of elements
"""
return self.get_elem( SIMILAR_ELEMENTS )
def get_new_elements(self):
""" Return the new elements
@rtype : a list of elements
"""
return self.get_elem( NEW_ELEMENTS )
def get_deleted_elements(self):
""" Return the deleted elements
@rtype : a list of elements
"""
return self.get_elem( DELETED_ELEMENTS )
def get_internal_identical_elements(self, ce):
""" Return the internal identical elements
@rtype : a list of elements
"""
return self.get_elem( INTERNAL_IDENTICAL_ELEMENTS )
def get_identical_elements(self):
""" Return the identical elements
@rtype : a list of elements
"""
return self.get_elem( IDENTICAL_ELEMENTS )
def get_skipped_elements(self):
return self.get_elem( SKIPPED_ELEMENTS )
def get_elem(self, attr):
return [ x for x in self.filters[attr] ]
def show_element(self, i, details=True):
print "\t", i.get_info()
if details:
if i.getsha256() == None:
pass
elif i.getsha256() in self.ref_set_els[self.e2]:
if len(self.ref_set_ident[self.e2][i.getsha256()]) > 1:
for ident in self.ref_set_ident[self.e2][i.getsha256()]:
print "\t\t-->", ident.get_info()
else:
print "\t\t-->", self.ref_set_els[self.e2][ i.getsha256() ].get_info()
else:
for j in self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ]:
print "\t\t-->", j.get_info(), self.filters[ SIMILARITY_ELEMENTS ][ i ][ j ]
def get_element_info(self, i):
l = []
if i.getsha256() == None:
pass
elif i.getsha256() in self.ref_set_els[self.e2]:
l.append( [ i, self.ref_set_els[self.e2][ i.getsha256() ] ] )
else:
for j in self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ]:
l.append( [i, j, self.filters[ SIMILARITY_ELEMENTS ][ i ][ j ] ] )
return l
def get_associated_element(self, i):
return list(self.filters[ SIMILARITY_SORT_ELEMENTS ][ i ])[0]
def get_similarity_value(self, new=True):
values = []
self.sim.set_compress_type( BZ2_COMPRESS )
for j in self.filters[SIMILAR_ELEMENTS]:
k = self.get_associated_element( j )
value = self.filters[BASE][FILTER_SIM_METH]( self.sim, j, k )
# filter value
value = self.filters[BASE][FILTER_SIM_VALUE_METH]( value )
values.append( value )
values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 0.0 ) for i in self.filters[IDENTICAL_ELEMENTS] ] )
if new == True:
values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 1.0 ) for i in self.filters[NEW_ELEMENTS] ] )
else:
values.extend( [ self.filters[BASE][FILTER_SIM_VALUE_METH]( 1.0 ) for i in self.filters[DELETED_ELEMENTS] ] )
self.sim.set_compress_type( self.compressor )
similarity_value = 0.0
for i in values:
similarity_value += (1.0 - i)
if len(values) == 0:
return 0.0
return (similarity_value/len(values)) * 100
def show(self):
print "Elements:"
print "\t IDENTICAL:\t", len(self.get_identical_elements())
print "\t SIMILAR: \t", len(self.get_similar_elements())
print "\t NEW:\t\t", len(self.get_new_elements())
print "\t DELETED:\t", len(self.get_deleted_elements())
print "\t SKIPPED:\t", len(self.get_skipped_elements())
#self.sim.show()
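# --- Illustrative sketch (not part of the original elsim source) ----------
# get_similarity_value() above averages per-element dissimilarities: identical
# elements contribute 0.0 and new (or deleted) elements 1.0 (both passed
# through the FILTER_SIM_VALUE_METH hook), while similar elements contribute
# their compression-based distance; the score is mean(1 - value) * 100.
# A toy computation with made-up values:
def _similarity_score_sketch():
    values = [0.0, 0.0, 0.2, 1.0]   # 2 identical, 1 similar (d=0.2), 1 new
    return sum(1.0 - v for v in values) / len(values) * 100   # -> 70.0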
ADDED_ELEMENTS = "added elements"
DELETED_ELEMENTS = "deleted elements"
LINK_ELEMENTS = "link elements"
DIFF = "diff"
class Eldiff(object):
def __init__(self, elsim, F):
self.elsim = elsim
self.F = F
self._init_filters()
self._init_diff()
def _init_filters(self):
self.filters = {}
self.filters[ BASE ] = {}
self.filters[ BASE ].update( self.F )
self.filters[ ELEMENTS ] = {}
self.filters[ ADDED_ELEMENTS ] = {}
self.filters[ DELETED_ELEMENTS ] = {}
self.filters[ LINK_ELEMENTS ] = {}
def _init_diff(self):
for i, j in self.elsim.get_elements():
self.filters[ ADDED_ELEMENTS ][ j ] = []
self.filters[ DELETED_ELEMENTS ][ i ] = []
x = self.filters[ BASE ][ DIFF ]( i, j )
self.filters[ ADDED_ELEMENTS ][ j ].extend( x.get_added_elements() )
self.filters[ DELETED_ELEMENTS ][ i ].extend( x.get_deleted_elements() )
self.filters[ LINK_ELEMENTS ][ j ] = i
#self.filters[ LINK_ELEMENTS ][ i ] = j
def show(self):
for bb in self.filters[ LINK_ELEMENTS ] : #print "la"
print bb.get_info(), self.filters[ LINK_ELEMENTS ][ bb ].get_info()
print "Added Elements(%d)" % (len(self.filters[ ADDED_ELEMENTS ][ bb ]))
for i in self.filters[ ADDED_ELEMENTS ][ bb ]:
print "\t",
i.show()
print "Deleted Elements(%d)" % (len(self.filters[ DELETED_ELEMENTS ][ self.filters[ LINK_ELEMENTS ][ bb ] ]))
for i in self.filters[ DELETED_ELEMENTS ][ self.filters[ LINK_ELEMENTS ][ bb ] ]:
print "\t",
i.show()
print
def get_added_elements(self):
return self.filters[ ADDED_ELEMENTS ]
def get_deleted_elements(self):
return self.filters[ DELETED_ELEMENTS ]
| apache-2.0 |
iismd17/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
krez13/scikit-learn | sklearn/neighbors/regression.py | 32 | 11019 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on k-nearest neighbors.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Doesn't affect :meth:`fit` method.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsRegressor
>>> neigh = KNeighborsRegressor(n_neighbors=2)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
RadiusNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
.. warning::
Regarding the Nearest Neighbors algorithms, if it is found that two
neighbors, neighbor `k+1` and `k`, have identical distances but
       different labels, the results will depend on the ordering of the
training data.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, n_jobs=1,
**kwargs):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.kneighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.mean(_y[neigh_ind], axis=1)
else:
y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
denom = np.sum(weights, axis=1)
for j in range(_y.shape[1]):
num = np.sum(_y[neigh_ind, j] * weights, axis=1)
y_pred[:, j] = num / denom
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
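# --- Illustrative sketch (not part of the original scikit-learn source) ---
# With weights='distance', predict() above takes a weighted mean of the
# neighbors' targets, the weights being inverse distances.  The same
# arithmetic on made-up neighbor data:
def _weighted_knn_mean_sketch():
    neigh_y = np.array([1.0, 2.0, 4.0])      # targets of the 3 neighbors
    neigh_dist = np.array([0.5, 1.0, 2.0])   # distances to the query point
    w = 1.0 / neigh_dist                      # inverse-distance weights
    return np.sum(neigh_y * w) / np.sum(w)    # -> 6.0 / 3.5 = 1.714...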
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
SupervisedFloatMixin,
RegressorMixin):
"""Regression based on neighbors within a fixed radius.
The target is predicted by local interpolation of the targets
associated of the nearest neighbors in the training set.
Read more in the :ref:`User Guide <regression>`.
Parameters
----------
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or DistanceMetric object (default='minkowski')
the distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. See the documentation of the DistanceMetric class for a
list of available metrics.
p : integer, optional (default = 2)
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsRegressor
>>> neigh = RadiusNeighborsRegressor(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsRegressor(...)
>>> print(neigh.predict([[1.5]]))
[ 0.5]
See also
--------
NearestNeighbors
KNeighborsRegressor
KNeighborsClassifier
RadiusNeighborsClassifier
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30,
p=2, metric='minkowski', metric_params=None, **kwargs):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p, metric=metric, metric_params=metric_params,
**kwargs)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the target for the provided data
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
Test samples.
Returns
-------
y : array of int, shape = [n_samples] or [n_samples, n_outputs]
Target values
"""
X = check_array(X, accept_sparse='csr')
neigh_dist, neigh_ind = self.radius_neighbors(X)
weights = _get_weights(neigh_dist, self.weights)
_y = self._y
if _y.ndim == 1:
_y = _y.reshape((-1, 1))
if weights is None:
y_pred = np.array([np.mean(_y[ind, :], axis=0)
for ind in neigh_ind])
else:
y_pred = np.array([(np.average(_y[ind, :], axis=0,
weights=weights[i]))
for (i, ind) in enumerate(neigh_ind)])
if self._y.ndim == 1:
y_pred = y_pred.ravel()
return y_pred
| bsd-3-clause |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/tests/tseries/test_offsets.py | 6 | 216497 | import os
from distutils.version import LooseVersion
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta
import pytest
from pandas.compat import range, iteritems
from pandas import compat
import numpy as np
from pandas.compat.numpy import np_datetime64_compat
from pandas.core.series import Series
from pandas.tseries.frequencies import (_offset_map, get_freq_code,
_get_freq_str, _INVALID_FREQ_ERROR,
get_offset, get_standard_freq)
from pandas.core.indexes.datetimes import (
_to_m8, DatetimeIndex, _daterange_cache)
from pandas.tseries.offsets import (BDay, CDay, BQuarterEnd, BMonthEnd,
BusinessHour, WeekOfMonth, CBMonthEnd,
CustomBusinessHour, WeekDay,
CBMonthBegin, BYearEnd, MonthEnd,
MonthBegin, SemiMonthBegin, SemiMonthEnd,
BYearBegin, QuarterBegin, BQuarterBegin,
BMonthBegin, DateOffset, Week, YearBegin,
YearEnd, Hour, Minute, Second, Day, Micro,
QuarterEnd, BusinessMonthEnd, FY5253,
Milli, Nano, Easter, FY5253Quarter,
LastWeekOfMonth, CacheableOffset)
from pandas.core.tools.datetimes import (
format, ole2datetime, parse_time_string,
to_datetime, DateParseError)
import pandas.tseries.offsets as offsets
from pandas.io.pickle import read_pickle
from pandas._libs.tslib import normalize_date, NaT, Timestamp, Timedelta
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.tseries.holiday import USFederalHolidayCalendar
def test_monthrange():
import calendar
for y in range(2000, 2013):
for m in range(1, 13):
assert tslib.monthrange(y, m) == calendar.monthrange(y, m)
####
# Misc function tests
####
def test_format():
actual = format(datetime(2008, 1, 15))
assert actual == '20080115'
def test_ole2datetime():
actual = ole2datetime(60000)
assert actual == datetime(2064, 4, 8)
with pytest.raises(ValueError):
ole2datetime(60)
def test_to_datetime1():
actual = to_datetime(datetime(2008, 1, 15))
assert actual == datetime(2008, 1, 15)
actual = to_datetime('20080115')
assert actual == datetime(2008, 1, 15)
# unparseable
s = 'Month 1, 1999'
assert to_datetime(s, errors='ignore') == s
def test_normalize_date():
actual = normalize_date(datetime(2007, 10, 1, 1, 12, 5, 10))
assert actual == datetime(2007, 10, 1)
def test_to_m8():
valb = datetime(2007, 10, 1)
valu = _to_m8(valb)
assert isinstance(valu, np.datetime64)
# assert valu == np.datetime64(datetime(2007,10,1))
# def test_datetime64_box():
# valu = np.datetime64(datetime(2007,10,1))
# valb = _dt_box(valu)
# assert type(valb) == datetime
# assert valb == datetime(2007,10,1)
#####
# DateOffset Tests
#####
class Base(object):
_offset = None
_offset_types = [getattr(offsets, o) for o in offsets.__all__]
timezones = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/Asia/Tokyo', 'dateutil/US/Pacific']
@property
def offset_types(self):
return self._offset_types
def _get_offset(self, klass, value=1, normalize=False):
# create instance from offset class
if klass is FY5253 or klass is FY5253Quarter:
klass = klass(n=value, startingMonth=1, weekday=1,
qtr_with_extra_week=1, variation='last',
normalize=normalize)
elif klass is LastWeekOfMonth:
klass = klass(n=value, weekday=5, normalize=normalize)
elif klass is WeekOfMonth:
klass = klass(n=value, week=1, weekday=5, normalize=normalize)
elif klass is Week:
klass = klass(n=value, weekday=5, normalize=normalize)
elif klass is DateOffset:
klass = klass(days=value, normalize=normalize)
else:
try:
klass = klass(value, normalize=normalize)
except:
klass = klass(normalize=normalize)
return klass
def test_apply_out_of_range(self):
if self._offset is None:
return
# try to create an out-of-bounds result timestamp; if we can't create
# the offset skip
try:
if self._offset in (BusinessHour, CustomBusinessHour):
# Using 10000 in BusinessHour fails in tz check because of DST
# difference
offset = self._get_offset(self._offset, value=100000)
else:
offset = self._get_offset(self._offset, value=10000)
result = Timestamp('20080101') + offset
assert isinstance(result, datetime)
assert result.tzinfo is None
tm._skip_if_no_pytz()
tm._skip_if_no_dateutil()
# Check tz is preserved
for tz in self.timezones:
t = Timestamp('20080101', tz=tz)
result = t + offset
assert isinstance(result, datetime)
assert t.tzinfo == result.tzinfo
except (tslib.OutOfBoundsDatetime):
raise
except (ValueError, KeyError) as e:
pytest.skip(
"cannot create out_of_range offset: {0} {1}".format(
str(self).split('.')[-1], e))
class TestCommon(Base):
def setup_method(self, method):
        # expected value created by Base._get_offset
# are applied to 2011/01/01 09:00 (Saturday)
# used for .apply and .rollforward
self.expecteds = {'Day': Timestamp('2011-01-02 09:00:00'),
'DateOffset': Timestamp('2011-01-02 09:00:00'),
'BusinessDay': Timestamp('2011-01-03 09:00:00'),
'CustomBusinessDay':
Timestamp('2011-01-03 09:00:00'),
'CustomBusinessMonthEnd':
Timestamp('2011-01-31 09:00:00'),
'CustomBusinessMonthBegin':
Timestamp('2011-01-03 09:00:00'),
'MonthBegin': Timestamp('2011-02-01 09:00:00'),
'BusinessMonthBegin':
Timestamp('2011-01-03 09:00:00'),
'MonthEnd': Timestamp('2011-01-31 09:00:00'),
'SemiMonthEnd': Timestamp('2011-01-15 09:00:00'),
'SemiMonthBegin': Timestamp('2011-01-15 09:00:00'),
'BusinessMonthEnd': Timestamp('2011-01-31 09:00:00'),
'YearBegin': Timestamp('2012-01-01 09:00:00'),
'BYearBegin': Timestamp('2011-01-03 09:00:00'),
'YearEnd': Timestamp('2011-12-31 09:00:00'),
'BYearEnd': Timestamp('2011-12-30 09:00:00'),
'QuarterBegin': Timestamp('2011-03-01 09:00:00'),
'BQuarterBegin': Timestamp('2011-03-01 09:00:00'),
'QuarterEnd': Timestamp('2011-03-31 09:00:00'),
'BQuarterEnd': Timestamp('2011-03-31 09:00:00'),
'BusinessHour': Timestamp('2011-01-03 10:00:00'),
'CustomBusinessHour':
Timestamp('2011-01-03 10:00:00'),
'WeekOfMonth': Timestamp('2011-01-08 09:00:00'),
'LastWeekOfMonth': Timestamp('2011-01-29 09:00:00'),
'FY5253Quarter': Timestamp('2011-01-25 09:00:00'),
'FY5253': Timestamp('2011-01-25 09:00:00'),
'Week': Timestamp('2011-01-08 09:00:00'),
'Easter': Timestamp('2011-04-24 09:00:00'),
'Hour': Timestamp('2011-01-01 10:00:00'),
'Minute': Timestamp('2011-01-01 09:01:00'),
'Second': Timestamp('2011-01-01 09:00:01'),
'Milli': Timestamp('2011-01-01 09:00:00.001000'),
'Micro': Timestamp('2011-01-01 09:00:00.000001'),
'Nano': Timestamp(np_datetime64_compat(
'2011-01-01T09:00:00.000000001Z'))}
def test_return_type(self):
for offset in self.offset_types:
offset = self._get_offset(offset)
# make sure that we are returning a Timestamp
result = Timestamp('20080101') + offset
assert isinstance(result, Timestamp)
# make sure that we are returning NaT
assert NaT + offset is NaT
assert offset + NaT is NaT
assert NaT - offset is NaT
assert (-offset).apply(NaT) is NaT
def test_offset_n(self):
for offset_klass in self.offset_types:
offset = self._get_offset(offset_klass)
assert offset.n == 1
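            # multiplying an offset by an integer scales its n attribute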
neg_offset = offset * -1
assert neg_offset.n == -1
mul_offset = offset * 3
assert mul_offset.n == 3
def test_offset_freqstr(self):
for offset_klass in self.offset_types:
offset = self._get_offset(offset_klass)
freqstr = offset.freqstr
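            # skip offsets whose freqstr representation does not round-trip
            # through get_offset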
if freqstr not in ('<Easter>',
"<DateOffset: kwds={'days': 1}>",
'LWOM-SAT', ):
code = get_offset(freqstr)
assert offset.rule_code == code
def _check_offsetfunc_works(self, offset, funcname, dt, expected,
normalize=False):
offset_s = self._get_offset(offset, normalize=normalize)
func = getattr(offset_s, funcname)
result = func(dt)
assert isinstance(result, Timestamp)
assert result == expected
result = func(Timestamp(dt))
assert isinstance(result, Timestamp)
assert result == expected
        # see gh-14101: applying (or normalizing) a plain DateOffset can
        # discard nonzero nanoseconds and emit a UserWarning
exp_warning = None
ts = Timestamp(dt) + Nano(5)
if (offset_s.__class__.__name__ == 'DateOffset' and
(funcname == 'apply' or normalize) and
ts.nanosecond > 0):
exp_warning = UserWarning
# test nanosecond is preserved
with tm.assert_produces_warning(exp_warning,
check_stacklevel=False):
result = func(ts)
assert isinstance(result, Timestamp)
if normalize is False:
assert result == expected + Nano(5)
else:
assert result == expected
if isinstance(dt, np.datetime64):
# test tz when input is datetime or Timestamp
return
tm._skip_if_no_pytz()
tm._skip_if_no_dateutil()
for tz in self.timezones:
expected_localize = expected.tz_localize(tz)
tz_obj = tslib.maybe_get_tz(tz)
dt_tz = tslib._localize_pydatetime(dt, tz_obj)
result = func(dt_tz)
assert isinstance(result, Timestamp)
assert result == expected_localize
result = func(Timestamp(dt, tz=tz))
assert isinstance(result, Timestamp)
assert result == expected_localize
# see gh-14101
exp_warning = None
ts = Timestamp(dt, tz=tz) + Nano(5)
if (offset_s.__class__.__name__ == 'DateOffset' and
(funcname == 'apply' or normalize) and
ts.nanosecond > 0):
exp_warning = UserWarning
# test nanosecond is preserved
with tm.assert_produces_warning(exp_warning,
check_stacklevel=False):
result = func(ts)
assert isinstance(result, Timestamp)
if normalize is False:
assert result == expected_localize + Nano(5)
else:
assert result == expected_localize
def test_apply(self):
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np_datetime64_compat('2011-01-01 09:00Z')
for offset in self.offset_types:
for dt in [sdt, ndt]:
expected = self.expecteds[offset.__name__]
self._check_offsetfunc_works(offset, 'apply', dt, expected)
expected = Timestamp(expected.date())
self._check_offsetfunc_works(offset, 'apply', dt, expected,
normalize=True)
def test_rollforward(self):
expecteds = self.expecteds.copy()
# result will not be changed if the target is on the offset
no_changes = ['Day', 'MonthBegin', 'SemiMonthBegin', 'YearBegin',
'Week', 'Hour', 'Minute', 'Second', 'Milli', 'Micro',
'Nano', 'DateOffset']
for n in no_changes:
            expecteds[n] = Timestamp('2011-01-01 09:00')
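        # 2011-01-01 09:00 is a Saturday, so the business-hour offsets roll
        # forward to the next business day's opening time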
expecteds['BusinessHour'] = Timestamp('2011-01-03 09:00:00')
expecteds['CustomBusinessHour'] = Timestamp('2011-01-03 09:00:00')
        # but will be changed when normalize=True
norm_expected = expecteds.copy()
for k in norm_expected:
norm_expected[k] = Timestamp(norm_expected[k].date())
normalized = {'Day': Timestamp('2011-01-02 00:00:00'),
'DateOffset': Timestamp('2011-01-02 00:00:00'),
'MonthBegin': Timestamp('2011-02-01 00:00:00'),
'SemiMonthBegin': Timestamp('2011-01-15 00:00:00'),
'YearBegin': Timestamp('2012-01-01 00:00:00'),
'Week': Timestamp('2011-01-08 00:00:00'),
'Hour': Timestamp('2011-01-01 00:00:00'),
'Minute': Timestamp('2011-01-01 00:00:00'),
'Second': Timestamp('2011-01-01 00:00:00'),
'Milli': Timestamp('2011-01-01 00:00:00'),
'Micro': Timestamp('2011-01-01 00:00:00')}
norm_expected.update(normalized)
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np_datetime64_compat('2011-01-01 09:00Z')
for offset in self.offset_types:
for dt in [sdt, ndt]:
expected = expecteds[offset.__name__]
self._check_offsetfunc_works(offset, 'rollforward', dt,
expected)
expected = norm_expected[offset.__name__]
self._check_offsetfunc_works(offset, 'rollforward', dt,
expected, normalize=True)
def test_rollback(self):
expecteds = {'BusinessDay': Timestamp('2010-12-31 09:00:00'),
'CustomBusinessDay': Timestamp('2010-12-31 09:00:00'),
'CustomBusinessMonthEnd':
Timestamp('2010-12-31 09:00:00'),
'CustomBusinessMonthBegin':
Timestamp('2010-12-01 09:00:00'),
'BusinessMonthBegin': Timestamp('2010-12-01 09:00:00'),
'MonthEnd': Timestamp('2010-12-31 09:00:00'),
'SemiMonthEnd': Timestamp('2010-12-31 09:00:00'),
'BusinessMonthEnd': Timestamp('2010-12-31 09:00:00'),
'BYearBegin': Timestamp('2010-01-01 09:00:00'),
'YearEnd': Timestamp('2010-12-31 09:00:00'),
'BYearEnd': Timestamp('2010-12-31 09:00:00'),
'QuarterBegin': Timestamp('2010-12-01 09:00:00'),
'BQuarterBegin': Timestamp('2010-12-01 09:00:00'),
'QuarterEnd': Timestamp('2010-12-31 09:00:00'),
'BQuarterEnd': Timestamp('2010-12-31 09:00:00'),
'BusinessHour': Timestamp('2010-12-31 17:00:00'),
'CustomBusinessHour': Timestamp('2010-12-31 17:00:00'),
'WeekOfMonth': Timestamp('2010-12-11 09:00:00'),
'LastWeekOfMonth': Timestamp('2010-12-25 09:00:00'),
'FY5253Quarter': Timestamp('2010-10-26 09:00:00'),
'FY5253': Timestamp('2010-01-26 09:00:00'),
'Easter': Timestamp('2010-04-04 09:00:00')}
# result will not be changed if the target is on the offset
for n in ['Day', 'MonthBegin', 'SemiMonthBegin', 'YearBegin', 'Week',
'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano',
'DateOffset']:
            expecteds[n] = Timestamp('2011-01-01 09:00')
        # but will be changed when normalize=True
norm_expected = expecteds.copy()
for k in norm_expected:
norm_expected[k] = Timestamp(norm_expected[k].date())
normalized = {'Day': Timestamp('2010-12-31 00:00:00'),
'DateOffset': Timestamp('2010-12-31 00:00:00'),
'MonthBegin': Timestamp('2010-12-01 00:00:00'),
'SemiMonthBegin': Timestamp('2010-12-15 00:00:00'),
'YearBegin': Timestamp('2010-01-01 00:00:00'),
'Week': Timestamp('2010-12-25 00:00:00'),
'Hour': Timestamp('2011-01-01 00:00:00'),
'Minute': Timestamp('2011-01-01 00:00:00'),
'Second': Timestamp('2011-01-01 00:00:00'),
'Milli': Timestamp('2011-01-01 00:00:00'),
'Micro': Timestamp('2011-01-01 00:00:00')}
norm_expected.update(normalized)
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np_datetime64_compat('2011-01-01 09:00Z')
for offset in self.offset_types:
for dt in [sdt, ndt]:
expected = expecteds[offset.__name__]
self._check_offsetfunc_works(offset, 'rollback', dt, expected)
expected = norm_expected[offset.__name__]
self._check_offsetfunc_works(offset, 'rollback', dt, expected,
normalize=True)
def test_onOffset(self):
for offset in self.offset_types:
dt = self.expecteds[offset.__name__]
offset_s = self._get_offset(offset)
assert offset_s.onOffset(dt)
            # when normalize=True, onOffset requires the time to be exactly
            # 00:00:00
offset_n = self._get_offset(offset, normalize=True)
assert not offset_n.onOffset(dt)
if offset in (BusinessHour, CustomBusinessHour):
# In default BusinessHour (9:00-17:00), normalized time
# cannot be in business hour range
continue
date = datetime(dt.year, dt.month, dt.day)
assert offset_n.onOffset(date)
def test_add(self):
dt = datetime(2011, 1, 1, 9, 0)
for offset in self.offset_types:
offset_s = self._get_offset(offset)
expected = self.expecteds[offset.__name__]
result_dt = dt + offset_s
result_ts = Timestamp(dt) + offset_s
for result in [result_dt, result_ts]:
assert isinstance(result, Timestamp)
assert result == expected
tm._skip_if_no_pytz()
for tz in self.timezones:
expected_localize = expected.tz_localize(tz)
result = Timestamp(dt, tz=tz) + offset_s
assert isinstance(result, Timestamp)
assert result == expected_localize
# normalize=True
offset_s = self._get_offset(offset, normalize=True)
expected = Timestamp(expected.date())
result_dt = dt + offset_s
result_ts = Timestamp(dt) + offset_s
for result in [result_dt, result_ts]:
assert isinstance(result, Timestamp)
assert result == expected
for tz in self.timezones:
expected_localize = expected.tz_localize(tz)
result = Timestamp(dt, tz=tz) + offset_s
assert isinstance(result, Timestamp)
assert result == expected_localize
def test_pickle_v0_15_2(self):
offsets = {'DateOffset': DateOffset(years=1),
'MonthBegin': MonthBegin(1),
'Day': Day(1),
'YearBegin': YearBegin(1),
'Week': Week(1)}
pickle_path = os.path.join(tm.get_data_path(),
'dateoffset_0_15_2.pickle')
# This code was executed once on v0.15.2 to generate the pickle:
# with open(pickle_path, 'wb') as f: pickle.dump(offsets, f)
#
tm.assert_dict_equal(offsets, read_pickle(pickle_path))
class TestDateOffset(Base):
def setup_method(self, method):
self.d = Timestamp(datetime(2008, 1, 2))
_offset_map.clear()
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
def test_constructor(self):
assert ((self.d + DateOffset(months=2)) == datetime(2008, 3, 2))
assert ((self.d - DateOffset(months=2)) == datetime(2007, 11, 2))
assert ((self.d + DateOffset(2)) == datetime(2008, 1, 4))
assert not DateOffset(2).isAnchored()
assert DateOffset(1).isAnchored()
d = datetime(2008, 1, 31)
assert ((d + DateOffset(months=1)) == datetime(2008, 2, 29))
def test_copy(self):
assert (DateOffset(months=2).copy() == DateOffset(months=2))
def test_eq(self):
offset1 = DateOffset(days=1)
offset2 = DateOffset(days=365)
assert offset1 != offset2
class TestBusinessDay(Base):
_offset = BDay
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset2 = BDay(2)
def test_different_normalize_equals(self):
# equivalent in this special case
offset = BDay()
offset2 = BDay()
offset2.normalize = True
assert offset == offset2
def test_repr(self):
assert repr(self.offset) == '<BusinessDay>'
assert repr(self.offset2) == '<2 * BusinessDays>'
expected = '<BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def testCall(self):
assert self.offset2(self.d) == datetime(2008, 1, 3)
def testRAdd(self):
assert self.d + self.offset2 == self.offset2 + self.d
def testSub(self):
off = self.offset2
pytest.raises(Exception, off.__sub__, self.d)
assert 2 * off - off == off
assert self.d - self.offset2 == self.d + BDay(-2)
def testRSub(self):
assert self.d - self.offset2 == (-self.offset2).apply(self.d)
def testMult1(self):
assert self.d + 10 * self.offset == self.d + BDay(10)
def testMult2(self):
assert self.d + (-5 * BDay(-10)) == self.d + BDay(50)
def testRollback1(self):
assert BDay(10).rollback(self.d) == self.d
def testRollback2(self):
assert (BDay(10).rollback(datetime(2008, 1, 5)) ==
datetime(2008, 1, 4))
def testRollforward1(self):
assert BDay(10).rollforward(self.d) == self.d
def testRollforward2(self):
assert (BDay(10).rollforward(datetime(2008, 1, 5)) ==
datetime(2008, 1, 7))
def test_roll_date_object(self):
offset = BDay()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 14)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 17)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
def test_onOffset(self):
tests = [(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False)]
for offset, d, expected in tests:
assertOnOffset(offset, d, expected)
def test_apply(self):
tests = []
tests.append((BDay(), {datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
tests.append((2 * BDay(), {datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}
))
tests.append((-BDay(), {datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}
))
tests.append((-2 * BDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}
))
tests.append((BDay(0), {datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}
))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + BDay(10)
assert result == datetime(2012, 11, 6)
result = dt + BDay(100) - BDay(100)
assert result == dt
off = BDay() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 12, 23)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2011, 12, 26)
assert rs == xp
off = BDay() * 10
rs = datetime(2014, 1, 5) + off # see #5890
xp = datetime(2014, 1, 17)
assert rs == xp
def test_apply_corner(self):
pytest.raises(TypeError, BDay().apply, BMonthEnd())
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = BDay()
offset2 = BDay()
assert not offset1 != offset2
class TestBusinessHour(Base):
_offset = BusinessHour
def setup_method(self, method):
self.d = datetime(2014, 7, 1, 10, 00)
self.offset1 = BusinessHour()
self.offset2 = BusinessHour(n=3)
self.offset3 = BusinessHour(n=-1)
self.offset4 = BusinessHour(n=-4)
from datetime import time as dt_time
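        # offsets with non-default business hours, including windows that
        # span midnight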
self.offset5 = BusinessHour(start=dt_time(11, 0), end=dt_time(14, 30))
self.offset6 = BusinessHour(start='20:00', end='05:00')
self.offset7 = BusinessHour(n=-2, start=dt_time(21, 30),
end=dt_time(6, 30))
def test_constructor_errors(self):
from datetime import time as dt_time
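        # start/end must be whole-minute times, given as time objects or
        # 'HH:MM' strings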
with pytest.raises(ValueError):
BusinessHour(start=dt_time(11, 0, 5))
with pytest.raises(ValueError):
BusinessHour(start='AAA')
with pytest.raises(ValueError):
BusinessHour(start='14:00:05')
def test_different_normalize_equals(self):
# equivalent in this special case
offset = self._offset()
offset2 = self._offset()
offset2.normalize = True
assert offset == offset2
def test_repr(self):
assert repr(self.offset1) == '<BusinessHour: BH=09:00-17:00>'
assert repr(self.offset2) == '<3 * BusinessHours: BH=09:00-17:00>'
assert repr(self.offset3) == '<-1 * BusinessHour: BH=09:00-17:00>'
assert repr(self.offset4) == '<-4 * BusinessHours: BH=09:00-17:00>'
assert repr(self.offset5) == '<BusinessHour: BH=11:00-14:30>'
assert repr(self.offset6) == '<BusinessHour: BH=20:00-05:00>'
assert repr(self.offset7) == '<-2 * BusinessHours: BH=21:30-06:30>'
def test_with_offset(self):
expected = Timestamp('2014-07-01 13:00')
assert self.d + BusinessHour() * 3 == expected
assert self.d + BusinessHour(n=3) == expected
def testEQ(self):
for offset in [self.offset1, self.offset2, self.offset3, self.offset4]:
assert offset == offset
assert BusinessHour() != BusinessHour(-1)
assert BusinessHour(start='09:00') == BusinessHour()
assert BusinessHour(start='09:00') != BusinessHour(start='09:01')
assert (BusinessHour(start='09:00', end='17:00') !=
BusinessHour(start='17:00', end='09:01'))
def test_hash(self):
for offset in [self.offset1, self.offset2, self.offset3, self.offset4]:
assert hash(offset) == hash(offset)
def testCall(self):
assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
assert self.offset2(self.d) == datetime(2014, 7, 1, 13)
assert self.offset3(self.d) == datetime(2014, 6, 30, 17)
assert self.offset4(self.d) == datetime(2014, 6, 30, 14)
def testRAdd(self):
assert self.d + self.offset2 == self.offset2 + self.d
def testSub(self):
off = self.offset2
pytest.raises(Exception, off.__sub__, self.d)
assert 2 * off - off == off
assert self.d - self.offset2 == self.d + self._offset(-3)
def testRSub(self):
assert self.d - self.offset2 == (-self.offset2).apply(self.d)
def testMult1(self):
assert self.d + 5 * self.offset1 == self.d + self._offset(5)
def testMult2(self):
assert self.d + (-3 * self._offset(-2)) == self.d + self._offset(6)
def testRollback1(self):
assert self.offset1.rollback(self.d) == self.d
assert self.offset2.rollback(self.d) == self.d
assert self.offset3.rollback(self.d) == self.d
assert self.offset4.rollback(self.d) == self.d
assert self.offset5.rollback(self.d) == datetime(2014, 6, 30, 14, 30)
assert self.offset6.rollback(self.d) == datetime(2014, 7, 1, 5, 0)
assert self.offset7.rollback(self.d) == datetime(2014, 7, 1, 6, 30)
d = datetime(2014, 7, 1, 0)
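        # midnight is outside the default 09:00-17:00 window, so rollback
        # moves to the previous close; for the overnight windows
        # (offset6/offset7) midnight lies inside the window and is unchanged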
assert self.offset1.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset2.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset3.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset4.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset5.rollback(d) == datetime(2014, 6, 30, 14, 30)
assert self.offset6.rollback(d) == d
assert self.offset7.rollback(d) == d
assert self._offset(5).rollback(self.d) == self.d
def testRollback2(self):
assert (self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) ==
datetime(2014, 7, 4, 17, 0))
def testRollforward1(self):
assert self.offset1.rollforward(self.d) == self.d
assert self.offset2.rollforward(self.d) == self.d
assert self.offset3.rollforward(self.d) == self.d
assert self.offset4.rollforward(self.d) == self.d
assert (self.offset5.rollforward(self.d) ==
datetime(2014, 7, 1, 11, 0))
assert (self.offset6.rollforward(self.d) ==
datetime(2014, 7, 1, 20, 0))
assert (self.offset7.rollforward(self.d) ==
datetime(2014, 7, 1, 21, 30))
d = datetime(2014, 7, 1, 0)
assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset3.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset4.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset5.rollforward(d) == datetime(2014, 7, 1, 11)
assert self.offset6.rollforward(d) == d
assert self.offset7.rollforward(d) == d
assert self._offset(5).rollforward(self.d) == self.d
def testRollforward2(self):
assert (self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) ==
datetime(2014, 7, 7, 9))
def test_roll_date_object(self):
offset = BusinessHour()
dt = datetime(2014, 7, 6, 15, 0)
result = offset.rollback(dt)
assert result == datetime(2014, 7, 4, 17)
result = offset.rollforward(dt)
assert result == datetime(2014, 7, 7, 9)
def test_normalize(self):
tests = []
tests.append((BusinessHour(normalize=True),
{datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
datetime(2014, 7, 6, 10): datetime(2014, 7, 7)}))
tests.append((BusinessHour(-1, normalize=True),
{datetime(2014, 7, 1, 8): datetime(2014, 6, 30),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
datetime(2014, 7, 1, 10): datetime(2014, 6, 30),
datetime(2014, 7, 1, 0): datetime(2014, 6, 30),
datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
datetime(2014, 7, 6, 10): datetime(2014, 7, 4)}))
tests.append((BusinessHour(1, normalize=True, start='17:00',
end='04:00'),
{datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
datetime(2014, 7, 2, 3): datetime(2014, 7, 2),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
datetime(2014, 7, 7, 17): datetime(2014, 7, 7)}))
for offset, cases in tests:
for dt, expected in compat.iteritems(cases):
assert offset.apply(dt) == expected
def test_onOffset(self):
tests = []
tests.append((BusinessHour(), {datetime(2014, 7, 1, 9): True,
datetime(2014, 7, 1, 8, 59): False,
datetime(2014, 7, 1, 8): False,
datetime(2014, 7, 1, 17): True,
datetime(2014, 7, 1, 17, 1): False,
datetime(2014, 7, 1, 18): False,
datetime(2014, 7, 5, 9): False,
datetime(2014, 7, 6, 12): False}))
tests.append((BusinessHour(start='10:00', end='15:00'),
{datetime(2014, 7, 1, 9): False,
datetime(2014, 7, 1, 10): True,
datetime(2014, 7, 1, 15): True,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12): False,
datetime(2014, 7, 6, 12): False}))
tests.append((BusinessHour(start='19:00', end='05:00'),
{datetime(2014, 7, 1, 9, 0): False,
datetime(2014, 7, 1, 10, 0): False,
datetime(2014, 7, 1, 15): False,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12, 0): False,
datetime(2014, 7, 6, 12, 0): False,
datetime(2014, 7, 1, 19, 0): True,
datetime(2014, 7, 2, 0, 0): True,
datetime(2014, 7, 4, 23): True,
datetime(2014, 7, 5, 1): True,
datetime(2014, 7, 5, 5, 0): True,
datetime(2014, 7, 6, 23, 0): False,
datetime(2014, 7, 7, 3, 0): False}))
for offset, cases in tests:
for dt, expected in compat.iteritems(cases):
assert offset.onOffset(dt) == expected
def test_opening_time(self):
tests = []
        # the opening time should depend only on the sign of n, not on its
        # magnitude or on the end time; each case maps
        # dt -> (next opening time, previous opening time)
tests.append((
[BusinessHour(), BusinessHour(n=2), BusinessHour(
n=4), BusinessHour(end='10:00'), BusinessHour(n=2, end='4:00'),
BusinessHour(n=4, end='15:00')],
{datetime(2014, 7, 1, 11): (datetime(2014, 7, 2, 9), datetime(
2014, 7, 1, 9)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 9), datetime(
2014, 7, 1, 9)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 9), datetime(
2014, 7, 1, 9)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 9), datetime(
2014, 7, 1, 9)),
             # if the timestamp falls exactly on the opening time, the next
             # opening time is that same timestamp
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9), datetime(
2014, 7, 2, 9)),
datetime(2014, 7, 2, 10): (datetime(2014, 7, 3, 9), datetime(
2014, 7, 2, 9)),
             # 2014-07-05 is a Saturday
datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 9), datetime(
2014, 7, 4, 9)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 7, 9), datetime(
2014, 7, 4, 9)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 9), datetime(
2014, 7, 4, 9)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 9), datetime(
2014, 7, 4, 9)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 9), datetime(
2014, 7, 4, 9)),
datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 8, 9), datetime(
2014, 7, 7, 9))}))
tests.append(([BusinessHour(start='11:15'),
BusinessHour(n=2, start='11:15'),
BusinessHour(n=3, start='11:15'),
BusinessHour(start='11:15', end='10:00'),
BusinessHour(n=2, start='11:15', end='4:00'),
BusinessHour(n=3, start='11:15', end='15:00')],
{datetime(2014, 7, 1, 11): (datetime(
2014, 7, 1, 11, 15), datetime(2014, 6, 30, 11, 15)),
datetime(2014, 7, 1, 18): (datetime(
2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 1, 23): (datetime(
2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 10): (datetime(
2014, 7, 2, 11, 15), datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 11, 15): (datetime(
2014, 7, 2, 11, 15), datetime(2014, 7, 2, 11, 15)),
datetime(2014, 7, 2, 11, 15, 1): (datetime(
2014, 7, 3, 11, 15), datetime(2014, 7, 2, 11, 15)),
datetime(2014, 7, 5, 10): (datetime(
2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 4, 10): (datetime(
2014, 7, 4, 11, 15), datetime(2014, 7, 3, 11, 15)),
datetime(2014, 7, 4, 23): (datetime(
2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 6, 10): (datetime(
2014, 7, 7, 11, 15), datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 7, 9, 1): (
datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15))}))
tests.append(([BusinessHour(-1), BusinessHour(n=-2),
BusinessHour(n=-4),
BusinessHour(n=-1, end='10:00'),
BusinessHour(n=-2, end='4:00'),
BusinessHour(n=-4, end='15:00')],
{datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 10): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 3, 9)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 9): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 8, 9))}))
tests.append(([BusinessHour(start='17:00', end='05:00'),
BusinessHour(n=3, start='17:00', end='03:00')],
{datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 17),
datetime(2014, 6, 30, 17)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 4, 17): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 3, 17)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 7, 17, 1): (datetime(
2014, 7, 8, 17), datetime(2014, 7, 7, 17)), }))
tests.append(([BusinessHour(-1, start='17:00', end='05:00'),
BusinessHour(n=-2, start='17:00', end='03:00')],
{datetime(2014, 7, 1, 11): (datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 16, 59): (datetime(
2014, 7, 1, 17), datetime(2014, 7, 2, 17)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 3, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 7, 18): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 8, 17))}))
for _offsets, cases in tests:
for offset in _offsets:
for dt, (exp_next, exp_prev) in compat.iteritems(cases):
assert offset._next_opening_time(dt) == exp_next
assert offset._prev_opening_time(dt) == exp_prev
def test_apply(self):
tests = []
tests.append((
BusinessHour(),
{datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 2, 9, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 10),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 12),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30,
30)}))
tests.append((BusinessHour(
4), {datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30,
30)}))
tests.append(
(BusinessHour(-1),
{datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 10),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 10): datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 16, 30, 15): datetime(
2014, 7, 1, 15, 30, 15),
datetime(2014, 7, 1, 9, 30, 15): datetime(
2014, 6, 30, 16, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 5): datetime(2014, 6, 30, 16),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 10),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 16),
datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 16),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 16),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 16),
datetime(2014, 7, 7, 9): datetime(2014, 7, 4, 16),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 16, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 16, 30,
30)}))
tests.append((BusinessHour(
-4), {datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 15),
datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 13),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 13, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 13, 30,
30)}))
tests.append((BusinessHour(start='13:00', end='16:00'),
{datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 13),
datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 14),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 14),
datetime(2014, 7, 1, 15, 30, 15): datetime(2014, 7, 2,
13, 30, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 14),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14)}))
tests.append((BusinessHour(n=2, start='13:00', end='16:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 14): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),
datetime(2014, 7, 2, 14, 30): datetime(2014, 7, 3, 13, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 15),
datetime(2014, 7, 4, 14, 30): datetime(2014, 7, 7, 13, 30),
datetime(2014, 7, 4, 14, 30, 30): datetime(2014, 7, 7, 13, 30, 30)
}))
tests.append((BusinessHour(n=-1, start='13:00', end='16:00'),
{datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 14): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 15): datetime(2014, 7, 2, 14),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 16): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 13, 30, 15): datetime(2014, 7, 1,
15, 30, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 15),
datetime(2014, 7, 7, 11): datetime(2014, 7, 4, 15)}))
tests.append((BusinessHour(n=-3, start='10:00', end='16:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 11),
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 11, 30): datetime(2014, 7, 1, 14, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
datetime(2014, 7, 4, 10): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 16): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 12, 30): datetime(2014, 7, 3, 15, 30),
datetime(2014, 7, 4, 12, 30, 30): datetime(2014, 7, 3, 15, 30, 30)
}))
tests.append((BusinessHour(start='19:00', end='05:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 20),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 4, 30): datetime(2014, 7, 2, 19, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 1),
datetime(2014, 7, 4, 10): datetime(2014, 7, 4, 20),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5, 0),
datetime(2014, 7, 5, 0): datetime(2014, 7, 5, 1),
datetime(2014, 7, 5, 4): datetime(2014, 7, 7, 19),
datetime(2014, 7, 5, 4, 30): datetime(2014, 7, 7, 19, 30),
datetime(2014, 7, 5, 4, 30, 30): datetime(2014, 7, 7, 19, 30, 30)
}))
tests.append((BusinessHour(n=-1, start='19:00', end='05:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),
datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),
datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),
datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),
datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 3),
datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 5, 4, 30),
datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 5, 4, 30, 30)
}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_apply_large_n(self):
tests = []
tests.append(
(BusinessHour(40), # A week later
{datetime(2014, 7, 1, 11): datetime(2014, 7, 8, 11),
datetime(2014, 7, 1, 13): datetime(2014, 7, 8, 13),
datetime(2014, 7, 1, 15): datetime(2014, 7, 8, 15),
datetime(2014, 7, 1, 16): datetime(2014, 7, 8, 16),
datetime(2014, 7, 1, 17): datetime(2014, 7, 9, 9),
datetime(2014, 7, 2, 11): datetime(2014, 7, 9, 11),
datetime(2014, 7, 2, 8): datetime(2014, 7, 9, 9),
datetime(2014, 7, 2, 19): datetime(2014, 7, 10, 9),
datetime(2014, 7, 2, 23): datetime(2014, 7, 10, 9),
datetime(2014, 7, 3, 0): datetime(2014, 7, 10, 9),
datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 9),
datetime(2014, 7, 4, 18): datetime(2014, 7, 14, 9),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 14, 9, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 14, 9, 30,
30)}))
tests.append(
(BusinessHour(-25), # 3 days and 1 hour before
{datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),
datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 12),
datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 16),
datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 17),
datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),
datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 16),
datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 16),
datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 16),
datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 16),
datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 16),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 16, 30),
datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30,
30)}))
# 5 days and 3 hours later
tests.append((BusinessHour(28, start='21:00', end='02:00'),
{datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),
datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),
datetime(2014, 7, 2, 2): datetime(2014, 7, 10, 0),
datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),
datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),
datetime(2014, 7, 4, 2): datetime(2014, 7, 12, 0),
datetime(2014, 7, 4, 3): datetime(2014, 7, 12, 0),
datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),
datetime(2014, 7, 5, 15): datetime(2014, 7, 15, 0),
datetime(2014, 7, 6, 18): datetime(2014, 7, 15, 0),
datetime(2014, 7, 7, 1): datetime(2014, 7, 15, 0),
datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21,
30)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_apply_nanoseconds(self):
tests = []
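        # nanosecond components should survive BusinessHour arithmetic, even
        # when the result rolls over to another business day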
tests.append((BusinessHour(),
{Timestamp('2014-07-04 15:00') + Nano(5): Timestamp(
'2014-07-04 16:00') + Nano(5),
Timestamp('2014-07-04 16:00') + Nano(5): Timestamp(
'2014-07-07 09:00') + Nano(5),
Timestamp('2014-07-04 16:00') - Nano(5): Timestamp(
'2014-07-04 17:00') - Nano(5)}))
tests.append((BusinessHour(-1),
{Timestamp('2014-07-04 15:00') + Nano(5): Timestamp(
'2014-07-04 14:00') + Nano(5),
Timestamp('2014-07-04 10:00') + Nano(5): Timestamp(
'2014-07-04 09:00') + Nano(5),
Timestamp('2014-07-04 10:00') - Nano(5): Timestamp(
'2014-07-03 17:00') - Nano(5), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = self._offset()
offset2 = self._offset()
assert not offset1 != offset2
def test_datetimeindex(self):
idx1 = DatetimeIndex(start='2014-07-04 15:00', end='2014-07-08 10:00',
freq='BH')
idx2 = DatetimeIndex(start='2014-07-04 15:00', periods=12, freq='BH')
idx3 = DatetimeIndex(end='2014-07-08 10:00', periods=12, freq='BH')
expected = DatetimeIndex(['2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00',
'2014-07-07 10:00', '2014-07-07 11:00',
'2014-07-07 12:00',
'2014-07-07 13:00', '2014-07-07 14:00',
'2014-07-07 15:00',
'2014-07-07 16:00', '2014-07-08 09:00',
'2014-07-08 10:00'],
freq='BH')
for idx in [idx1, idx2, idx3]:
tm.assert_index_equal(idx, expected)
idx1 = DatetimeIndex(start='2014-07-04 15:45', end='2014-07-08 10:45',
freq='BH')
idx2 = DatetimeIndex(start='2014-07-04 15:45', periods=12, freq='BH')
idx3 = DatetimeIndex(end='2014-07-08 10:45', periods=12, freq='BH')
expected = DatetimeIndex(['2014-07-04 15:45', '2014-07-04 16:45',
'2014-07-07 09:45',
'2014-07-07 10:45', '2014-07-07 11:45',
'2014-07-07 12:45',
'2014-07-07 13:45', '2014-07-07 14:45',
'2014-07-07 15:45',
'2014-07-07 16:45', '2014-07-08 09:45',
'2014-07-08 10:45'],
freq='BH')
for idx in [idx1, idx2, idx3]:
tm.assert_index_equal(idx, expected)
class TestCustomBusinessHour(Base):
_offset = CustomBusinessHour
def setup_method(self, method):
# 2014 Calendar to check custom holidays
# Sun Mon Tue Wed Thu Fri Sat
# 6/22 23 24 25 26 27 28
# 29 30 7/1 2 3 4 5
# 6 7 8 9 10 11 12
self.d = datetime(2014, 7, 1, 10, 00)
self.offset1 = CustomBusinessHour(weekmask='Tue Wed Thu Fri')
self.holidays = ['2014-06-27', datetime(2014, 6, 30),
np.datetime64('2014-07-02')]
self.offset2 = CustomBusinessHour(holidays=self.holidays)
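        # offset1 excludes Mondays through its weekmask; offset2 excludes the
        # explicit holidays listed above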
def test_constructor_errors(self):
from datetime import time as dt_time
with pytest.raises(ValueError):
CustomBusinessHour(start=dt_time(11, 0, 5))
with pytest.raises(ValueError):
CustomBusinessHour(start='AAA')
with pytest.raises(ValueError):
CustomBusinessHour(start='14:00:05')
def test_different_normalize_equals(self):
# equivalent in this special case
offset = self._offset()
offset2 = self._offset()
offset2.normalize = True
assert offset == offset2
def test_repr(self):
assert repr(self.offset1) == '<CustomBusinessHour: CBH=09:00-17:00>'
assert repr(self.offset2) == '<CustomBusinessHour: CBH=09:00-17:00>'
def test_with_offset(self):
expected = Timestamp('2014-07-01 13:00')
assert self.d + CustomBusinessHour() * 3 == expected
assert self.d + CustomBusinessHour(n=3) == expected
def testEQ(self):
for offset in [self.offset1, self.offset2]:
assert offset == offset
assert CustomBusinessHour() != CustomBusinessHour(-1)
assert (CustomBusinessHour(start='09:00') ==
CustomBusinessHour())
assert (CustomBusinessHour(start='09:00') !=
CustomBusinessHour(start='09:01'))
assert (CustomBusinessHour(start='09:00', end='17:00') !=
CustomBusinessHour(start='17:00', end='09:01'))
assert (CustomBusinessHour(weekmask='Tue Wed Thu Fri') !=
CustomBusinessHour(weekmask='Mon Tue Wed Thu Fri'))
assert (CustomBusinessHour(holidays=['2014-06-27']) !=
CustomBusinessHour(holidays=['2014-06-28']))
def test_hash(self):
assert hash(self.offset1) == hash(self.offset1)
assert hash(self.offset2) == hash(self.offset2)
def testCall(self):
assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
assert self.offset2(self.d) == datetime(2014, 7, 1, 11)
def testRAdd(self):
assert self.d + self.offset2 == self.offset2 + self.d
def testSub(self):
off = self.offset2
pytest.raises(Exception, off.__sub__, self.d)
assert 2 * off - off == off
assert self.d - self.offset2 == self.d - (2 * off - off)
def testRSub(self):
assert self.d - self.offset2 == (-self.offset2).apply(self.d)
def testMult1(self):
assert self.d + 5 * self.offset1 == self.d + self._offset(5)
def testMult2(self):
assert self.d + (-3 * self._offset(-2)) == self.d + self._offset(6)
def testRollback1(self):
assert self.offset1.rollback(self.d) == self.d
assert self.offset2.rollback(self.d) == self.d
d = datetime(2014, 7, 1, 0)
        # 2014/07/01 is a Tuesday; 06/30 is a Monday, which offset1's
        # weekmask excludes
assert self.offset1.rollback(d) == datetime(2014, 6, 27, 17)
# 2014/6/30 and 2014/6/27 are holidays
assert self.offset2.rollback(d) == datetime(2014, 6, 26, 17)
def testRollback2(self):
assert (self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) ==
datetime(2014, 7, 4, 17, 0))
def testRollforward1(self):
assert self.offset1.rollforward(self.d) == self.d
assert self.offset2.rollforward(self.d) == self.d
d = datetime(2014, 7, 1, 0)
assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
def testRollforward2(self):
assert (self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) ==
datetime(2014, 7, 7, 9))
def test_roll_date_object(self):
        offset = self._offset()
dt = datetime(2014, 7, 6, 15, 0)
result = offset.rollback(dt)
assert result == datetime(2014, 7, 4, 17)
result = offset.rollforward(dt)
assert result == datetime(2014, 7, 7, 9)
def test_normalize(self):
tests = []
tests.append((CustomBusinessHour(normalize=True,
holidays=self.holidays),
{datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3),
datetime(2014, 7, 1, 23): datetime(2014, 7, 3),
datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
datetime(2014, 7, 6, 10): datetime(2014, 7, 7)}))
tests.append((CustomBusinessHour(-1, normalize=True,
holidays=self.holidays),
{datetime(2014, 7, 1, 8): datetime(2014, 6, 26),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
datetime(2014, 7, 1, 10): datetime(2014, 6, 26),
datetime(2014, 7, 1, 0): datetime(2014, 6, 26),
datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
datetime(2014, 7, 6, 10): datetime(2014, 7, 4)}))
tests.append((CustomBusinessHour(1, normalize=True, start='17:00',
end='04:00', holidays=self.holidays),
{datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
datetime(2014, 7, 2, 3): datetime(2014, 7, 3),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
datetime(2014, 7, 7, 17): datetime(2014, 7, 7)}))
for offset, cases in tests:
for dt, expected in compat.iteritems(cases):
assert offset.apply(dt) == expected
def test_onOffset(self):
tests = []
tests.append((CustomBusinessHour(start='10:00', end='15:00',
holidays=self.holidays),
{datetime(2014, 7, 1, 9): False,
datetime(2014, 7, 1, 10): True,
datetime(2014, 7, 1, 15): True,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12): False,
datetime(2014, 7, 6, 12): False}))
for offset, cases in tests:
for dt, expected in compat.iteritems(cases):
assert offset.onOffset(dt) == expected
def test_apply(self):
tests = []
tests.append((
CustomBusinessHour(holidays=self.holidays),
{datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 9),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 3, 9, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 10),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30,
30)}))
tests.append((
CustomBusinessHour(4, holidays=self.holidays),
{datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 13): datetime(2014, 7, 3, 9),
datetime(2014, 7, 1, 15): datetime(2014, 7, 3, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30,
30)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_apply_nanoseconds(self):
tests = []
tests.append((CustomBusinessHour(holidays=self.holidays),
{Timestamp('2014-07-01 15:00') + Nano(5): Timestamp(
'2014-07-01 16:00') + Nano(5),
Timestamp('2014-07-01 16:00') + Nano(5): Timestamp(
'2014-07-03 09:00') + Nano(5),
Timestamp('2014-07-01 16:00') - Nano(5): Timestamp(
'2014-07-01 17:00') - Nano(5)}))
tests.append((CustomBusinessHour(-1, holidays=self.holidays),
{Timestamp('2014-07-01 15:00') + Nano(5): Timestamp(
'2014-07-01 14:00') + Nano(5),
Timestamp('2014-07-01 10:00') + Nano(5): Timestamp(
'2014-07-01 09:00') + Nano(5),
Timestamp('2014-07-01 10:00') - Nano(5): Timestamp(
'2014-06-26 17:00') - Nano(5), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
class TestCustomBusinessDay(Base):
_offset = CDay
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.nd = np_datetime64_compat('2008-01-01 00:00:00Z')
self.offset = CDay()
self.offset2 = CDay(2)
def test_different_normalize_equals(self):
# equivalent in this special case
offset = CDay()
offset2 = CDay()
offset2.normalize = True
assert offset == offset2
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessDay>'
assert repr(self.offset2) == '<2 * CustomBusinessDays>'
expected = '<BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def testEQ(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def testCall(self):
assert self.offset2(self.d) == datetime(2008, 1, 3)
assert self.offset2(self.nd) == datetime(2008, 1, 3)
def testRAdd(self):
assert self.d + self.offset2 == self.offset2 + self.d
def testSub(self):
off = self.offset2
pytest.raises(Exception, off.__sub__, self.d)
assert 2 * off - off == off
assert self.d - self.offset2 == self.d + CDay(-2)
def testRSub(self):
assert self.d - self.offset2 == (-self.offset2).apply(self.d)
def testMult1(self):
assert self.d + 10 * self.offset == self.d + CDay(10)
def testMult2(self):
assert self.d + (-5 * CDay(-10)) == self.d + CDay(50)
def testRollback1(self):
assert CDay(10).rollback(self.d) == self.d
def testRollback2(self):
assert (CDay(10).rollback(datetime(2008, 1, 5)) ==
datetime(2008, 1, 4))
def testRollforward1(self):
assert CDay(10).rollforward(self.d) == self.d
def testRollforward2(self):
assert (CDay(10).rollforward(datetime(2008, 1, 5)) ==
datetime(2008, 1, 7))
def test_roll_date_object(self):
offset = CDay()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 14)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 17)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
def test_onOffset(self):
tests = [(CDay(), datetime(2008, 1, 1), True),
(CDay(), datetime(2008, 1, 5), False)]
for offset, d, expected in tests:
assertOnOffset(offset, d, expected)
def test_apply(self):
tests = []
tests.append((CDay(), {datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
tests.append((2 * CDay(), {
datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}
))
tests.append((-CDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}
))
tests.append((-2 * CDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}
))
tests.append((CDay(0), {datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CDay(10)
assert result == datetime(2012, 11, 6)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CDay() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 12, 23)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2011, 12, 26)
assert rs == xp
def test_apply_corner(self):
pytest.raises(Exception, CDay().apply, BMonthEnd())
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = CDay()
offset2 = CDay()
assert not offset1 != offset2
def test_holidays(self):
# Define a TradingDay offset
holidays = ['2012-05-01', datetime(2013, 5, 1),
np.datetime64('2014-05-01')]
tday = CDay(holidays=holidays)
for year in range(2012, 2015):
dt = datetime(year, 4, 30)
xp = datetime(year, 5, 2)
rs = dt + tday
assert rs == xp
def test_weekmask(self):
weekmask_saudi = 'Sat Sun Mon Tue Wed' # Thu-Fri Weekend
weekmask_uae = '1111001' # Fri-Sat Weekend
weekmask_egypt = [1, 1, 1, 1, 0, 0, 1] # Fri-Sat Weekend
bday_saudi = CDay(weekmask=weekmask_saudi)
bday_uae = CDay(weekmask=weekmask_uae)
bday_egypt = CDay(weekmask=weekmask_egypt)
dt = datetime(2013, 5, 1)
xp_saudi = datetime(2013, 5, 4)
xp_uae = datetime(2013, 5, 2)
xp_egypt = datetime(2013, 5, 2)
assert xp_saudi == dt + bday_saudi
assert xp_uae == dt + bday_uae
assert xp_egypt == dt + bday_egypt
xp2 = datetime(2013, 5, 5)
assert xp2 == dt + 2 * bday_saudi
assert xp2 == dt + 2 * bday_uae
assert xp2 == dt + 2 * bday_egypt
def test_weekmask_and_holidays(self):
weekmask_egypt = 'Sun Mon Tue Wed Thu' # Fri-Sat Weekend
holidays = ['2012-05-01', datetime(2013, 5, 1),
np.datetime64('2014-05-01')]
bday_egypt = CDay(holidays=holidays, weekmask=weekmask_egypt)
dt = datetime(2013, 4, 30)
xp_egypt = datetime(2013, 5, 5)
assert xp_egypt == dt + 2 * bday_egypt
def test_calendar(self):
calendar = USFederalHolidayCalendar()
dt = datetime(2014, 1, 17)
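        # 2014-01-17 is a Friday and 2014-01-20 is Martin Luther King Jr. Day,
        # so the next business day under the federal holiday calendar is
        # Tuesday 2014-01-21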
assertEq(CDay(calendar=calendar), dt, datetime(2014, 1, 21))
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self.offset)
_check_roundtrip(self.offset2)
_check_roundtrip(self.offset * 2)
def test_pickle_compat_0_14_1(self):
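        # a pickle created with pandas 0.14.1 should still unpickle and
        # compare equal to a freshly constructed CDay with the same holidays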
        hdays = [datetime(2013, 1, 1) for _ in range(4)]
pth = tm.get_data_path()
cday0_14_1 = read_pickle(os.path.join(pth, 'cday-0.14.1.pickle'))
cday = CDay(holidays=hdays)
assert cday == cday0_14_1
class CustomBusinessMonthBase(object):
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = self._object()
self.offset2 = self._object(2)
def testEQ(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def testRAdd(self):
assert self.d + self.offset2 == self.offset2 + self.d
def testSub(self):
off = self.offset2
pytest.raises(Exception, off.__sub__, self.d)
assert 2 * off - off == off
assert self.d - self.offset2 == self.d + self._object(-2)
def testRSub(self):
assert self.d - self.offset2 == (-self.offset2).apply(self.d)
def testMult1(self):
assert self.d + 10 * self.offset == self.d + self._object(10)
def testMult2(self):
assert self.d + (-5 * self._object(-10)) == self.d + self._object(50)
def test_offsets_compare_equal(self):
offset1 = self._object()
offset2 = self._object()
assert not offset1 != offset2
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self._object())
_check_roundtrip(self._object(2))
_check_roundtrip(self._object() * 2)
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_object = CBMonthEnd
def test_different_normalize_equals(self):
# equivalent in this special case
offset = CBMonthEnd()
offset2 = CBMonthEnd()
offset2.normalize = True
assert offset == offset2
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessMonthEnd>'
assert repr(self.offset2) == '<2 * CustomBusinessMonthEnds>'
def testCall(self):
assert self.offset2(self.d) == datetime(2008, 2, 29)
def testRollback1(self):
assert (CDay(10).rollback(datetime(2007, 12, 31)) ==
datetime(2007, 12, 31))
def testRollback2(self):
assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)
def testRollforward1(self):
assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)
def test_roll_date_object(self):
offset = CBMonthEnd()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 8, 31)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 28)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
def test_onOffset(self):
tests = [(CBMonthEnd(), datetime(2008, 1, 31), True),
(CBMonthEnd(), datetime(2008, 1, 1), False)]
for offset, d, expected in tests:
assertOnOffset(offset, d, expected)
def test_apply(self):
cbm = CBMonthEnd()
tests = []
tests.append((cbm, {datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29)}))
tests.append((2 * cbm, {datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 2, 7): datetime(2008, 3, 31)}))
tests.append((-cbm, {datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 2, 8): datetime(2008, 1, 31)}))
tests.append((-2 * cbm, {datetime(2008, 1, 1): datetime(2007, 11, 30),
datetime(2008, 2, 9): datetime(2007, 12, 31)}
))
tests.append((CBMonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthEnd(10)
assert result == datetime(2013, 7, 31)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthEnd() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 29)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 5, 31)
assert rs == xp
def test_holidays(self):
# Define a CustomBusinessMonthEnd offset that skips the listed holidays
holidays = ['2012-01-31', datetime(2012, 2, 28),
np.datetime64('2012-02-29')]
bm_offset = CBMonthEnd(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 30)
assert dt + 2 * bm_offset == datetime(2012, 2, 27)
def test_datetimeindex(self):
from pandas.tseries.holiday import USFederalHolidayCalendar
hcal = USFederalHolidayCalendar()
freq = CBMonthEnd(calendar=hcal)
assert (DatetimeIndex(start='20120101', end='20130101',
freq=freq).tolist()[0] == datetime(2012, 1, 31))
class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
_object = CBMonthBegin
def test_different_normalize_equals(self):
# equivalent in this special case
offset = CBMonthBegin()
offset2 = CBMonthBegin()
offset2.normalize = True
assert offset == offset2
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessMonthBegin>'
assert repr(self.offset2) == '<2 * CustomBusinessMonthBegins>'
def testCall(self):
assert self.offset2(self.d) == datetime(2008, 3, 3)
def testRollback1(self):
assert (CDay(10).rollback(datetime(2007, 12, 31)) ==
datetime(2007, 12, 31))
def testRollback2(self):
assert CBMonthBegin(10).rollback(self.d) == datetime(2008, 1, 1)
def testRollforward1(self):
assert CBMonthBegin(10).rollforward(self.d) == datetime(2008, 1, 1)
def test_roll_date_object(self):
offset = CBMonthBegin()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 3)
result = offset.rollforward(dt)
assert result == datetime(2012, 10, 1)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
def test_onOffset(self):
tests = [(CBMonthBegin(), datetime(2008, 1, 1), True),
(CBMonthBegin(), datetime(2008, 1, 31), False)]
for offset, dt, expected in tests:
assertOnOffset(offset, dt, expected)
def test_apply(self):
cbm = CBMonthBegin()
tests = []
tests.append((cbm, {datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 2, 7): datetime(2008, 3, 3)}))
tests.append((2 * cbm, {datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 2, 7): datetime(2008, 4, 1)}))
tests.append((-cbm, {datetime(2008, 1, 1): datetime(2007, 12, 3),
datetime(2008, 2, 8): datetime(2008, 2, 1)}))
tests.append((-2 * cbm, {datetime(2008, 1, 1): datetime(2007, 11, 1),
datetime(2008, 2, 9): datetime(2008, 1, 1)}))
tests.append((CBMonthBegin(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 7): datetime(2008, 2, 1)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthBegin(10)
assert result == datetime(2013, 8, 1)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthBegin() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 1)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 6, 1)
assert rs == xp
def test_holidays(self):
# Define a CustomBusinessMonthBegin offset that skips the listed holidays
holidays = ['2012-02-01', datetime(2012, 2, 2),
np.datetime64('2012-03-01')]
bm_offset = CBMonthBegin(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 2)
assert dt + 2 * bm_offset == datetime(2012, 2, 3)
def test_datetimeindex(self):
hcal = USFederalHolidayCalendar()
cbmb = CBMonthBegin(calendar=hcal)
assert (DatetimeIndex(start='20120101', end='20130101',
freq=cbmb).tolist()[0] == datetime(2012, 1, 3))
def assertOnOffset(offset, date, expected):
actual = offset.onOffset(date)
assert actual == expected, ("\nExpected: %s\nActual: %s\nFor Offset: %s"
"\nAt Date: %s" %
(expected, actual, offset, date))
class TestWeek(Base):
_offset = Week
def test_repr(self):
assert repr(Week(weekday=0)) == "<Week: weekday=0>"
assert repr(Week(n=-1, weekday=0)) == "<-1 * Week: weekday=0>"
assert repr(Week(n=-2, weekday=0)) == "<-2 * Weeks: weekday=0>"
def test_corner(self):
pytest.raises(ValueError, Week, weekday=7)
tm.assert_raises_regex(
ValueError, "Day must be", Week, weekday=-1)
def test_isAnchored(self):
assert Week(weekday=0).isAnchored()
assert not Week().isAnchored()
assert not Week(2, weekday=2).isAnchored()
assert not Week(2).isAnchored()
def test_offset(self):
tests = []
tests.append((Week(), # not business week
{datetime(2008, 1, 1): datetime(2008, 1, 8),
datetime(2008, 1, 4): datetime(2008, 1, 11),
datetime(2008, 1, 5): datetime(2008, 1, 12),
datetime(2008, 1, 6): datetime(2008, 1, 13),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(weekday=0), # Mon
{datetime(2007, 12, 31): datetime(2008, 1, 7),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
tests.append((Week(0, weekday=0), # n=0 -> roll forward. Mon
{datetime(2007, 12, 31): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
tests.append((Week(-2, weekday=1),  # n=-2, anchored to Tuesday
{datetime(2010, 4, 6): datetime(2010, 3, 23),
datetime(2010, 4, 8): datetime(2010, 3, 30),
datetime(2010, 4, 5): datetime(2010, 3, 23)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_onOffset(self):
for weekday in range(7):
offset = Week(weekday=weekday)
for day in range(1, 8):
date = datetime(2008, 1, day)
if day % 7 == weekday:
expected = True
else:
expected = False
assertOnOffset(offset, date, expected)
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = Week()
offset2 = Week()
assert not offset1 != offset2
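# --- Illustrative sketch (not part of the test suite above) ----------------
# Week() with no weekday is a plain 7-day step, while Week(weekday=k) is
# anchored: it advances to the next occurrence of weekday k, and n=0 rolls
# forward only when the date is not already on that weekday.  Values mirror
# test_offset/test_isAnchored above; the import path is assumed.
from datetime import datetime
from pandas.tseries.offsets import Week

assert Week(weekday=0).isAnchored()     # anchored to Monday
assert not Week().isAnchored()          # plain weekly step
assert datetime(2008, 1, 1) + Week() == datetime(2008, 1, 8)
assert datetime(2008, 1, 4) + Week(weekday=0) == datetime(2008, 1, 7)          # next Monday
assert datetime(2007, 12, 31) + Week(0, weekday=0) == datetime(2007, 12, 31)   # already a Monday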
class TestWeekOfMonth(Base):
_offset = WeekOfMonth
def test_constructor(self):
tm.assert_raises_regex(ValueError, "^N cannot be 0",
WeekOfMonth, n=0, week=1, weekday=1)
tm.assert_raises_regex(ValueError, "^Week", WeekOfMonth,
n=1, week=4, weekday=0)
tm.assert_raises_regex(ValueError, "^Week", WeekOfMonth,
n=1, week=-1, weekday=0)
tm.assert_raises_regex(ValueError, "^Day", WeekOfMonth,
n=1, week=0, weekday=-1)
tm.assert_raises_regex(ValueError, "^Day", WeekOfMonth,
n=1, week=0, weekday=7)
def test_repr(self):
assert (repr(WeekOfMonth(weekday=1, week=2)) ==
"<WeekOfMonth: week=2, weekday=1>")
def test_offset(self):
date1 = datetime(2011, 1, 4) # 1st Tuesday of Month
date2 = datetime(2011, 1, 11) # 2nd Tuesday of Month
date3 = datetime(2011, 1, 18) # 3rd Tuesday of Month
date4 = datetime(2011, 1, 25) # 4th Tuesday of Month
# test case tuples are (n, week, weekday, date, expected); see the loop below
test_cases = [
(-2, 2, 1, date1, datetime(2010, 11, 16)),
(-2, 2, 1, date2, datetime(2010, 11, 16)),
(-2, 2, 1, date3, datetime(2010, 11, 16)),
(-2, 2, 1, date4, datetime(2010, 12, 21)),
(-1, 2, 1, date1, datetime(2010, 12, 21)),
(-1, 2, 1, date2, datetime(2010, 12, 21)),
(-1, 2, 1, date3, datetime(2010, 12, 21)),
(-1, 2, 1, date4, datetime(2011, 1, 18)),
(1, 0, 0, date1, datetime(2011, 2, 7)),
(1, 0, 0, date2, datetime(2011, 2, 7)),
(1, 0, 0, date3, datetime(2011, 2, 7)),
(1, 0, 0, date4, datetime(2011, 2, 7)),
(1, 0, 1, date1, datetime(2011, 2, 1)),
(1, 0, 1, date2, datetime(2011, 2, 1)),
(1, 0, 1, date3, datetime(2011, 2, 1)),
(1, 0, 1, date4, datetime(2011, 2, 1)),
(1, 0, 2, date1, datetime(2011, 1, 5)),
(1, 0, 2, date2, datetime(2011, 2, 2)),
(1, 0, 2, date3, datetime(2011, 2, 2)),
(1, 0, 2, date4, datetime(2011, 2, 2)),
(1, 2, 1, date1, datetime(2011, 1, 18)),
(1, 2, 1, date2, datetime(2011, 1, 18)),
(1, 2, 1, date3, datetime(2011, 2, 15)),
(1, 2, 1, date4, datetime(2011, 2, 15)),
(2, 2, 1, date1, datetime(2011, 2, 15)),
(2, 2, 1, date2, datetime(2011, 2, 15)),
(2, 2, 1, date3, datetime(2011, 3, 15)),
(2, 2, 1, date4, datetime(2011, 3, 15)),
]
for n, week, weekday, dt, expected in test_cases:
offset = WeekOfMonth(n, week=week, weekday=weekday)
assertEq(offset, dt, expected)
# try subtracting
result = datetime(2011, 2, 1) - WeekOfMonth(week=1, weekday=2)
assert result == datetime(2011, 1, 12)
result = datetime(2011, 2, 3) - WeekOfMonth(week=0, weekday=2)
assert result == datetime(2011, 2, 2)
def test_onOffset(self):
test_cases = [
(0, 0, datetime(2011, 2, 7), True),
(0, 0, datetime(2011, 2, 6), False),
(0, 0, datetime(2011, 2, 14), False),
(1, 0, datetime(2011, 2, 14), True),
(0, 1, datetime(2011, 2, 1), True),
(0, 1, datetime(2011, 2, 8), False),
]
for week, weekday, dt, expected in test_cases:
offset = WeekOfMonth(week=week, weekday=weekday)
assert offset.onOffset(dt) == expected
class TestLastWeekOfMonth(Base):
_offset = LastWeekOfMonth
def test_constructor(self):
tm.assert_raises_regex(ValueError, "^N cannot be 0",
LastWeekOfMonth, n=0, weekday=1)
tm.assert_raises_regex(ValueError, "^Day", LastWeekOfMonth, n=1,
weekday=-1)
tm.assert_raises_regex(
ValueError, "^Day", LastWeekOfMonth, n=1, weekday=7)
def test_offset(self):
# Saturday
last_sat = datetime(2013, 8, 31)
next_sat = datetime(2013, 9, 28)
offset_sat = LastWeekOfMonth(n=1, weekday=5)
one_day_before = (last_sat + timedelta(days=-1))
assert one_day_before + offset_sat == last_sat
one_day_after = (last_sat + timedelta(days=+1))
assert one_day_after + offset_sat == next_sat
# Test On that day
assert last_sat + offset_sat == next_sat
# Thursday
offset_thur = LastWeekOfMonth(n=1, weekday=3)
last_thurs = datetime(2013, 1, 31)
next_thurs = datetime(2013, 2, 28)
one_day_before = last_thurs + timedelta(days=-1)
assert one_day_before + offset_thur == last_thurs
one_day_after = last_thurs + timedelta(days=+1)
assert one_day_after + offset_thur == next_thurs
# Test on that day
assert last_thurs + offset_thur == next_thurs
three_before = last_thurs + timedelta(days=-3)
assert three_before + offset_thur == last_thurs
two_after = last_thurs + timedelta(days=+2)
assert two_after + offset_thur == next_thurs
offset_sunday = LastWeekOfMonth(n=1, weekday=WeekDay.SUN)
assert datetime(2013, 7, 31) + offset_sunday == datetime(2013, 8, 25)
def test_onOffset(self):
test_cases = [
(WeekDay.SUN, datetime(2013, 1, 27), True),
(WeekDay.SAT, datetime(2013, 3, 30), True),
(WeekDay.MON, datetime(2013, 2, 18), False), # Not the last Mon
(WeekDay.SUN, datetime(2013, 2, 25), False), # Not a SUN
(WeekDay.MON, datetime(2013, 2, 25), True),
(WeekDay.SAT, datetime(2013, 11, 30), True),
(WeekDay.SAT, datetime(2006, 8, 26), True),
(WeekDay.SAT, datetime(2007, 8, 25), True),
(WeekDay.SAT, datetime(2008, 8, 30), True),
(WeekDay.SAT, datetime(2009, 8, 29), True),
(WeekDay.SAT, datetime(2010, 8, 28), True),
(WeekDay.SAT, datetime(2011, 8, 27), True),
(WeekDay.SAT, datetime(2019, 8, 31), True),
]
for weekday, dt, expected in test_cases:
offset = LastWeekOfMonth(weekday=weekday)
assert offset.onOffset(dt) == expected
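# --- Illustrative sketch (not part of the test suite above) ----------------
# LastWeekOfMonth(weekday=k) anchors on the last occurrence of weekday k in a
# month; adding it from the anchor itself jumps to the next month's anchor,
# as test_offset above demonstrates.  Values mirror those tests; the import
# path is assumed.
from datetime import datetime, timedelta
from pandas.tseries.offsets import LastWeekOfMonth

offset_sat = LastWeekOfMonth(n=1, weekday=5)   # last Saturday of the month
last_sat, next_sat = datetime(2013, 8, 31), datetime(2013, 9, 28)
assert last_sat - timedelta(days=1) + offset_sat == last_sat   # day before -> this month's anchor
assert last_sat + offset_sat == next_sat                       # on the anchor -> next month's anchor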
class TestBMonthBegin(Base):
_offset = BMonthBegin
def test_offset(self):
tests = []
tests.append((BMonthBegin(),
{datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2006, 9, 1): datetime(2006, 10, 2),
datetime(2007, 1, 1): datetime(2007, 2, 1),
datetime(2006, 12, 1): datetime(2007, 1, 1)}))
tests.append((BMonthBegin(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2006, 10, 2): datetime(2006, 10, 2),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2006, 9, 15): datetime(2006, 10, 2)}))
tests.append((BMonthBegin(2),
{datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 1, 15): datetime(2008, 3, 3),
datetime(2006, 12, 29): datetime(2007, 2, 1),
datetime(2006, 12, 31): datetime(2007, 2, 1),
datetime(2007, 1, 1): datetime(2007, 3, 1),
datetime(2006, 11, 1): datetime(2007, 1, 1)}))
tests.append((BMonthBegin(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 1),
datetime(2008, 6, 30): datetime(2008, 6, 2),
datetime(2008, 6, 1): datetime(2008, 5, 1),
datetime(2008, 3, 10): datetime(2008, 3, 3),
datetime(2008, 12, 31): datetime(2008, 12, 1),
datetime(2006, 12, 29): datetime(2006, 12, 1),
datetime(2006, 12, 30): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 1)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_onOffset(self):
tests = [(BMonthBegin(), datetime(2007, 12, 31), False),
(BMonthBegin(), datetime(2008, 1, 1), True),
(BMonthBegin(), datetime(2001, 4, 2), True),
(BMonthBegin(), datetime(2008, 3, 3), True)]
for offset, dt, expected in tests:
assertOnOffset(offset, dt, expected)
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = BMonthBegin()
offset2 = BMonthBegin()
assert not offset1 != offset2
class TestBMonthEnd(Base):
_offset = BMonthEnd
def test_offset(self):
tests = []
tests.append((BMonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 29),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((BMonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 2, 28),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 29)}))
tests.append((BMonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2008, 5, 30),
datetime(2008, 12, 31): datetime(2008, 11, 28),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_normalize(self):
dt = datetime(2007, 1, 1, 3)
result = dt + BMonthEnd(normalize=True)
expected = dt.replace(hour=0) + BMonthEnd()
assert result == expected
def test_onOffset(self):
tests = [(BMonthEnd(), datetime(2007, 12, 31), True),
(BMonthEnd(), datetime(2008, 1, 1), False)]
for offset, dt, expected in tests:
assertOnOffset(offset, dt, expected)
def test_offsets_compare_equal(self):
# root cause of #456
offset1 = BMonthEnd()
offset2 = BMonthEnd()
assert not offset1 != offset2
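# --- Illustrative sketch (not part of the test suite above) ----------------
# BMonthBegin/BMonthEnd anchor on the first/last *business* day of the month,
# so a weekend at the month boundary shifts the anchor.  Values mirror the
# test_offset cases above; the import path is assumed.
from datetime import datetime
from pandas.tseries.offsets import BMonthBegin, BMonthEnd

# 2006-10-01 is a Sunday, so the first business day of October is the 2nd.
assert datetime(2006, 9, 1) + BMonthBegin() == datetime(2006, 10, 2)
# 2006-12-31 is a Sunday, so the last business day of December is the 29th.
assert datetime(2006, 12, 1) + BMonthEnd() == datetime(2006, 12, 29)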
class TestMonthBegin(Base):
_offset = MonthBegin
def test_offset(self):
tests = []
# NOTE: I'm not entirely happy with the logic here for Begin -ss
# see thread 'offset conventions' on the ML
tests.append((MonthBegin(),
{datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 2, 1): datetime(2008, 3, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2006, 12, 1): datetime(2007, 1, 1),
datetime(2007, 1, 31): datetime(2007, 2, 1)}))
tests.append((MonthBegin(0),
{datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2006, 12, 3): datetime(2007, 1, 1),
datetime(2007, 1, 31): datetime(2007, 2, 1)}))
tests.append((MonthBegin(2),
{datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 1, 31): datetime(2008, 3, 1),
datetime(2006, 12, 31): datetime(2007, 2, 1),
datetime(2007, 12, 28): datetime(2008, 2, 1),
datetime(2007, 1, 1): datetime(2007, 3, 1),
datetime(2006, 11, 1): datetime(2007, 1, 1)}))
tests.append((MonthBegin(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 1),
datetime(2008, 5, 31): datetime(2008, 5, 1),
datetime(2008, 12, 31): datetime(2008, 12, 1),
datetime(2006, 12, 29): datetime(2006, 12, 1),
datetime(2006, 1, 2): datetime(2006, 1, 1)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
class TestMonthEnd(Base):
_offset = MonthEnd
def test_offset(self):
tests = []
tests.append((MonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2006, 12, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31)}))
tests.append((MonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 3, 31),
datetime(2006, 12, 29): datetime(2007, 1, 31),
datetime(2006, 12, 31): datetime(2007, 2, 28),
datetime(2007, 1, 1): datetime(2007, 2, 28),
datetime(2006, 11, 1): datetime(2006, 12, 31)}))
tests.append((MonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 30): datetime(2006, 11, 30),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_day_of_month(self):
dt = datetime(2007, 1, 1)
offset = MonthEnd(day=20)
result = dt + offset
assert result == Timestamp(2007, 1, 31)
result = result + offset
assert result == Timestamp(2007, 2, 28)
def test_normalize(self):
dt = datetime(2007, 1, 1, 3)
result = dt + MonthEnd(normalize=True)
expected = dt.replace(hour=0) + MonthEnd()
assert result == expected
def test_onOffset(self):
tests = [(MonthEnd(), datetime(2007, 12, 31), True),
(MonthEnd(), datetime(2008, 1, 1), False)]
for offset, dt, expected in tests:
assertOnOffset(offset, dt, expected)
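# --- Illustrative sketch (not part of the test suite above) ----------------
# MonthEnd/MonthBegin are the calendar-day counterparts: n=0 rolls forward to
# the anchor only if the date is not already on it, and normalize=True snaps
# the result back to midnight.  Values mirror the MonthEnd tests above; the
# import path is assumed.
from datetime import datetime
from pandas.tseries.offsets import MonthEnd

assert datetime(2008, 1, 1) + MonthEnd() == datetime(2008, 1, 31)
assert datetime(2008, 1, 31) + MonthEnd(0) == datetime(2008, 1, 31)    # already on the anchor
assert datetime(2007, 1, 1, 3) + MonthEnd(normalize=True) == datetime(2007, 1, 31)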
class TestSemiMonthEnd(Base):
_offset = SemiMonthEnd
def _get_tests(self):
tests = []
tests.append((SemiMonthEnd(),
{datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 15): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 15),
datetime(2006, 12, 14): datetime(2006, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 15),
datetime(2007, 1, 1): datetime(2007, 1, 15),
datetime(2006, 12, 1): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2006, 12, 31)}))
tests.append((SemiMonthEnd(day_of_month=20),
{datetime(2008, 1, 1): datetime(2008, 1, 20),
datetime(2008, 1, 15): datetime(2008, 1, 20),
datetime(2008, 1, 21): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 20),
datetime(2006, 12, 14): datetime(2006, 12, 20),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 20),
datetime(2007, 1, 1): datetime(2007, 1, 20),
datetime(2006, 12, 1): datetime(2006, 12, 20),
datetime(2006, 12, 15): datetime(2006, 12, 20)}))
tests.append((SemiMonthEnd(0),
{datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 16): datetime(2008, 1, 31),
datetime(2008, 1, 15): datetime(2008, 1, 15),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 15)}))
tests.append((SemiMonthEnd(0, day_of_month=16),
{datetime(2008, 1, 1): datetime(2008, 1, 16),
datetime(2008, 1, 16): datetime(2008, 1, 16),
datetime(2008, 1, 15): datetime(2008, 1, 16),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 16)}))
tests.append((SemiMonthEnd(2),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 15),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2007, 1, 16): datetime(2007, 2, 15),
datetime(2006, 11, 1): datetime(2006, 11, 30)}))
tests.append((SemiMonthEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 15),
datetime(2008, 12, 31): datetime(2008, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 15),
datetime(2006, 12, 30): datetime(2006, 12, 15),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
tests.append((SemiMonthEnd(-1, day_of_month=4),
{datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2007, 1, 4): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 4),
datetime(2008, 12, 31): datetime(2008, 12, 4),
datetime(2006, 12, 5): datetime(2006, 12, 4),
datetime(2006, 12, 30): datetime(2006, 12, 4),
datetime(2007, 1, 1): datetime(2006, 12, 31)}))
tests.append((SemiMonthEnd(-2),
{datetime(2007, 1, 1): datetime(2006, 12, 15),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 3, 15): datetime(2008, 2, 15),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 14): datetime(2006, 11, 15),
datetime(2007, 1, 1): datetime(2006, 12, 15)}))
return tests
def test_offset_whole_year(self):
dates = (datetime(2007, 12, 31),
datetime(2008, 1, 15),
datetime(2008, 1, 31),
datetime(2008, 2, 15),
datetime(2008, 2, 29),
datetime(2008, 3, 15),
datetime(2008, 3, 31),
datetime(2008, 4, 15),
datetime(2008, 4, 30),
datetime(2008, 5, 15),
datetime(2008, 5, 31),
datetime(2008, 6, 15),
datetime(2008, 6, 30),
datetime(2008, 7, 15),
datetime(2008, 7, 31),
datetime(2008, 8, 15),
datetime(2008, 8, 31),
datetime(2008, 9, 15),
datetime(2008, 9, 30),
datetime(2008, 10, 15),
datetime(2008, 10, 31),
datetime(2008, 11, 15),
datetime(2008, 11, 30),
datetime(2008, 12, 15),
datetime(2008, 12, 31))
for base, exp_date in zip(dates[:-1], dates[1:]):
assertEq(SemiMonthEnd(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
result = SemiMonthEnd().apply_index(s)
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = DatetimeIndex(start=dates[0], end=dates[-1], freq='SM')
exp = DatetimeIndex(dates)
tm.assert_index_equal(result, exp)
def test_offset(self):
for offset, cases in self._get_tests():
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_apply_index(self):
for offset, cases in self._get_tests():
s = DatetimeIndex(cases.keys())
result = offset.apply_index(s)
exp = DatetimeIndex(cases.values())
tm.assert_index_equal(result, exp)
def test_onOffset(self):
tests = [(datetime(2007, 12, 31), True),
(datetime(2007, 12, 15), True),
(datetime(2007, 12, 14), False),
(datetime(2007, 12, 1), False),
(datetime(2008, 2, 29), True)]
for dt, expected in tests:
assertOnOffset(SemiMonthEnd(), dt, expected)
def test_vectorized_offset_addition(self):
for klass, assert_func in zip([Series, DatetimeIndex],
[tm.assert_series_equal,
tm.assert_index_equal]):
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + SemiMonthEnd()
result2 = SemiMonthEnd() + s
exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
Timestamp('2000-02-29', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([Timestamp('2000-01-01 00:15:00', tz='US/Central'),
Timestamp('2000-02-01', tz='US/Central')], name='a')
result = s + SemiMonthEnd()
result2 = SemiMonthEnd() + s
exp = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
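# --- Illustrative sketch (not part of the test suite above) ----------------
# SemiMonthEnd steps between the day_of_month anchor (default 15) and the
# month end; the 'SM' frequency alias exercised in test_offset_whole_year
# builds the same sequence as a date range.  Values mirror the tests above;
# the import path and the use of pd.date_range here are assumptions.
from datetime import datetime
import pandas as pd
from pandas.tseries.offsets import SemiMonthEnd

assert datetime(2008, 1, 1) + SemiMonthEnd() == datetime(2008, 1, 15)
assert datetime(2008, 1, 15) + SemiMonthEnd() == datetime(2008, 1, 31)
assert datetime(2008, 1, 1) + SemiMonthEnd(day_of_month=20) == datetime(2008, 1, 20)
# A semi-month-end range over January 2008: the 15th and the 31st.
assert pd.date_range('2008-01-01', '2008-01-31', freq='SM').tolist() == \
    [pd.Timestamp('2008-01-15'), pd.Timestamp('2008-01-31')]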
class TestSemiMonthBegin(Base):
_offset = SemiMonthBegin
def _get_tests(self):
tests = []
tests.append((SemiMonthBegin(),
{datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 14): datetime(2006, 12, 15),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 1): datetime(2007, 1, 15),
datetime(2006, 12, 1): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2007, 1, 1)}))
tests.append((SemiMonthBegin(day_of_month=20),
{datetime(2008, 1, 1): datetime(2008, 1, 20),
datetime(2008, 1, 15): datetime(2008, 1, 20),
datetime(2008, 1, 21): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 14): datetime(2006, 12, 20),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 1): datetime(2007, 1, 20),
datetime(2006, 12, 1): datetime(2006, 12, 20),
datetime(2006, 12, 15): datetime(2006, 12, 20)}))
tests.append((SemiMonthBegin(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 16): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 1, 15),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 2): datetime(2006, 12, 15),
datetime(2007, 1, 1): datetime(2007, 1, 1)}))
tests.append((SemiMonthBegin(0, day_of_month=16),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 16): datetime(2008, 1, 16),
datetime(2008, 1, 15): datetime(2008, 1, 16),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 5): datetime(2007, 1, 16),
datetime(2007, 1, 1): datetime(2007, 1, 1)}))
tests.append((SemiMonthBegin(2),
{datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 15),
datetime(2006, 12, 1): datetime(2007, 1, 1),
datetime(2006, 12, 29): datetime(2007, 1, 15),
datetime(2006, 12, 15): datetime(2007, 1, 15),
datetime(2007, 1, 1): datetime(2007, 2, 1),
datetime(2007, 1, 16): datetime(2007, 2, 15),
datetime(2006, 11, 1): datetime(2006, 12, 1)}))
tests.append((SemiMonthBegin(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 15),
datetime(2008, 6, 30): datetime(2008, 6, 15),
datetime(2008, 6, 14): datetime(2008, 6, 1),
datetime(2008, 12, 31): datetime(2008, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 15)}))
tests.append((SemiMonthBegin(-1, day_of_month=4),
{datetime(2007, 1, 1): datetime(2006, 12, 4),
datetime(2007, 1, 4): datetime(2007, 1, 1),
datetime(2008, 6, 30): datetime(2008, 6, 4),
datetime(2008, 12, 31): datetime(2008, 12, 4),
datetime(2006, 12, 5): datetime(2006, 12, 4),
datetime(2006, 12, 30): datetime(2006, 12, 4),
datetime(2006, 12, 2): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 4)}))
tests.append((SemiMonthBegin(-2),
{datetime(2007, 1, 1): datetime(2006, 12, 1),
datetime(2008, 6, 30): datetime(2008, 6, 1),
datetime(2008, 6, 14): datetime(2008, 5, 15),
datetime(2008, 12, 31): datetime(2008, 12, 1),
datetime(2006, 12, 29): datetime(2006, 12, 1),
datetime(2006, 12, 15): datetime(2006, 11, 15),
datetime(2007, 1, 1): datetime(2006, 12, 1)}))
return tests
def test_offset_whole_year(self):
dates = (datetime(2007, 12, 15),
datetime(2008, 1, 1),
datetime(2008, 1, 15),
datetime(2008, 2, 1),
datetime(2008, 2, 15),
datetime(2008, 3, 1),
datetime(2008, 3, 15),
datetime(2008, 4, 1),
datetime(2008, 4, 15),
datetime(2008, 5, 1),
datetime(2008, 5, 15),
datetime(2008, 6, 1),
datetime(2008, 6, 15),
datetime(2008, 7, 1),
datetime(2008, 7, 15),
datetime(2008, 8, 1),
datetime(2008, 8, 15),
datetime(2008, 9, 1),
datetime(2008, 9, 15),
datetime(2008, 10, 1),
datetime(2008, 10, 15),
datetime(2008, 11, 1),
datetime(2008, 11, 15),
datetime(2008, 12, 1),
datetime(2008, 12, 15))
for base, exp_date in zip(dates[:-1], dates[1:]):
assertEq(SemiMonthBegin(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
result = SemiMonthBegin().apply_index(s)
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = DatetimeIndex(start=dates[0], end=dates[-1], freq='SMS')
exp = DatetimeIndex(dates)
tm.assert_index_equal(result, exp)
def test_offset(self):
for offset, cases in self._get_tests():
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_apply_index(self):
for offset, cases in self._get_tests():
s = DatetimeIndex(cases.keys())
result = offset.apply_index(s)
exp = DatetimeIndex(cases.values())
tm.assert_index_equal(result, exp)
def test_onOffset(self):
tests = [(datetime(2007, 12, 1), True),
(datetime(2007, 12, 15), True),
(datetime(2007, 12, 14), False),
(datetime(2007, 12, 31), False),
(datetime(2008, 2, 15), True)]
for dt, expected in tests:
assertOnOffset(SemiMonthBegin(), dt, expected)
def test_vectorized_offset_addition(self):
for klass, assert_func in zip([Series, DatetimeIndex],
[tm.assert_series_equal,
tm.assert_index_equal]):
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + SemiMonthBegin()
result2 = SemiMonthBegin() + s
exp = klass([Timestamp('2000-02-01 00:15:00', tz='US/Central'),
Timestamp('2000-03-01', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([Timestamp('2000-01-01 00:15:00', tz='US/Central'),
Timestamp('2000-02-01', tz='US/Central')], name='a')
result = s + SemiMonthBegin()
result2 = SemiMonthBegin() + s
exp = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
class TestBQuarterBegin(Base):
_offset = BQuarterBegin
def test_repr(self):
assert (repr(BQuarterBegin()) ==
"<BusinessQuarterBegin: startingMonth=3>")
assert (repr(BQuarterBegin(startingMonth=3)) ==
"<BusinessQuarterBegin: startingMonth=3>")
assert (repr(BQuarterBegin(startingMonth=1)) ==
"<BusinessQuarterBegin: startingMonth=1>")
def test_isAnchored(self):
assert BQuarterBegin(startingMonth=1).isAnchored()
assert BQuarterBegin().isAnchored()
assert not BQuarterBegin(2, startingMonth=1).isAnchored()
def test_offset(self):
tests = []
tests.append((BQuarterBegin(startingMonth=1),
{datetime(2008, 1, 1): datetime(2008, 4, 1),
datetime(2008, 1, 31): datetime(2008, 4, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2008, 3, 31): datetime(2008, 4, 1),
datetime(2008, 4, 15): datetime(2008, 7, 1),
datetime(2007, 3, 15): datetime(2007, 4, 2),
datetime(2007, 2, 28): datetime(2007, 4, 2),
datetime(2007, 1, 1): datetime(2007, 4, 2),
datetime(2007, 4, 15): datetime(2007, 7, 2),
datetime(2007, 7, 1): datetime(2007, 7, 2),
datetime(2007, 4, 1): datetime(2007, 4, 2),
datetime(2007, 4, 2): datetime(2007, 7, 2),
datetime(2008, 4, 30): datetime(2008, 7, 1), }))
tests.append((BQuarterBegin(startingMonth=2),
{datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 2, 29): datetime(2008, 5, 1),
datetime(2008, 3, 15): datetime(2008, 5, 1),
datetime(2008, 3, 31): datetime(2008, 5, 1),
datetime(2008, 4, 15): datetime(2008, 5, 1),
datetime(2008, 8, 15): datetime(2008, 11, 3),
datetime(2008, 9, 15): datetime(2008, 11, 3),
datetime(2008, 11, 1): datetime(2008, 11, 3),
datetime(2008, 4, 30): datetime(2008, 5, 1), }))
tests.append((BQuarterBegin(startingMonth=1, n=0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2007, 12, 31): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 1, 15): datetime(2008, 4, 1),
datetime(2008, 2, 27): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2007, 4, 1): datetime(2007, 4, 2),
datetime(2007, 4, 2): datetime(2007, 4, 2),
datetime(2007, 7, 1): datetime(2007, 7, 2),
datetime(2007, 4, 15): datetime(2007, 7, 2),
datetime(2007, 7, 2): datetime(2007, 7, 2), }))
tests.append((BQuarterBegin(startingMonth=1, n=-1),
{datetime(2008, 1, 1): datetime(2007, 10, 1),
datetime(2008, 1, 31): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 1, 1),
datetime(2008, 2, 29): datetime(2008, 1, 1),
datetime(2008, 3, 15): datetime(2008, 1, 1),
datetime(2008, 3, 31): datetime(2008, 1, 1),
datetime(2008, 4, 15): datetime(2008, 4, 1),
datetime(2007, 7, 3): datetime(2007, 7, 2),
datetime(2007, 4, 3): datetime(2007, 4, 2),
datetime(2007, 7, 2): datetime(2007, 4, 2),
datetime(2008, 4, 1): datetime(2008, 1, 1), }))
tests.append((BQuarterBegin(startingMonth=1, n=2),
{datetime(2008, 1, 1): datetime(2008, 7, 1),
datetime(2008, 1, 15): datetime(2008, 7, 1),
datetime(2008, 2, 29): datetime(2008, 7, 1),
datetime(2008, 3, 15): datetime(2008, 7, 1),
datetime(2007, 3, 31): datetime(2007, 7, 2),
datetime(2007, 4, 15): datetime(2007, 10, 1),
datetime(2008, 4, 30): datetime(2008, 10, 1), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
# corner
offset = BQuarterBegin(n=-1, startingMonth=1)
assert datetime(2007, 4, 3) + offset == datetime(2007, 4, 2)
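# --- Illustrative sketch (not part of the test suite above) ----------------
# BQuarterBegin(startingMonth=m) anchors quarters on the first business day of
# months m, m+3, m+6, m+9; when the calendar month starts on a weekend the
# anchor slides to the next business day, which is why 2007-04-02 (a Monday)
# appears in the cases above.  Values mirror test_offset; the import path is
# assumed.
from datetime import datetime
from pandas.tseries.offsets import BQuarterBegin

assert datetime(2008, 1, 1) + BQuarterBegin(startingMonth=1) == datetime(2008, 4, 1)
# 2007-04-01 is a Sunday, so the quarter begins on Monday 2007-04-02.
assert datetime(2007, 3, 15) + BQuarterBegin(startingMonth=1) == datetime(2007, 4, 2)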
class TestBQuarterEnd(Base):
_offset = BQuarterEnd
def test_repr(self):
assert (repr(BQuarterEnd()) ==
"<BusinessQuarterEnd: startingMonth=3>")
assert (repr(BQuarterEnd(startingMonth=3)) ==
"<BusinessQuarterEnd: startingMonth=3>")
assert (repr(BQuarterEnd(startingMonth=1)) ==
"<BusinessQuarterEnd: startingMonth=1>")
def test_isAnchored(self):
assert BQuarterEnd(startingMonth=1).isAnchored()
assert BQuarterEnd().isAnchored()
assert not BQuarterEnd(2, startingMonth=1).isAnchored()
def test_offset(self):
tests = []
tests.append((BQuarterEnd(startingMonth=1),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 4, 30),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 7, 31), }))
tests.append((BQuarterEnd(startingMonth=2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2008, 2, 15): datetime(2008, 2, 29),
datetime(2008, 2, 29): datetime(2008, 5, 30),
datetime(2008, 3, 15): datetime(2008, 5, 30),
datetime(2008, 3, 31): datetime(2008, 5, 30),
datetime(2008, 4, 15): datetime(2008, 5, 30),
datetime(2008, 4, 30): datetime(2008, 5, 30), }))
tests.append((BQuarterEnd(startingMonth=1, n=0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 4, 30), }))
tests.append((BQuarterEnd(startingMonth=1, n=-1),
{datetime(2008, 1, 1): datetime(2007, 10, 31),
datetime(2008, 1, 31): datetime(2007, 10, 31),
datetime(2008, 2, 15): datetime(2008, 1, 31),
datetime(2008, 2, 29): datetime(2008, 1, 31),
datetime(2008, 3, 15): datetime(2008, 1, 31),
datetime(2008, 3, 31): datetime(2008, 1, 31),
datetime(2008, 4, 15): datetime(2008, 1, 31),
datetime(2008, 4, 30): datetime(2008, 1, 31), }))
tests.append((BQuarterEnd(startingMonth=1, n=2),
{datetime(2008, 1, 31): datetime(2008, 7, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 4, 30): datetime(2008, 10, 31), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
# corner
offset = BQuarterEnd(n=-1, startingMonth=1)
assert datetime(2010, 1, 31) + offset == datetime(2010, 1, 29)
def test_onOffset(self):
tests = [
(BQuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
(BQuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),
(BQuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), True),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),
(BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), True),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), True),
(BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), False),
]
for offset, dt, expected in tests:
assertOnOffset(offset, dt, expected)
def makeFY5253LastOfMonthQuarter(*args, **kwds):
return FY5253Quarter(*args, variation="last", **kwds)
def makeFY5253NearestEndMonthQuarter(*args, **kwds):
return FY5253Quarter(*args, variation="nearest", **kwds)
def makeFY5253NearestEndMonth(*args, **kwds):
return FY5253(*args, variation="nearest", **kwds)
def makeFY5253LastOfMonth(*args, **kwds):
return FY5253(*args, variation="last", **kwds)
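# --- Illustrative sketch (not part of the test suite above) ----------------
# The helpers above wrap the two FY5253 (52-53 week fiscal year) variations:
# "last" anchors on the last given weekday of the ending month, while
# "nearest" anchors on the weekday nearest the end of that month, which can
# land in the following month.  Values mirror the Wikipedia-based cases in
# the classes below; weekday=5 is Saturday and the import path is assumed.
from datetime import datetime
from pandas.tseries.offsets import FY5253

lom_aug_sat = FY5253(1, startingMonth=8, weekday=5, variation="last")
nem_aug_sat = FY5253(1, startingMonth=8, weekday=5, variation="nearest")
assert lom_aug_sat.onOffset(datetime(2006, 8, 26))   # last Saturday of August 2006
assert nem_aug_sat.onOffset(datetime(2006, 9, 2))    # Saturday nearest the end of August 2006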
class TestFY5253LastOfMonth(Base):
def test_onOffset(self):
offset_lom_sat_aug = makeFY5253LastOfMonth(1, startingMonth=8,
weekday=WeekDay.SAT)
offset_lom_sat_sep = makeFY5253LastOfMonth(1, startingMonth=9,
weekday=WeekDay.SAT)
tests = [
# From Wikipedia (see:
# http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar#Last_Saturday_of_the_month_at_fiscal_year_end)
(offset_lom_sat_aug, datetime(2006, 8, 26), True),
(offset_lom_sat_aug, datetime(2007, 8, 25), True),
(offset_lom_sat_aug, datetime(2008, 8, 30), True),
(offset_lom_sat_aug, datetime(2009, 8, 29), True),
(offset_lom_sat_aug, datetime(2010, 8, 28), True),
(offset_lom_sat_aug, datetime(2011, 8, 27), True),
(offset_lom_sat_aug, datetime(2012, 8, 25), True),
(offset_lom_sat_aug, datetime(2013, 8, 31), True),
(offset_lom_sat_aug, datetime(2014, 8, 30), True),
(offset_lom_sat_aug, datetime(2015, 8, 29), True),
(offset_lom_sat_aug, datetime(2016, 8, 27), True),
(offset_lom_sat_aug, datetime(2017, 8, 26), True),
(offset_lom_sat_aug, datetime(2018, 8, 25), True),
(offset_lom_sat_aug, datetime(2019, 8, 31), True),
(offset_lom_sat_aug, datetime(2006, 8, 27), False),
(offset_lom_sat_aug, datetime(2007, 8, 28), False),
(offset_lom_sat_aug, datetime(2008, 8, 31), False),
(offset_lom_sat_aug, datetime(2009, 8, 30), False),
(offset_lom_sat_aug, datetime(2010, 8, 29), False),
(offset_lom_sat_aug, datetime(2011, 8, 28), False),
(offset_lom_sat_aug, datetime(2006, 8, 25), False),
(offset_lom_sat_aug, datetime(2007, 8, 24), False),
(offset_lom_sat_aug, datetime(2008, 8, 29), False),
(offset_lom_sat_aug, datetime(2009, 8, 28), False),
(offset_lom_sat_aug, datetime(2010, 8, 27), False),
(offset_lom_sat_aug, datetime(2011, 8, 26), False),
(offset_lom_sat_aug, datetime(2019, 8, 30), False),
# From GMCR (see for example:
# http://yahoo.brand.edgar-online.com/Default.aspx?
# companyid=3184&formtypeID=7)
(offset_lom_sat_sep, datetime(2010, 9, 25), True),
(offset_lom_sat_sep, datetime(2011, 9, 24), True),
(offset_lom_sat_sep, datetime(2012, 9, 29), True),
]
for offset, dt, expected in tests:
assertOnOffset(offset, dt, expected)
def test_apply(self):
offset_lom_aug_sat = makeFY5253LastOfMonth(startingMonth=8,
weekday=WeekDay.SAT)
offset_lom_aug_sat_1 = makeFY5253LastOfMonth(n=1, startingMonth=8,
weekday=WeekDay.SAT)
date_seq_lom_aug_sat = [datetime(2006, 8, 26), datetime(2007, 8, 25),
datetime(2008, 8, 30), datetime(2009, 8, 29),
datetime(2010, 8, 28), datetime(2011, 8, 27),
datetime(2012, 8, 25), datetime(2013, 8, 31),
datetime(2014, 8, 30), datetime(2015, 8, 29),
datetime(2016, 8, 27)]
tests = [
(offset_lom_aug_sat, date_seq_lom_aug_sat),
(offset_lom_aug_sat_1, date_seq_lom_aug_sat),
(offset_lom_aug_sat, [
datetime(2006, 8, 25)] + date_seq_lom_aug_sat),
(offset_lom_aug_sat_1, [
datetime(2006, 8, 27)] + date_seq_lom_aug_sat[1:]),
(makeFY5253LastOfMonth(n=-1, startingMonth=8,
weekday=WeekDay.SAT),
list(reversed(date_seq_lom_aug_sat))),
]
for test in tests:
offset, data = test
current = data[0]
for datum in data[1:]:
current = current + offset
assert current == datum
class TestFY5253NearestEndMonth(Base):
def test_get_target_month_end(self):
assert (makeFY5253NearestEndMonth(
startingMonth=8, weekday=WeekDay.SAT).get_target_month_end(
datetime(2013, 1, 1)) == datetime(2013, 8, 31))
assert (makeFY5253NearestEndMonth(
startingMonth=12, weekday=WeekDay.SAT).get_target_month_end(
datetime(2013, 1, 1)) == datetime(2013, 12, 31))
assert (makeFY5253NearestEndMonth(
startingMonth=2, weekday=WeekDay.SAT).get_target_month_end(
datetime(2013, 1, 1)) == datetime(2013, 2, 28))
def test_get_year_end(self):
assert (makeFY5253NearestEndMonth(
startingMonth=8, weekday=WeekDay.SAT).get_year_end(
datetime(2013, 1, 1)) == datetime(2013, 8, 31))
assert (makeFY5253NearestEndMonth(
startingMonth=8, weekday=WeekDay.SUN).get_year_end(
datetime(2013, 1, 1)) == datetime(2013, 9, 1))
assert (makeFY5253NearestEndMonth(
startingMonth=8, weekday=WeekDay.FRI).get_year_end(
datetime(2013, 1, 1)) == datetime(2013, 8, 30))
offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
variation="nearest")
assert (offset_n.get_year_end(datetime(2012, 1, 1)) ==
datetime(2013, 1, 1))
assert (offset_n.get_year_end(datetime(2012, 1, 10)) ==
datetime(2013, 1, 1))
assert (offset_n.get_year_end(datetime(2013, 1, 1)) ==
datetime(2013, 12, 31))
assert (offset_n.get_year_end(datetime(2013, 1, 2)) ==
datetime(2013, 12, 31))
assert (offset_n.get_year_end(datetime(2013, 1, 3)) ==
datetime(2013, 12, 31))
assert (offset_n.get_year_end(datetime(2013, 1, 10)) ==
datetime(2013, 12, 31))
JNJ = FY5253(n=1, startingMonth=12, weekday=6, variation="nearest")
assert (JNJ.get_year_end(datetime(2006, 1, 1)) ==
datetime(2006, 12, 31))
def test_onOffset(self):
offset_lom_aug_sat = makeFY5253NearestEndMonth(1, startingMonth=8,
weekday=WeekDay.SAT)
offset_lom_aug_thu = makeFY5253NearestEndMonth(1, startingMonth=8,
weekday=WeekDay.THU)
offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
variation="nearest")
tests = [
# From Wikipedia (see:
# http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
# #Saturday_nearest_the_end_of_month)
# 2006-09-02 2006 September 2
# 2007-09-01 2007 September 1
# 2008-08-30 2008 August 30 (leap year)
# 2009-08-29 2009 August 29
# 2010-08-28 2010 August 28
# 2011-09-03 2011 September 3
# 2012-09-01 2012 September 1 (leap year)
# 2013-08-31 2013 August 31
# 2014-08-30 2014 August 30
# 2015-08-29 2015 August 29
# 2016-09-03 2016 September 3 (leap year)
# 2017-09-02 2017 September 2
# 2018-09-01 2018 September 1
# 2019-08-31 2019 August 31
(offset_lom_aug_sat, datetime(2006, 9, 2), True),
(offset_lom_aug_sat, datetime(2007, 9, 1), True),
(offset_lom_aug_sat, datetime(2008, 8, 30), True),
(offset_lom_aug_sat, datetime(2009, 8, 29), True),
(offset_lom_aug_sat, datetime(2010, 8, 28), True),
(offset_lom_aug_sat, datetime(2011, 9, 3), True),
(offset_lom_aug_sat, datetime(2016, 9, 3), True),
(offset_lom_aug_sat, datetime(2017, 9, 2), True),
(offset_lom_aug_sat, datetime(2018, 9, 1), True),
(offset_lom_aug_sat, datetime(2019, 8, 31), True),
(offset_lom_aug_sat, datetime(2006, 8, 27), False),
(offset_lom_aug_sat, datetime(2007, 8, 28), False),
(offset_lom_aug_sat, datetime(2008, 8, 31), False),
(offset_lom_aug_sat, datetime(2009, 8, 30), False),
(offset_lom_aug_sat, datetime(2010, 8, 29), False),
(offset_lom_aug_sat, datetime(2011, 8, 28), False),
(offset_lom_aug_sat, datetime(2006, 8, 25), False),
(offset_lom_aug_sat, datetime(2007, 8, 24), False),
(offset_lom_aug_sat, datetime(2008, 8, 29), False),
(offset_lom_aug_sat, datetime(2009, 8, 28), False),
(offset_lom_aug_sat, datetime(2010, 8, 27), False),
(offset_lom_aug_sat, datetime(2011, 8, 26), False),
(offset_lom_aug_sat, datetime(2019, 8, 30), False),
# From Micron, see:
# http://google.brand.edgar-online.com/?sym=MU&formtypeID=7
(offset_lom_aug_thu, datetime(2012, 8, 30), True),
(offset_lom_aug_thu, datetime(2011, 9, 1), True),
(offset_n, datetime(2012, 12, 31), False),
(offset_n, datetime(2013, 1, 1), True),
(offset_n, datetime(2013, 1, 2), False),
]
for offset, dt, expected in tests:
assertOnOffset(offset, dt, expected)
def test_apply(self):
date_seq_nem_8_sat = [datetime(2006, 9, 2), datetime(2007, 9, 1),
datetime(2008, 8, 30), datetime(2009, 8, 29),
datetime(2010, 8, 28), datetime(2011, 9, 3)]
JNJ = [datetime(2005, 1, 2), datetime(2006, 1, 1),
datetime(2006, 12, 31), datetime(2007, 12, 30),
datetime(2008, 12, 28), datetime(2010, 1, 3),
datetime(2011, 1, 2), datetime(2012, 1, 1),
datetime(2012, 12, 30)]
DEC_SAT = FY5253(n=-1, startingMonth=12, weekday=5,
variation="nearest")
tests = [
(makeFY5253NearestEndMonth(startingMonth=8,
weekday=WeekDay.SAT),
date_seq_nem_8_sat),
(makeFY5253NearestEndMonth(n=1, startingMonth=8,
weekday=WeekDay.SAT),
date_seq_nem_8_sat),
(makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT),
[datetime(2006, 9, 1)] + date_seq_nem_8_sat),
(makeFY5253NearestEndMonth(n=1, startingMonth=8,
weekday=WeekDay.SAT),
[datetime(2006, 9, 3)] + date_seq_nem_8_sat[1:]),
(makeFY5253NearestEndMonth(n=-1, startingMonth=8,
weekday=WeekDay.SAT),
list(reversed(date_seq_nem_8_sat))),
(makeFY5253NearestEndMonth(n=1, startingMonth=12,
weekday=WeekDay.SUN), JNJ),
(makeFY5253NearestEndMonth(n=-1, startingMonth=12,
weekday=WeekDay.SUN),
list(reversed(JNJ))),
(makeFY5253NearestEndMonth(n=1, startingMonth=12,
weekday=WeekDay.SUN),
[datetime(2005, 1, 2), datetime(2006, 1, 1)]),
(makeFY5253NearestEndMonth(n=1, startingMonth=12,
weekday=WeekDay.SUN),
[datetime(2006, 1, 2), datetime(2006, 12, 31)]),
(DEC_SAT, [datetime(2013, 1, 15), datetime(2012, 12, 29)])
]
for test in tests:
offset, data = test
current = data[0]
for datum in data[1:]:
current = current + offset
assert current == datum
class TestFY5253LastOfMonthQuarter(Base):
def test_isAnchored(self):
assert makeFY5253LastOfMonthQuarter(
startingMonth=1, weekday=WeekDay.SAT,
qtr_with_extra_week=4).isAnchored()
assert makeFY5253LastOfMonthQuarter(
weekday=WeekDay.SAT, startingMonth=3,
qtr_with_extra_week=4).isAnchored()
assert not makeFY5253LastOfMonthQuarter(
2, startingMonth=1, weekday=WeekDay.SAT,
qtr_with_extra_week=4).isAnchored()
def test_equality(self):
assert (makeFY5253LastOfMonthQuarter(
startingMonth=1, weekday=WeekDay.SAT,
qtr_with_extra_week=4) == makeFY5253LastOfMonthQuarter(
startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4))
assert (makeFY5253LastOfMonthQuarter(
startingMonth=1, weekday=WeekDay.SAT,
qtr_with_extra_week=4) != makeFY5253LastOfMonthQuarter(
startingMonth=1, weekday=WeekDay.SUN, qtr_with_extra_week=4))
assert (makeFY5253LastOfMonthQuarter(
startingMonth=1, weekday=WeekDay.SAT,
qtr_with_extra_week=4) != makeFY5253LastOfMonthQuarter(
startingMonth=2, weekday=WeekDay.SAT, qtr_with_extra_week=4))
def test_offset(self):
offset = makeFY5253LastOfMonthQuarter(1, startingMonth=9,
weekday=WeekDay.SAT,
qtr_with_extra_week=4)
offset2 = makeFY5253LastOfMonthQuarter(2, startingMonth=9,
weekday=WeekDay.SAT,
qtr_with_extra_week=4)
offset4 = makeFY5253LastOfMonthQuarter(4, startingMonth=9,
weekday=WeekDay.SAT,
qtr_with_extra_week=4)
offset_neg1 = makeFY5253LastOfMonthQuarter(-1, startingMonth=9,
weekday=WeekDay.SAT,
qtr_with_extra_week=4)
offset_neg2 = makeFY5253LastOfMonthQuarter(-2, startingMonth=9,
weekday=WeekDay.SAT,
qtr_with_extra_week=4)
GMCR = [datetime(2010, 3, 27), datetime(2010, 6, 26),
datetime(2010, 9, 25), datetime(2010, 12, 25),
datetime(2011, 3, 26), datetime(2011, 6, 25),
datetime(2011, 9, 24), datetime(2011, 12, 24),
datetime(2012, 3, 24), datetime(2012, 6, 23),
datetime(2012, 9, 29), datetime(2012, 12, 29),
datetime(2013, 3, 30), datetime(2013, 6, 29)]
assertEq(offset, base=GMCR[0], expected=GMCR[1])
assertEq(offset, base=GMCR[0] + relativedelta(days=-1),
expected=GMCR[0])
assertEq(offset, base=GMCR[1], expected=GMCR[2])
assertEq(offset2, base=GMCR[0], expected=GMCR[2])
assertEq(offset4, base=GMCR[0], expected=GMCR[4])
assertEq(offset_neg1, base=GMCR[-1], expected=GMCR[-2])
assertEq(offset_neg1, base=GMCR[-1] + relativedelta(days=+1),
expected=GMCR[-1])
assertEq(offset_neg2, base=GMCR[-1], expected=GMCR[-3])
date = GMCR[0] + relativedelta(days=-1)
for expected in GMCR:
assertEq(offset, date, expected)
date = date + offset
date = GMCR[-1] + relativedelta(days=+1)
for expected in reversed(GMCR):
assertEq(offset_neg1, date, expected)
date = date + offset_neg1
def test_onOffset(self):
lomq_aug_sat_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=8,
weekday=WeekDay.SAT,
qtr_with_extra_week=4)
lomq_sep_sat_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=9,
weekday=WeekDay.SAT,
qtr_with_extra_week=4)
tests = [
# From Wikipedia
(lomq_aug_sat_4, datetime(2006, 8, 26), True),
(lomq_aug_sat_4, datetime(2007, 8, 25), True),
(lomq_aug_sat_4, datetime(2008, 8, 30), True),
(lomq_aug_sat_4, datetime(2009, 8, 29), True),
(lomq_aug_sat_4, datetime(2010, 8, 28), True),
(lomq_aug_sat_4, datetime(2011, 8, 27), True),
(lomq_aug_sat_4, datetime(2019, 8, 31), True),
(lomq_aug_sat_4, datetime(2006, 8, 27), False),
(lomq_aug_sat_4, datetime(2007, 8, 28), False),
(lomq_aug_sat_4, datetime(2008, 8, 31), False),
(lomq_aug_sat_4, datetime(2009, 8, 30), False),
(lomq_aug_sat_4, datetime(2010, 8, 29), False),
(lomq_aug_sat_4, datetime(2011, 8, 28), False),
(lomq_aug_sat_4, datetime(2006, 8, 25), False),
(lomq_aug_sat_4, datetime(2007, 8, 24), False),
(lomq_aug_sat_4, datetime(2008, 8, 29), False),
(lomq_aug_sat_4, datetime(2009, 8, 28), False),
(lomq_aug_sat_4, datetime(2010, 8, 27), False),
(lomq_aug_sat_4, datetime(2011, 8, 26), False),
(lomq_aug_sat_4, datetime(2019, 8, 30), False),
# From GMCR
(lomq_sep_sat_4, datetime(2010, 9, 25), True),
(lomq_sep_sat_4, datetime(2011, 9, 24), True),
(lomq_sep_sat_4, datetime(2012, 9, 29), True),
(lomq_sep_sat_4, datetime(2013, 6, 29), True),
(lomq_sep_sat_4, datetime(2012, 6, 23), True),
(lomq_sep_sat_4, datetime(2012, 6, 30), False),
(lomq_sep_sat_4, datetime(2013, 3, 30), True),
(lomq_sep_sat_4, datetime(2012, 3, 24), True),
(lomq_sep_sat_4, datetime(2012, 12, 29), True),
(lomq_sep_sat_4, datetime(2011, 12, 24), True),
# INTC (extra week in Q1)
# See: http://www.intc.com/releasedetail.cfm?ReleaseID=542844
(makeFY5253LastOfMonthQuarter(1, startingMonth=12,
weekday=WeekDay.SAT,
qtr_with_extra_week=1),
datetime(2011, 4, 2), True),
# see: http://google.brand.edgar-online.com/?sym=INTC&formtypeID=7
(makeFY5253LastOfMonthQuarter(1, startingMonth=12,
weekday=WeekDay.SAT,
qtr_with_extra_week=1),
datetime(2012, 12, 29), True),
(makeFY5253LastOfMonthQuarter(1, startingMonth=12,
weekday=WeekDay.SAT,
qtr_with_extra_week=1),
datetime(2011, 12, 31), True),
(makeFY5253LastOfMonthQuarter(1, startingMonth=12,
weekday=WeekDay.SAT,
qtr_with_extra_week=1),
datetime(2010, 12, 25), True),
]
for offset, dt, expected in tests:
assertOnOffset(offset, dt, expected)
def test_year_has_extra_week(self):
# End of long Q1
assert makeFY5253LastOfMonthQuarter(
1, startingMonth=12, weekday=WeekDay.SAT,
qtr_with_extra_week=1).year_has_extra_week(datetime(2011, 4, 2))
# Start of long Q1
assert makeFY5253LastOfMonthQuarter(
1, startingMonth=12, weekday=WeekDay.SAT,
qtr_with_extra_week=1).year_has_extra_week(datetime(2010, 12, 26))
# End of year before year with long Q1
assert not makeFY5253LastOfMonthQuarter(
1, startingMonth=12, weekday=WeekDay.SAT,
qtr_with_extra_week=1).year_has_extra_week(datetime(2010, 12, 25))
for year in [x
for x in range(1994, 2011 + 1)
if x not in [2011, 2005, 2000, 1994]]:
assert not makeFY5253LastOfMonthQuarter(
1, startingMonth=12, weekday=WeekDay.SAT,
qtr_with_extra_week=1).year_has_extra_week(
datetime(year, 4, 2))
# Other long years
assert makeFY5253LastOfMonthQuarter(
1, startingMonth=12, weekday=WeekDay.SAT,
qtr_with_extra_week=1).year_has_extra_week(datetime(2005, 4, 2))
assert makeFY5253LastOfMonthQuarter(
1, startingMonth=12, weekday=WeekDay.SAT,
qtr_with_extra_week=1).year_has_extra_week(datetime(2000, 4, 2))
assert makeFY5253LastOfMonthQuarter(
1, startingMonth=12, weekday=WeekDay.SAT,
qtr_with_extra_week=1).year_has_extra_week(datetime(1994, 4, 2))
def test_get_weeks(self):
sat_dec_1 = makeFY5253LastOfMonthQuarter(1, startingMonth=12,
weekday=WeekDay.SAT,
qtr_with_extra_week=1)
sat_dec_4 = makeFY5253LastOfMonthQuarter(1, startingMonth=12,
weekday=WeekDay.SAT,
qtr_with_extra_week=4)
assert sat_dec_1.get_weeks(datetime(2011, 4, 2)) == [14, 13, 13, 13]
assert sat_dec_4.get_weeks(datetime(2011, 4, 2)) == [13, 13, 13, 14]
assert sat_dec_1.get_weeks(datetime(2010, 12, 25)) == [13, 13, 13, 13]
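# --- Illustrative sketch (not part of the test suite above) ----------------
# FY5253Quarter carves a 52-53 week fiscal year into quarters; in a 53-week
# year the quarter named by qtr_with_extra_week gets 14 weeks instead of 13,
# which is what test_year_has_extra_week and test_get_weeks above verify.
# Values mirror those tests; weekday=5 is Saturday and the import path is
# assumed.
from datetime import datetime
from pandas.tseries.offsets import FY5253Quarter

sat_dec_q1 = FY5253Quarter(1, startingMonth=12, weekday=5, variation="last",
                           qtr_with_extra_week=1)
assert sat_dec_q1.year_has_extra_week(datetime(2011, 4, 2))            # 53-week fiscal year
assert sat_dec_q1.get_weeks(datetime(2011, 4, 2)) == [14, 13, 13, 13]  # extra week in Q1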
class TestFY5253NearestEndMonthQuarter(Base):
def test_onOffset(self):
offset_nem_sat_aug_4 = makeFY5253NearestEndMonthQuarter(
1, startingMonth=8, weekday=WeekDay.SAT,
qtr_with_extra_week=4)
offset_nem_thu_aug_4 = makeFY5253NearestEndMonthQuarter(
1, startingMonth=8, weekday=WeekDay.THU,
qtr_with_extra_week=4)
offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12,
variation="nearest", qtr_with_extra_week=4)
tests = [
# From Wikipedia
(offset_nem_sat_aug_4, datetime(2006, 9, 2), True),
(offset_nem_sat_aug_4, datetime(2007, 9, 1), True),
(offset_nem_sat_aug_4, datetime(2008, 8, 30), True),
(offset_nem_sat_aug_4, datetime(2009, 8, 29), True),
(offset_nem_sat_aug_4, datetime(2010, 8, 28), True),
(offset_nem_sat_aug_4, datetime(2011, 9, 3), True),
(offset_nem_sat_aug_4, datetime(2016, 9, 3), True),
(offset_nem_sat_aug_4, datetime(2017, 9, 2), True),
(offset_nem_sat_aug_4, datetime(2018, 9, 1), True),
(offset_nem_sat_aug_4, datetime(2019, 8, 31), True),
(offset_nem_sat_aug_4, datetime(2006, 8, 27), False),
(offset_nem_sat_aug_4, datetime(2007, 8, 28), False),
(offset_nem_sat_aug_4, datetime(2008, 8, 31), False),
(offset_nem_sat_aug_4, datetime(2009, 8, 30), False),
(offset_nem_sat_aug_4, datetime(2010, 8, 29), False),
(offset_nem_sat_aug_4, datetime(2011, 8, 28), False),
(offset_nem_sat_aug_4, datetime(2006, 8, 25), False),
(offset_nem_sat_aug_4, datetime(2007, 8, 24), False),
(offset_nem_sat_aug_4, datetime(2008, 8, 29), False),
(offset_nem_sat_aug_4, datetime(2009, 8, 28), False),
(offset_nem_sat_aug_4, datetime(2010, 8, 27), False),
(offset_nem_sat_aug_4, datetime(2011, 8, 26), False),
(offset_nem_sat_aug_4, datetime(2019, 8, 30), False),
# From Micron, see:
# http://google.brand.edgar-online.com/?sym=MU&formtypeID=7
(offset_nem_thu_aug_4, datetime(2012, 8, 30), True),
(offset_nem_thu_aug_4, datetime(2011, 9, 1), True),
# See: http://google.brand.edgar-online.com/?sym=MU&formtypeID=13
(offset_nem_thu_aug_4, datetime(2013, 5, 30), True),
(offset_nem_thu_aug_4, datetime(2013, 2, 28), True),
(offset_nem_thu_aug_4, datetime(2012, 11, 29), True),
(offset_nem_thu_aug_4, datetime(2012, 5, 31), True),
(offset_nem_thu_aug_4, datetime(2007, 3, 1), True),
(offset_nem_thu_aug_4, datetime(1994, 3, 3), True),
(offset_n, datetime(2012, 12, 31), False),
(offset_n, datetime(2013, 1, 1), True),
(offset_n, datetime(2013, 1, 2), False)
]
for offset, dt, expected in tests:
assertOnOffset(offset, dt, expected)
def test_offset(self):
offset = makeFY5253NearestEndMonthQuarter(1, startingMonth=8,
weekday=WeekDay.THU,
qtr_with_extra_week=4)
MU = [datetime(2012, 5, 31), datetime(2012, 8, 30), datetime(2012, 11,
29),
datetime(2013, 2, 28), datetime(2013, 5, 30)]
date = MU[0] + relativedelta(days=-1)
for expected in MU:
assertEq(offset, date, expected)
date = date + offset
assertEq(offset, datetime(2012, 5, 31), datetime(2012, 8, 30))
assertEq(offset, datetime(2012, 5, 30), datetime(2012, 5, 31))
offset2 = FY5253Quarter(weekday=5, startingMonth=12, variation="last",
qtr_with_extra_week=4)
assertEq(offset2, datetime(2013, 1, 15), datetime(2013, 3, 30))
class TestQuarterBegin(Base):
def test_repr(self):
assert (repr(QuarterBegin()) ==
"<QuarterBegin: startingMonth=3>")
assert (repr(QuarterBegin(startingMonth=3)) ==
"<QuarterBegin: startingMonth=3>")
assert (repr(QuarterBegin(startingMonth=1)) ==
"<QuarterBegin: startingMonth=1>")
def test_isAnchored(self):
assert QuarterBegin(startingMonth=1).isAnchored()
assert QuarterBegin().isAnchored()
assert not QuarterBegin(2, startingMonth=1).isAnchored()
def test_offset(self):
tests = []
tests.append((QuarterBegin(startingMonth=1),
{datetime(2007, 12, 1): datetime(2008, 1, 1),
datetime(2008, 1, 1): datetime(2008, 4, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2008, 3, 31): datetime(2008, 4, 1),
datetime(2008, 4, 15): datetime(2008, 7, 1),
datetime(2008, 4, 1): datetime(2008, 7, 1), }))
tests.append((QuarterBegin(startingMonth=2),
{datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 2, 29): datetime(2008, 5, 1),
datetime(2008, 3, 15): datetime(2008, 5, 1),
datetime(2008, 3, 31): datetime(2008, 5, 1),
datetime(2008, 4, 15): datetime(2008, 5, 1),
datetime(2008, 4, 30): datetime(2008, 5, 1), }))
tests.append((QuarterBegin(startingMonth=1, n=0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 12, 1): datetime(2009, 1, 1),
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 4, 1),
datetime(2008, 2, 29): datetime(2008, 4, 1),
datetime(2008, 3, 15): datetime(2008, 4, 1),
datetime(2008, 3, 31): datetime(2008, 4, 1),
datetime(2008, 4, 15): datetime(2008, 7, 1),
datetime(2008, 4, 30): datetime(2008, 7, 1), }))
tests.append((QuarterBegin(startingMonth=1, n=-1),
{datetime(2008, 1, 1): datetime(2007, 10, 1),
datetime(2008, 1, 31): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 1, 1),
datetime(2008, 2, 29): datetime(2008, 1, 1),
datetime(2008, 3, 15): datetime(2008, 1, 1),
datetime(2008, 3, 31): datetime(2008, 1, 1),
datetime(2008, 4, 15): datetime(2008, 4, 1),
datetime(2008, 4, 30): datetime(2008, 4, 1),
datetime(2008, 7, 1): datetime(2008, 4, 1)}))
tests.append((QuarterBegin(startingMonth=1, n=2),
{datetime(2008, 1, 1): datetime(2008, 7, 1),
datetime(2008, 2, 15): datetime(2008, 7, 1),
datetime(2008, 2, 29): datetime(2008, 7, 1),
datetime(2008, 3, 15): datetime(2008, 7, 1),
datetime(2008, 3, 31): datetime(2008, 7, 1),
datetime(2008, 4, 15): datetime(2008, 10, 1),
datetime(2008, 4, 1): datetime(2008, 10, 1), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
# corner
offset = QuarterBegin(n=-1, startingMonth=1)
assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 1)
class TestQuarterEnd(Base):
_offset = QuarterEnd
def test_repr(self):
assert (repr(QuarterEnd()) ==
"<QuarterEnd: startingMonth=3>")
assert (repr(QuarterEnd(startingMonth=3)) ==
"<QuarterEnd: startingMonth=3>")
assert (repr(QuarterEnd(startingMonth=1)) ==
"<QuarterEnd: startingMonth=1>")
def test_isAnchored(self):
assert QuarterEnd(startingMonth=1).isAnchored()
assert QuarterEnd().isAnchored()
assert not QuarterEnd(2, startingMonth=1).isAnchored()
def test_offset(self):
tests = []
tests.append((QuarterEnd(startingMonth=1),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 4, 30),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 7, 31), }))
tests.append((QuarterEnd(startingMonth=2),
{datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2008, 2, 15): datetime(2008, 2, 29),
datetime(2008, 2, 29): datetime(2008, 5, 31),
datetime(2008, 3, 15): datetime(2008, 5, 31),
datetime(2008, 3, 31): datetime(2008, 5, 31),
datetime(2008, 4, 15): datetime(2008, 5, 31),
datetime(2008, 4, 30): datetime(2008, 5, 31), }))
tests.append((QuarterEnd(startingMonth=1, n=0),
{datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2008, 2, 15): datetime(2008, 4, 30),
datetime(2008, 2, 29): datetime(2008, 4, 30),
datetime(2008, 3, 15): datetime(2008, 4, 30),
datetime(2008, 3, 31): datetime(2008, 4, 30),
datetime(2008, 4, 15): datetime(2008, 4, 30),
datetime(2008, 4, 30): datetime(2008, 4, 30), }))
tests.append((QuarterEnd(startingMonth=1, n=-1),
{datetime(2008, 1, 1): datetime(2007, 10, 31),
datetime(2008, 1, 31): datetime(2007, 10, 31),
datetime(2008, 2, 15): datetime(2008, 1, 31),
datetime(2008, 2, 29): datetime(2008, 1, 31),
datetime(2008, 3, 15): datetime(2008, 1, 31),
datetime(2008, 3, 31): datetime(2008, 1, 31),
datetime(2008, 4, 15): datetime(2008, 1, 31),
datetime(2008, 4, 30): datetime(2008, 1, 31),
datetime(2008, 7, 1): datetime(2008, 4, 30)}))
tests.append((QuarterEnd(startingMonth=1, n=2),
{datetime(2008, 1, 31): datetime(2008, 7, 31),
datetime(2008, 2, 15): datetime(2008, 7, 31),
datetime(2008, 2, 29): datetime(2008, 7, 31),
datetime(2008, 3, 15): datetime(2008, 7, 31),
datetime(2008, 3, 31): datetime(2008, 7, 31),
datetime(2008, 4, 15): datetime(2008, 7, 31),
datetime(2008, 4, 30): datetime(2008, 10, 31), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
# corner
offset = QuarterEnd(n=-1, startingMonth=1)
assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 31)
def test_onOffset(self):
tests = [(QuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),
(QuarterEnd(1, startingMonth=1), datetime(2007, 12, 31),
False),
(QuarterEnd(1, startingMonth=1), datetime(2008, 2, 29),
False),
(QuarterEnd(1, startingMonth=1), datetime(2007, 3, 30),
False),
(QuarterEnd(1, startingMonth=1), datetime(2007, 3, 31),
False),
(QuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),
(QuarterEnd(1, startingMonth=1), datetime(2008, 5, 30),
False),
(QuarterEnd(1, startingMonth=1), datetime(2008, 5, 31),
False),
(QuarterEnd(1, startingMonth=1), datetime(2007, 6, 29),
False),
(QuarterEnd(1, startingMonth=1), datetime(2007, 6, 30),
False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 1, 31),
False),
(QuarterEnd(1, startingMonth=2), datetime(2007, 12, 31),
False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),
(QuarterEnd(1, startingMonth=2), datetime(2007, 3, 30),
False),
(QuarterEnd(1, startingMonth=2), datetime(2007, 3, 31),
False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 4, 30),
False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 5, 30),
False),
(QuarterEnd(1, startingMonth=2), datetime(2008, 5, 31), True),
(QuarterEnd(1, startingMonth=2), datetime(2007, 6, 29),
False),
(QuarterEnd(1, startingMonth=2), datetime(2007, 6, 30),
False),
(QuarterEnd(1, startingMonth=3), datetime(2008, 1, 31),
False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 12, 31),
True),
(QuarterEnd(1, startingMonth=3), datetime(2008, 2, 29),
False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 30),
False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), True),
(QuarterEnd(1, startingMonth=3), datetime(2008, 4, 30),
False),
(QuarterEnd(1, startingMonth=3), datetime(2008, 5, 30),
False),
(QuarterEnd(1, startingMonth=3), datetime(2008, 5, 31),
False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 6, 29),
False),
(QuarterEnd(1, startingMonth=3), datetime(2007, 6, 30),
True), ]
for offset, dt, expected in tests:
assertOnOffset(offset, dt, expected)
class TestBYearBegin(Base):
_offset = BYearBegin
def test_misspecified(self):
pytest.raises(ValueError, BYearBegin, month=13)
pytest.raises(ValueError, BYearEnd, month=13)
def test_offset(self):
tests = []
tests.append((BYearBegin(),
{datetime(2008, 1, 1): datetime(2009, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2011, 1, 1): datetime(2011, 1, 3),
datetime(2011, 1, 3): datetime(2012, 1, 2),
datetime(2005, 12, 30): datetime(2006, 1, 2),
datetime(2005, 12, 31): datetime(2006, 1, 2)}))
tests.append((BYearBegin(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 2),
datetime(2005, 12, 31): datetime(2006, 1, 2), }))
tests.append((BYearBegin(-1),
{datetime(2007, 1, 1): datetime(2006, 1, 2),
datetime(2009, 1, 4): datetime(2009, 1, 1),
datetime(2009, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2008, 1, 1),
datetime(2008, 12, 31): datetime(2008, 1, 1),
datetime(2006, 12, 29): datetime(2006, 1, 2),
datetime(2006, 12, 30): datetime(2006, 1, 2),
datetime(2006, 1, 1): datetime(2005, 1, 3), }))
tests.append((BYearBegin(-2),
{datetime(2007, 1, 1): datetime(2005, 1, 3),
datetime(2007, 6, 30): datetime(2006, 1, 2),
datetime(2008, 12, 31): datetime(2007, 1, 1), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
class TestYearBegin(Base):
_offset = YearBegin
def test_misspecified(self):
pytest.raises(ValueError, YearBegin, month=13)
def test_offset(self):
tests = []
tests.append((YearBegin(),
{datetime(2008, 1, 1): datetime(2009, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 1),
datetime(2005, 12, 31): datetime(2006, 1, 1), }))
tests.append((YearBegin(0),
{datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 1),
datetime(2005, 12, 31): datetime(2006, 1, 1), }))
tests.append((YearBegin(3),
{datetime(2008, 1, 1): datetime(2011, 1, 1),
datetime(2008, 6, 30): datetime(2011, 1, 1),
datetime(2008, 12, 31): datetime(2011, 1, 1),
datetime(2005, 12, 30): datetime(2008, 1, 1),
datetime(2005, 12, 31): datetime(2008, 1, 1), }))
tests.append((YearBegin(-1),
{datetime(2007, 1, 1): datetime(2006, 1, 1),
datetime(2007, 1, 15): datetime(2007, 1, 1),
datetime(2008, 6, 30): datetime(2008, 1, 1),
datetime(2008, 12, 31): datetime(2008, 1, 1),
datetime(2006, 12, 29): datetime(2006, 1, 1),
datetime(2006, 12, 30): datetime(2006, 1, 1),
datetime(2007, 1, 1): datetime(2006, 1, 1), }))
tests.append((YearBegin(-2),
{datetime(2007, 1, 1): datetime(2005, 1, 1),
datetime(2008, 6, 30): datetime(2007, 1, 1),
datetime(2008, 12, 31): datetime(2007, 1, 1), }))
tests.append((YearBegin(month=4),
{datetime(2007, 4, 1): datetime(2008, 4, 1),
datetime(2007, 4, 15): datetime(2008, 4, 1),
datetime(2007, 3, 1): datetime(2007, 4, 1),
datetime(2007, 12, 15): datetime(2008, 4, 1),
datetime(2012, 1, 31): datetime(2012, 4, 1), }))
tests.append((YearBegin(0, month=4),
{datetime(2007, 4, 1): datetime(2007, 4, 1),
datetime(2007, 3, 1): datetime(2007, 4, 1),
datetime(2007, 12, 15): datetime(2008, 4, 1),
datetime(2012, 1, 31): datetime(2012, 4, 1), }))
tests.append((YearBegin(4, month=4),
{datetime(2007, 4, 1): datetime(2011, 4, 1),
datetime(2007, 4, 15): datetime(2011, 4, 1),
datetime(2007, 3, 1): datetime(2010, 4, 1),
datetime(2007, 12, 15): datetime(2011, 4, 1),
datetime(2012, 1, 31): datetime(2015, 4, 1), }))
tests.append((YearBegin(-1, month=4),
{datetime(2007, 4, 1): datetime(2006, 4, 1),
datetime(2007, 3, 1): datetime(2006, 4, 1),
datetime(2007, 12, 15): datetime(2007, 4, 1),
datetime(2012, 1, 31): datetime(2011, 4, 1), }))
tests.append((YearBegin(-3, month=4),
{datetime(2007, 4, 1): datetime(2004, 4, 1),
datetime(2007, 3, 1): datetime(2004, 4, 1),
datetime(2007, 12, 15): datetime(2005, 4, 1),
datetime(2012, 1, 31): datetime(2009, 4, 1), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_onOffset(self):
tests = [
(YearBegin(), datetime(2007, 1, 3), False),
(YearBegin(), datetime(2008, 1, 1), True),
(YearBegin(), datetime(2006, 12, 31), False),
(YearBegin(), datetime(2006, 1, 2), False),
]
for offset, dt, expected in tests:
assertOnOffset(offset, dt, expected)
class TestBYearEndLagged(Base):
def test_bad_month_fail(self):
pytest.raises(Exception, BYearEnd, month=13)
pytest.raises(Exception, BYearEnd, month=0)
def test_offset(self):
tests = []
tests.append((BYearEnd(month=6),
{datetime(2008, 1, 1): datetime(2008, 6, 30),
datetime(2007, 6, 30): datetime(2008, 6, 30)}, ))
tests.append((BYearEnd(n=-1, month=6),
{datetime(2008, 1, 1): datetime(2007, 6, 29),
datetime(2007, 6, 30): datetime(2007, 6, 29)}, ))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assert base + offset == expected
def test_roll(self):
offset = BYearEnd(month=6)
date = datetime(2009, 11, 30)
assert offset.rollforward(date) == datetime(2010, 6, 30)
assert offset.rollback(date) == datetime(2009, 6, 30)
def test_onOffset(self):
tests = [
(BYearEnd(month=2), datetime(2007, 2, 28), True),
(BYearEnd(month=6), datetime(2007, 6, 30), False),
]
for offset, dt, expected in tests:
assertOnOffset(offset, dt, expected)
class TestBYearEnd(Base):
_offset = BYearEnd
def test_offset(self):
tests = []
tests.append((BYearEnd(),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2009, 12, 31),
datetime(2005, 12, 30): datetime(2006, 12, 29),
datetime(2005, 12, 31): datetime(2006, 12, 29), }))
tests.append((BYearEnd(0),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2008, 12, 31),
datetime(2005, 12, 31): datetime(2006, 12, 29), }))
tests.append((BYearEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 29),
datetime(2008, 6, 30): datetime(2007, 12, 31),
datetime(2008, 12, 31): datetime(2007, 12, 31),
datetime(2006, 12, 29): datetime(2005, 12, 30),
datetime(2006, 12, 30): datetime(2006, 12, 29),
datetime(2007, 1, 1): datetime(2006, 12, 29), }))
tests.append((BYearEnd(-2),
{datetime(2007, 1, 1): datetime(2005, 12, 30),
datetime(2008, 6, 30): datetime(2006, 12, 29),
datetime(2008, 12, 31): datetime(2006, 12, 29), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_onOffset(self):
tests = [
(BYearEnd(), datetime(2007, 12, 31), True),
(BYearEnd(), datetime(2008, 1, 1), False),
(BYearEnd(), datetime(2006, 12, 31), False),
(BYearEnd(), datetime(2006, 12, 29), True),
]
for offset, dt, expected in tests:
assertOnOffset(offset, dt, expected)
class TestYearEnd(Base):
_offset = YearEnd
def test_misspecified(self):
pytest.raises(ValueError, YearEnd, month=13)
def test_offset(self):
tests = []
tests.append((YearEnd(),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2009, 12, 31),
datetime(2005, 12, 30): datetime(2005, 12, 31),
datetime(2005, 12, 31): datetime(2006, 12, 31), }))
tests.append((YearEnd(0),
{datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2008, 12, 31),
datetime(2005, 12, 30): datetime(2005, 12, 31), }))
tests.append((YearEnd(-1),
{datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2007, 12, 31),
datetime(2008, 12, 31): datetime(2007, 12, 31),
datetime(2006, 12, 29): datetime(2005, 12, 31),
datetime(2006, 12, 30): datetime(2005, 12, 31),
datetime(2007, 1, 1): datetime(2006, 12, 31), }))
tests.append((YearEnd(-2),
{datetime(2007, 1, 1): datetime(2005, 12, 31),
datetime(2008, 6, 30): datetime(2006, 12, 31),
datetime(2008, 12, 31): datetime(2006, 12, 31), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_onOffset(self):
tests = [
(YearEnd(), datetime(2007, 12, 31), True),
(YearEnd(), datetime(2008, 1, 1), False),
(YearEnd(), datetime(2006, 12, 31), True),
(YearEnd(), datetime(2006, 12, 29), False),
]
for offset, dt, expected in tests:
assertOnOffset(offset, dt, expected)
class TestYearEndDiffMonth(Base):
def test_offset(self):
tests = []
tests.append((YearEnd(month=3),
{datetime(2008, 1, 1): datetime(2008, 3, 31),
datetime(2008, 2, 15): datetime(2008, 3, 31),
datetime(2008, 3, 31): datetime(2009, 3, 31),
datetime(2008, 3, 30): datetime(2008, 3, 31),
datetime(2005, 3, 31): datetime(2006, 3, 31),
datetime(2006, 7, 30): datetime(2007, 3, 31)}))
tests.append((YearEnd(0, month=3),
{datetime(2008, 1, 1): datetime(2008, 3, 31),
datetime(2008, 2, 28): datetime(2008, 3, 31),
datetime(2008, 3, 31): datetime(2008, 3, 31),
datetime(2005, 3, 30): datetime(2005, 3, 31), }))
tests.append((YearEnd(-1, month=3),
{datetime(2007, 1, 1): datetime(2006, 3, 31),
datetime(2008, 2, 28): datetime(2007, 3, 31),
datetime(2008, 3, 31): datetime(2007, 3, 31),
datetime(2006, 3, 29): datetime(2005, 3, 31),
datetime(2006, 3, 30): datetime(2005, 3, 31),
datetime(2007, 3, 1): datetime(2006, 3, 31), }))
tests.append((YearEnd(-2, month=3),
{datetime(2007, 1, 1): datetime(2005, 3, 31),
datetime(2008, 6, 30): datetime(2007, 3, 31),
datetime(2008, 3, 31): datetime(2006, 3, 31), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assertEq(offset, base, expected)
def test_onOffset(self):
tests = [
(YearEnd(month=3), datetime(2007, 3, 31), True),
(YearEnd(month=3), datetime(2008, 1, 1), False),
(YearEnd(month=3), datetime(2006, 3, 31), True),
(YearEnd(month=3), datetime(2006, 3, 29), False),
]
for offset, dt, expected in tests:
assertOnOffset(offset, dt, expected)
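# Helper: apply ``offset`` to ``base`` as ``offset + base``, ``base + offset``
# and ``offset.apply(base)``, and check that all three results equal ``expected``.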
def assertEq(offset, base, expected):
actual = offset + base
actual_swapped = base + offset
actual_apply = offset.apply(base)
try:
assert actual == expected
assert actual_swapped == expected
assert actual_apply == expected
except AssertionError:
raise AssertionError("\nExpected: %s\nActual: %s\nFor Offset: %s"
"\nAt Date: %s" %
(expected, actual, offset, base))
def test_Easter():
assertEq(Easter(), datetime(2010, 1, 1), datetime(2010, 4, 4))
assertEq(Easter(), datetime(2010, 4, 5), datetime(2011, 4, 24))
assertEq(Easter(2), datetime(2010, 1, 1), datetime(2011, 4, 24))
assertEq(Easter(), datetime(2010, 4, 4), datetime(2011, 4, 24))
assertEq(Easter(2), datetime(2010, 4, 4), datetime(2012, 4, 8))
assertEq(-Easter(), datetime(2011, 1, 1), datetime(2010, 4, 4))
assertEq(-Easter(), datetime(2010, 4, 5), datetime(2010, 4, 4))
assertEq(-Easter(2), datetime(2011, 1, 1), datetime(2009, 4, 12))
assertEq(-Easter(), datetime(2010, 4, 4), datetime(2009, 4, 12))
assertEq(-Easter(2), datetime(2010, 4, 4), datetime(2008, 3, 23))
class TestTicks(object):
ticks = [Hour, Minute, Second, Milli, Micro, Nano]
def test_ticks(self):
offsets = [(Hour, Timedelta(hours=5)),
(Minute, Timedelta(hours=2, minutes=3)),
(Second, Timedelta(hours=2, seconds=3)),
(Milli, Timedelta(hours=2, milliseconds=3)),
(Micro, Timedelta(hours=2, microseconds=3)),
(Nano, Timedelta(hours=2, nanoseconds=3))]
for kls, expected in offsets:
offset = kls(3)
result = offset + Timedelta(hours=2)
assert isinstance(result, Timedelta)
assert result == expected
def test_Hour(self):
assertEq(Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 1))
assertEq(Hour(-1), datetime(2010, 1, 1, 1), datetime(2010, 1, 1))
assertEq(2 * Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 2))
assertEq(-1 * Hour(), datetime(2010, 1, 1, 1), datetime(2010, 1, 1))
assert Hour(3) + Hour(2) == Hour(5)
assert Hour(3) - Hour(2) == Hour()
assert Hour(4) != Hour(1)
def test_Minute(self):
assertEq(Minute(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 1))
assertEq(Minute(-1), datetime(2010, 1, 1, 0, 1), datetime(2010, 1, 1))
assertEq(2 * Minute(), datetime(2010, 1, 1),
datetime(2010, 1, 1, 0, 2))
assertEq(-1 * Minute(), datetime(2010, 1, 1, 0, 1),
datetime(2010, 1, 1))
assert Minute(3) + Minute(2) == Minute(5)
assert Minute(3) - Minute(2) == Minute()
assert Minute(5) != Minute()
def test_Second(self):
assertEq(Second(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 1))
assertEq(Second(-1), datetime(2010, 1, 1,
0, 0, 1), datetime(2010, 1, 1))
assertEq(2 * Second(), datetime(2010, 1, 1),
datetime(2010, 1, 1, 0, 0, 2))
assertEq(-1 * Second(), datetime(2010, 1, 1, 0, 0, 1),
datetime(2010, 1, 1))
assert Second(3) + Second(2) == Second(5)
assert Second(3) - Second(2) == Second()
def test_Millisecond(self):
assertEq(Milli(), datetime(2010, 1, 1),
datetime(2010, 1, 1, 0, 0, 0, 1000))
assertEq(Milli(-1), datetime(2010, 1, 1, 0,
0, 0, 1000), datetime(2010, 1, 1))
assertEq(Milli(2), datetime(2010, 1, 1),
datetime(2010, 1, 1, 0, 0, 0, 2000))
assertEq(2 * Milli(), datetime(2010, 1, 1),
datetime(2010, 1, 1, 0, 0, 0, 2000))
assertEq(-1 * Milli(), datetime(2010, 1, 1, 0, 0, 0, 1000),
datetime(2010, 1, 1))
assert Milli(3) + Milli(2) == Milli(5)
assert Milli(3) - Milli(2) == Milli()
def test_MillisecondTimestampArithmetic(self):
assertEq(Milli(), Timestamp('2010-01-01'),
Timestamp('2010-01-01 00:00:00.001'))
assertEq(Milli(-1), Timestamp('2010-01-01 00:00:00.001'),
Timestamp('2010-01-01'))
def test_Microsecond(self):
assertEq(Micro(), datetime(2010, 1, 1),
datetime(2010, 1, 1, 0, 0, 0, 1))
assertEq(Micro(-1), datetime(2010, 1, 1,
0, 0, 0, 1), datetime(2010, 1, 1))
assertEq(2 * Micro(), datetime(2010, 1, 1),
datetime(2010, 1, 1, 0, 0, 0, 2))
assertEq(-1 * Micro(), datetime(2010, 1, 1, 0, 0, 0, 1),
datetime(2010, 1, 1))
assert Micro(3) + Micro(2) == Micro(5)
assert Micro(3) - Micro(2) == Micro()
def test_NanosecondGeneric(self):
timestamp = Timestamp(datetime(2010, 1, 1))
assert timestamp.nanosecond == 0
result = timestamp + Nano(10)
assert result.nanosecond == 10
reverse_result = Nano(10) + timestamp
assert reverse_result.nanosecond == 10
def test_Nanosecond(self):
timestamp = Timestamp(datetime(2010, 1, 1))
assertEq(Nano(), timestamp, timestamp + np.timedelta64(1, 'ns'))
assertEq(Nano(-1), timestamp + np.timedelta64(1, 'ns'), timestamp)
assertEq(2 * Nano(), timestamp, timestamp + np.timedelta64(2, 'ns'))
assertEq(-1 * Nano(), timestamp + np.timedelta64(1, 'ns'), timestamp)
assert Nano(3) + Nano(2) == Nano(5)
assert Nano(3) - Nano(2) == Nano()
# GH9284
assert Nano(1) + Nano(10) == Nano(11)
assert Nano(5) + Micro(1) == Nano(1005)
assert Micro(5) + Nano(1) == Nano(5001)
def test_tick_zero(self):
for t1 in self.ticks:
for t2 in self.ticks:
assert t1(0) == t2(0)
assert t1(0) + t2(0) == t1(0)
if t1 is not Nano:
assert t1(2) + t2(0) == t1(2)
if t1 is Nano:
assert t1(2) + Nano(0) == t1(2)
def test_tick_equalities(self):
for t in self.ticks:
assert t(3) == t(3)
assert t() == t(1)
# not equals
assert t(3) != t(2)
assert t(3) != t(-3)
def test_tick_operators(self):
for t in self.ticks:
assert t(3) + t(2) == t(5)
assert t(3) - t(2) == t(1)
assert t(800) + t(300) == t(1100)
assert t(1000) - t(5) == t(995)
def test_tick_offset(self):
for t in self.ticks:
assert not t().isAnchored()
def test_compare_ticks(self):
for kls in self.ticks:
three = kls(3)
four = kls(4)
for _ in range(10):
assert three < kls(4)
assert kls(3) < four
assert four > kls(3)
assert kls(4) > three
assert kls(3) == kls(3)
assert kls(3) != kls(4)
class TestOffsetNames(object):
def test_get_offset_name(self):
assert BDay().freqstr == 'B'
assert BDay(2).freqstr == '2B'
assert BMonthEnd().freqstr == 'BM'
assert Week(weekday=0).freqstr == 'W-MON'
assert Week(weekday=1).freqstr == 'W-TUE'
assert Week(weekday=2).freqstr == 'W-WED'
assert Week(weekday=3).freqstr == 'W-THU'
assert Week(weekday=4).freqstr == 'W-FRI'
assert LastWeekOfMonth(weekday=WeekDay.SUN).freqstr == "LWOM-SUN"
assert (makeFY5253LastOfMonthQuarter(
weekday=1, startingMonth=3,
qtr_with_extra_week=4).freqstr == "REQ-L-MAR-TUE-4")
assert (makeFY5253NearestEndMonthQuarter(
weekday=1, startingMonth=3,
qtr_with_extra_week=3).freqstr == "REQ-N-MAR-TUE-3")
def test_get_offset():
with tm.assert_raises_regex(ValueError, _INVALID_FREQ_ERROR):
get_offset('gibberish')
with tm.assert_raises_regex(ValueError, _INVALID_FREQ_ERROR):
get_offset('QS-JAN-B')
pairs = [
('B', BDay()), ('b', BDay()), ('bm', BMonthEnd()),
('Bm', BMonthEnd()), ('W-MON', Week(weekday=0)),
('W-TUE', Week(weekday=1)), ('W-WED', Week(weekday=2)),
('W-THU', Week(weekday=3)), ('W-FRI', Week(weekday=4)),
("RE-N-DEC-MON", makeFY5253NearestEndMonth(weekday=0,
startingMonth=12)),
("RE-L-DEC-TUE", makeFY5253LastOfMonth(weekday=1, startingMonth=12)),
("REQ-L-MAR-TUE-4", makeFY5253LastOfMonthQuarter(
weekday=1, startingMonth=3, qtr_with_extra_week=4)),
("REQ-L-DEC-MON-3", makeFY5253LastOfMonthQuarter(
weekday=0, startingMonth=12, qtr_with_extra_week=3)),
("REQ-N-DEC-MON-3", makeFY5253NearestEndMonthQuarter(
weekday=0, startingMonth=12, qtr_with_extra_week=3)),
]
for name, expected in pairs:
offset = get_offset(name)
assert offset == expected, ("Expected %r to yield %r (actual: %r)" %
(name, expected, offset))
def test_get_offset_legacy():
pairs = [('w@Sat', Week(weekday=5))]
for name, expected in pairs:
with tm.assert_raises_regex(ValueError, _INVALID_FREQ_ERROR):
get_offset(name)
class TestParseTimeString(object):
def test_parse_time_string(self):
(date, parsed, reso) = parse_time_string('4Q1984')
(date_lower, parsed_lower, reso_lower) = parse_time_string('4q1984')
assert date == date_lower
assert parsed == parsed_lower
assert reso == reso_lower
def test_parse_time_quarter_w_dash(self):
# https://github.com/pandas-dev/pandas/issues/9688
pairs = [('1988-Q2', '1988Q2'), ('2Q-1988', '2Q1988'), ]
for dashed, normal in pairs:
(date_dash, parsed_dash, reso_dash) = parse_time_string(dashed)
(date, parsed, reso) = parse_time_string(normal)
assert date_dash == date
assert parsed_dash == parsed
assert reso_dash == reso
pytest.raises(DateParseError, parse_time_string, "-2Q1992")
pytest.raises(DateParseError, parse_time_string, "2-Q1992")
pytest.raises(DateParseError, parse_time_string, "4-4Q1992")
def test_get_standard_freq():
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
fstr = get_standard_freq('W')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert fstr == get_standard_freq('w')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert fstr == get_standard_freq('1w')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert fstr == get_standard_freq(('W', 1))
with tm.assert_raises_regex(ValueError, _INVALID_FREQ_ERROR):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
get_standard_freq('WeEk')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
fstr = get_standard_freq('5Q')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert fstr == get_standard_freq('5q')
with tm.assert_raises_regex(ValueError, _INVALID_FREQ_ERROR):
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
get_standard_freq('5QuarTer')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
assert fstr == get_standard_freq(('q', 5))
def test_quarterly_dont_normalize():
date = datetime(2012, 3, 31, 5, 30)
offsets = (QuarterBegin, QuarterEnd, BQuarterEnd, BQuarterBegin)
for klass in offsets:
result = date + klass()
assert (result.time() == date.time())
class TestOffsetAliases(object):
def setup_method(self, method):
_offset_map.clear()
def test_alias_equality(self):
for k, v in compat.iteritems(_offset_map):
if v is None:
continue
assert k == v.copy()
def test_rule_code(self):
lst = ['M', 'MS', 'BM', 'BMS', 'D', 'B', 'H', 'T', 'S', 'L', 'U']
for k in lst:
assert k == get_offset(k).rule_code
# should be cached - this is kind of an internals test...
assert k in _offset_map
assert k == (get_offset(k) * 3).rule_code
suffix_lst = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
base = 'W'
for v in suffix_lst:
alias = '-'.join([base, v])
assert alias == get_offset(alias).rule_code
assert alias == (get_offset(alias) * 5).rule_code
suffix_lst = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG',
'SEP', 'OCT', 'NOV', 'DEC']
base_lst = ['A', 'AS', 'BA', 'BAS', 'Q', 'QS', 'BQ', 'BQS']
for base in base_lst:
for v in suffix_lst:
alias = '-'.join([base, v])
assert alias == get_offset(alias).rule_code
assert alias == (get_offset(alias) * 5).rule_code
lst = ['M', 'D', 'B', 'H', 'T', 'S', 'L', 'U']
for k in lst:
code, stride = get_freq_code('3' + k)
assert isinstance(code, int)
assert stride == 3
assert k == _get_freq_str(code)
def test_apply_ticks():
result = offsets.Hour(3).apply(offsets.Hour(4))
exp = offsets.Hour(7)
assert (result == exp)
def test_delta_to_tick():
delta = timedelta(3)
tick = offsets._delta_to_tick(delta)
assert (tick == offsets.Day(3))
def test_dateoffset_misc():
oset = offsets.DateOffset(months=2, days=4)
# it works
oset.freqstr
assert (not offsets.DateOffset(months=2) == 2)
def test_freq_offsets():
off = BDay(1, offset=timedelta(0, 1800))
assert (off.freqstr == 'B+30Min')
off = BDay(1, offset=timedelta(0, -1800))
assert (off.freqstr == 'B-30Min')
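# Recursively collect all direct and transitive subclasses of ``cls``.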
def get_all_subclasses(cls):
ret = set()
this_subclasses = cls.__subclasses__()
ret = ret | set(this_subclasses)
for this_subclass in this_subclasses:
ret = ret | get_all_subclasses(this_subclass)
return ret
class TestCaching(object):
# as of GH 6479 (in 0.14.0), offset caching is turned off
# as of v0.12.0 only BusinessMonth/Quarter were actually caching
def setup_method(self, method):
_daterange_cache.clear()
_offset_map.clear()
def run_X_index_creation(self, cls):
inst1 = cls()
if not inst1.isAnchored():
assert not inst1._should_cache(), cls
return
assert inst1._should_cache(), cls
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 31),
freq=inst1, normalize=True)
assert cls() in _daterange_cache, cls
def test_should_cache_month_end(self):
assert not MonthEnd()._should_cache()
def test_should_cache_bmonth_end(self):
assert not BusinessMonthEnd()._should_cache()
def test_should_cache_week_month(self):
assert not WeekOfMonth(weekday=1, week=2)._should_cache()
def test_all_cacheableoffsets(self):
for subclass in get_all_subclasses(CacheableOffset):
if subclass.__name__[0] == "_" \
or subclass in TestCaching.no_simple_ctr:
continue
self.run_X_index_creation(subclass)
def test_month_end_index_creation(self):
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 31),
freq=MonthEnd(), normalize=True)
assert not MonthEnd() in _daterange_cache
def test_bmonth_end_index_creation(self):
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 29),
freq=BusinessMonthEnd(), normalize=True)
assert not BusinessMonthEnd() in _daterange_cache
def test_week_of_month_index_creation(self):
inst1 = WeekOfMonth(weekday=1, week=2)
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 29),
freq=inst1, normalize=True)
inst2 = WeekOfMonth(weekday=1, week=2)
assert inst2 not in _daterange_cache
class TestReprNames(object):
def test_str_for_named_is_name(self):
# look at all the amazing combinations!
month_prefixes = ['A', 'AS', 'BA', 'BAS', 'Q', 'BQ', 'BQS', 'QS']
names = [prefix + '-' + month
for prefix in month_prefixes
for month in ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL',
'AUG', 'SEP', 'OCT', 'NOV', 'DEC']]
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
names += ['W-' + day for day in days]
names += ['WOM-' + week + day
for week in ('1', '2', '3', '4') for day in days]
_offset_map.clear()
for name in names:
offset = get_offset(name)
assert offset.freqstr == name
def get_utc_offset_hours(ts):
# take a Timestamp and compute total hours of utc offset
o = ts.utcoffset()
return (o.days * 24 * 3600 + o.seconds) / 3600.0
class TestDST(object):
"""
test DateOffset additions over Daylight Savings Time
"""
# one microsecond before the DST transition
ts_pre_fallback = "2013-11-03 01:59:59.999999"
ts_pre_springfwd = "2013-03-10 01:59:59.999999"
# test both basic names and dateutil timezones
timezone_utc_offsets = {
'US/Eastern': dict(utc_offset_daylight=-4,
utc_offset_standard=-5, ),
'dateutil/US/Pacific': dict(utc_offset_daylight=-7,
utc_offset_standard=-8, )
}
valid_date_offsets_singular = [
'weekday', 'day', 'hour', 'minute', 'second', 'microsecond'
]
valid_date_offsets_plural = [
'weeks', 'days',
'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds'
]
def _test_all_offsets(self, n, **kwds):
valid_offsets = self.valid_date_offsets_plural if n > 1 \
else self.valid_date_offsets_singular
for name in valid_offsets:
self._test_offset(offset_name=name, offset_n=n, **kwds)
def _test_offset(self, offset_name, offset_n, tstart, expected_utc_offset):
offset = DateOffset(**{offset_name: offset_n})
t = tstart + offset
if expected_utc_offset is not None:
assert get_utc_offset_hours(t) == expected_utc_offset
if offset_name == 'weeks':
# dates should match
assert t.date() == timedelta(days=7 * offset.kwds[
'weeks']) + tstart.date()
# expect the same day of week, hour of day, minute, second, ...
assert (t.dayofweek == tstart.dayofweek and
t.hour == tstart.hour and
t.minute == tstart.minute and
t.second == tstart.second)
elif offset_name == 'days':
# dates should match
assert timedelta(offset.kwds['days']) + tstart.date() == t.date()
# expect the same hour of day, minute, second, ...
assert (t.hour == tstart.hour and
t.minute == tstart.minute and
t.second == tstart.second)
elif offset_name in self.valid_date_offsets_singular:
# expect the singular offset value to match between tstart and t
datepart_offset = getattr(t, offset_name
if offset_name != 'weekday' else
'dayofweek')
assert datepart_offset == offset.kwds[offset_name]
else:
# the offset should be the same as if it was done in UTC
assert (t == (tstart.tz_convert('UTC') + offset)
.tz_convert('US/Pacific'))
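# Build a tz-aware Timestamp from a naive datetime string plus a fixed
# UTC-offset suffix (e.g. '-0500'), then convert it to the target zone ``tz``.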
def _make_timestamp(self, string, hrs_offset, tz):
if hrs_offset >= 0:
offset_string = '{hrs:02d}00'.format(hrs=hrs_offset)
else:
offset_string = '-{hrs:02d}00'.format(hrs=-1 * hrs_offset)
return Timestamp(string + offset_string).tz_convert(tz)
def test_fallback_plural(self):
# test moving from daylight savings to standard time
import dateutil
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_daylight']
hrs_post = utc_offsets['utc_offset_standard']
if dateutil.__version__ != LooseVersion('2.6.0'):
# buggy ambiguous behavior in 2.6.0
# GH 14621
# https://github.com/dateutil/dateutil/issues/321
self._test_all_offsets(
n=3, tstart=self._make_timestamp(self.ts_pre_fallback,
hrs_pre, tz),
expected_utc_offset=hrs_post)
def test_springforward_plural(self):
# test moving from standard to daylight savings
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
hrs_post = utc_offsets['utc_offset_daylight']
self._test_all_offsets(
n=3, tstart=self._make_timestamp(self.ts_pre_springfwd,
hrs_pre, tz),
expected_utc_offset=hrs_post)
def test_fallback_singular(self):
# in the case of singular offsets, we don't necessarily know which utc
# offset the new Timestamp will wind up in (the tz for 1 month may be
# different from 1 second) so we don't specify an expected_utc_offset
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
self._test_all_offsets(n=1, tstart=self._make_timestamp(
self.ts_pre_fallback, hrs_pre, tz), expected_utc_offset=None)
def test_springforward_singular(self):
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
self._test_all_offsets(n=1, tstart=self._make_timestamp(
self.ts_pre_springfwd, hrs_pre, tz), expected_utc_offset=None)
def test_all_offset_classes(self):
tests = {MonthBegin: ['11/2/2012', '12/1/2012'],
MonthEnd: ['11/2/2012', '11/30/2012'],
BMonthBegin: ['11/2/2012', '12/3/2012'],
BMonthEnd: ['11/2/2012', '11/30/2012'],
CBMonthBegin: ['11/2/2012', '12/3/2012'],
CBMonthEnd: ['11/2/2012', '11/30/2012'],
SemiMonthBegin: ['11/2/2012', '11/15/2012'],
SemiMonthEnd: ['11/2/2012', '11/15/2012'],
Week: ['11/2/2012', '11/9/2012'],
YearBegin: ['11/2/2012', '1/1/2013'],
YearEnd: ['11/2/2012', '12/31/2012'],
BYearBegin: ['11/2/2012', '1/1/2013'],
BYearEnd: ['11/2/2012', '12/31/2012'],
QuarterBegin: ['11/2/2012', '12/1/2012'],
QuarterEnd: ['11/2/2012', '12/31/2012'],
BQuarterBegin: ['11/2/2012', '12/3/2012'],
BQuarterEnd: ['11/2/2012', '12/31/2012'],
Day: ['11/4/2012', '11/4/2012 23:00']}
for offset, test_values in iteritems(tests):
first = Timestamp(test_values[0], tz='US/Eastern') + offset()
second = Timestamp(test_values[1], tz='US/Eastern')
assert first == second
| mit |
eggplantbren/ExperimentalNS | TwoScalars/DNest/postprocess.py | 1 | 7100 | # Copyright (c) 2009, 2010, 2011, 2012 Brendon J. Brewer.
#
# This file is part of DNest3.
#
# DNest3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DNest3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DNest3. If not, see <http://www.gnu.org/licenses/>.
import copy
import numpy as np
import matplotlib.pyplot as plt
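# Numerically stable log(sum(exp(values))): shift by the maximum value before
# exponentiating so the sum cannot overflow.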
def logsumexp(values):
biggest = np.max(values)
x = values - biggest
result = np.log(np.sum(np.exp(x))) + biggest
return result
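# Numerically stable log(exp(x1) - exp(x2)), assuming x1 >= x2.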
def logdiffexp(x1, x2):
biggest = x1
xx1 = x1 - biggest
xx2 = x2 - biggest
result = np.log(np.exp(xx1) - np.exp(xx2)) + biggest
return result
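# Combine the levels.txt, sample_info.txt and sample.txt files written by a
# DNest3 run into posterior weights, an evidence estimate log(Z), an
# information estimate H and an effective sample size, optionally plotting
# diagnostics and writing weights.txt / posterior_sample.txt.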
def postprocess(temperature=1., numResampleLogX=1, plot=True, loaded=[], \
cut=0., save=True, zoom_in=True):
if len(loaded) == 0:
levels = np.atleast_2d(np.loadtxt("levels.txt"))
sample_info = np.atleast_2d(np.loadtxt("sample_info.txt"))
sample = np.atleast_2d(np.loadtxt("sample.txt"))
#if(sample.shape[0] == 1):
# sample = sample.T
else:
levels, sample_info, sample = loaded[0], loaded[1], loaded[2]
sample = sample[int(cut*sample.shape[0]):, :]
sample_info = sample_info[int(cut*sample_info.shape[0]):, :]
if sample.shape[0] != sample_info.shape[0]:
print('# Size mismatch. Truncating...')
lowest = np.min([sample.shape[0], sample_info.shape[0]])
sample = sample[0:lowest, :]
sample_info = sample_info[0:lowest, :]
if plot:
if numResampleLogX > 1:
plt.ion()
plt.figure(1)
plt.plot(sample_info[:,0])
plt.xlabel("Iteration")
plt.ylabel("Level")
if numResampleLogX > 1:
plt.draw()
plt.figure(2)
plt.subplot(2,1,1)
plt.plot(np.diff(levels[:,0]))
plt.ylabel("Compression")
plt.xlabel("Level")
xlim = plt.gca().get_xlim()
plt.axhline(-1., color='r')
plt.ylim(ymax=0.05)
if numResampleLogX > 1:
plt.draw()
plt.subplot(2,1,2)
good = np.nonzero(levels[:,4] > 0)[0]
plt.plot(levels[good,3]/levels[good,4])
plt.xlim(xlim)
plt.ylim([0., 1.])
plt.xlabel("Level")
plt.ylabel("MH Acceptance")
if numResampleLogX > 1:
plt.draw()
# Convert to lists of tuples
logl_levels = [(levels[i,1], levels[i, 2]) for i in xrange(0, levels.shape[0])] # logl, tiebreaker
logl_samples = [(sample_info[i, 1], sample_info[i, 2], i) for i in xrange(0, sample.shape[0])] # logl, tiebreaker, id
logx_samples = np.zeros((sample_info.shape[0], numResampleLogX))
logp_samples = np.zeros((sample_info.shape[0], numResampleLogX))
logP_samples = np.zeros((sample_info.shape[0], numResampleLogX))
P_samples = np.zeros((sample_info.shape[0], numResampleLogX))
logz_estimates = np.zeros((numResampleLogX, 1))
H_estimates = np.zeros((numResampleLogX, 1))
# Find sandwiching level for each sample
sandwich = sample_info[:,0].copy().astype('int')
sandwich *= 0
for i in xrange(0, sample.shape[0]):
while sandwich[i] < levels.shape[0]-1 and logl_samples[i] > logl_levels[sandwich[i] + 1]:
sandwich[i] += 1
for z in xrange(0, numResampleLogX):
# For each level
for i in range(0, levels.shape[0]):
# Find the samples sandwiched by this level
which = np.nonzero(sandwich == i)[0]
logl_samples_thisLevel = [] # (logl, tieBreaker, ID)
for j in xrange(0, len(which)):
logl_samples_thisLevel.append(copy.deepcopy(logl_samples[which[j]]))
logl_samples_thisLevel = sorted(logl_samples_thisLevel)
N = len(logl_samples_thisLevel)
# Generate intermediate logx values
logx_max = levels[i, 0]
if i == levels.shape[0]-1:
logx_min = -1E300
else:
logx_min = levels[i+1, 0]
Umin = np.exp(logx_min - logx_max)
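# For a single log(X) resampling with samples present, place the points at
# evenly spaced quantiles; otherwise draw them uniformly at random.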
if N == 0 or numResampleLogX > 1:
U = Umin + (1. - Umin)*np.random.rand(len(which))
else:
U = Umin + (1. - Umin)*np.linspace(1./(N+1), 1. - 1./(N+1), N)
logx_samples_thisLevel = np.sort(logx_max + np.log(U))[::-1]
for j in xrange(0, which.size):
logx_samples[logl_samples_thisLevel[j][2]][z] = logx_samples_thisLevel[j]
if j != which.size - 1:
left = logx_samples_thisLevel[j+1]
elif i == levels.shape[0]-1:
left = -1E300
else:
left = levels[i+1][0]
if j != 0:
right = logx_samples_thisLevel[j-1]
else:
right = levels[i][0]
logp_samples[logl_samples_thisLevel[j][2]][z] = np.log(0.5) + logdiffexp(right, left)
logl = sample_info[:,1]/temperature
logp_samples[:,z] = logp_samples[:,z] - logsumexp(logp_samples[:,z])
logP_samples[:,z] = logp_samples[:,z] + logl
logz_estimates[z] = logsumexp(logP_samples[:,z])
logP_samples[:,z] -= logz_estimates[z]
P_samples[:,z] = np.exp(logP_samples[:,z])
H_estimates[z] = -logz_estimates[z] + np.sum(P_samples[:,z]*logl)
if plot:
plt.figure(3)
if z == 0:
plt.subplot(2,1,1)
plt.plot(logx_samples[:,z], sample_info[:,1], 'b.', label='Samples')
plt.hold(True)
plt.plot(levels[1:,0], levels[1:,1], 'r.', label='Levels')
plt.legend(numpoints=1, loc='lower left')
plt.ylabel('log(L)')
plt.title(str(z+1) + "/" + str(numResampleLogX) + ", log(Z) = " + str(logz_estimates[z][0]))
# Use all plotted logl values to set ylim
combined_logl = np.hstack([sample_info[:,1], levels[1:, 1]])
combined_logl = np.sort(combined_logl)
lower = combined_logl[int(0.1*combined_logl.size)]
upper = combined_logl[-1]
diff = upper - lower
lower -= 0.05*diff
upper += 0.05*diff
if zoom_in:
plt.ylim([lower, upper])
if numResampleLogX > 1:
plt.draw()
xlim = plt.gca().get_xlim()
if plot:
plt.subplot(2,1,2)
plt.hold(False)
plt.plot(logx_samples[:,z], P_samples[:,z], 'b.')
plt.ylabel('Posterior Weights')
plt.xlabel('log(X)')
plt.xlim(xlim)
if numResampleLogX > 1:
plt.draw()
P_samples = np.mean(P_samples, 1)
P_samples = P_samples/np.sum(P_samples)
logz_estimate = np.mean(logz_estimates)
logz_error = np.std(logz_estimates)
H_estimate = np.mean(H_estimates)
H_error = np.std(H_estimates)
ESS = np.exp(-np.sum(P_samples*np.log(P_samples+1E-300)))
print("log(Z) = " + str(logz_estimate) + " +- " + str(logz_error))
print("Information = " + str(H_estimate) + " +- " + str(H_error) + " nats.")
print("Effective sample size = " + str(ESS))
# Resample to uniform weight
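# Rejection sampling: accept a draw with probability proportional to its weight.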
N = int(ESS)
posterior_sample = np.zeros((N, sample.shape[1]))
w = P_samples
w = w/np.max(w)
if save:
np.savetxt('weights.txt', w) # Save weights
for i in xrange(0, N):
while True:
which = np.random.randint(sample.shape[0])
if np.random.rand() <= w[which]:
break
posterior_sample[i,:] = sample[which,:]
if save:
np.savetxt("posterior_sample.txt", posterior_sample)
if plot:
if numResampleLogX > 1:
plt.ioff()
plt.show()
return [logz_estimate, H_estimate, logx_samples, logp_samples.flatten()]
| gpl-3.0 |