repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/pandas/sandbox/qtpandas.py | 13 | 4347 | '''
Easy integration of DataFrame into pyqt framework
@author: Jev Kuznetsov
'''
# GH9615
import warnings
warnings.warn("The pandas.sandbox.qtpandas module is deprecated and will be "
"removed in a future version. We refer users to the external package "
"here: https://github.com/datalyze-solutions/pandas-qt")
try:
from PyQt4.QtCore import QAbstractTableModel, Qt, QVariant, QModelIndex
from PyQt4.QtGui import (
QApplication, QDialog, QVBoxLayout, QTableView, QWidget)
except ImportError:
from PySide.QtCore import QAbstractTableModel, Qt, QModelIndex
from PySide.QtGui import (
QApplication, QDialog, QVBoxLayout, QTableView, QWidget)
QVariant = lambda value=None: value
from pandas import DataFrame, Index
class DataFrameModel(QAbstractTableModel):
    ''' Qt table model backed by a pandas DataFrame. '''

    def __init__(self):
        super(DataFrameModel, self).__init__()
        self.df = DataFrame()

    def setDataFrame(self, dataFrame):
        '''Replace the underlying DataFrame (follow with signalUpdate()).'''
        self.df = dataFrame

    def signalUpdate(self):
        ''' tell viewers to update their data (this is full update, not
        efficient)'''
        self.layoutChanged.emit()

    #------------- table display functions -----------------
    def headerData(self, section, orientation, role=Qt.DisplayRole):
        '''Return the column label (horizontal) or index label (vertical)
        for header *section*; empty QVariant outside the display role or
        when *section* is out of range.'''
        if role != Qt.DisplayRole:
            return QVariant()

        if orientation == Qt.Horizontal:
            try:
                return self.df.columns.tolist()[section]
            except (IndexError, ):
                return QVariant()
        elif orientation == Qt.Vertical:
            try:
                return self.df.index.tolist()[section]
            except (IndexError, ):
                return QVariant()

    def data(self, index, role=Qt.DisplayRole):
        '''Return the cell at *index* rendered as a string for display.'''
        if role != Qt.DisplayRole:
            return QVariant()

        if not index.isValid():
            return QVariant()

        # .iloc is strictly positional; the deprecated .ix used here before
        # silently switched to label-based lookup on integer-labelled frames,
        # which returned the wrong cell for reordered integer indexes.
        return QVariant(str(self.df.iloc[index.row(), index.column()]))

    def flags(self, index):
        # Every cell is editable in addition to the default item flags.
        flags = super(DataFrameModel, self).flags(index)
        flags |= Qt.ItemIsEditable
        return flags

    def setData(self, index, value, role):
        '''Write an edited *value* back into the DataFrame.

        Converts the incoming Qt value to the column dtype for non-object
        columns; an empty string clears the cell to None. Returns True.
        '''
        row = self.df.index[index.row()]
        col = self.df.columns[index.column()]
        if hasattr(value, 'toPyObject'):
            # PyQt4 gets a QVariant
            value = value.toPyObject()
        else:
            # PySide gets an unicode
            dtype = self.df[col].dtype
            if dtype != object:
                value = None if value == '' else dtype.type(value)
        # label-based .loc assignment replaces the removed DataFrame.set_value
        self.df.loc[row, col] = value
        return True

    def rowCount(self, index=QModelIndex()):
        return self.df.shape[0]

    def columnCount(self, index=QModelIndex()):
        return self.df.shape[1]
class DataFrameWidget(QWidget):
    ''' A minimal widget that displays a DataFrame in a table view. '''

    def __init__(self, dataFrame, parent=None):
        super(DataFrameWidget, self).__init__(parent)
        # Model/view pair: the model holds the frame, the view renders it.
        self.dataModel = DataFrameModel()
        self.dataTable = QTableView()
        self.dataTable.setModel(self.dataModel)

        box = QVBoxLayout()
        box.addWidget(self.dataTable)
        self.setLayout(box)

        # Show the initial data.
        self.setDataFrame(dataFrame)

    def setDataFrame(self, dataFrame):
        '''Swap in *dataFrame*, refresh the view and fit column widths.'''
        self.dataModel.setDataFrame(dataFrame)
        self.dataModel.signalUpdate()
        self.dataTable.resizeColumnsToContents()
#-----------------stand alone test code
def testDf():
    ''' Create a small mixed-dtype test DataFrame.

    Returns a 3x4 frame (index AAA/BBB/CCC) with int, float, string and
    all-NaN columns.
    '''
    # Local import: numpy is otherwise only imported inside the
    # ``__main__`` guard below, so calling testDf() from an importing
    # module raised NameError on ``np``.
    import numpy as np
    data = {'int': [1, 2, 3], 'float': [1.5, 2.5, 3.5],
            'string': ['a', 'b', 'c'], 'nan': [np.nan, np.nan, np.nan]}
    return DataFrame(data, index=Index(['AAA', 'BBB', 'CCC']),
                     columns=['int', 'float', 'string', 'nan'])
class Form(QDialog):
    '''Demo dialog embedding a DataFrameWidget filled with test data.'''

    def __init__(self, parent=None):
        super(Form, self).__init__(parent)

        df = testDf()  # make up some data
        widget = DataFrameWidget(df)
        # BUG FIX: DataFrameWidget has no resizeColumnsToContents() method --
        # calling it on the widget raised AttributeError. The method lives on
        # the embedded QTableView, so target that instead.
        widget.dataTable.resizeColumnsToContents()
        layout = QVBoxLayout()
        layout.addWidget(widget)
        self.setLayout(layout)
if __name__ == '__main__':
    # Stand-alone demo: build the dialog and run the Qt event loop.
    import sys
    import numpy as np  # NOTE(review): testDf() reads this global at call time
    app = QApplication(sys.argv)
    form = Form()
    form.show()
    app.exec_()
| artistic-2.0 |
shikhardb/scikit-learn | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
    # Problem sizes to sweep: 5 training-set sizes x 3 feature counts.
    # The builtin ``int`` replaces ``np.int``, a deprecated alias removed
    # in NumPy 1.24.
    list_n_samples = np.linspace(100, 10000, 5).astype(int)
    list_n_features = [10, 100, 1000]
    n_test = 1000
    noise = 0.1
    alpha = 0.01
    # results[i, j] = (test MSE, fit time in seconds) per estimator
    sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
    elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
    ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
    asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
    for i, n_train in enumerate(list_n_samples):
        for j, n_features in enumerate(list_n_features):
            X, y, coef = make_regression(
                n_samples=n_train + n_test, n_features=n_features,
                noise=noise, coef=True)

            X_train = X[:n_train]
            y_train = y[:n_train]
            X_test = X[n_train:]
            y_test = y[n_train:]

            print("=======================")
            print("Round %d %d" % (i, j))
            print("n_features:", n_features)
            print("n_samples:", n_train)

            # Shuffle data
            idx = np.arange(n_train)
            np.random.seed(13)
            np.random.shuffle(idx)
            X_train = X_train[idx]
            y_train = y_train[idx]

            # Standardize features and targets using training statistics only.
            std = X_train.std(axis=0)
            mean = X_train.mean(axis=0)
            X_train = (X_train - mean) / std
            X_test = (X_test - mean) / std

            std = y_train.std(axis=0)
            mean = y_train.mean(axis=0)
            y_train = (y_train - mean) / std
            y_test = (y_test - mean) / std

            gc.collect()
            print("- benchmarking ElasticNet")
            clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
            tstart = time()
            clf.fit(X_train, y_train)
            elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
                                                        y_test)
            elnet_results[i, j, 1] = time() - tstart

            gc.collect()
            print("- benchmarking SGD")
            # Keep the total number of weight updates roughly constant
            # across training-set sizes.
            n_iter = np.ceil(10 ** 4.0 / n_train)
            clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
                               n_iter=n_iter, learning_rate="invscaling",
                               eta0=.01, power_t=0.25)

            tstart = time()
            clf.fit(X_train, y_train)
            sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
                                                      y_test)
            sgd_results[i, j, 1] = time() - tstart

            gc.collect()
            print("n_iter", n_iter)
            print("- benchmarking A-SGD")
            n_iter = np.ceil(10 ** 4.0 / n_train)
            # ``average=...`` switches on averaged SGD halfway through.
            clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
                               n_iter=n_iter, learning_rate="invscaling",
                               eta0=.002, power_t=0.05,
                               average=(n_iter * n_train // 2))

            tstart = time()
            clf.fit(X_train, y_train)
            asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
                                                       y_test)
            asgd_results[i, j, 1] = time() - tstart

            gc.collect()
            print("- benchmarking RidgeRegression")
            clf = Ridge(alpha=alpha, fit_intercept=False)
            tstart = time()
            clf.fit(X_train, y_train)
            ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
                                                        y_test)
            ridge_results[i, j, 1] = time() - tstart

    # Plot results: one row of (RMSE, training-time) panels per feature count.
    i = 0
    m = len(list_n_features)
    pl.figure('scikit-learn SGD regression benchmark results',
              figsize=(5 * 2, 4 * m))
    for j in range(m):
        pl.subplot(m, 2, i + 1)
        pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
                label="ElasticNet")
        pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
                label="SGDRegressor")
        pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
                label="A-SGDRegressor")
        pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
                label="Ridge")
        pl.legend(prop={"size": 10})
        pl.xlabel("n_train")
        pl.ylabel("RMSE")
        pl.title("Test error - %d features" % list_n_features[j])
        i += 1

        pl.subplot(m, 2, i + 1)
        pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
                label="ElasticNet")
        pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
                label="SGDRegressor")
        pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
                label="A-SGDRegressor")
        pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
                label="Ridge")
        pl.legend(prop={"size": 10})
        pl.xlabel("n_train")
        pl.ylabel("Time [sec]")
        pl.title("Training time - %d features" % list_n_features[j])
        i += 1

    pl.subplots_adjust(hspace=.30)

    pl.show()
| bsd-3-clause |
deepesch/scikit-learn | sklearn/grid_search.py | 32 | 36586 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin, ChangedBehaviorWarning
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
    """Grid of parameters with a discrete number of values for each.

    Can be used to iterate over parameter value combinations with the
    Python built-in function iter.

    Read more in the :ref:`User Guide <grid_search>`.

    Parameters
    ----------
    param_grid : dict of string to sequence, or sequence of such
        The parameter grid to explore, as a dictionary mapping estimator
        parameters to sequences of allowed values.

        An empty dict signifies default parameters.

        A sequence of dicts signifies a sequence of grids to search, and is
        useful to avoid exploring parameter combinations that make no sense
        or have no effect. See the examples below.

    Examples
    --------
    >>> from sklearn.grid_search import ParameterGrid
    >>> param_grid = {'a': [1, 2], 'b': [True, False]}
    >>> list(ParameterGrid(param_grid)) == (
    ...    [{'a': 1, 'b': True}, {'a': 1, 'b': False},
    ...     {'a': 2, 'b': True}, {'a': 2, 'b': False}])
    True

    >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
    >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
    ...                               {'kernel': 'rbf', 'gamma': 1},
    ...                               {'kernel': 'rbf', 'gamma': 10}]
    True
    >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
    True

    See also
    --------
    :class:`GridSearchCV`:
        uses ``ParameterGrid`` to perform a full parallelized parameter search.
    """

    def __init__(self, param_grid):
        if isinstance(param_grid, Mapping):
            # wrap dictionary in a singleton list to support either dict
            # or list of dicts
            param_grid = [param_grid]
        self.param_grid = param_grid

    def __iter__(self):
        """Iterate over the points in the grid.

        Returns
        -------
        params : iterator over dict of string to any
            Yields dictionaries mapping each estimator parameter to one of its
            allowed values.
        """
        for p in self.param_grid:
            # Always sort the keys of a dictionary, for reproducibility
            items = sorted(p.items())
            if not items:
                yield {}
            else:
                keys, values = zip(*items)
                for v in product(*values):
                    params = dict(zip(keys, v))
                    yield params

    def __len__(self):
        """Number of points on the grid."""
        # Product function that can handle iterables (np.product can't).
        product = partial(reduce, operator.mul)
        # An empty sub-grid still contributes one point (the defaults).
        return sum(product(len(v) for v in p.values()) if p else 1
                   for p in self.param_grid)

    def __getitem__(self, ind):
        """Get the parameters that would be ``ind``th in iteration

        Parameters
        ----------
        ind : int
            The iteration index

        Returns
        -------
        params : dict of string to any
            Equal to list(self)[ind]
        """
        # This is used to make discrete sampling without replacement memory
        # efficient.
        for sub_grid in self.param_grid:
            # XXX: could memoize information used here
            if not sub_grid:
                if ind == 0:
                    return {}
                else:
                    ind -= 1
                continue

            # Reverse so most frequent cycling parameter comes first
            keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
            sizes = [len(v_list) for v_list in values_lists]
            # np.prod replaces np.product, a deprecated alias removed in
            # NumPy 2.0.
            total = np.prod(sizes)

            if ind >= total:
                # Try the next grid
                ind -= total
            else:
                out = {}
                for key, v_list, n in zip(keys, values_lists, sizes):
                    ind, offset = divmod(ind, n)
                    out[key] = v_list[offset]
                return out

        raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
    """Generator on parameters sampled from given distributions.

    Non-deterministic iterable over random candidate combinations for hyper-
    parameter search. If all parameters are presented as a list,
    sampling without replacement is performed. If at least one parameter
    is given as a distribution, sampling with replacement is used.
    It is highly recommended to use continuous distributions for continuous
    parameters.

    Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
    a custom RNG instance and always use the singleton RNG from
    ``numpy.random``. Hence setting ``random_state`` will not guarantee a
    deterministic iteration whenever ``scipy.stats`` distributions are used to
    define the parameter search space.

    Read more in the :ref:`User Guide <grid_search>`.

    Parameters
    ----------
    param_distributions : dict
        Dictionary where the keys are parameters and values
        are distributions from which a parameter is to be sampled.
        Distributions either have to provide a ``rvs`` function
        to sample from them, or can be given as a list of values,
        where a uniform distribution is assumed.

    n_iter : integer
        Number of parameter settings that are produced.

    random_state : int or RandomState
        Pseudo random number generator state used for random uniform sampling
        from lists of possible values instead of scipy.stats distributions.

    Returns
    -------
    params : dict of string to any
        **Yields** dictionaries mapping each estimator parameter to
        as sampled value.

    Examples
    --------
    >>> from sklearn.grid_search import ParameterSampler
    >>> from scipy.stats.distributions import expon
    >>> import numpy as np
    >>> np.random.seed(0)
    >>> param_grid = {'a':[1, 2], 'b': expon()}
    >>> param_list = list(ParameterSampler(param_grid, n_iter=4))
    >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
    ...                 for d in param_list]
    >>> rounded_list == [{'b': 0.89856, 'a': 1},
    ...                  {'b': 0.923223, 'a': 1},
    ...                  {'b': 1.878964, 'a': 2},
    ...                  {'b': 1.038159, 'a': 2}]
    True
    """
    def __init__(self, param_distributions, n_iter, random_state=None):
        # Parameters are stored unvalidated; checks happen at iteration time.
        self.param_distributions = param_distributions
        self.n_iter = n_iter
        self.random_state = random_state

    def __iter__(self):
        # check if all distributions are given as lists
        # in this case we want to sample without replacement
        all_lists = np.all([not hasattr(v, "rvs")
                            for v in self.param_distributions.values()])
        rnd = check_random_state(self.random_state)

        if all_lists:
            # look up sampled parameter settings in parameter grid
            # (ParameterGrid.__getitem__ makes this memory-efficient: only
            # the sampled indices are materialized, never the whole grid)
            param_grid = ParameterGrid(self.param_distributions)
            grid_size = len(param_grid)

            if grid_size < self.n_iter:
                raise ValueError(
                    "The total space of parameters %d is smaller "
                    "than n_iter=%d." % (grid_size, self.n_iter)
                    + " For exhaustive searches, use GridSearchCV.")
            for i in sample_without_replacement(grid_size, self.n_iter,
                                                random_state=rnd):
                yield param_grid[i]

        else:
            # Always sort the keys of a dictionary, for reproducibility
            # (the order of rnd/rvs calls is part of the observable output)
            items = sorted(self.param_distributions.items())
            for _ in six.moves.range(self.n_iter):
                params = dict()
                for k, v in items:
                    if hasattr(v, "rvs"):
                        # scipy distributions draw from the global numpy RNG,
                        # not ``rnd`` -- see the class docstring note.
                        params[k] = v.rvs()
                    else:
                        params[k] = v[rnd.randint(len(v))]
                yield params

    def __len__(self):
        """Number of points that will be sampled."""
        return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
                   verbose, error_score='raise', **fit_params):
    """Fit and score a cloned estimator for a single grid point.

    Parameters
    ----------
    X : array-like, sparse matrix or list
        Input data.

    y : array-like or None
        Targets for input data.

    estimator : estimator object
        This estimator will be cloned and then fitted.

    parameters : dict
        Parameters to be set on estimator for this grid point.

    train : ndarray, dtype int or bool
        Boolean mask or indices for training set.

    test : ndarray, dtype int or bool
        Boolean mask or indices for test set.

    scorer : callable or None.
        If provided must be a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    verbose : int
        Verbosity level.

    **fit_params : kwargs
        Additional parameter passed to the fit function of the estimator.

    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.

    Returns
    -------
    score : float
        Score of this parameter setting on given training / test split.

    parameters : dict
        The parameters that have been evaluated.

    n_samples_test : int
        Number of test samples in this split.
    """
    # Delegate the heavy lifting; _fit_and_score also returns the scoring
    # time, which this public wrapper drops.
    result = _fit_and_score(estimator, X, y, scorer, train,
                            test, verbose, parameters,
                            fit_params, error_score)
    score, n_samples_test, _ = result
    return score, parameters, n_samples_test
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
                                      MetaEstimatorMixin)):
    """Base class for hyper parameter search with cross-validation."""

    @abstractmethod
    def __init__(self, estimator, scoring=None,
                 fit_params=None, n_jobs=1, iid=True,
                 refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
                 error_score='raise'):

        self.scoring = scoring
        self.estimator = estimator
        self.n_jobs = n_jobs
        self.fit_params = fit_params if fit_params is not None else {}
        self.iid = iid
        self.refit = refit
        self.cv = cv
        self.verbose = verbose
        self.pre_dispatch = pre_dispatch
        self.error_score = error_score

    @property
    def _estimator_type(self):
        # Mirror the wrapped estimator so is_classifier()/is_regressor()
        # work on the search object itself.
        return self.estimator._estimator_type

    def score(self, X, y=None):
        """Returns the score on the given data, if the estimator has been refit

        This uses the score defined by ``scoring`` where provided, and the
        ``best_estimator_.score`` method otherwise.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Input data, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.

        Returns
        -------
        score : float

        Notes
        -----
         * The long-standing behavior of this method changed in version 0.16.
         * It no longer uses the metric provided by ``estimator.score`` if the
           ``scoring`` parameter was set when fitting.

        """
        if self.scorer_ is None:
            raise ValueError("No score function explicitly defined, "
                             "and the estimator doesn't provide one %s"
                             % self.best_estimator_)
        if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
            warnings.warn("The long-standing behavior to use the estimator's "
                          "score function in {0}.score has changed. The "
                          "scoring parameter is now used."
                          "".format(self.__class__.__name__),
                          ChangedBehaviorWarning)
        return self.scorer_(self.best_estimator_, X, y)

    @if_delegate_has_method(delegate='estimator')
    def predict(self, X):
        """Call predict on the estimator with the best found parameters.

        Only available if ``refit=True`` and the underlying estimator supports
        ``predict``.

        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.

        """
        return self.best_estimator_.predict(X)

    @if_delegate_has_method(delegate='estimator')
    def predict_proba(self, X):
        """Call predict_proba on the estimator with the best found parameters.

        Only available if ``refit=True`` and the underlying estimator supports
        ``predict_proba``.

        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.

        """
        return self.best_estimator_.predict_proba(X)

    @if_delegate_has_method(delegate='estimator')
    def predict_log_proba(self, X):
        """Call predict_log_proba on the estimator with the best found parameters.

        Only available if ``refit=True`` and the underlying estimator supports
        ``predict_log_proba``.

        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.

        """
        return self.best_estimator_.predict_log_proba(X)

    @if_delegate_has_method(delegate='estimator')
    def decision_function(self, X):
        """Call decision_function on the estimator with the best found parameters.

        Only available if ``refit=True`` and the underlying estimator supports
        ``decision_function``.

        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.

        """
        return self.best_estimator_.decision_function(X)

    @if_delegate_has_method(delegate='estimator')
    def transform(self, X):
        """Call transform on the estimator with the best found parameters.

        Only available if the underlying estimator supports ``transform`` and
        ``refit=True``.

        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.

        """
        return self.best_estimator_.transform(X)

    @if_delegate_has_method(delegate='estimator')
    def inverse_transform(self, Xt):
        """Call inverse_transform on the estimator with the best found parameters.

        Only available if the underlying estimator implements ``inverse_transform`` and
        ``refit=True``.

        Parameters
        -----------
        Xt : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.

        """
        # BUG FIX: this previously delegated to ``transform`` instead of
        # ``inverse_transform``, applying the wrong operation.
        return self.best_estimator_.inverse_transform(Xt)

    def _fit(self, X, y, parameter_iterable):
        """Actual fitting,  performing the search over parameters."""

        estimator = self.estimator
        cv = self.cv
        self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)

        n_samples = _num_samples(X)
        X, y = indexable(X, y)

        if y is not None:
            if len(y) != n_samples:
                raise ValueError('Target variable (y) has a different number '
                                 'of samples (%i) than data (X: %i samples)'
                                 % (len(y), n_samples))
        cv = check_cv(cv, X, y, classifier=is_classifier(estimator))

        if self.verbose > 0:
            if isinstance(parameter_iterable, Sized):
                n_candidates = len(parameter_iterable)
                print("Fitting {0} folds for each of {1} candidates, totalling"
                      " {2} fits".format(len(cv), n_candidates,
                                         n_candidates * len(cv)))

        base_estimator = clone(self.estimator)

        pre_dispatch = self.pre_dispatch

        # One (parameters, fold) fit per task; folds for a given candidate
        # are contiguous in ``out``.
        out = Parallel(
            n_jobs=self.n_jobs, verbose=self.verbose,
            pre_dispatch=pre_dispatch
        )(
            delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
                                    train, test, self.verbose, parameters,
                                    self.fit_params, return_parameters=True,
                                    error_score=self.error_score)
                for parameters in parameter_iterable
                for train, test in cv)

        # Out is a list of triplet: score, estimator, n_test_samples
        n_fits = len(out)
        n_folds = len(cv)

        scores = list()
        grid_scores = list()
        for grid_start in range(0, n_fits, n_folds):
            n_test_samples = 0
            score = 0
            all_scores = []
            for this_score, this_n_test_samples, _, parameters in \
                    out[grid_start:grid_start + n_folds]:
                all_scores.append(this_score)
                if self.iid:
                    # Weight each fold's score by its test-set size.
                    this_score *= this_n_test_samples
                    n_test_samples += this_n_test_samples
                score += this_score
            if self.iid:
                score /= float(n_test_samples)
            else:
                score /= float(n_folds)
            scores.append((score, parameters))
            # TODO: shall we also store the test_fold_sizes?
            grid_scores.append(_CVScoreTuple(
                parameters,
                score,
                np.array(all_scores)))
        # Store the computed scores
        self.grid_scores_ = grid_scores

        # Find the best parameters by comparing on the mean validation score:
        # note that `sorted` is deterministic in the way it breaks ties
        best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
                      reverse=True)[0]
        self.best_params_ = best.parameters
        self.best_score_ = best.mean_validation_score

        if self.refit:
            # fit the best estimator using the entire dataset
            # clone first to work around broken estimators
            best_estimator = clone(base_estimator).set_params(
                **best.parameters)
            if y is not None:
                best_estimator.fit(X, y, **self.fit_params)
            else:
                best_estimator.fit(X, **self.fit_params)
            self.best_estimator_ = best_estimator
        return self
class GridSearchCV(BaseSearchCV):
    """Exhaustive search over specified parameter values for an estimator.

    Important members are fit, predict.

    GridSearchCV implements a "fit" method and a "predict" method like
    any classifier except that the parameters of the classifier
    used to predict is optimized by cross-validation.

    Read more in the :ref:`User Guide <grid_search>`.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        A object of that type is instantiated for each grid point.

    param_grid : dict or list of dictionaries
        Dictionary with parameters names (string) as keys and lists of
        parameter settings to try as values, or a list of such
        dictionaries, in which case the grids spanned by each dictionary
        in the list are explored. This enables searching over any sequence
        of parameter settings.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    fit_params : dict, optional
        Parameters to pass to the fit method.

    n_jobs : int, default 1
        Number of jobs to run in parallel.

    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:

            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs

            - An int, giving the exact number of total jobs that are
              spawned

            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'

    iid : boolean, default=True
        If True, the data is assumed to be identically distributed across
        the folds, and the loss minimized is the total loss per sample,
        and not the mean loss across the folds.

    cv : integer or cross-validation generator, default=3
        A cross-validation generator to use. If int, determines
        the number of folds in StratifiedKFold if estimator is a classifier
        and the target y is binary or multiclass, or the number
        of folds in KFold otherwise.
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects.

    refit : boolean, default=True
        Refit the best estimator with the entire dataset.
        If "False", it is impossible to make predictions using
        this GridSearchCV instance after fitting.

    verbose : integer
        Controls the verbosity: the higher, the more messages.

    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.


    Examples
    --------
    >>> from sklearn import svm, grid_search, datasets
    >>> iris = datasets.load_iris()
    >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
    >>> svr = svm.SVC()
    >>> clf = grid_search.GridSearchCV(svr, parameters)
    >>> clf.fit(iris.data, iris.target)
    ...                             # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    GridSearchCV(cv=None, error_score=...,
           estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
                         decision_function_shape=None, degree=..., gamma=...,
                         kernel='rbf', max_iter=-1, probability=False,
                         random_state=None, shrinking=True, tol=...,
                         verbose=False),
           fit_params={}, iid=..., n_jobs=1,
           param_grid=..., pre_dispatch=..., refit=...,
           scoring=..., verbose=...)


    Attributes
    ----------
    grid_scores_ : list of named tuples
        Contains scores for all parameter combinations in param_grid.
        Each entry corresponds to one parameter setting.
        Each named tuple has the attributes:

            * ``parameters``, a dict of parameter settings
            * ``mean_validation_score``, the mean score over the
              cross-validation folds
            * ``cv_validation_scores``, the list of scores for each fold

    best_estimator_ : estimator
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if refit=False.

    best_score_ : float
        Score of best_estimator on the left out data.

    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.

    scorer_ : function
        Scorer function used on the held out data to choose the best
        parameters for the model.

    Notes
    ------
    The parameters selected are those that maximize the score of the left out
    data, unless an explicit score is passed in which case it is used instead.

    If `n_jobs` was set to a value higher than one, the data is copied for each
    point in the grid (and not `n_jobs` times). This is done for efficiency
    reasons if individual jobs take very little time, but may raise errors if
    the dataset is large and not enough memory is available. A workaround in
    this case is to set `pre_dispatch`. Then, the memory is copied only
    `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
    n_jobs`.

    See Also
    ---------
    :class:`ParameterGrid`:
        generates all the combinations of a an hyperparameter grid.

    :func:`sklearn.cross_validation.train_test_split`:
        utility function to split the data into a development set usable
        for fitting a GridSearchCV instance and an evaluation set for
        its final evaluation.

    :func:`sklearn.metrics.make_scorer`:
        Make a scorer from a performance metric or loss function.

    """

    def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
                 n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
                 pre_dispatch='2*n_jobs', error_score='raise'):
        super(GridSearchCV, self).__init__(
            estimator, scoring, fit_params, n_jobs, iid,
            refit, cv, verbose, pre_dispatch, error_score)
        self.param_grid = param_grid
        # Validate the grid eagerly so a malformed grid fails at
        # construction time rather than deep inside fit().
        _check_param_grid(param_grid)

    def fit(self, X, y=None):
        """Run fit with all sets of parameters.

        Parameters
        ----------

        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.

        """
        # Exhaustive search: every combination in the grid is evaluated.
        return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
    """Randomized search on hyper parameters.

    RandomizedSearchCV implements a "fit" method and a "predict" method like
    any classifier except that the parameters of the classifier
    used to predict is optimized by cross-validation.

    In contrast to GridSearchCV, not all parameter values are tried out, but
    rather a fixed number of parameter settings is sampled from the specified
    distributions. The number of parameter settings that are tried is
    given by n_iter.

    If all parameters are presented as a list,
    sampling without replacement is performed. If at least one parameter
    is given as a distribution, sampling with replacement is used.
    It is highly recommended to use continuous distributions for continuous
    parameters.

    Read more in the :ref:`User Guide <randomized_parameter_search>`.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each parameter setting.

    param_distributions : dict
        Dictionary with parameters names (string) as keys and distributions
        or lists of parameters to try. Distributions must provide a ``rvs``
        method for sampling (such as those from scipy.stats.distributions).
        If a list is given, it is sampled uniformly.

    n_iter : int, default=10
        Number of parameter settings that are sampled. n_iter trades
        off runtime vs quality of the solution.

    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.

    fit_params : dict, optional
        Parameters to pass to the fit method.

    n_jobs : int, default=1
        Number of jobs to run in parallel.

    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:

            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs

            - An int, giving the exact number of total jobs that are
              spawned

            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'

    iid : boolean, default=True
        If True, the data is assumed to be identically distributed across
        the folds, and the loss minimized is the total loss per sample,
        and not the mean loss across the folds.

    cv : integer or cross-validation generator, optional
        A cross-validation generator to use. If int, determines
        the number of folds in StratifiedKFold if estimator is a classifier
        and the target y is binary or multiclass, or the number
        of folds in KFold otherwise.
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects.

    refit : boolean, default=True
        Refit the best estimator with the entire dataset.
        If "False", it is impossible to make predictions using
        this RandomizedSearchCV instance after fitting.

    verbose : integer
        Controls the verbosity: the higher, the more messages.

    random_state : int or RandomState
        Pseudo random number generator state used for random uniform sampling
        from lists of possible values instead of scipy.stats distributions.

    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.

    Attributes
    ----------
    grid_scores_ : list of named tuples
        Contains scores for all parameter combinations in param_grid.
        Each entry corresponds to one parameter setting.
        Each named tuple has the attributes:

            * ``parameters``, a dict of parameter settings
            * ``mean_validation_score``, the mean score over the
              cross-validation folds
            * ``cv_validation_scores``, the list of scores for each fold

    best_estimator_ : estimator
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if refit=False.

    best_score_ : float
        Score of best_estimator on the left out data.

    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.

    Notes
    -----
    The parameters selected are those that maximize the score of the held-out
    data, according to the scoring parameter.

    If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
    reasons if individual jobs take very little time, but may raise errors if
    the dataset is large and not enough memory is available. A workaround in
    this case is to set `pre_dispatch`. Then, the memory is copied only
    `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
    n_jobs`.

    See Also
    --------
    :class:`GridSearchCV`:
        Does exhaustive search over a grid of parameters.

    :class:`ParameterSampler`:
        A generator over parameter settings, constructed from
        param_distributions.

    """

    def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
                 fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
                 verbose=0, pre_dispatch='2*n_jobs', random_state=None,
                 error_score='raise'):
        # Store the sampling-specific settings, then hand the shared
        # search machinery off to BaseSearchCV.
        self.param_distributions = param_distributions
        self.n_iter = n_iter
        self.random_state = random_state
        super(RandomizedSearchCV, self).__init__(
            estimator=estimator, scoring=scoring, fit_params=fit_params,
            n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
            pre_dispatch=pre_dispatch, error_score=error_score)

    def fit(self, X, y=None):
        """Run fit on the estimator with randomly drawn parameters.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.

        """
        # ParameterSampler draws ``n_iter`` candidate settings (with or
        # without replacement depending on whether distributions are given).
        sampled_params = ParameterSampler(self.param_distributions,
                                          self.n_iter,
                                          random_state=self.random_state)
        return self._fit(X, y, sampled_params)
| bsd-3-clause |
nuclear-wizard/moose | test/tests/time_integrators/scalar/run_stiff.py | 12 | 5982 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import subprocess
import sys
import csv
import matplotlib.pyplot as plt
import numpy as np
# Use fonts that match LaTeX
from matplotlib import rcParams
rcParams['font.family'] = 'serif'
rcParams['font.size'] = 17
rcParams['font.serif'] = ['Computer Modern Roman']
rcParams['text.usetex'] = True
# Small font size for the legend
from matplotlib.font_manager import FontProperties
fontP = FontProperties()
fontP.set_size('x-small')
def get_last_row(csv_filename):
  '''
  Return the final non-empty row of the CSV file at ``csv_filename``.

  The whole file has to be scanned because the csv module only supports
  forward iteration.  Blank rows (e.g. a trailing newline) are skipped;
  None is returned when the file contains no data rows at all.
  '''
  final_row = None
  with open(csv_filename, 'r') as handle:
    for record in csv.reader(handle):
      if record:  # skip blank lines at end of file
        final_row = record
  return final_row
def run_moose(y2_exponent, dt, time_integrator, lam):
  '''
  Run MOOSE on stiff.i with the given timestep, time integrator and
  stiffness parameter, and return the normalized maximum error in the
  stiff component (y1) at the final time.

  Exits the program with status 1 if MOOSE cannot be launched or
  reports a nonzero return code.
  '''
  command_line_args = ['../../../moose_test-opt', '-i', 'stiff.i',
                       'Executioner/dt={}'.format(dt),
                       'Executioner/dtmin={}'.format(dt),
                       'Executioner/TimeIntegrator/type={}'.format(time_integrator),
                       'LAMBDA={}'.format(lam),
                       'Y2_EXPONENT={}'.format(y2_exponent)]
  try:
    child = subprocess.Popen(command_line_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # communicate() waits for the process to terminate, so there's no
    # need to wait() for it. It also sets the returncode attribute on
    # child.
    (stdoutdata, stderrdata) = child.communicate()
    if child.returncode != 0:
      print('Running MOOSE failed: program output is below:')
      print(stdoutdata)
      # BUG FIX: the original code used a bare ``raise`` with no active
      # exception, which itself raises a RuntimeError with a confusing
      # message.  Raise an explicit, descriptive error instead.
      raise RuntimeError('moose_test-opt exited with code '
                         '{}'.format(child.returncode))
  except (OSError, RuntimeError):
    # OSError: the executable could not be launched at all.  The former
    # bare ``except:`` also swallowed KeyboardInterrupt/SystemExit.
    print('Error executing moose_test')
    sys.exit(1)

  # Parse the last line of the output file to get the error at the final time.
  #
  # The columns are in alphabetical order, we want to look at
  # "max_error_y1", which is the "stiff" component of the system of
  # ODEs.
  # The columns are currently given by:
  # time,error_y1,error_y2,max_error_y1,max_error_y2,value_y1,value_y1_abs_max,value_y2,value_y2_abs_max,y1,y2
  output_filename = 'stiff_out.csv'
  last_row = get_last_row(output_filename)
  max_error_y1 = float(last_row[3])
  value_y1_abs_max = float(last_row[6])
  normalized_error = max_error_y1 / value_y1_abs_max
  return normalized_error
#
# Main program
#

# implicit methods only - for the large values of lambda considered
# here, explicit methods would require very small timesteps, and so
# they are not considered here.
time_integrators = ['ImplicitEuler', 'CrankNicolson', 'BDF2', 'ImplicitMidpoint', 'LStableDirk2', 'LStableDirk3', 'LStableDirk4', 'AStableDirk4']

# The sequence of timesteps to try
dts = [.5, .25, .125, .0625, .03125, .015625, .0078125]

# The values of lambda to try
# .) lam=-2 is not allowed for p=2 case.
# .) lam=-1 is not allowed for p=1 case.
lams = [-.1, -10, -100, -1000, -10000]

# The values of the y2 exponent to try. 1=linear, 2=nonlinear.
y2_exponents = [1, 2]

# Plot colors
colors = ['maroon', 'blue', 'green', 'black', 'burlywood', 'olivedrab', 'midnightblue',
          'tomato', 'darkmagenta', 'chocolate', 'lightslategray', 'skyblue']

# Plot line markers
markers = ['v', 'o', 'x', '^', 'H', 'h', '+', 'D', '*', '4', 'd', '8']

# Plot line styles
linestyles = [':', '-', '-', '--', ':', '-', '--', ':', '--', '-', '-', '-']

# Loop over:
# lambdas
# y2 exponents
# time_integrators
# dts
for lam in lams:
  for y2_exponent in y2_exponents:
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    # BUG FIX: the original used xrange(), which does not exist under
    # the python3 interpreter requested by the shebang line.
    for i, time_integrator in enumerate(time_integrators):
      # Place to store the results for this TimeIntegrator
      results = []
      # Call MOOSE to compute the results
      for dt in dts:
        results.append(run_moose(y2_exponent, dt, time_integrator, lam))
      # Make plot
      xdata = np.log10(np.reciprocal(dts))
      ydata = np.log10(results)
      # Compute linear fit of last three points.
      start_fit = len(xdata) - 3
      end_fit = len(xdata)
      fit = np.polyfit(xdata[start_fit:end_fit], ydata[start_fit:end_fit], 1)
      # Print results for tabulation etc.
      print('{} (Slope={})'.format(time_integrator, fit[0]))
      print('dt, max_error_y1')
      for step, err in zip(dts, results):
        print('{}, {}'.format(step, err))
      print('')  # blank line
      ax1.plot(xdata, ydata, label=time_integrator + ", $" + "{:.2f}".format(fit[0]) + "$",
               color=colors[i], marker=markers[i], linestyle=linestyles[i])
    # Set up the axis labels.  Raw strings: '\l', '\D', '\i' are invalid
    # escape sequences in ordinary Python 3 string literals.
    ax1.set_xlabel(r'$\log (\Delta t^{-1})$')
    ax1.set_ylabel(r'$\log (\|e\|_{L^{\infty}} / \|y_1\|_{L^{\infty}})$')
    # The input file name up to the file extension
    filebase = 'linear'
    if (y2_exponent != 1):
      filebase = 'nonlinear'
    # Add a title
    ax1.set_title('{}, $\\lambda = {}$'.format(filebase.title(), lam))
    # Add a legend
    plt.legend(loc='lower left', prop=fontP)
    # Save a PDF
    plt.savefig(filebase + '_lambda_{}.pdf'.format(lam), format='pdf')

# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 |
clingsz/GAE | main.py | 1 | 1502 | # -*- coding: utf-8 -*-
"""
Created on Wed May 10 12:38:40 2017
@author: cling
"""
def summarizing_cross_validation():
    """Draw the box plot of cross-validation errors for the ND5_3 run."""
    import misc.cv.collect_ND5_3 as cv_collector
    cv_collector.fig_boxplot_cverr()
def test_trainer():
    """Smoke test: build a GAE model and fit it on the bundled training data."""
    import misc.data_gen as data_gen
    import gae.model.trainer as trainer
    training_data = data_gen.get_training_data()
    gae_model = trainer.build_gae()
    gae_model.train(training_data['X'], training_data['Y'])
def plot_immune_age():
    """Render the immune-age visualization."""
    import test.visualizations as visualizations
    visualizations.plot_immune_age()
def plot_MIGS():
    """Plot cytokine-vs-age for IL6; other analyses are kept commented out."""
    import test.visualizations as vis
    vis.plot_cyto_age(cyto_name = 'IL6')
#    vis.plot_cyto_age(cyto_name = 'IL1B')
#    vis.Jacobian_analysis()
def distribution_test():
    """Write distribution histograms for the cytokine and cell column ranges."""
    import immuAnalysis.distribution_test as dist_test
    # Columns 3-52 are cytokines, 53-77 are cell populations.
    dist_test.show_hist(range(3, 53), 'dist_cyto.pdf')
    dist_test.show_hist(range(53, 78), 'dist_cell.pdf')
def cell_cluster():
    """Run agglomerative clustering on the cytokine data.

    The commented-out calls below are earlier experiments (pvclust, gap
    statistics) retained for reference.
    """
    import immuAnalysis.cytokine_clustering as cc
##    cc.main()
##    cc.pvclust_main()
    cc.agclust_main()
#    B = [10,20,50,100,1000]
##    cc.gap_stats(B)
#    import gfmatplotlib.pyplot as plt
#    plt.figure(figsize=[5*4,5])
#    for i in range(5):
#        plt.subplot(1,5,i+1)
#        cc.show_gapstats(B[i])
#    plt.tight_layout()
#    plt.show()
#    cc.generate_data_for_pvclust()
#    cc.choose_cluster()
#import immuAnalysis.module_genes as mg

if __name__ == '__main__':
    # Only the trainer smoke test runs by default; the commented calls
    # below are alternate analyses kept for reference.
    test_trainer()
#    summarizing_cross_validation()
#    import immuAnalysis.clustering as c
##
#    c.test()
#    import immuAnalysis.gene_analysis_ann as g
#    g.summarize()
| gpl-3.0 |
internetmosquito/image-tagging-apis | image_tagging.py | 1 | 16562 |
import os
import yaml
import json
import zipfile
import pandas
import simplejson
import ntpath
import base64
import time
from httplib2 import HttpLib2Error
from googleapiclient import discovery
from googleapiclient.errors import HttpError
from oauth2client.client import GoogleCredentials
from watson_developer_cloud import VisualRecognitionV3, WatsonException
from clarifai.client import ClarifaiApi
from imagga import ImaggaHelper
class ImageTagger(object):
    """
    Tags image folders with several third-party vision APIs (IBM Visual
    Recognition, Clarifai, Imagga and Google Cloud Vision) and collects
    the per-image (tag, score) results into pandas DataFrames.
    """
    # For testing, we use CircleCI and have ciphered the config.yml at root with this command
    # openssl aes-256-cbc -e -in config.yml -out config-cipher -k $KEY
    # Where $KEY is the value of an environment variable that must be set in your CircleCI
    VISUAL_RECOGNITION_KEY = ''
    CLARIFAI_CLIENT_ID = ''
    CLARIFAI_CLIENT_SECRET = ''
    GOOGLE_VISION_DISCOVERY_URL = 'https://{api}.googleapis.com/$discovery/rest?version={apiVersion}'
    IMAGE_FILE_TYPES = ['png', 'jpg', 'jpeg', 'gif']

    def __init__(self, imagga_helper=None):
        self.data_frame = pandas.DataFrame()
        self.images_names = list()
        self.apis = ['VisualRecognition', 'Clarifai', 'Imagga', 'GoogleVision']
        self.visual_recognition = None
        self.clarifai = None
        self.google_vision_service = None
        self.configured = False
        # BUG FIX: always define the attribute.  Previously it was only
        # assigned when a helper was passed, so ``use_all`` raised
        # AttributeError on instances built without one.
        self.imagga_helper = imagga_helper

    def configure_tagger(self, config_file):
        """
        Reads the API credentials from the specified YAML file and initializes API clients
        :param config_file: The file path to the config YAML file
        :return: True if config file was parsed and API clients initialized correctly
        """
        # Check if provided config yaml file actually does exist
        if os.path.isfile(config_file):
            config = yaml.safe_load(open(config_file))
            # Get config data
            self.VISUAL_RECOGNITION_KEY = config['visual-recognition']['api-key']
            self.CLARIFAI_CLIENT_ID = config['clarifai']['client-id']
            self.CLARIFAI_CLIENT_SECRET = config['clarifai']['client-secret']
            self.GOOGLE_VISION_SECRET = config['google-vision']['api-key']
            if self.VISUAL_RECOGNITION_KEY:
                self.visual_recognition = VisualRecognitionV3('2016-05-20', api_key=self.VISUAL_RECOGNITION_KEY)
            if self.CLARIFAI_CLIENT_ID and self.CLARIFAI_CLIENT_SECRET:
                self.clarifai = ClarifaiApi(app_id=self.CLARIFAI_CLIENT_ID, app_secret=self.CLARIFAI_CLIENT_SECRET)
            if self.GOOGLE_VISION_SECRET:
                self.google_vision_service = discovery.build('vision',
                                                             'v1',
                                                             developerKey=self.GOOGLE_VISION_SECRET,
                                                             discoveryServiceUrl=self.GOOGLE_VISION_DISCOVERY_URL
                                                             )
            if self.visual_recognition and self.clarifai and self.google_vision_service:
                self.configured = True
        else:
            print('Could not find config file')
            return False
        # BUG FIX: the method previously fell off the end (returning None)
        # on the success path even though the docstring promises a boolean.
        return self.configured

    def process_images_visual_recognition(self, folder_name=None, store_results=False):
        """
        Processes the specified image folder using the Visual Recognition API
        :param folder_name: The complete path where the images are
        :param store_results: Indicates if obtained response should be stored as JSON file
        :return: A DataFrame containing the available data
        """
        data_frame = None
        self.images_names = list()
        # We can send a zip file to Visual Recognition, so let's try
        zf = zipfile.ZipFile("sample-images.zip", "w")
        for dirname, subdirs, files in os.walk(folder_name):
            for filename in files:
                if isinstance(filename, str) and filename.endswith(('.jpeg', '.jpg', '.png')):
                    self.images_names.append(filename)
                    zf.write(os.path.join(dirname, filename), arcname=filename)
        zf.close()
        # Check we have the client API instance
        if self.visual_recognition:
            with open('sample-images.zip', 'rb') as image_file:
                try:
                    results = simplejson.dumps(self.visual_recognition.classify(images_file=image_file,
                                                                               threshold=0.1),
                                               indent=4,
                                               skipkeys=True,
                                               sort_keys=True)
                except WatsonException as ex:
                    print('An error occured trying to get data from VisualReconginitio. More info {0}'.format(str(ex)))
                    return data_frame
            if store_results:
                fd = open('visual_recognition_classifications_results.json', 'w')
                fd.write(results)
                fd.close()
            # Generate a dict with a list of tuples with all tags found per image
            vr_results = dict()
            try:
                # NOTE(review): 'string-escape' is a Python-2-only codec;
                # this line needs reworking before the module can run on
                # Python 3.
                vr_data = json.loads(results.decode('string-escape').strip('"'))
                if 'images' in vr_data.keys():
                    for image in vr_data['images']:
                        tags_found = []
                        if 'image' in image.keys():
                            image_name = self.path_leaf(image['image'])
                        if 'classifiers' in image.keys():
                            for tag in image['classifiers'][0]['classes']:
                                # BUG FIX: the original test was
                                # ``'class' and 'score' in tag.keys()`` which
                                # only checks for 'score'; a tag lacking
                                # 'class' would raise KeyError below.
                                if 'class' in tag.keys() and 'score' in tag.keys():
                                    tag_found = (tag['class'], tag['score'])
                                    tags_found.append(tag_found)
                        vr_results[image_name] = tags_found
            except Exception as ex:
                # BUG FIX: converted from a Python-2 print statement to the
                # call form used everywhere else in this module.
                print('COULD NOT LOAD: {0}'.format(ex))
            data_series = pandas.Series(vr_results, index=self.images_names, name='VisualRecognition')
            data_frame = pandas.DataFrame(data_series, index=self.images_names, columns=['VisualRecognition'])
        # Removed generated zip file
        os.remove('sample-images.zip')
        return data_frame

    def process_images_clarifai(self, folder_name=None):
        """
        Processes the specified image folder using the Clarifai API
        :param folder_name: The complete path where the images are
        :return: A DataFrame containing the available data
        """
        data_frame = None
        # Check we have the client API instance
        if self.clarifai:
            # Generate a dict with a list of tuples with all tags found per image
            clarifai_results = dict()
            open_files = []
            try:
                self.images_names = list()
                if os.path.isdir(folder_name):
                    # Get the images if the exist and if they are in the supported types
                    images = [filename for filename in os.listdir(folder_name)
                              if os.path.isfile(os.path.join(folder_name, filename)) and
                              filename.split('.')[-1].lower() in self.IMAGE_FILE_TYPES]
                    for iterator, image_file in enumerate(images):
                        image_path = os.path.join(folder_name, image_file)
                        self.images_names.append(self.path_leaf(image_path))
                        image_file = open(image_path, 'rb')
                        image = (image_file, self.path_leaf(image_path))
                        open_files.append(image)
                    # Call Clarifai API
                    clarifai_data = self.clarifai.tag_images(open_files)
                    if 'results' in clarifai_data.keys():
                        for iterator, image in enumerate(clarifai_data['results']):
                            tags_found = []
                            if 'result' in image.keys():
                                image_name = self.images_names[iterator]
                                # Try to get the tags obtained
                                result = image['result']
                                if result:
                                    if 'tag' in result.keys():
                                        tags = image['result']['tag']['classes']
                                        list_tags = []
                                        probs = image['result']['tag']['probs']
                                        if tags and probs:
                                            list_tags = zip(tags, probs)
                                        clarifai_results[image_name] = list_tags
            except Exception as ex:
                print('COULD NOT LOAD, reason {0}'.format(str(ex)))
            finally:
                # BUG FIX: the opened image handles were never closed,
                # leaking file descriptors on every call.
                for handle, _name in open_files:
                    handle.close()
            sorted_names = sorted(self.images_names)
            data_series = pandas.Series(clarifai_results, index=sorted_names, name='Clarifai')
            data_frame = pandas.DataFrame(data_series, index=sorted_names, columns=['Clarifai'])
        return data_frame

    def process_images_google_vision(self, folder_name=None):
        """
        Iterates over the specified folder and returns the combined response from calling Google Cloud Vision API
        using only LABEL detection and 5 maximum per image. Since it looks like Google does not like sending more
        than 10 images per Request, we have to make sure we process every batch of 10 images until finished
        :param folder_name: The full path to the folder with images to be processed
        :return: A DataFrame containing the available data
        """
        data_frame = None
        responses = []
        self.images_names = list()
        # Check if specified folder exists
        if os.path.isdir(folder_name):
            # Get the images if the exist and if they are in the supported types
            images = [filename for filename in os.listdir(folder_name)
                      if os.path.isfile(os.path.join(folder_name, filename)) and
                      filename.split('.')[-1].lower() in self.IMAGE_FILE_TYPES]
            payload = {}
            payload['requests'] = []
            # Create a list to associate responses with images
            images_names = list()
            for iterator, image_file in enumerate(images):
                image_path = os.path.join(folder_name, image_file)
                images_names.append(self.path_leaf(image_path))
                self.images_names.append(self.path_leaf(image_path))
                with open(image_path, 'rb') as image:
                    image_content = base64.b64encode(image.read())
                    image_payload = {}
                    image_payload['image'] = {}
                    image_payload['image']['content'] = image_content.decode('UTF-8')
                    image_payload['features'] = [{
                        'type': 'LABEL_DETECTION',
                        'maxResults': 5
                    }]
                    payload['requests'].append(image_payload)
                    # Need to check if we have reached 10 images, meaning we must send a request,
                    # Google does not like more than 10 images (more or less) per request
                    if (iterator + 1) % 10 == 0:
                        service_request = self.google_vision_service.images().annotate(body=payload)
                        payload = {}
                        payload['requests'] = []
                        try:
                            response = service_request.execute()
                            intermediate = response['responses']
                            merged = zip(images_names, intermediate)
                            response['responses'] = []
                            for element in merged:
                                image_labeled = {}
                                image_labeled[element[0]] = element[1]
                                response['responses'].append(image_labeled)
                            responses.append(response)
                            images_names = list()
                            # Throttle: pause between batches before making
                            # another request (the old comment said one
                            # second, but five are slept).
                            time.sleep(5)
                        except (HttpError, HttpLib2Error) as ex:
                            print('The following error occurred trying to label images with Google {0}'.format(str(ex)))
                            continue
            # This is just in case images were less than 10 or the remaining of more than any multiple of 10
            service_request = self.google_vision_service.images().annotate(body=payload)
            try:
                response = service_request.execute()
                intermediate = response['responses']
                merged = zip(images_names, intermediate)
                response['responses'] = []
                for element in merged:
                    image_labeled = dict()
                    image_labeled[element[0]] = element[1]
                    response['responses'].append(image_labeled)
                responses.append(response)
            except (HttpError, HttpLib2Error) as ex:
                print('The following error occurred trying to label images with Google {0}'.format(str(ex)))
            finally:
                # Iterate the responses and construct one single dictionary with one response key only
                final_response = dict()
                final_response['responses'] = []
                for partial in responses:
                    labels = partial['responses']
                    final_response['responses'].append(labels)
                google_results = dict()
                # Process the dictionary and get the DataFrame with results
                for tagged_responses in final_response['responses']:
                    tags_found = []
                    # Get the labels
                    for image_tagged in tagged_responses:
                        # NOTE(review): ``values()[0]`` and ``keys()[0]``
                        # below only work on Python 2, where dict views are
                        # lists; they need list() wrappers on Python 3.
                        labels = image_tagged.values()
                        for label in labels[0]['labelAnnotations']:
                            tag_found = (label['description'], label['score'])
                            tags_found.append(tag_found)
                        google_results[image_tagged.keys()[0]] = tags_found
                sorted_names = sorted(self.images_names)
                data_series = pandas.Series(google_results, index=sorted_names, name='GoogleVision')
                data_frame = pandas.DataFrame(data_series, index=sorted_names, columns=['GoogleVision'])
            return data_frame
        else:
            raise ValueError('The input directory does not exist: %s' % folder_name)

    def use_all(self, folder):
        """
        :param folder: The folder containing images to be tagged
        A wrapper that will use all available APIs
        :return: A DataFrame with all available data
        """
        results = None
        if self.configured and os.path.isdir(folder):
            vr_df = self.process_images_visual_recognition(folder)
            cr_df = self.process_images_clarifai(folder)
            # NOTE(review): requires an ImaggaHelper to have been supplied
            # at construction time; with the default None this raises.
            im_df = self.imagga_helper.process_images(folder)
            gv_df = self.process_images_google_vision(folder)
            # Merge both dataframes into one
            data_frames = [vr_df, cr_df, im_df, gv_df]
            results = pandas.concat(data_frames, axis=1)
        return results

    def path_leaf(self, path):
        """
        A simple helper function that returns the last path (the file) of a path
        :param path: The whole path
        :return: The filename
        """
        head, tail = ntpath.split(path)
        return tail or ntpath.basename(head)
if __name__ == '__main__':
    # Wire up the Imagga helper, configure all API clients from the
    # repository config file, then tag the bundled sample images.
    imagga_tagger = ImaggaHelper()
    imagga_tagger.configure_imagga_helper(config_file='config.yml')
    app = ImageTagger(imagga_helper=imagga_tagger)
    app.configure_tagger(config_file='config.yml')
    tagged = app.use_all('sample_images')
    # BUG FIX: use the print() call form, which is valid on both
    # Python 2 and 3; the bare ``print tagged`` statement is a syntax
    # error on Python 3 and inconsistent with the rest of the module.
    print(tagged)
| mit |
codevlabs/pandashells | pandashells/bin/p_linspace.py | 7 | 1450 | #! /usr/bin/env python
# standard library imports
import sys # NOQA import sys to allow for mocking sys.argv in tests
import argparse
import textwrap
from pandashells.lib import module_checker_lib, arg_lib, io_lib
# import required dependencies
module_checker_lib.check_for_modules(['numpy', 'pandas'])
import numpy as np
import pandas as pd
def main():
    """Parse command-line arguments and print a linearly spaced data column."""
    # NOTE: the original assigned a short one-line description to ``msg``
    # and immediately overwrote it with the dedented long form below;
    # the dead store has been removed.
    msg = textwrap.dedent(
        """
        Generate a linearly spaced set of data points.

        -----------------------------------------------------------------------
        Examples:

            * Generate 7 points between 1 and 10
                p.linspace 1 10 7

        -----------------------------------------------------------------------
        """
    )

    # read command line arguments
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter, description=msg)

    arg_lib.add_args(parser, 'io_out', 'example')

    msg = 'start end npoints'
    parser.add_argument("numbers", help=msg, type=str, nargs=3, metavar='')

    # parse arguments
    args = parser.parse_args()
    min_val, max_val = float(args.numbers[0]), float(args.numbers[1])
    N = int(args.numbers[2])

    df = pd.DataFrame({'c0': np.linspace(min_val, max_val, N)})

    # write dataframe to output
    io_lib.df_to_output(args, df)
# Script entry point; excluded from coverage since it only dispatches.
if __name__ == '__main__':  # pragma: no cover
    main()
| bsd-2-clause |
gclenaghan/scikit-learn | examples/gaussian_process/plot_compare_gpr_krr.py | 67 | 5191 | """
==========================================================
Comparison of kernel ridge and Gaussian process regression
==========================================================
Both kernel ridge regression (KRR) and Gaussian process regression (GPR) learn
a target function by employing internally the "kernel trick". KRR learns a
linear function in the space induced by the respective kernel which corresponds
to a non-linear function in the original space. The linear function in the
kernel space is chosen based on the mean-squared error loss with
ridge regularization. GPR uses the kernel to define the covariance of
a prior distribution over the target functions and uses the observed training
data to define a likelihood function. Based on Bayes theorem, a (Gaussian)
posterior distribution over target functions is defined, whose mean is used
for prediction.
A major difference is that GPR can choose the kernel's hyperparameters based
on gradient-ascent on the marginal likelihood function while KRR needs to
perform a grid search on a cross-validated loss function (mean-squared error
loss). A further difference is that GPR learns a generative, probabilistic
model of the target function and can thus provide meaningful confidence
intervals and posterior samples along with the predictions while KRR only
provides predictions.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise. The figure compares
the learned model of KRR and GPR based on a ExpSineSquared kernel, which is
suited for learning periodic functions. The kernel's hyperparameters control
the smoothness (l) and periodicity of the kernel (p). Moreover, the noise level
of the data is learned explicitly by GPR by an additional WhiteKernel component
in the kernel and by the regularization parameter alpha of KRR.
The figure shows that both methods learn reasonable models of the target
function. GPR correctly identifies the periodicity of the function to be
roughly 2*pi (6.28), while KRR chooses the doubled periodicity 4*pi. Besides
that, GPR provides reasonable confidence bounds on the prediction which are not
available for KRR. A major difference between the two methods is the time
required for fitting and predicting: while fitting KRR is fast in principle,
the grid-search for hyperparameter optimization scales exponentially with the
number of hyperparameters ("curse of dimensionality"). The gradient-based
optimization of the parameters in GPR does not suffer from this exponential
scaling and is thus considerable faster on this example with 3-dimensional
hyperparameter space. The time for predicting is similar; however, generating
the variance of the predictive distribution of GPR takes considerable longer
than just predicting the mean.
"""
print(__doc__)

# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause


import time

import numpy as np

import matplotlib.pyplot as plt

from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import WhiteKernel, ExpSineSquared

rng = np.random.RandomState(0)

# Generate sample data: a noisy sine wave on [0, 15)
X = 15 * rng.rand(100, 1)
y = np.sin(X).ravel()
y += 3 * (0.5 - rng.rand(X.shape[0]))  # add noise

# Fit KernelRidge with parameter selection based on 5-fold cross validation
# over a 10x10 grid of (length_scale, periodicity) kernel settings.
param_grid = {"alpha": [1e0, 1e-1, 1e-2, 1e-3],
              "kernel": [ExpSineSquared(l, p)
                         for l in np.logspace(-2, 2, 10)
                         for p in np.logspace(0, 2, 10)]}
kr = GridSearchCV(KernelRidge(), cv=5, param_grid=param_grid)
stime = time.time()
kr.fit(X, y)
print("Time for KRR fitting: %.3f" % (time.time() - stime))

# GPR learns its hyperparameters (and the noise level, via the added
# WhiteKernel) by maximizing the marginal likelihood during fit.
gp_kernel = ExpSineSquared(1.0, 5.0, periodicity_bounds=(1e-2, 1e1)) \
    + WhiteKernel(1e-1)
gpr = GaussianProcessRegressor(kernel=gp_kernel)
stime = time.time()
gpr.fit(X, y)
print("Time for GPR fitting: %.3f" % (time.time() - stime))

# Predict using kernel ridge
X_plot = np.linspace(0, 20, 10000)[:, None]
stime = time.time()
y_kr = kr.predict(X_plot)
print("Time for KRR prediction: %.3f" % (time.time() - stime))

# Predict using the Gaussian process regressor (this comment previously
# said "kernel ridge" by mistake)
stime = time.time()
y_gpr = gpr.predict(X_plot, return_std=False)
print("Time for GPR prediction: %.3f" % (time.time() - stime))

# Predict again, also requesting the predictive standard deviation,
# which is noticeably more expensive than the mean alone.
stime = time.time()
y_gpr, y_std = gpr.predict(X_plot, return_std=True)
print("Time for GPR prediction with standard-deviation: %.3f"
      % (time.time() - stime))

# Plot results
plt.figure(figsize=(10, 5))
lw = 2
plt.scatter(X, y, c='k', label='data')
plt.plot(X_plot, np.sin(X_plot), color='navy', lw=lw, label='True')
plt.plot(X_plot, y_kr, color='turquoise', lw=lw,
         label='KRR (%s)' % kr.best_params_)
plt.plot(X_plot, y_gpr, color='darkorange', lw=lw,
         label='GPR (%s)' % gpr.kernel_)
plt.fill_between(X_plot[:, 0], y_gpr - y_std, y_gpr + y_std, color='darkorange',
                 alpha=0.2)
plt.xlabel('data')
plt.ylabel('target')
plt.xlim(0, 20)
plt.ylim(-4, 4)
plt.title('GPR versus Kernel Ridge')
plt.legend(loc="best", scatterpoints=1, prop={'size': 8})
plt.show()
| bsd-3-clause |
m3wolf/xanespy | tests/test_importers.py | 1 | 48905 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2016 Mark Wolf
#
# This file is part of Xanespy.
#
# Xanespy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Xanespy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Xanespy. If not, see <http://www.gnu.org/licenses/>.
# flake8: noqa
import logging
import datetime as dt
import unittest
from unittest import TestCase, mock
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))
import warnings
import contextlib
import pytz
import numpy as np
import pandas as pd
import h5py
from skimage import data
import matplotlib.pyplot as plt
from xanespy import exceptions, utilities
from xanespy.xradia import XRMFile, TXRMFile
from xanespy.nanosurveyor import CXIFile, HDRFile
from xanespy.sxstm import SxstmDataFile
from xanespy.importers import (magnification_correction,
decode_aps_params, decode_ssrl_params,
import_ssrl_xanes_dir, CURRENT_VERSION,
import_nanosurveyor_frameset,
import_cosmic_frameset,
import_aps4idc_sxstm_files,
import_aps8bm_xanes_dir,
import_aps8bm_xanes_file,
import_aps32idc_xanes_files,
import_aps32idc_xanes_file,
read_metadata, minimum_shape,
rebin_image, )
from xanespy.txmstore import TXMStore
# logging.basicConfig(level=logging.DEBUG)
# Locations of the on-disk test fixtures, resolved relative to this test
# module so the suite works from any working directory.
TEST_DIR = os.path.dirname(__file__)
SSRL_DIR = os.path.join(TEST_DIR, 'txm-data-ssrl')  # SSRL 6-2c TXM frames
APS_DIR = os.path.join(TEST_DIR, 'txm-data-aps')  # APS 8-BM-B TXM frames
APS32_DIR = os.path.join(TEST_DIR, 'txm-data-32-idc')  # APS 32-ID-C TXM data
PTYCHO_DIR = os.path.join(TEST_DIR, 'ptycho-data-als/NS_160406074')  # ALS ptychography
COSMIC_DIR = os.path.join(TEST_DIR, 'ptycho-data-cosmic')  # ALS COSMIC STXM/ptycho
SXSTM_DIR = os.path.join(TEST_DIR, "sxstm-data-4idc/")  # APS 4-ID-C SX-STM data
class APS32IDCImportTest(TestCase):
    """Check that the program can import full-field TXM data collected at
    APS beamline 32-ID-C, both single HDF5 files and multiple files."""
    # Source fixture: an HDF5 file of NCA XANES frames
    src_data = os.path.join(APS32_DIR, 'nca_32idc_xanes.h5')

    def setUp(self):
        # Destination HDF5 file; remove leftovers from failed runs
        self.hdf = os.path.join(APS32_DIR, 'testdata.h5')
        if os.path.exists(self.hdf):
            os.remove(self.hdf)

    def tearDown(self):
        if os.path.exists(self.hdf):
            os.remove(self.hdf)

    def test_imported_hdf(self):
        """Import one file and verify the resulting HDF5 layout and metadata."""
        # Run the import function
        import_aps32idc_xanes_file(self.src_data,
                                   hdf_filename=self.hdf, hdf_groupname='experiment1',
                                   downsample=1)
        # Check that the file was created
        self.assertTrue(os.path.exists(self.hdf))
        with h5py.File(self.hdf, mode='r') as f:
            self.assertNotIn('experiment2', list(f.keys()))
            parent_group = f['experiment1']
            data_group = f['experiment1/imported']
            # Check metadata about beamline
            self.assertEqual(parent_group.attrs['technique'], 'Full-field TXM')
            self.assertEqual(parent_group.attrs['xanespy_version'], CURRENT_VERSION)
            self.assertEqual(parent_group.attrs['beamline'], "APS 32-ID-C")
            self.assertEqual(parent_group.attrs['original_directory'],
                             os.path.dirname(self.src_data))
            self.assertEqual(parent_group.attrs['latest_data_name'], 'imported')
            # Check h5 data structure
            keys = list(data_group.keys())
            self.assertIn('intensities', keys)
            self.assertTrue(np.any(data_group['intensities']))
            self.assertEqual(data_group['intensities'].shape, (1, 3, 256, 256))
            self.assertEqual(data_group['intensities'].attrs['context'], 'frameset')
            self.assertIn('flat_fields', keys)
            self.assertTrue(np.any(data_group['flat_fields']))
            self.assertEqual(data_group['flat_fields'].attrs['context'], 'frameset')
            self.assertIn('dark_fields', keys)
            self.assertEqual(data_group['dark_fields'].shape, (1, 2, 256, 256))
            self.assertTrue(np.any(data_group['dark_fields']))
            self.assertEqual(data_group['dark_fields'].attrs['context'], 'frameset')
            self.assertIn('optical_depths', keys)
            self.assertEqual(data_group['optical_depths'].shape, (1, 3, 256, 256))
            self.assertTrue(np.any(data_group['optical_depths']))
            self.assertEqual(data_group['optical_depths'].attrs['context'], 'frameset')
            self.assertEqual(data_group['pixel_sizes'].attrs['unit'], 'µm')
            self.assertEqual(data_group['pixel_sizes'].shape, (1, 3))
            # Original pixel size is 29.99nm but we have downsampling factor 1
            self.assertTrue(np.all(data_group['pixel_sizes'].value == 0.02999 * 2))
            self.assertEqual(data_group['energies'].shape, (1, 3))
            expected_Es = np.array([[8340, 8350, 8360]])
            np.testing.assert_array_almost_equal(data_group['energies'].value,
                                                 expected_Es, decimal=3)
            self.assertIn('timestamps', keys)
            # One start/end timestamp pair per frame
            expected_timestamp = np.empty(shape=(1, 3, 2), dtype="S32")
            expected_timestamp[..., 0] = b'2016-10-07 18:24:42'
            expected_timestamp[..., 1] = b'2016-10-07 18:37:42'
            np.testing.assert_equal(data_group['timestamps'].value,
                                    expected_timestamp)
            self.assertIn('timestep_names', keys)
            self.assertEqual(data_group['timestep_names'][0], bytes("soc000", 'ascii'))
            self.assertIn('filenames', keys)
            self.assertEqual(data_group['filenames'].shape, (1, 3))
            self.assertEqual(data_group['filenames'][0, 0], self.src_data.encode('ascii'))
            # self.assertIn('original_positions', keys)

    def test_exclude_frames(self):
        """The ``exclude`` parameter should drop the listed frame indices."""
        # Run the import function
        import_aps32idc_xanes_file(self.src_data,
                                   hdf_filename=self.hdf, hdf_groupname='experiment1',
                                   downsample=1, exclude=(1,))
        # Check that the file was created
        self.assertTrue(os.path.exists(self.hdf))
        with h5py.File(self.hdf, mode='r') as f:
            self.assertNotIn('experiment2', list(f.keys()))
            parent_group = f['experiment1']
            data_group = f['experiment1/imported']
            # One of the three energies was excluded
            self.assertEqual(data_group['intensities'].shape, (1, 2, 256, 256))

    def test_limited_dark_flat(self):
        """``dark_idx`` should restrict how many dark-field images are imported."""
        # Only import some of the flat and dark field images
        import_aps32idc_xanes_file(self.src_data,
                                   hdf_filename=self.hdf, hdf_groupname='experiment1',
                                   downsample=0, dark_idx=slice(0, 1))
        # Check that the right number of files were imported
        with h5py.File(self.hdf, mode='r') as f:
            grp = f['experiment1/imported']
            self.assertEqual(grp['dark_fields'].shape[1], 1)

    def test_import_multiple_hdfs(self):
        """Importing the same file twice should give two identical timesteps."""
        import_aps32idc_xanes_files([self.src_data, self.src_data],
                                    hdf_filename=self.hdf, hdf_groupname='experiment1',
                                    square=False, downsample=0)
        with h5py.File(self.hdf, mode='r') as f:
            g = f['/experiment1/imported']
            self.assertEqual(g['intensities'].shape, (2, 3, 512, 612))
            self.assertTrue(np.any(g['intensities'][0]))
            self.assertTrue(np.any(g['intensities'][1]))
            # They should be equal since we have import the same data twice
            np.testing.assert_equal(g['intensities'][0], g['intensities'][1])

    def test_import_second_hdf(self):
        """Appending a second timestep with ``append=True`` should work."""
        # Run the import function
        import_aps32idc_xanes_file(self.src_data,
                                   hdf_filename=self.hdf, hdf_groupname='experiment1',
                                   total_timesteps=2, square=False, downsample=0)
        import_aps32idc_xanes_file(self.src_data,
                                   hdf_filename=self.hdf, hdf_groupname='experiment1',
                                   total_timesteps=2, timestep=1, append=True, square=False,
                                   downsample=0)
        with h5py.File(self.hdf, mode='r') as f:
            g = f['/experiment1/imported']
            self.assertEqual(g['intensities'].shape, (2, 3, 512, 612))
            self.assertTrue(np.any(g['intensities'][0]))
            self.assertTrue(np.any(g['intensities'][1]))
            # They should be equal since we have import the same data twice
            np.testing.assert_equal(g['intensities'][0], g['intensities'][1])
class CosmicTest(TestCase):
    """Test for importing STXM and ptychography data.
    From ALS Cosmic beamline. Test data taken from beamtime on
    2018-11-09. The cxi file is a stripped down version of the
    original (to save space). Missing crucial data should be added to
    the cxi as needed.
    Data
    ====
    ptycho-scan-856eV.cxi : NS_181110188_002.cxi
    stxm-scan-a003.xim : NS_181110203_a003.xim
    stxm-scan-a019.xim : NS_181110203_a019.xim
    stxm-scan.hdr : NS_181110203.hdr
    """
    stxm_hdr = os.path.join(COSMIC_DIR, 'stxm-scan.hdr')
    ptycho_cxi = os.path.join(COSMIC_DIR, 'ptycho-scan-856eV.cxi')
    hdf_filename = os.path.join(COSMIC_DIR, 'cosmic-test-import.h5')

    def tearDown(self):
        # Remove the HDF5 file produced by each test
        if os.path.exists(self.hdf_filename):
            os.remove(self.hdf_filename)

    def test_import_partial_data(self):
        """Check if the cosmic importer works if only hdr or cxi files are
        given."""
        # Import only STXM images
        import_cosmic_frameset(stxm_hdr=[self.stxm_hdr],
                               ptycho_cxi=[],
                               hdf_filename=self.hdf_filename)
        with TXMStore(self.hdf_filename, parent_name='stxm-scan') as store:
            self.assertEqual(store.data_name, 'imported')
        # Import only ptycho images
        import_cosmic_frameset(stxm_hdr=[],
                               ptycho_cxi=[self.ptycho_cxi],
                               hdf_filename=self.hdf_filename)
        with TXMStore(self.hdf_filename, parent_name='ptycho-scan-856eV') as store:
            self.assertEqual(store.data_name, 'imported')

    def test_import_cosmic_data(self):
        """Import both STXM and ptychography scans and verify the three
        resulting data groups (ptychography, STXM, and merged)."""
        # Check that passing no data raises and exception
        with self.assertRaises(ValueError):
            import_cosmic_frameset(hdf_filename=self.hdf_filename)
        import_cosmic_frameset(stxm_hdr=[self.stxm_hdr],
                               ptycho_cxi=[self.ptycho_cxi],
                               hdf_filename=self.hdf_filename)
        # Does the HDF file exist
        self.assertTrue(os.path.exists(self.hdf_filename),
                        "%s doesn't exist" % self.hdf_filename)
        hdf_kw = dict(hdf_filename=self.hdf_filename,
                      parent_name='ptycho-scan-856eV',
                      mode='r')
        # Open ptychography TXM store and check its contents
        with TXMStore(**hdf_kw, data_name='imported_ptychography') as store:
            # Make sure the group exists
            self.assertEqual(store.data_group().name,
                             '/ptycho-scan-856eV/imported_ptychography')
            # Check the data structure
            self.assertEqual(store.filenames.shape, (1, 1))
            stored_filename = store.filenames[0, 0].decode('utf-8')
            self.assertEqual(stored_filename, os.path.basename(self.ptycho_cxi))
            np.testing.assert_equal(store.energies.value, [[855.9056362433222]])
            np.testing.assert_equal(store.pixel_sizes.value, [[6.0435606480754585]])
            np.testing.assert_equal(store.pixel_unit, 'nm')
            self.assertEqual(store.intensities.shape, (1, 1, 285, 285))
            self.assertEqual(store.optical_depths.shape, (1, 1, 285, 285))
            self.assertEqual(store.timestep_names[0].decode('utf-8'), 'ex-situ')
        # Open STXM TXM store and check its contents
        with TXMStore(**hdf_kw, data_name='imported_stxm') as store:
            # Make sure the group exists
            self.assertEqual(store.data_group().name,
                             '/ptycho-scan-856eV/imported_stxm')
            # Check the data structure
            self.assertEqual(store.filenames.shape, (1, 2))
            stored_filename = store.filenames[0, 0].decode('utf-8')
            expected_filename = os.path.join(COSMIC_DIR, 'stxm-scan_a003.xim')
            self.assertEqual(stored_filename, expected_filename)
            np.testing.assert_equal(store.energies.value, [[853, 857.75]])
            np.testing.assert_equal(store.pixel_sizes.value, [[27.2, 27.2]])
            self.assertEqual(store.intensities.shape, (1, 2, 120, 120))
            self.assertEqual(store.optical_depths.shape, (1, 2, 120, 120))
            self.assertEqual(store.timestep_names[0].decode('utf-8'), 'ex-situ')
        # Open imported TXMStore to check its contents
        with TXMStore(**hdf_kw, data_name='imported') as store:
            self.assertEqual(store.filenames.shape, (1, 3))
            self.assertEqual(store.timestep_names.shape, (1,))
            # Merged data are resampled to the (finer) ptychography pixel size
            real_px_size = 6.0435606480754585
            np.testing.assert_equal(store.pixel_sizes.value,
                                    [[real_px_size, real_px_size, real_px_size]])
            self.assertEqual(store.pixel_unit, 'nm')
class CosmicFileTest(TestCase):
    """Unit tests for the HDRFile (STXM) and CXIFile (ptychography) readers
    used by the COSMIC importer."""
    stxm_hdr = os.path.join(COSMIC_DIR, 'stxm-scan.hdr')
    ptycho_cxi = os.path.join(COSMIC_DIR, 'ptycho-scan-856eV.cxi')

    def setUp(self):
        self.hdr = HDRFile(self.stxm_hdr)
        self.cxi = CXIFile(self.ptycho_cxi)

    def test_hdr_filenames(self):
        # The .hdr header references two .xim image files on disk
        real_filenames = [os.path.join(COSMIC_DIR, f) for f in
                          ('stxm-scan_a003.xim', 'stxm-scan_a019.xim')]
        self.assertEqual(self.hdr.filenames(), real_filenames)

    def test_cxi_filenames(self):
        # A .cxi file is self-contained, so it is its own (only) filename
        self.assertEqual(self.cxi.filenames(), ['ptycho-scan-856eV.cxi'])

    def test_cxi_image_data(self):
        with self.cxi:
            self.assertEqual(self.cxi.num_images(), 1)
            self.assertEqual(self.cxi.image_frames().shape, (1, 285, 285))

    def test_cxi_image_shape(self):
        with self.cxi:
            self.assertEqual(self.cxi.image_shape(), (285, 285))

    def test_cxi_energies(self):
        with self.cxi:
            self.assertAlmostEqual(self.cxi.energies()[0], 855.9056, places=3)

    def test_cxi_pixel_size(self):
        real_px_size = 6.0435606480754585
        with self.cxi:
            self.assertAlmostEqual(self.cxi.pixel_size(), real_px_size)

    def test_hdr_pixel_size(self):
        with self.hdr:
            self.assertEqual(self.hdr.pixel_size(), 27.2)

    def test_hdr_image_data(self):
        self.assertEqual(self.hdr.num_images(), 2)
        self.assertEqual(self.hdr.image_frames().shape, (2, 120, 120))

    def test_hdr_image_shape(self):
        self.assertEqual(self.hdr.image_shape(), (120, 120))

    def test_hdr_energies(self):
        with self.hdr:
            self.assertAlmostEqual(self.hdr.energies()[0], 853., places=3)

    def test_specific_hdr_files(self):
        """This test check specific HDR files that did not succeed at first.
        """
        # This one has a negative sign in front of the x-position
        filename1 = os.path.join(COSMIC_DIR, 'NS_181111148.hdr')
        hdr1 = HDRFile(filename1)
        self.assertAlmostEqual(hdr1.pixel_size(), 66.7)
class XradiaTest(TestCase):
    """Tests for reading Xradia .xrm and .txrm files from both the SSRL
    and APS flavors of the beamline software."""
    txrm_filename = os.path.join(TEST_DIR, "aps-8BM-sample.txrm")

    def test_pixel_size(self):
        sample_filename = "rep01_20161456_ssrl-test-data_08324.0_eV_001of003.xrm"
        with XRMFile(os.path.join(SSRL_DIR, sample_filename), flavor="ssrl") as xrm:
            self.assertAlmostEqual(xrm.um_per_pixel(), 0.03287, places=4)

    def test_timestamp_from_xrm(self):
        """Start/end times should be converted from beamline-local time to
        timezone-naive UTC."""
        pacific_tz = pytz.timezone("US/Pacific")
        chicago_tz = pytz.timezone('US/Central')
        sample_filename = "rep01_20161456_ssrl-test-data_08324.0_eV_001of003.xrm"
        with XRMFile(os.path.join(SSRL_DIR, sample_filename), flavor="ssrl") as xrm:
            # Check start time
            start = pacific_tz.localize(dt.datetime(2016, 5, 29, 15, 2, 37))
            start = start.astimezone(pytz.utc).replace(tzinfo=None)
            self.assertEqual(xrm.starttime(), start)
            self.assertEqual(xrm.starttime().tzinfo, None)
            # Check end time (offset determined by exposure time)
            end = pacific_tz.localize(dt.datetime(2016, 5, 29, 15, 2, 37, 500000))
            end = end.astimezone(pytz.utc).replace(tzinfo=None)
            self.assertEqual(xrm.endtime(), end)
            # NOTE(review): close() inside the ``with`` block is redundant
            # (the context manager closes on exit) — presumably harmless.
            xrm.close()
        # Test APS frame
        sample_filename = "fov03_xanesocv_8353_0eV.xrm"
        with XRMFile(os.path.join(APS_DIR, sample_filename), flavor="aps") as xrm:
            # Check start time
            start = chicago_tz.localize(dt.datetime(2016, 7, 2, 17, 50, 35))
            start = start.astimezone(pytz.utc).replace(tzinfo=None)
            self.assertEqual(xrm.starttime(), start)
            # Check end time (offset determined by exposure time)
            end = chicago_tz.localize(dt.datetime(2016, 7, 2, 17, 51, 25))
            end = end.astimezone(pytz.utc).replace(tzinfo=None)
            self.assertEqual(xrm.endtime(), end)

    def test_mosaic(self):
        """Mosaic .xrm frames should report their tiling layout."""
        # txm-2015-11-11-aps/ncm111-cell1-chargeC15/20151111_002_mosaic_5x5_bin8_5s.xrm
        mosaic_filename = 'mosaic_4x4_bin8.xrm'
        with XRMFile(os.path.join(TEST_DIR, mosaic_filename), flavor='aps') as xrm:
            img_data = xrm.image_data()
            # Check basic shape details
            self.assertEqual(img_data.shape, (1024, 1024))
            self.assertEqual(xrm.mosaic_columns, 4)
            self.assertEqual(xrm.mosaic_rows, 4)
            self.assertEqual(xrm.um_per_pixel(), 0.15578947961330414)

    def test_str_and_repr(self):
        sample_filename = "rep01_20161456_ssrl-test-data_08324.0_eV_001of003.xrm"
        with XRMFile(os.path.join(SSRL_DIR, sample_filename), flavor="ssrl") as xrm:
            self.assertEqual(repr(xrm), "<XRMFile: '{}'>".format(sample_filename))
            self.assertEqual(str(xrm), "<XRMFile: '{}'>".format(sample_filename))

    def test_binning(self):
        sample_filename = "rep01_20161456_ssrl-test-data_08324.0_eV_001of003.xrm"
        with XRMFile(os.path.join(SSRL_DIR, sample_filename), flavor="ssrl") as xrm:
            self.assertEqual(xrm.binning(), (2, 2))

    def test_frame_stack(self):
        # A .txrm file holds a whole stack of frames plus their energies
        with TXRMFile(self.txrm_filename, flavor="aps") as txrm:
            self.assertEqual(txrm.image_stack().shape, (3, 1024, 1024))
            self.assertEqual(txrm.energies().shape, (3,))

    def test_num_images(self):
        with TXRMFile(self.txrm_filename, flavor="aps") as txrm:
            self.assertEqual(txrm.num_images(), 3)

    def test_starttimes(self):
        with TXRMFile(self.txrm_filename, flavor="aps") as txrm:
            result = txrm.starttimes()
            expected_start = dt.datetime(2017, 7, 9, 0, 49, 2)
            self.assertEqual(result[0], expected_start)
class PtychographyImportTest(TestCase):
    """Tests for importing nanosurveyor ptychography framesets from the
    ALS (directory-of-cxi-files layout)."""

    def setUp(self):
        # Destination HDF5 file; remove leftovers from failed runs
        self.hdf = os.path.join(PTYCHO_DIR, 'testdata.h5')
        if os.path.exists(self.hdf):
            os.remove(self.hdf)

    def tearDown(self):
        if os.path.exists(self.hdf):
            os.remove(self.hdf)

    def test_directory_names(self):
        """Tests for checking some of the edge cases for what can be passed as
        a directory string."""
        import_nanosurveyor_frameset(PTYCHO_DIR + "/", hdf_filename=self.hdf)

    def test_imported_hdf(self):
        """Import the fixture directory and verify the HDF5 structure."""
        import_nanosurveyor_frameset(PTYCHO_DIR, hdf_filename=self.hdf)
        self.assertTrue(os.path.exists(self.hdf))
        with h5py.File(self.hdf, mode='r') as f:
            dataset_name = 'NS_160406074'
            parent = f[dataset_name]
            # Check metadata about the sample
            self.assertEqual(parent.attrs['latest_data_name'], "imported")
            group = parent['imported']
            keys = list(group.keys())
            # Check metadata about beamline
            self.assertEqual(parent.attrs['technique'], 'ptychography STXM')
            # Check data is structured properly
            self.assertEqual(group['timestep_names'].value[0], bytes(dataset_name, 'ascii'))
            self.assertIn('intensities', keys)
            self.assertEqual(group['intensities'].shape, (1, 3, 228, 228))
            self.assertEqual(group['intensities'].attrs['context'], 'frameset')
            self.assertIn('stxm', keys)
            self.assertEqual(group['stxm'].shape, (1, 3, 20, 20))
            self.assertEqual(group['pixel_sizes'].attrs['unit'], 'nm')
            self.assertTrue(np.all(group['pixel_sizes'].value == 4.16667),
                            msg=group['pixel_sizes'].value)
            self.assertEqual(group['pixel_sizes'].shape, (1, 3))
            expected_Es = np.array([[843.9069591, 847.90651815,
                                     850.15627011]])
            np.testing.assert_allclose(group['energies'].value, expected_Es)
            self.assertEqual(group['energies'].shape, (1, 3))
            ## NB: Timestamps not available in the cxi files
            # self.assertIn('timestamps', keys)
            # expected_timestamp = np.array([
            #     [[b'2016-07-02 16:31:36-05:51', b'2016-07-02 16:32:26-05:51'],
            #      [b'2016-07-02 17:50:35-05:51', b'2016-07-02 17:51:25-05:51']],
            #     [[b'2016-07-02 22:19:23-05:51', b'2016-07-02 22:19:58-05:51'],
            #      [b'2016-07-02 23:21:21-05:51', b'2016-07-02 23:21:56-05:51']],
            # ], dtype="S32")
            # self.assertTrue(np.array_equal(group['timestamps'].value,
            #                                expected_timestamp))
            self.assertIn('filenames', keys)
            self.assertEqual(group['filenames'].shape, (1, 3))
            self.assertIn('relative_positions', keys)
            self.assertEqual(group['relative_positions'].shape, (1, 3, 3))
            ## NB: It's not clear exactly what "original positions"
            ## means for STXM data
            self.assertIn('original_positions', keys)
            self.assertEqual(group['original_positions'].shape, (1, 3, 3))

    def test_frame_shape(self):
        """In some cases, frames are different shapes. Specifying a shape in
        the importer can fix this.
        """
        expected_shape = (220, 220)
        import_nanosurveyor_frameset(PTYCHO_DIR,
                                     hdf_filename=self.hdf,
                                     frame_shape=expected_shape)
        with h5py.File(self.hdf, mode='r') as f:
            real_shape = f['NS_160406074/imported/intensities'].shape
            self.assertEqual(real_shape, (1, 3, *expected_shape))

    def test_partial_import(self):
        """Sometimes the user may want to specify that only a subset of
        ptychographs be imported.
        """
        energy_range = (843, 848)
        import_nanosurveyor_frameset(PTYCHO_DIR,
                                     energy_range=energy_range,
                                     hdf_filename=self.hdf, quiet=True)
        with h5py.File(self.hdf, mode='r') as f:
            dataset_name = 'NS_160406074'
            parent = f[dataset_name]
            group = parent['imported']
            # Only 2 of the 3 energies fall inside the requested range
            self.assertEqual(group['intensities'].shape[0:2],
                             (1, 2))
            self.assertEqual(group['filenames'].shape, (1, 2))

    def test_exclude_re(self):
        """Allow the user to exclude specific frames that are bad."""
        import_nanosurveyor_frameset(PTYCHO_DIR,
                                     exclude_re="(/009/|/100/)",
                                     hdf_filename=self.hdf, quiet=True)
        with h5py.File(self.hdf, mode='r') as f:
            dataset_name = 'NS_160406074'
            parent = f[dataset_name]
            group = parent['imported']
            self.assertEqual(group['intensities'].shape[0:2],
                             (1, 2))

    def test_multiple_import(self):
        """Check if we can import multiple different directories of different
        energies ranges."""
        # Import two data sets (order is important to test for sorting)
        import_nanosurveyor_frameset("{}-low-energy".format(PTYCHO_DIR),
                                     hdf_filename=self.hdf, quiet=True,
                                     hdf_groupname="merged")
        import_nanosurveyor_frameset("{}-high-energy".format(PTYCHO_DIR),
                                     hdf_filename=self.hdf, quiet=True,
                                     hdf_groupname="merged",
                                     append=True)
        # Check resulting HDF5 file
        # NOTE(review): no mode given here; recent h5py versions require an
        # explicit mode (e.g. mode='r') — confirm against the pinned h5py.
        with h5py.File(self.hdf) as f:
            self.assertIn('merged', f.keys())
            # Check that things are ordered by energy
            saved_Es = f['/merged/imported/energies'].value
            np.testing.assert_array_equal(saved_Es, np.sort(saved_Es))
            # Construct the expected path relative to the current directory
            relpath = "ptycho-data-als/NS_160406074-{}-energy/160406074/{}/NS_160406074.cxi"
            toplevel = os.getcwd().split('/')[-1]
            if toplevel == "tests":
                test_dir = ''
            else:
                test_dir = 'tests'
            relpath = os.path.join(test_dir, relpath)
            # Compare the expeected file names
            sorted_files = [[bytes(relpath.format("low", "001"), 'ascii'),
                             bytes(relpath.format("low", "009"), 'ascii'),
                             bytes(relpath.format("high", "021"), 'ascii'), ]]
            saved_files = f['/merged/imported/filenames']
            np.testing.assert_array_equal(saved_files, sorted_files)
class APS8BMFileImportTest(TestCase):
    """Check importing a single .txrm sample/reference pair collected at
    APS beamline 8-BM-B."""
    txrm_file = os.path.join(TEST_DIR, 'aps-8BM-sample.txrm')
    txrm_ref = os.path.join(TEST_DIR, 'aps-8BM-reference.txrm')

    def setUp(self):
        # Destination HDF5 file; remove leftovers from failed runs
        self.hdf = os.path.join(APS_DIR, 'testdata.h5')
        if os.path.exists(self.hdf):
            os.remove(self.hdf)

    def tearDown(self):
        if os.path.exists(self.hdf):
            os.remove(self.hdf)

    def test_imported_hdf(self):
        """Import the .txrm pair and verify the HDF5 layout and metadata."""
        import_aps8bm_xanes_file(self.txrm_file,
                                 ref_filename=self.txrm_ref, hdf_filename=self.hdf,
                                 quiet=True)
        # Check that the file was created
        self.assertTrue(os.path.exists(self.hdf))
        with h5py.File(self.hdf, mode='r') as f:
            group = f['aps-8BM-sample/imported']
            parent = f['aps-8BM-sample']
            # Check metadata about beamline
            self.assertEqual(parent.attrs['technique'], 'Full-field TXM')
            self.assertEqual(parent.attrs['xanespy_version'], CURRENT_VERSION)
            self.assertEqual(parent.attrs['beamline'], "APS 8-BM-B")
            self.assertEqual(parent.attrs['original_file'], self.txrm_file)
            # Check h5 data structure
            keys = list(group.keys())
            self.assertIn('intensities', keys)
            self.assertEqual(group['intensities'].shape, (1, 3, 1024, 1024))
            self.assertIn('references', keys)
            self.assertIn('optical_depths', keys)
            self.assertEqual(group['pixel_sizes'].attrs['unit'], 'µm')
            self.assertEqual(group['pixel_sizes'].shape, (1, 3))
            self.assertTrue(np.any(group['pixel_sizes'].value > 0))
            expected_Es = np.array([[8312.9287109, 8363.0078125, 8412.9541016]])
            np.testing.assert_almost_equal(group['energies'].value, expected_Es)
            self.assertIn('timestamps', keys)
            expected_timestamp = np.array([
                [b'2017-07-09 00:49:02', b'2017-07-09 00:49:30', b'2017-07-09 00:49:58'],
            ], dtype="S32")
            np.testing.assert_equal(group['timestamps'].value,
                                    expected_timestamp)
            self.assertIn('filenames', keys)
            self.assertIn('original_positions', keys)
            self.assertEqual(group['original_positions'].shape, (1, 3, 3))
class APS8BMDirImportTest(TestCase):
    """Check that the program can import a directory of .xrm frames
    collected at APS beamline 8-BM-B."""

    def setUp(self):
        # Destination HDF5 file; remove leftovers from failed runs
        self.hdf = os.path.join(APS_DIR, 'testdata.h5')
        if os.path.exists(self.hdf):
            os.remove(self.hdf)

    def tearDown(self):
        if os.path.exists(self.hdf):
            os.remove(self.hdf)

    def test_import_empty_directory(self):
        """Check that the proper exception is raised if the directory has no
        TXM files in it."""
        EMPTY_DIR = 'temp-empty-dir'
        os.mkdir(EMPTY_DIR)
        try:
            with self.assertRaisesRegex(exceptions.DataNotFoundError,
                                        '/temp-empty-dir'):
                import_aps8bm_xanes_dir(EMPTY_DIR,
                                        hdf_filename="test-file.hdf",
                                        quiet=True)
        finally:
            # Clean up by deleting any temporary files/directories
            if os.path.exists('test-file.hdf'):
                os.remove('test-file.hdf')
            os.rmdir(EMPTY_DIR)

    def test_imported_references(self):
        import_aps8bm_xanes_dir(APS_DIR, hdf_filename=self.hdf, quiet=True)
        with h5py.File(self.hdf, mode='r') as f:
            self.assertIn('references', f['fov03/imported'].keys())

    def test_groupname_kwarg(self):
        """The groupname keyword argument needs some special attention."""
        # Without a '{}' placeholder the groupname is invalid
        with self.assertRaisesRegex(exceptions.CreateGroupError, 'Invalid groupname'):
            import_aps8bm_xanes_dir(APS_DIR, hdf_filename=self.hdf,
                                    quiet=True, groupname="Wawa")
        # Now does it work with the {} inserted
        import_aps8bm_xanes_dir(APS_DIR, hdf_filename=self.hdf,
                                quiet=True, groupname="Wawa{}")

    def test_imported_hdf(self):
        """Import the fixture directory and verify the HDF5 structure."""
        import_aps8bm_xanes_dir(APS_DIR, hdf_filename=self.hdf, quiet=True)
        # Check that the file was created
        self.assertTrue(os.path.exists(self.hdf))
        with h5py.File(self.hdf, mode='r') as f:
            group = f['fov03/imported']
            parent = f['fov03']
            # Check metadata about beamline
            self.assertEqual(parent.attrs['technique'], 'Full-field TXM')
            self.assertEqual(parent.attrs['xanespy_version'], CURRENT_VERSION)
            self.assertEqual(parent.attrs['beamline'], "APS 8-BM-B")
            self.assertEqual(parent.attrs['original_directory'], APS_DIR)
            # Check h5 data structure
            keys = list(group.keys())
            self.assertIn('intensities', keys)
            self.assertEqual(group['intensities'].shape, (2, 2, 1024, 1024))
            self.assertIn('references', keys)
            self.assertIn('optical_depths', keys)
            self.assertEqual(group['pixel_sizes'].attrs['unit'], 'µm')
            self.assertEqual(group['pixel_sizes'].shape, (2, 2))
            self.assertTrue(np.any(group['pixel_sizes'].value > 0))
            expected_Es = np.array([[8249.9365234375, 8353.0322265625],
                                    [8249.9365234375, 8353.0322265625]])
            self.assertTrue(np.array_equal(group['energies'].value, expected_Es))
            self.assertIn('timestamps', keys)
            expected_timestamp = np.array([
                [[b'2016-07-02 21:31:36', b'2016-07-02 21:32:26'],
                 [b'2016-07-02 22:50:35', b'2016-07-02 22:51:25']],
                [[b'2016-07-03 03:19:23', b'2016-07-03 03:19:58'],
                 [b'2016-07-03 04:21:21', b'2016-07-03 04:21:56']],
            ], dtype="S32")
            np.testing.assert_equal(group['timestamps'].value,
                                    expected_timestamp)
            self.assertIn('filenames', keys)
            self.assertIn('original_positions', keys)
            # self.assertIn('relative_positions', keys)
            # self.assertEqual(group['relative_positions'].shape, (2, 3))

    def test_params_from_aps(self):
        """Check that the new naming scheme is decoded properly."""
        ref_filename = "ref_xanesocv_8250_0eV.xrm"
        result = decode_aps_params(ref_filename)
        expected = {
            'timestep_name': 'ocv',
            'position_name': 'ref',
            'is_background': True,
            'energy': 8250.0,
        }
        self.assertEqual(result, expected)
        # An example reference filename from 2015-11-11 beamtime
        ref_filename = 'ncm111-cell1-chargeC15/operando-xanes00/20151111_UIC_XANES00_bkg_8313.xrm'
        result = decode_aps_params(ref_filename)
        self.assertTrue(result['is_background'])
        self.assertEqual(result['energy'], 8313.0)
        self.assertEqual(result['position_name'], 'bkg')
        self.assertEqual(result['timestep_name'], '00')
        # An example reference filename from 2015-11-11 beamtime
        sam_filename = 'ncm111-cell1-chargeC15/operando-xanes05/20151111_UIC_XANES05_sam02_8381.xrm'
        result = decode_aps_params(sam_filename)
        self.assertFalse(result['is_background'])
        self.assertEqual(result['energy'], 8381.0)
        self.assertEqual(result['position_name'], 'sam02')
        self.assertEqual(result['timestep_name'], '05')

    def test_file_metadata(self):
        """read_metadata() should return a pandas DataFrame with one row per
        file, including a UTC start time."""
        filenames = [os.path.join(APS_DIR, 'fov03_xanessoc01_8353_0eV.xrm')]
        df = read_metadata(filenames=filenames, flavor='aps', quiet=True)
        self.assertIsInstance(df, pd.DataFrame)
        row = df.iloc[0]
        self.assertIn('shape', row.keys())
        self.assertIn('timestep_name', row.keys())
        # Check the correct start time
        chicago_tz = pytz.timezone('US/Central')
        realtime = chicago_tz.localize(dt.datetime(2016, 7, 2, 23, 21, 21))
        realtime = realtime.astimezone(pytz.utc).replace(tzinfo=None)
        # Convert to unix timestamp
        self.assertIsInstance(row['starttime'], pd.Timestamp)
        self.assertEqual(row['starttime'], realtime)
class SSRLImportTest(TestCase):
    """Check that the program can import a collection of SSRL 6-2c frames
    from a directory."""

    def setUp(self):
        # Destination HDF5 file; remove leftovers from failed runs
        self.hdf = os.path.join(SSRL_DIR, 'testdata.h5')
        if os.path.exists(self.hdf):
            os.remove(self.hdf)

    def tearDown(self):
        if os.path.exists(self.hdf):
            os.remove(self.hdf)

    def test_minimum_shape(self):
        """minimum_shape() should report the smallest compatible frame shape
        and reject incompatible or non-power-of-two shape lists."""
        shape_list = [(1024, 512), (1024, 1024), (2048, 2048)]
        min_shape = minimum_shape(shape_list)
        self.assertEqual(min_shape, (1024, 512))
        # Check with incompatible shape dimensions
        shape_list = [(1024, 1024), (1024, 1024), (2048, 2048, 2048)]
        with self.assertRaises(exceptions.ShapeMismatchError):
            minimum_shape(shape_list)
        # Check that non-power-of-two shapes raise an exception
        shape_list = [(5, 1024), (1024, 1024), (2048, 2048)]
        with self.assertRaises(exceptions.ShapeMismatchError):
            minimum_shape(shape_list)
        # Check with using named tuples (previously only printed the result;
        # now asserted so regressions are actually caught)
        shape_list = [utilities.shape(1024, 1024), utilities.shape(1024, 1024)]
        min_shape = minimum_shape(shape_list)
        self.assertEqual(tuple(min_shape), (1024, 1024))

    def test_rebin_image(self):
        """rebin_image() should downsample to the requested shape, including
        asymmetric targets."""
        # Test a symmetrical reshape
        img = np.ones((64, 64))
        new_img = rebin_image(img, (32, 32))
        self.assertEqual(new_img.shape, (32, 32))
        # Test an asymmetrical reshape
        img = np.ones((64, 64))
        new_img = rebin_image(img, (32, 16))
        self.assertEqual(new_img.shape, (32, 16))

    def test_imported_hdf(self):
        """Import the whole fixture directory and verify the HDF5 layout,
        metadata attributes and 'context' markers."""
        # Some fixture files are deliberately broken; silence the importer's
        # warnings about them. (catch_warnings() without record=True yields
        # None, so binding it to a name would be misleading.)
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore',
                                    message='Ignoring invalid file .*',
                                    category=RuntimeWarning)
            import_ssrl_xanes_dir(SSRL_DIR, hdf_filename=self.hdf, quiet=True)
        # Check that the file was created
        self.assertTrue(os.path.exists(self.hdf))
        with h5py.File(self.hdf, mode='r') as f:
            group = f['ssrl-test-data/imported']
            parent = f['ssrl-test-data']
            # Check metadata about beamline
            self.assertEqual(parent.attrs['technique'], 'Full-field TXM')
            self.assertEqual(parent.attrs['xanespy_version'], CURRENT_VERSION)
            self.assertEqual(parent.attrs['beamline'], "SSRL 6-2c")
            self.assertEqual(parent.attrs['original_directory'], SSRL_DIR)
            # Check imported data structure
            keys = list(group.keys())
            self.assertIn('intensities', keys)
            self.assertEqual(group['intensities'].attrs['context'], 'frameset')
            self.assertEqual(group['intensities'].shape, (1, 2, 1024, 1024))
            self.assertIn('references', keys)
            self.assertEqual(group['references'].attrs['context'], 'frameset')
            self.assertIn('optical_depths', keys)
            self.assertEqual(group['optical_depths'].attrs['context'], 'frameset')
            self.assertEqual(group['pixel_sizes'].attrs['unit'], 'µm')
            self.assertEqual(group['pixel_sizes'].attrs['context'], 'metadata')
            isEqual = np.array_equal(group['energies'].value,
                                     np.array([[8324., 8354.]]))
            self.assertTrue(isEqual, msg=group['energies'].value)
            self.assertEqual(group['energies'].attrs['context'], 'metadata')
            self.assertIn('timestamps', keys)
            self.assertEqual(group['timestamps'].attrs['context'], 'metadata')
            self.assertIn('filenames', keys)
            self.assertEqual(group['filenames'].attrs['context'], 'metadata')
            self.assertIn('original_positions', keys)
            self.assertEqual(group['original_positions'].attrs['context'], 'metadata')
            self.assertIn('relative_positions', keys)
            self.assertEqual(group['relative_positions'].attrs['context'], 'metadata')
            self.assertIn('timestep_names', keys)
            self.assertEqual(group['relative_positions'].attrs['context'], 'metadata')
            # NOTE(review): compares against a str; depending on the h5py
            # version this dataset may yield bytes (cf. the explicit
            # bytes(..., 'ascii') comparisons elsewhere) — confirm.
            self.assertEqual(group['timestep_names'][0], "rep01")

    def test_params_from_ssrl(self):
        """decode_ssrl_params() should handle reference and sample filenames
        from several beamtimes, including historically problematic ones."""
        # First a reference frame
        ref_filename = "rep01_000001_ref_201511202114_NCA_INSITU_OCV_FOV01_Ni_08250.0_eV_001of010.xrm"
        result = decode_ssrl_params(ref_filename)
        expected = {
            'timestep_name': 'rep01',
            'position_name': 'NCA_INSITU_OCV_FOV01_Ni',
            'is_background': True,
            'energy': 8250.0,
        }
        self.assertEqual(result, expected)
        # Now a sample field of view
        sample_filename = "rep01_201511202114_NCA_INSITU_OCV_FOV01_Ni_08250.0_eV_001of010.xrm"
        result = decode_ssrl_params(sample_filename)
        expected = {
            'timestep_name': 'rep01',
            'position_name': 'NCA_INSITU_OCV_FOV01_Ni',
            'is_background': False,
            'energy': 8250.0,
        }
        self.assertEqual(result, expected)
        # This one was a problem, from 2017-04-05
        sample_filename = (
            "NCA_Pg71-5/Pg71-5_NCA_charge2_XANES_170405_1515/"
            "rep01_Pg71-5_NCA_charge2_08250.0_eV_001of005.xrm")
        result = decode_ssrl_params(sample_filename)
        expected = {
            'timestep_name': 'rep01',
            'position_name': 'Pg71-5_NCA_charge2',
            'is_background': False,
            'energy': 8250.0,
        }
        self.assertEqual(result, expected)
        # This reference was also a problem
        ref_filename = 'rep01_000001_ref_Pg71-5_NCA_charge2_08250.0_eV_001of010.xrm'
        result = decode_ssrl_params(ref_filename)
        expected = {
            'timestep_name': 'rep01',
            'position_name': 'Pg71-5_NCA_charge2',
            'is_background': True,
            'energy': 8250.0,
        }
        self.assertEqual(result, expected)
        # Another bad reference file
        ref_filename = 'rep02_000182_ref_201604061951_Pg71-8_NCA_charge1_08400.0_eV_002of010.xrm'
        result = decode_ssrl_params(ref_filename)
        expected = {
            'timestep_name': 'rep02',
            'position_name': 'Pg71-8_NCA_charge1',
            'is_background': True,
            'energy': 8400.0,
        }
        self.assertEqual(result, expected)

    def test_magnification_correction(self):
        """magnification_correction() should return identity scale/translation
        for the reference pixel size and scaled values otherwise."""
        # Prepare some fake data: a frame with a border and a smaller,
        # centered frame at double the pixel size.
        img1 = [[1, 1, 1, 1, 1],
                [1, 0, 0, 0, 1],
                [1, 0, 0, 0, 1],
                [1, 0, 0, 0, 1],
                [1, 1, 1, 1, 1]]
        img2 = [[0, 0, 0, 0, 0],
                [0, 1, 1, 1, 0],
                [0, 1, 0, 1, 0],
                [0, 1, 1, 1, 0],
                [0, 0, 0, 0, 0]]
        # ``np.float`` was removed in NumPy 1.24; the builtin ``float`` is
        # the documented equivalent (float64).
        imgs = np.array([[img1, img2], [img1, img2]], dtype=float)
        pixel_sizes = np.array([[1, 2], [1, 2]])
        scales, translations = magnification_correction(imgs, pixel_sizes)
        # Check that the right shape result is returns
        self.assertEqual(scales.shape, (2, 2, 2))
        np.testing.assert_equal(scales[..., 0], scales[..., 1])
        # Check that the first result is not corrected
        np.testing.assert_equal(scales[0, 0], (1., 1.))
        np.testing.assert_equal(translations[0, 0], (0, 0))
        # Check the values for translation and scale for the changed image
        np.testing.assert_equal(scales[0, 1], (0.5, 0.5))
        np.testing.assert_equal(translations[0, 1], (1., 1.))

    def test_bad_file(self):
        """Invalid .xrm files should be skipped with a RuntimeWarning rather
        than aborting the metadata read."""
        # One specific file is not saved properly
        filenames = [
            # No image data nor timestamp
            'rep02_000072_ref_20161456_ssrl-test-data_08348.0_eV_002of010.xrm',
            # Valid file
            'rep01_000351_ref_20161456_ssrl-test-data_08354.0_eV_001of010.xrm',
            # Malformed image data
            # 'rep02_000182_ref_20161456_ssrl-test-data_08400.0_eV_002of010.xrm',
        ]
        filenames = [os.path.join(SSRL_DIR, f) for f in filenames]
        # Check that the importer warns the user of the bad file
        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter('always')
            result = read_metadata(filenames, flavor='ssrl', quiet=True)
            self.assertTrue(len(ws) > 0)
            self.assertTrue(any([w.category == RuntimeWarning for w in ws]))
            self.assertTrue(any(['Ignoring invalid file' in str(w.message) for w in ws]))
        # Check that the bad entries was excluded from the processed list
        self.assertEqual(len(result), 1)
class SxstmFileTestCase(unittest.TestCase):
    """Tests for soft x-ray tunneling microscope data from APS 4-ID-C."""
    def test_header(self):
        """Header and data sections of a .3ds file should both parse."""
        filename = os.path.join(SXSTM_DIR, 'XGSS_UIC_JC_475v_60c_001_001_001.3ds')
        sxstm_data = SxstmDataFile(filename=filename)
        try:
            header = sxstm_data.header_lines()
            self.assertEqual(len(header), 33)
            # Parsing the data block must not raise; the resulting frame
            # itself is not inspected here.
            sxstm_data.dataframe()
        finally:
            # Close even when an assertion fails so the open handle does
            # not leak into subsequent tests (the original leaked here).
            sxstm_data.close()
class SxstmImportTestCase(unittest.TestCase):
    """Tests for importing a set of X-ray tunneling microscopy data from
    APS 4-ID-C.
    """
    # Output file written by the importer; removed again in tearDown.
    hdf_filename = os.path.join(SXSTM_DIR, 'sxstm_imported.h5')
    parent_groupname = 'sxstm-test-data'
    def tearDown(self):
        # Remove the HDF5 file produced by the importer, if any.
        if os.path.exists(self.hdf_filename):
            os.remove(self.hdf_filename)
    def test_hdf_file(self):
        """Import a directory of .3ds files and validate the HDF5 layout."""
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore',
                                    message='X and Y pixel sizes')
            import_aps4idc_sxstm_files(filenames=os.path.join(TEST_DIR, 'sxstm-data-4idc'),
                                       hdf_filename=self.hdf_filename,
                                       hdf_groupname=self.parent_groupname,
                                       shape=(2, 2),
                                       energies=[8324., 8354.])
        # Check that the file exists with the data group
        self.assertTrue(os.path.exists(self.hdf_filename))
        with h5py.File(self.hdf_filename, mode='r') as f:
            # Check that the group structure is correct
            self.assertIn(self.parent_groupname, list(f.keys()))
            parent = f[self.parent_groupname]
            self.assertIn('imported', list(parent.keys()),
                          "Importer didn't create '/%s/imported'" % self.parent_groupname)
            # Check metadata about beamline
            self.assertEqual(parent.attrs['technique'],
                             'Synchrotron X-ray Scanning Tunneling Microscopy')
            self.assertEqual(parent.attrs['xanespy_version'], CURRENT_VERSION)
            self.assertEqual(parent.attrs['beamline'], "APS 4-ID-C")
            self.assertEqual(parent.attrs['latest_data_name'], 'imported')
            full_path = os.path.abspath(SXSTM_DIR)
            self.assertEqual(parent.attrs['original_directory'], full_path)
            # Check that the datasets are created
            group = parent['imported']
            keys = list(group.keys())
            columns = ['bias_calc', 'current', 'LIA_tip_ch1',
                       'LIA_tip_ch2', 'LIA_sample', 'LIA_shielding',
                       'LIA_topo', 'shielding', 'flux', 'bias',
                       'height']
            for col in columns:
                self.assertIn(col, list(group.keys()),
                              "Importer didn't create '/%s/imported/%s'"
                              "" % (self.parent_groupname, col))
                self.assertEqual(group[col].attrs['context'], 'frameset')
                self.assertEqual(group[col].dtype, 'float32')
                self.assertEqual(group[col].shape, (1, 2, 2, 2))
                self.assertTrue(np.any(group[col]))
            self.assertEqual(group['pixel_sizes'].attrs['unit'], 'µm')
            self.assertEqual(group['pixel_sizes'].attrs['context'], 'metadata')
            # ``Dataset.value`` was deprecated and then removed in h5py 3.0;
            # ``ds[()]`` is the supported way to read the whole dataset.
            isEqual = np.array_equal(group['energies'][()],
                                     np.array([[8324., 8354.]]))
            self.assertTrue(isEqual, msg=group['energies'][()])
            self.assertEqual(group['energies'].attrs['context'], 'metadata')
            self.assertIn('filenames', keys)
            self.assertEqual(group['filenames'].attrs['context'], 'metadata')
            self.assertIn('timestep_names', keys)
            self.assertEqual(group['timestep_names'].attrs['context'], 'metadata')
            self.assertEqual(group['timestep_names'][0], b"ex-situ")
            # Position/timestamp datasets are not yet produced by this importer:
            # self.assertIn('timestamps', keys)
            # self.assertEqual(group['timestamps'].attrs['context'], 'metadata')
            # self.assertIn('original_positions', keys)
            # self.assertEqual(group['original_positions'].attrs['context'], 'metadata')
            # self.assertIn('relative_positions', keys)
            # self.assertEqual(group['relative_positions'].attrs['context'], 'metadata')
    def test_file_list(self):
        """See if a file list can be passed instead of a directory name."""
        filelist = [os.path.join(SXSTM_DIR, f) for f in os.listdir(SXSTM_DIR)]
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore',
                                    message='X and Y pixel sizes')
            import_aps4idc_sxstm_files(filenames=filelist,
                                       hdf_filename=self.hdf_filename,
                                       hdf_groupname=self.parent_groupname,
                                       shape=(2, 2),
                                       energies=[8324., 8354.])
| gpl-3.0 |
ywcui1990/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/rcsetup.py | 69 | 23344 | """
The rcsetup module contains the default values and the validation code for
customization using matplotlib's rc settings.
Each rc setting is assigned a default value and a function used to validate any
attempted changes to that setting. The default values and validation functions
are defined in the rcsetup module, and are used to construct the rcParams global
object which stores the settings and is referenced throughout matplotlib.
These default values should be consistent with the default matplotlibrc file
that actually reflects the values given here. Any additions or deletions to the
parameter set listed here should also be visited to the
:file:`matplotlibrc.template` in matplotlib's root source directory.
"""
import os
import warnings
from matplotlib.fontconfig_pattern import parse_fontconfig_pattern
from matplotlib.colors import is_color_like
#interactive_bk = ['gtk', 'gtkagg', 'gtkcairo', 'fltkagg', 'qtagg', 'qt4agg',
# 'tkagg', 'wx', 'wxagg', 'cocoaagg']
# The capitalized forms are needed for ipython at present; this may
# change for later versions.
# Backends that open GUI windows.
interactive_bk = ['GTK', 'GTKAgg', 'GTKCairo', 'FltkAgg', 'MacOSX',
                  'QtAgg', 'Qt4Agg', 'TkAgg', 'WX', 'WXAgg', 'CocoaAgg']
# Backends that only render to files.
non_interactive_bk = ['agg', 'cairo', 'emf', 'gdk',
                      'pdf', 'ps', 'svg', 'template']
# Full list accepted by the 'backend' rc parameter.
all_backends = interactive_bk + non_interactive_bk
class ValidateInStrings:
    """Validator callable accepting only strings from a fixed set.

    Calling an instance with a candidate string returns the canonical
    spelling from *valid*; anything unrecognized raises ValueError.
    """
    def __init__(self, key, valid, ignorecase=False):
        'valid is a list of legal strings'
        self.key = key
        self.ignorecase = ignorecase
        if ignorecase:
            self.valid = dict([(k.lower(), k) for k in valid])
        else:
            self.valid = dict([(k, k) for k in valid])

    def __call__(self, s):
        if self.ignorecase:
            s = s.lower()
        try:
            return self.valid[s]
        except KeyError:
            raise ValueError('Unrecognized %s string "%s": valid strings are %s'
                             % (self.key, s, self.valid.values()))
def validate_path_exists(s):
    """Return *s* unchanged if it names an existing path; raise otherwise."""
    if not os.path.exists(s):
        raise RuntimeError('"%s" should be a path but it does not exist' % s)
    return s
def validate_bool(b):
    """Convert *b* to a bool or raise ValueError.

    Accepts real booleans, 0/1, and common textual spellings
    ('t'/'f', 'y'/'n', 'yes'/'no', 'on'/'off', 'true'/'false',
    '0'/'1'), case-insensitively.
    """
    # isinstance (rather than ``type(b) is str``) also accepts str
    # subclasses, which ``type(...) is`` silently rejected.
    if isinstance(b, str):
        b = b.lower()
    if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True):
        return True
    elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False):
        return False
    else:
        raise ValueError('Could not convert "%s" to boolean' % b)
def validate_bool_maybe_none(b):
    """Convert *b* to a bool, or to None for the string 'none'; else raise.

    Same accepted spellings as validate_bool, plus 'none' (any case)
    which maps to None.
    """
    # isinstance for consistency with validate_bool (accepts str subclasses).
    if isinstance(b, str):
        b = b.lower()
    if b == 'none':
        return None
    if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True):
        return True
    elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False):
        return False
    else:
        raise ValueError('Could not convert "%s" to boolean' % b)
def validate_float(s):
    """Convert *s* to float or raise ValueError."""
    try:
        return float(s)
    except (ValueError, TypeError):
        # TypeError covers non-numeric, non-string inputs such as None,
        # which float() rejects with TypeError rather than ValueError.
        raise ValueError('Could not convert "%s" to float' % s)
def validate_int(s):
    """Convert *s* to int or raise ValueError."""
    try:
        return int(s)
    except (ValueError, TypeError):
        # TypeError covers inputs like None that int() rejects with
        # TypeError rather than ValueError.
        raise ValueError('Could not convert "%s" to int' % s)
def validate_fonttype(s):
    """Map a font-type name or code to a PS/PDF font type (3 or 42)."""
    fonttypes = {'type3': 3,
                 'truetype': 42}
    try:
        fonttype = validate_int(s)
    except ValueError:
        # Not an integer: try the symbolic names.
        key = s.lower()
        if key in fonttypes.keys():
            return fonttypes[key]
        raise ValueError('Supported Postscript/PDF font types are %s' % fonttypes.keys())
    # Integer input: it must be one of the known codes.
    if fonttype not in fonttypes.values():
        raise ValueError('Supported Postscript/PDF font types are %s' % fonttypes.values())
    return fonttype
#validate_backend = ValidateInStrings('backend', all_backends, ignorecase=True)
_validate_standard_backends = ValidateInStrings('backend', all_backends, ignorecase=True)
def validate_backend(s):
    """Validate a backend name; 'module://...' names bypass the known-backend check."""
    if s.startswith('module://'): return s
    else: return _validate_standard_backends(s)
# Numerical package selection (historical: Numeric/numarray/numpy).
validate_numerix = ValidateInStrings('numerix',[
    'Numeric','numarray','numpy',
    ], ignorecase=True)
# GUI toolbar style shown on interactive figures.
validate_toolbar = ValidateInStrings('toolbar',[
    'None','classic','toolbar2',
    ], ignorecase=True)
def validate_autolayout(v):
    """Warn that figure.autolayout is unsupported; any truthy value triggers the warning."""
    if v:
        warnings.warn("figure.autolayout is not currently supported")
class validate_nseq_float:
    """Callable validator returning a sequence of exactly *n* floats.

    Accepts either a comma-separated string or a list/tuple of values.
    """
    def __init__(self, n):
        self.n = n
    def __call__(self, s):
        'return a seq of n floats or raise'
        # isinstance (not ``type(s) is str``) so str subclasses are
        # parsed as strings instead of falling into the sequence branch.
        if isinstance(s, str):
            ss = s.split(',')
            if len(ss) != self.n:
                raise ValueError('You must supply exactly %d comma separated values'%self.n)
            try:
                return [float(val) for val in ss]
            except ValueError:
                raise ValueError('Could not convert all entries to floats')
        else:
            assert type(s) in (list,tuple)
            if len(s) != self.n:
                raise ValueError('You must supply exactly %d values'%self.n)
            return [float(val) for val in s]
class validate_nseq_int:
    """Callable validator returning a sequence of exactly *n* ints.

    Accepts either a comma-separated string or a list/tuple of values.
    """
    def __init__(self, n):
        self.n = n
    def __call__(self, s):
        'return a seq of n ints or raise'
        # isinstance (not ``type(s) is str``) so str subclasses are
        # parsed as strings instead of falling into the sequence branch.
        if isinstance(s, str):
            ss = s.split(',')
            if len(ss) != self.n:
                raise ValueError('You must supply exactly %d comma separated values'%self.n)
            try:
                return [int(val) for val in ss]
            except ValueError:
                raise ValueError('Could not convert all entries to ints')
        else:
            assert type(s) in (list,tuple)
            if len(s) != self.n:
                raise ValueError('You must supply exactly %d values'%self.n)
            return [int(val) for val in s]
def validate_color(s):
    'return a valid color arg'
    # Accepted forms, tried in order: the literal 'none' (any case);
    # anything is_color_like accepts (color names, '#rrggbb', grayscale
    # strings); a hex string missing its leading '#'; finally a 3-tuple
    # of floats written as a string, e.g. '(0.1, 0.2, 0.3)'.
    if s.lower() == 'none':
        return 'None'
    if is_color_like(s):
        return s
    stmp = '#' + s
    if is_color_like(stmp):
        return stmp
    # If it is still valid, it must be a tuple.
    colorarg = s
    msg = ''
    if s.find(',')>=0:
        # get rid of grouping symbols
        stmp = ''.join([ c for c in s if c.isdigit() or c=='.' or c==','])
        vals = stmp.split(',')
        if len(vals)!=3:
            msg = '\nColor tuples must be length 3'
        else:
            try:
                colorarg = [float(val) for val in vals]
            except ValueError:
                msg = '\nCould not convert all entries to floats'
    # msg doubles as the failure flag: empty means parsing succeeded so far.
    if not msg and is_color_like(colorarg):
        return colorarg
    raise ValueError('%s does not look like a color arg%s'%(s, msg))
def validate_stringlist(s):
    """Return a list of strings from a comma-separated string or a sequence."""
    # isinstance (not ``type(s) is str``) so str subclasses are split
    # as strings instead of hitting the sequence assert below.
    if isinstance(s, str):
        return [v.strip() for v in s.split(',')]
    else:
        assert type(s) in [list, tuple]
        return [str(v) for v in s]
# Page orientation for hardcopy output (e.g. the PostScript backend).
validate_orientation = ValidateInStrings('orientation',[
    'landscape', 'portrait',
    ])
def validate_aspect(s):
    """Validate an axes aspect: 'auto', 'equal', or a numeric ratio."""
    if s == 'auto' or s == 'equal':
        return s
    try:
        value = float(s)
    except ValueError:
        raise ValueError('not a valid aspect specification')
    return value
def validate_fontsize(s):
    """Validate a font size: a named size keyword or a numeric point value."""
    named_sizes = ['xx-small', 'x-small', 'small', 'medium', 'large',
                   'x-large', 'xx-large', 'smaller', 'larger']
    if type(s) is str:
        s = s.lower()
    if s in named_sizes:
        return s
    try:
        return float(s)
    except ValueError:
        raise ValueError('not a valid font size')
def validate_font_properties(s):
    """Validate a fontconfig pattern by parsing it; return *s* unchanged."""
    parse_fontconfig_pattern(s)
    return s
# Math-text font set choices.
validate_fontset = ValidateInStrings('fontset', ['cm', 'stix', 'stixsans', 'custom'])
# Verbosity levels for matplotlib's status reporting.
validate_verbose = ValidateInStrings('verbose',[
    'silent', 'helpful', 'debug', 'debug-annoying',
    ])
# Output formats supported by the cairo backend.
validate_cairo_format = ValidateInStrings('cairo_format',
                                          ['png', 'ps', 'pdf', 'svg'],
                                          ignorecase=True)
# Paper sizes understood by the PostScript backend.
validate_ps_papersize = ValidateInStrings('ps_papersize',[
    'auto', 'letter', 'legal', 'ledger',
    'a0', 'a1', 'a2','a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10',
    'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9', 'b10',
    ], ignorecase=True)
def validate_ps_distiller(s):
    """Validate ps.usedistiller: None/False or 'ghostscript'/'xpdf'."""
    if type(s) is str:
        s = s.lower()
    if s in ('none', None):
        return None
    if s in ('false', False):
        return False
    if s in ('ghostscript', 'xpdf'):
        return s
    raise ValueError('matplotlibrc ps.usedistiller must either be none, ghostscript or xpdf')
# Line join/cap styles shared by several rc entries.
validate_joinstyle = ValidateInStrings('joinstyle',['miter', 'round', 'bevel'], ignorecase=True)
validate_capstyle = ValidateInStrings('capstyle',['butt', 'round', 'projecting'], ignorecase=True)
validate_negative_linestyle = ValidateInStrings('negative_linestyle',['solid', 'dashed'], ignorecase=True)
def validate_negative_linestyle_legacy(s):
    """Accept 'solid'/'dashed', or (deprecated) a 2-float dash pattern."""
    try:
        res = validate_negative_linestyle(s)
        return res
    except ValueError:
        dashes = validate_nseq_float(2)(s)
        warnings.warn("Deprecated negative_linestyle specification; use 'solid' or 'dashed'")
        return (0, dashes) # (offset, (solid, blank))
# Legend placement keywords.
validate_legend_loc = ValidateInStrings('legend_loc',[
    'best',
    'upper right',
    'upper left',
    'lower left',
    'lower right',
    'right',
    'center left',
    'center right',
    'lower center',
    'upper center',
    'center',
    ], ignorecase=True)
class ValidateInterval:
    """
    Value must be in interval

    Coerces the value to float, then checks it against [vmin, vmax],
    each end open or closed as configured.
    """
    def __init__(self, vmin, vmax, closedmin=True, closedmax=True):
        self.vmin = vmin
        self.vmax = vmax
        self.cmin = closedmin
        self.cmax = closedmax
    def __call__(self, s):
        try:
            s = float(s)
        except (ValueError, TypeError):
            # Only catch conversion failures; the previous bare ``except:``
            # would also have swallowed KeyboardInterrupt/SystemExit.
            raise RuntimeError('Value must be a float; found "%s"'%s)
        if self.cmin and s<self.vmin:
            raise RuntimeError('Value must be >= %f; found "%f"'%(self.vmin, s))
        elif not self.cmin and s<=self.vmin:
            raise RuntimeError('Value must be > %f; found "%f"'%(self.vmin, s))
        if self.cmax and s>self.vmax:
            raise RuntimeError('Value must be <= %f; found "%f"'%(self.vmax, s))
        elif not self.cmax and s>=self.vmax:
            raise RuntimeError('Value must be < %f; found "%f"'%(self.vmax, s))
        return s
# a map from rc key -> [default value, converter/validator]
# Fixes relative to the previous revision of this table:
#   * the six 'legend.*' keys that appeared twice in the literal have been
#     collapsed to single entries, keeping the later occurrence's value
#     (which is what the constructed dict actually contained; notably
#     'legend.axespad' is 0.5, not the shadowed 0.02)
#   * 'font.fantasy' was missing a comma, silently concatenating
#     'Impact' and 'Western' into one bogus family name
defaultParams = {
    'backend'           : ['Agg', validate_backend], # agg is certainly present
    'backend_fallback'  : [True, validate_bool], # agg is certainly present
    'numerix'           : ['numpy', validate_numerix],
    'maskedarray'       : [False, validate_bool],
    'toolbar'           : ['toolbar2', validate_toolbar],
    'datapath'          : [None, validate_path_exists], # handled by _get_data_path_cached
    'units'             : [False, validate_bool],
    'interactive'       : [False, validate_bool],
    'timezone'          : ['UTC', str],

    # the verbosity setting
    'verbose.level'     : ['silent', validate_verbose],
    'verbose.fileo'     : ['sys.stdout', str],

    # line props
    'lines.linewidth'       : [1.0, validate_float], # line width in points
    'lines.linestyle'       : ['-', str], # solid line
    'lines.color'           : ['b', validate_color], # blue
    'lines.marker'          : ['None', str], # no marker by default
    'lines.markeredgewidth' : [0.5, validate_float],
    'lines.markersize'      : [6, validate_float], # markersize, in points
    'lines.antialiased'     : [True, validate_bool], # antialised (no jaggies)
    'lines.dash_joinstyle'  : ['miter', validate_joinstyle],
    'lines.solid_joinstyle' : ['miter', validate_joinstyle],
    'lines.dash_capstyle'   : ['butt', validate_capstyle],
    'lines.solid_capstyle'  : ['projecting', validate_capstyle],

    # patch props
    'patch.linewidth'   : [1.0, validate_float], # line width in points
    'patch.edgecolor'   : ['k', validate_color], # black
    'patch.facecolor'   : ['b', validate_color], # blue
    'patch.antialiased' : [True, validate_bool], # antialised (no jaggies)

    # font props
    'font.family'       : ['sans-serif', str], # used by text object
    'font.style'        : ['normal', str],
    'font.variant'      : ['normal', str],
    'font.stretch'      : ['normal', str],
    'font.weight'       : ['normal', str],
    'font.size'         : [12.0, validate_float],
    'font.serif'        : [['Bitstream Vera Serif', 'DejaVu Serif',
                            'New Century Schoolbook', 'Century Schoolbook L',
                            'Utopia', 'ITC Bookman', 'Bookman',
                            'Nimbus Roman No9 L','Times New Roman',
                            'Times','Palatino','Charter','serif'],
                           validate_stringlist],
    'font.sans-serif'   : [['Bitstream Vera Sans', 'DejaVu Sans',
                            'Lucida Grande', 'Verdana', 'Geneva', 'Lucid',
                            'Arial', 'Helvetica', 'Avant Garde', 'sans-serif'],
                           validate_stringlist],
    'font.cursive'      : [['Apple Chancery','Textile','Zapf Chancery',
                            'Sand','cursive'], validate_stringlist],
    'font.fantasy'      : [['Comic Sans MS','Chicago','Charcoal','Impact',
                            'Western','fantasy'], validate_stringlist],
    'font.monospace'    : [['Bitstream Vera Sans Mono', 'DejaVu Sans Mono',
                            'Andale Mono', 'Nimbus Mono L', 'Courier New',
                            'Courier','Fixed', 'Terminal','monospace'],
                           validate_stringlist],

    # text props
    'text.color'          : ['k', validate_color], # black
    'text.usetex'         : [False, validate_bool],
    'text.latex.unicode'  : [False, validate_bool],
    'text.latex.preamble' : [[''], validate_stringlist],
    'text.dvipnghack'     : [None, validate_bool_maybe_none],
    'text.fontstyle'      : ['normal', str],
    'text.fontangle'      : ['normal', str],
    'text.fontvariant'    : ['normal', str],
    'text.fontweight'     : ['normal', str],
    'text.fontsize'       : ['medium', validate_fontsize],

    'mathtext.cal'            : ['cursive', validate_font_properties],
    'mathtext.rm'             : ['serif', validate_font_properties],
    'mathtext.tt'             : ['monospace', validate_font_properties],
    'mathtext.it'             : ['serif:italic', validate_font_properties],
    'mathtext.bf'             : ['serif:bold', validate_font_properties],
    # raw string: the backslash escapes the dash in the fontconfig pattern
    'mathtext.sf'             : [r'sans\-serif', validate_font_properties],
    'mathtext.fontset'        : ['cm', validate_fontset],
    'mathtext.fallback_to_cm' : [True, validate_bool],

    'image.aspect'        : ['equal', validate_aspect], # equal, auto, a number
    'image.interpolation' : ['bilinear', str],
    'image.cmap'          : ['jet', str], # one of gray, jet, etc
    'image.lut'           : [256, validate_int], # lookup table size
    'image.origin'        : ['upper', str], # 'upper' or 'lower'
    'image.resample'      : [False, validate_bool],

    'contour.negative_linestyle' : ['dashed', validate_negative_linestyle_legacy],

    # axes props
    'axes.axisbelow'  : [False, validate_bool],
    'axes.hold'       : [True, validate_bool],
    'axes.facecolor'  : ['w', validate_color], # background color; white
    'axes.edgecolor'  : ['k', validate_color], # edge color; black
    'axes.linewidth'  : [1.0, validate_float], # edge linewidth
    'axes.titlesize'  : ['large', validate_fontsize], # fontsize of the axes title
    'axes.grid'       : [False, validate_bool], # display grid or not
    'axes.labelsize'  : ['medium', validate_fontsize], # fontsize of the x any y labels
    'axes.labelcolor' : ['k', validate_color], # color of axis label
    'axes.formatter.limits' : [[-7, 7], validate_nseq_int(2)],
                               # use scientific notation if log10
                               # of the axis range is smaller than the
                               # first or larger than the second
    'axes.unicode_minus' : [True, validate_bool],

    'polaraxes.grid' : [True, validate_bool], # display polar grid or not

    # legend properties
    'legend.fancybox'       : [False, validate_bool],
    'legend.loc'            : ['upper right', validate_legend_loc], # at some point, this should be changed to 'best'
    'legend.isaxes'         : [True, validate_bool], # this option is internally ignored - it never served any useful purpose
    'legend.numpoints'      : [2, validate_int], # the number of points in the legend line
    'legend.fontsize'       : ['large', validate_fontsize],
    'legend.pad'            : [0, validate_float], # was 0.2, deprecated; the fractional whitespace inside the legend border
    'legend.borderpad'      : [0.4, validate_float], # units are fontsize
    'legend.markerscale'    : [1.0, validate_float], # the relative size of legend markers vs. original
    'legend.shadow'         : [False, validate_bool],
    'legend.labelspacing'   : [0.5, validate_float], # the vertical space between the legend entries
    'legend.handlelength'   : [2., validate_float], # the length of the legend lines
    'legend.handletextpad'  : [.8, validate_float], # the space between the legend line and legend text
    'legend.borderaxespad'  : [0.5, validate_float], # the border between the axes and legend edge
    'legend.columnspacing'  : [2., validate_float], # the spacing between legend columns
    # deprecated; the following dimensions are in axes coords
    'legend.labelsep'       : [0.010, validate_float], # the vertical space between the legend entries
    'legend.handlelen'      : [0.05, validate_float], # the length of the legend lines
    'legend.handletextsep'  : [0.02, validate_float], # the space between the legend line and legend text
    'legend.axespad'        : [0.5, validate_float], # the border between the axes and legend edge

    # tick properties
    'xtick.major.size' : [4, validate_float],      # major xtick size in points
    'xtick.minor.size' : [2, validate_float],      # minor xtick size in points
    'xtick.major.pad'  : [4, validate_float],      # distance to label in points
    'xtick.minor.pad'  : [4, validate_float],      # distance to label in points
    'xtick.color'      : ['k', validate_color],    # color of the xtick labels
    'xtick.labelsize'  : ['medium', validate_fontsize], # fontsize of the xtick labels
    'xtick.direction'  : ['in', str],              # direction of xticks

    'ytick.major.size' : [4, validate_float],      # major ytick size in points
    'ytick.minor.size' : [2, validate_float],      # minor ytick size in points
    'ytick.major.pad'  : [4, validate_float],      # distance to label in points
    'ytick.minor.pad'  : [4, validate_float],      # distance to label in points
    'ytick.color'      : ['k', validate_color],    # color of the ytick labels
    'ytick.labelsize'  : ['medium', validate_fontsize], # fontsize of the ytick labels
    'ytick.direction'  : ['in', str],              # direction of yticks

    'grid.color'     : ['k', validate_color], # grid color
    'grid.linestyle' : [':', str], # dotted
    'grid.linewidth' : [0.5, validate_float], # in points

    # figure props
    # figure size in inches: width by height
    'figure.figsize'    : [[8.0, 6.0], validate_nseq_float(2)],
    'figure.dpi'        : [80, validate_float], # DPI
    'figure.facecolor'  : ['0.75', validate_color], # facecolor; scalar gray
    'figure.edgecolor'  : ['w', validate_color], # edgecolor; white
    'figure.autolayout' : [False, validate_autolayout],

    'figure.subplot.left'   : [0.125, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
    'figure.subplot.right'  : [0.9, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
    'figure.subplot.bottom' : [0.1, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
    'figure.subplot.top'    : [0.9, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
    'figure.subplot.wspace' : [0.2, ValidateInterval(0, 1, closedmin=True, closedmax=False)],
    'figure.subplot.hspace' : [0.2, ValidateInterval(0, 1, closedmin=True, closedmax=False)],

    'savefig.dpi'         : [100, validate_float], # DPI
    'savefig.facecolor'   : ['w', validate_color], # facecolor; white
    'savefig.edgecolor'   : ['w', validate_color], # edgecolor; white
    'savefig.orientation' : ['portrait', validate_orientation], # page orientation

    'cairo.format'     : ['png', validate_cairo_format],
    'tk.window_focus'  : [False, validate_bool], # Maintain shell focus for TkAgg
    'tk.pythoninspect' : [False, validate_bool], # Set PYTHONINSPECT
    'ps.papersize'     : ['letter', validate_ps_papersize], # Set the papersize/type
    'ps.useafm'        : [False, validate_bool], # use AFM fonts for PS output
    'ps.usedistiller'  : [False, validate_ps_distiller], # use ghostscript or xpdf to distill ps output
    'ps.distiller.res' : [6000, validate_int], # dpi
    'ps.fonttype'      : [3, validate_fonttype], # 3 (Type3) or 42 (Truetype)
    'pdf.compression'  : [6, validate_int], # compression level from 0 to 9; 0 to disable
    'pdf.inheritcolor' : [False, validate_bool], # ignore any color-setting commands from the frontend
    'pdf.use14corefonts' : [False, validate_bool], # use only the 14 PDF core fonts
                                                   # embedded in every PDF viewing application
    'pdf.fonttype'     : [3, validate_fonttype], # 3 (Type3) or 42 (Truetype)
    'svg.image_inline'     : [True, validate_bool], # write raster image data directly into the svg file
    'svg.image_noscale'    : [False, validate_bool], # suppress scaling of raster data embedded in SVG
    'svg.embed_char_paths' : [True, validate_bool], # True to save all characters as paths in the SVG
    'docstring.hardcopy'   : [False, validate_bool], # set this when you want to generate hardcopy docstring
    'plugins.directory'    : ['.matplotlib_plugins', str], # where plugin directory is locate
    'path.simplify'        : [False, validate_bool],
    'agg.path.chunksize'   : [0, validate_int]  # 0 to disable chunking;
                                                # recommend about 20000 to
                                                # enable. Experimental.
}
if __name__ == '__main__':
    # Self-check: every default value must be a fixed point of its own
    # validator (validating a default returns the default unchanged).
    rc = defaultParams
    rc['datapath'][0] = '/'
    for key in rc:
        if not rc[key][1](rc[key][0]) == rc[key][0]:
            # Parenthesized print of a single formatted string works
            # identically as a statement (py2) and a function call (py3).
            print("%s: %s != %s" % (key, rc[key][1](rc[key][0]), rc[key][0]))
| agpl-3.0 |
soulmachine/scikit-learn | sklearn/ensemble/tests/test_bagging.py | 7 | 21070 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.grid_search import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_boston, load_iris
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
# Shared module-level fixtures: one RNG plus shuffled copies of the iris
# (classification) and boston (regression) datasets.
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
    """Check classification for various parameter settings."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    # Cartesian product of bagging options to smoke-test.
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "max_features": [1, 2, 4],
                          "bootstrap": [True, False],
                          "bootstrap_features": [True, False]})
    base_estimators = [None,
                       DummyClassifier(),
                       Perceptron(),
                       DecisionTreeClassifier(),
                       KNeighborsClassifier(),
                       SVC()]
    for base_estimator in base_estimators:
        for params in grid:
            model = BaggingClassifier(base_estimator=base_estimator,
                                      random_state=rng,
                                      **params)
            model.fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
    """Check classification for various parameter settings on sparse input."""
    class CustomSVC(SVC):
        """SVC variant that records the nature of the training set"""
        def fit(self, X, y):
            super(CustomSVC, self).fit(X, y)
            self.data_type_ = type(X)
            return self

    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    parameter_sets = [
        {"max_samples": 0.5, "max_features": 2,
         "bootstrap": True, "bootstrap_features": True},
        {"max_samples": 1.0, "max_features": 4,
         "bootstrap": True, "bootstrap_features": True},
        {"max_features": 2, "bootstrap": False,
         "bootstrap_features": True},
        {"max_samples": 0.5, "bootstrap": True,
         "bootstrap_features": False},
    ]
    for sparse_format in (csc_matrix, csr_matrix):
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)
        for params in parameter_sets:
            # Fit/predict on the sparse representation...
            sparse_classifier = BaggingClassifier(base_estimator=CustomSVC(),
                                                  random_state=1,
                                                  **params)
            sparse_classifier.fit(X_train_sparse, y_train)
            sparse_results = sparse_classifier.predict(X_test_sparse)
            # ...and on the dense one, with identical settings.
            dense_results = BaggingClassifier(
                base_estimator=CustomSVC(),
                random_state=1,
                **params
            ).fit(X_train, y_train).predict(X_test)
            # Same predictions, and every base estimator saw sparse data.
            assert_array_equal(sparse_results, dense_results)
            sparse_type = type(X_train_sparse)
            types = [est.data_type_ for est in sparse_classifier.estimators_]
            assert all([t == sparse_type for t in types])
def test_regression():
    """Check regression for various parameter settings."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
                                                        boston.target[:50],
                                                        random_state=rng)
    # Cartesian product of bagging options to smoke-test.
    grid = ParameterGrid({"max_samples": [0.5, 1.0],
                          "max_features": [0.5, 1.0],
                          "bootstrap": [True, False],
                          "bootstrap_features": [True, False]})
    base_estimators = [None,
                       DummyRegressor(),
                       DecisionTreeRegressor(),
                       KNeighborsRegressor(),
                       SVR()]
    for base_estimator in base_estimators:
        for params in grid:
            model = BaggingRegressor(base_estimator=base_estimator,
                                     random_state=rng,
                                     **params)
            model.fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
    """Check regression for various parameter settings on sparse input."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
                                                        boston.target[:50],
                                                        random_state=rng)

    class CustomSVR(SVR):
        """SVR variant that records the nature of the training set"""
        def fit(self, X, y):
            super(CustomSVR, self).fit(X, y)
            self.data_type_ = type(X)
            return self

    parameter_sets = [
        {"max_samples": 0.5,
         "max_features": 2,
         "bootstrap": True,
         "bootstrap_features": True},
        {"max_samples": 1.0,
         "max_features": 4,
         "bootstrap": True,
         "bootstrap_features": True},
        {"max_features": 2,
         "bootstrap": False,
         "bootstrap_features": True},
        {"max_samples": 0.5,
         "bootstrap": True,
         "bootstrap_features": False},
    ]
    for sparse_format in [csc_matrix, csr_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)
        for params in parameter_sets:
            # Trained on sparse format
            sparse_classifier = BaggingRegressor(
                base_estimator=CustomSVR(),
                random_state=1,
                **params
            ).fit(X_train_sparse, y_train)
            sparse_results = sparse_classifier.predict(X_test_sparse)
            # Trained on dense format
            dense_results = BaggingRegressor(
                base_estimator=CustomSVR(),
                random_state=1,
                **params
            ).fit(X_train, y_train).predict(X_test)
            # Sparse and dense training must agree, and every base
            # estimator must have been fit on sparse input.  (A redundant
            # duplicate of the first assertion after the loop was removed.)
            sparse_type = type(X_train_sparse)
            types = [i.data_type_ for i in sparse_classifier.estimators_]
            assert_array_equal(sparse_results, dense_results)
            assert all([t == sparse_type for t in types])
def test_bootstrap_samples():
    """Test that bootstrapping samples generates non-perfect base estimators."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)
    base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
    # without bootstrap, all trees are perfect on the training set,
    # so the ensemble's training score equals a single tree's
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_samples=1.0,
                                bootstrap=False,
                                random_state=rng).fit(X_train, y_train)
    assert_equal(base_estimator.score(X_train, y_train),
                 ensemble.score(X_train, y_train))
    # with bootstrap, trees are no longer perfect on the training set
    # (each resample omits some training points)
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_samples=1.0,
                                bootstrap=True,
                                random_state=rng).fit(X_train, y_train)
    assert_greater(base_estimator.score(X_train, y_train),
                   ensemble.score(X_train, y_train))
def test_bootstrap_features():
    """Bootstrapping features may draw the same feature more than once."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)
    # Without feature bootstrapping, each estimator sees every feature once.
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_features=1.0,
                                bootstrap_features=False,
                                random_state=rng).fit(X_train, y_train)
    for features in ensemble.estimators_features_:
        assert_equal(boston.data.shape[1], np.unique(features).shape[0])
    # Sampling with replacement produces duplicates, so the number of
    # distinct features per estimator drops below the total feature count.
    ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                max_features=1.0,
                                bootstrap_features=True,
                                random_state=rng).fit(X_train, y_train)
    for features in ensemble.estimators_features_:
        assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
    """predict_proba must be normalised and consistent with log-proba."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    with np.errstate(divide="ignore", invalid="ignore"):
        # Normal case: rows sum to one and proba == exp(log_proba).
        ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
                                     random_state=rng).fit(X_train, y_train)
        assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
                                         axis=1),
                                  np.ones(len(X_test)))
        assert_array_almost_equal(ensemble.predict_proba(X_test),
                                  np.exp(ensemble.predict_log_proba(X_test)))
        # Degenerate case: max_samples=5 makes it likely that individual
        # bootstrap draws are missing some classes entirely.
        ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
                                     random_state=rng,
                                     max_samples=5).fit(X_train, y_train)
        assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
                                         axis=1),
                                  np.ones(len(X_test)))
        assert_array_almost_equal(ensemble.predict_proba(X_test),
                                  np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
    """Check that oob prediction is a good estimation of the generalization
    error."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    for base_estimator in [DecisionTreeClassifier(), SVC()]:
        clf = BaggingClassifier(base_estimator=base_estimator,
                                n_estimators=100,
                                bootstrap=True,
                                oob_score=True,
                                random_state=rng).fit(X_train, y_train)
        test_score = clf.score(X_test, y_test)
        # The OOB estimate should be within 0.1 of the held-out accuracy.
        assert_less(abs(test_score - clf.oob_score_), 0.1)
        # With a single estimator few samples are out-of-bag, so a
        # UserWarning about the unreliable OOB estimate is expected.
        assert_warns(UserWarning,
                     BaggingClassifier(base_estimator=base_estimator,
                                       n_estimators=1,
                                       bootstrap=True,
                                       oob_score=True,
                                       random_state=rng).fit,
                     X_train,
                     y_train)
def test_oob_score_regression():
    """Check that oob prediction is a good estimation of the generalization
    error."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)
    clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                           n_estimators=50,
                           bootstrap=True,
                           oob_score=True,
                           random_state=rng).fit(X_train, y_train)
    test_score = clf.score(X_test, y_test)
    # The OOB estimate should track the held-out score closely.
    assert_less(abs(test_score - clf.oob_score_), 0.1)
    # A single estimator leaves too few out-of-bag samples; a UserWarning
    # about the unreliable OOB estimate is expected.
    assert_warns(UserWarning,
                 BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                  n_estimators=1,
                                  bootstrap=True,
                                  oob_score=True,
                                  random_state=rng).fit,
                 X_train,
                 y_train)
def test_single_estimator():
    """A one-estimator ensemble without any resampling must be equivalent
    to fitting the base estimator directly on the training data."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)
    bagged = BaggingRegressor(base_estimator=KNeighborsRegressor(),
                              n_estimators=1,
                              bootstrap=False,
                              bootstrap_features=False,
                              random_state=rng).fit(X_train, y_train)
    direct = KNeighborsRegressor().fit(X_train, y_train)
    assert_array_equal(bagged.predict(X_test), direct.predict(X_test))
def test_error():
    """Deficient constructor arguments must raise ValueError at fit time."""
    X, y = iris.data, iris.target
    base = DecisionTreeClassifier()
    # Invalid max_samples: negative, zero, fraction > 1, count > n_samples,
    # and a non-numeric value.
    for bad_samples in [-1, 0.0, 2.0, 1000, "foobar"]:
        assert_raises(ValueError,
                      BaggingClassifier(base, max_samples=bad_samples).fit,
                      X, y)
    # Invalid max_features, analogous to the cases above.
    for bad_features in [-1, 0.0, 2.0, 5, "foobar"]:
        assert_raises(ValueError,
                      BaggingClassifier(base, max_features=bad_features).fit,
                      X, y)
    # decision_function is unsupported when the base estimator lacks it.
    assert_raises(NotImplementedError,
                  BaggingClassifier(base).fit(X, y).decision_function, X)
def test_parallel_classification():
    """Predictions must not depend on the number of worker jobs."""
    rng = check_random_state(0)
    # Classification
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    ensemble = BaggingClassifier(DecisionTreeClassifier(),
                                 n_jobs=3,
                                 random_state=0).fit(X_train, y_train)
    # predict_proba: the same fitted model queried with different n_jobs.
    ensemble.set_params(n_jobs=1)
    y1 = ensemble.predict_proba(X_test)
    ensemble.set_params(n_jobs=2)
    y2 = ensemble.predict_proba(X_test)
    assert_array_almost_equal(y1, y2)
    # Refit from scratch single-threaded; same seed, same probabilities.
    ensemble = BaggingClassifier(DecisionTreeClassifier(),
                                 n_jobs=1,
                                 random_state=0).fit(X_train, y_train)
    y3 = ensemble.predict_proba(X_test)
    assert_array_almost_equal(y1, y3)
    # decision_function: same invariance checks with an SVC base estimator.
    ensemble = BaggingClassifier(SVC(),
                                 n_jobs=3,
                                 random_state=0).fit(X_train, y_train)
    ensemble.set_params(n_jobs=1)
    decisions1 = ensemble.decision_function(X_test)
    ensemble.set_params(n_jobs=2)
    decisions2 = ensemble.decision_function(X_test)
    assert_array_almost_equal(decisions1, decisions2)
    ensemble = BaggingClassifier(SVC(),
                                 n_jobs=1,
                                 random_state=0).fit(X_train, y_train)
    decisions3 = ensemble.decision_function(X_test)
    assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
    """Regression predictions must not depend on the number of jobs."""
    rng = check_random_state(0)
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)
    ensemble = BaggingRegressor(DecisionTreeRegressor(),
                                n_jobs=3,
                                random_state=0).fit(X_train, y_train)
    # The same fitted model queried with different job counts.
    ensemble.set_params(n_jobs=1)
    y1 = ensemble.predict(X_test)
    ensemble.set_params(n_jobs=2)
    y2 = ensemble.predict(X_test)
    assert_array_almost_equal(y1, y2)
    # Refit single-threaded with the same seed; predictions must match.
    ensemble = BaggingRegressor(DecisionTreeRegressor(),
                                n_jobs=1,
                                random_state=0).fit(X_train, y_train)
    y3 = ensemble.predict(X_test)
    assert_array_almost_equal(y1, y3)
def test_gridsearch():
    """Bagging ensembles must be compatible with grid search."""
    # Collapse iris to a binary problem so roc_auc scoring is applicable.
    X, y = iris.data, iris.target
    y[y == 2] = 1
    # Search jointly over the ensemble size and the base estimator's C.
    param_grid = {'n_estimators': (1, 2),
                  'base_estimator__C': (1, 2)}
    GridSearchCV(BaggingClassifier(SVC()),
                 param_grid,
                 scoring="roc_auc").fit(X, y)
def test_base_estimator():
    """Check base_estimator and its default values."""
    rng = check_random_state(0)
    # Classification: the default base estimator is a decision tree.
    X_train, X_test, y_train, y_test = train_test_split(iris.data,
                                                        iris.target,
                                                        random_state=rng)
    ensemble = BaggingClassifier(None,
                                 n_jobs=3,
                                 random_state=0).fit(X_train, y_train)
    assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
    # Explicitly provided estimators are used as-is.
    ensemble = BaggingClassifier(DecisionTreeClassifier(),
                                 n_jobs=3,
                                 random_state=0).fit(X_train, y_train)
    assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
    ensemble = BaggingClassifier(Perceptron(),
                                 n_jobs=3,
                                 random_state=0).fit(X_train, y_train)
    assert_true(isinstance(ensemble.base_estimator_, Perceptron))
    # Regression: the default base estimator is a regression tree.
    X_train, X_test, y_train, y_test = train_test_split(boston.data,
                                                        boston.target,
                                                        random_state=rng)
    ensemble = BaggingRegressor(None,
                                n_jobs=3,
                                random_state=0).fit(X_train, y_train)
    assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
    ensemble = BaggingRegressor(DecisionTreeRegressor(),
                                n_jobs=3,
                                random_state=0).fit(X_train, y_train)
    assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
    ensemble = BaggingRegressor(SVR(),
                                n_jobs=3,
                                random_state=0).fit(X_train, y_train)
    assert_true(isinstance(ensemble.base_estimator_, SVR))
if __name__ == "__main__":
    # Allow running this test module directly through the nose runner.
    import nose
    nose.runmodule()
| bsd-3-clause |
bartslinger/paparazzi | sw/airborne/test/math/compare_utm_enu.py | 77 | 2714 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import sys
import os
PPRZ_SRC = os.getenv("PAPARAZZI_SRC", "../../../..")
sys.path.append(PPRZ_SRC + "/sw/lib/python")
from pprz_math.geodetic import *
from pprz_math.algebra import DoubleRMat, DoubleEulers, DoubleVect3
from math import radians, degrees, tan
import matplotlib.pyplot as plt
import numpy as np
# Origin at ENAC, expressed in UTM zone 31 coordinates.
UTM_EAST0 = 377349  # in m
UTM_NORTH0 = 4824583  # in m
UTM_ZONE0 = 31
ALT0 = 147.000  # in m

utm_origin = UtmCoor_d(north=UTM_NORTH0, east=UTM_EAST0, alt=ALT0, zone=UTM_ZONE0)
print("origin %s" % utm_origin)

# Convert the origin to LLA, then ECEF, then a local tangent plane (LTP)
# definition used below as the ENU reference.
lla_origin = utm_origin.to_lla()
ecef_origin = lla_origin.to_ecef()
ltp_origin = ecef_origin.to_ltp_def()
print(ltp_origin)

# convergence angle to "true north" is approx 1 deg here
earth_radius = 6378137.0
n = 0.9996 * earth_radius  # UTM scale factor applied to the Earth radius
UTM_DELTA_EAST = 500000.  # false easting of the UTM central meridian
dist_to_meridian = utm_origin.east - UTM_DELTA_EAST
conv = dist_to_meridian / n * tan(lla_origin.lat)
# or (middle meridian of UTM zone 31 is at 3deg)
#conv = atan(tan(lla_origin.lon - radians(3))*sin(lla_origin.lat))
print("approx. convergence angle (north error compared to meridian): %f deg" % degrees(conv))

# Rotation matrix to correct for "true north"
R = DoubleEulers(psi=-conv).to_rmat()

# calculate ENU coordinates for 100 points in 100m distance
nb_points = 100
dist_points = 100
enu_res = np.zeros((nb_points, 2))
enu_res_c = np.zeros((nb_points, 2))
utm_res = np.zeros((nb_points, 2))
for i in range(0, nb_points):
    # Walk diagonally (north-east) away from the origin in UTM space.
    utm = UtmCoor_d()
    utm.north = i * dist_points + utm_origin.north
    utm.east = i * dist_points + utm_origin.east
    utm.alt = utm_origin.alt
    utm.zone = utm_origin.zone
    #print(utm)
    utm_res[i, 0] = utm.east - utm_origin.east
    utm_res[i, 1] = utm.north - utm_origin.north
    # The same point through the LLA -> ECEF -> ENU pipeline.
    lla = utm.to_lla()
    #print(lla)
    ecef = lla.to_ecef()
    enu = ecef.to_enu(ltp_origin)
    enu_res[i, 0] = enu.x
    enu_res[i, 1] = enu.y
    # Apply the convergence-angle correction to the ENU result.
    enu_c = R * DoubleVect3(enu.x, enu.y, enu.z)
    enu_res_c[i, 0] = enu_c.x
    enu_res_c[i, 1] = enu_c.y
    #print(enu)

# Compare both conversions as a function of distance from the origin.
dist = np.linalg.norm(utm_res, axis=1)
error = np.linalg.norm(utm_res - enu_res, axis=1)
error_c = np.linalg.norm(utm_res - enu_res_c, axis=1)

plt.figure(1)
plt.subplot(311)
plt.title("utm vs. enu")
plt.plot(enu_res[:, 0], enu_res[:, 1], 'g', label="ENU")
plt.plot(utm_res[:, 0], utm_res[:, 1], 'r', label="UTM")
plt.ylabel("y/north [m]")
plt.xlabel("x/east [m]")
plt.legend(loc='upper left')
plt.subplot(312)
plt.plot(dist, error, 'r')
plt.xlabel("dist from origin [m]")
plt.ylabel("error [m]")
plt.subplot(313)
plt.plot(dist, error_c, 'r')
plt.xlabel("dist from origin [m]")
plt.ylabel("error with north fix [m]")
plt.show()
| gpl-2.0 |
Adai0808/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 297 | 1247 | # Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
    """Time ``func`` on growing random inputs with one core versus all
    cores and draw both timing curves on a fresh figure."""
    random_state = check_random_state(0)
    sample_sizes = range(1000, 6000, 1000)
    one_core = []
    multi_core = []
    for n_samples in sample_sizes:
        X = random_state.rand(n_samples, 300)
        # Measure the serial run first, then the fully parallel one.
        for n_jobs, timings in ((1, one_core), (-1, multi_core)):
            tic = time.time()
            func(X, n_jobs=n_jobs)
            timings.append(time.time() - tic)
    pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
    pl.plot(sample_sizes, one_core, label="one core")
    pl.plot(sample_sizes, multi_core, label="multi core")
    pl.xlabel('n_samples')
    pl.ylabel('Time (s)')
    pl.title('Parallel %s' % func.__name__)
    pl.legend()
def euclidean_distances(X, n_jobs):
    # Pairwise Euclidean distance matrix; n_jobs controls parallelism.
    return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
    # Pairwise RBF kernel matrix with fixed gamma; n_jobs as above.
    return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
# Benchmark both pairwise functions, then display all figures at once.
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
VaclavDedik/classifier | classifier/selectors.py | 1 | 9474 | import numpy as np
from nltk.corpus import stopwords
from sklearn.decomposition import TruncatedSVD
from sklearn import feature_selection
from sklearn.feature_extraction.text import CountVectorizer
import utils
class AbstractSelector(object):
    """Base class for feature selectors.

    Subclasses implement :meth:`build` (fit on labeled documents and
    return the design matrix plus labels) and :meth:`get_x` (vectorize a
    single document). ``build`` is expected to populate the ascending
    sorted fields ``labels`` (class names) and ``features`` (feature
    names); :meth:`get_label` relies on ``labels``.
    """

    def build(self, documents):
        """Fit on **labeled** documents and return ``(X, Y)``.

        ``X`` holds one feature vector per document; ``Y`` is a column
        vector of integer labels. Implementations must also initialize
        ``self.labels`` and ``self.features``, both sorted ascending.

        :param documents: List of labeled Document objects.
        :returns: Tuple ``(X, Y)``.
        """
        raise NotImplementedError()

    def get_x(self, document):
        """Return the feature vector for ``document``.

        :meth:`build` must have been called beforehand.

        :param document: Document you want a feature vector for.
        :returns: Feature vector.
        """
        raise NotImplementedError()

    def get_label(self, y):
        """Map an integer label ``y`` back to its string representation.

        :meth:`build` must have been called beforehand.
        """
        return self.labels[y]
class BasicSelector(AbstractSelector):
    """Bag-of-words selector: each document becomes a vector of raw word
    counts over the vocabulary collected at build time."""

    def build(self, documents):
        self._build_labels(documents)
        corpus = []
        Y = []
        for doc in documents:
            # Treat a missing title/content as the empty string.
            text = (doc.title or "") + "\n" + (doc.content or "")
            corpus.append(text)
            Y.append(self.labels.index(doc.label))
        self.count_vect = CountVectorizer(decode_error="replace")
        X = np.array(self.count_vect.fit_transform(corpus).todense())
        self.features = sorted(self.count_vect.vocabulary_.keys())
        return X, np.transpose([Y])

    def get_x(self, document):
        """Count vocabulary words in the document's title and content."""
        text = (document.title or "") + "\n" + (document.content or "")
        return np.array(self.count_vect.transform([text]).todense())[0]

    def _build_labels(self, documents):
        """Store the sorted set of distinct labels seen in documents."""
        self.labels = sorted({doc.label for doc in documents})

    def __str__(self):
        return "BasicSelector()"
class SelectorDecorator(AbstractSelector):
    """Wraps another selector so that selectors can be layered; every
    operation is delegated to the wrapped instance by default."""

    def __init__(self, selector):
        self.selector = selector

    def build(self, documents):
        result = self.selector.build(documents)
        # Keep private copies so decorators can edit them independently.
        self.features = list(self.selector.features)
        self.labels = list(self.selector.labels)
        return result

    def get_x(self, document):
        return self.selector.get_x(document)

    def get_label(self, y):
        return self.selector.get_label(y)

    def __str__(self):
        return "->%s" % self.selector
class StandardizationDecorator(SelectorDecorator):
    """Standardizes features: subtracts the per-feature mean and divides
    by the sample standard deviation (ddof=1)."""

    def build(self, documents):
        X, Y = super(StandardizationDecorator, self).build(documents)
        # Remember the training moments so get_x can reuse them later.
        count = float(len(X))
        self.m = np.sum(X, axis=0) / count
        self.std = np.std(X, axis=0, ddof=1)
        return (X - self.m) / self.std, Y

    def get_x(self, document):
        raw = super(StandardizationDecorator, self).get_x(document)
        return (raw - self.m) / self.std

    def __str__(self):
        return "StandardizationDecorator()%s" \
            % super(StandardizationDecorator, self).__str__()
class NormalizationDecorator(SelectorDecorator):
    """Scales every sample's feature vector to unit Euclidean length."""

    def build(self, documents):
        X, Y = super(NormalizationDecorator, self).build(documents)
        lengths = np.sqrt(np.sum(X ** 2, axis=1))
        # Divide each row by its own norm (column vector broadcast).
        return X / np.transpose([lengths]), Y

    def get_x(self, document):
        vec = super(NormalizationDecorator, self).get_x(document)
        return vec / np.sqrt(np.sum(vec ** 2))

    def __str__(self):
        return "NormalizationDecorator()%s" \
            % super(NormalizationDecorator, self).__str__()
class StopWordsDecorator(SelectorDecorator):
    """Removes stop words (per NLTK's list for ``language``) from the
    feature set and from every produced feature vector."""

    def __init__(self, selector, language='english'):
        super(StopWordsDecorator, self).__init__(selector)
        self.language = language

    def build(self, documents):
        X, Y = super(StopWordsDecorator, self).build(documents)
        stoplist = stopwords.words(self.language)
        drop = []
        keep = []
        for idx, word in enumerate(self.features):
            if word in stoplist:
                drop.append(idx)
            else:
                keep.append(word)
        # Remember dropped column indices so get_x filters consistently.
        self.features = keep
        self.remove_lst = drop
        return np.delete(X, drop, axis=1), Y

    def get_x(self, document):
        full = super(StopWordsDecorator, self).get_x(document)
        return np.delete(full, self.remove_lst)

    def __str__(self):
        return "StopWordsDecorator(language='%s')%s" \
            % (self.language, super(StopWordsDecorator, self).__str__())
class TFIDFDecorator(SelectorDecorator):
    """Implementation of TF-IDF weighing.
    """

    def get_x(self, document):
        x = super(TFIDFDecorator, self).get_x(document)
        return self._tfidf(np.array(x))

    def build(self, documents):
        X, Y = super(TFIDFDecorator, self).build(documents)
        N = len(X)
        # Document frequency of each feature: number of rows where it is
        # non-zero (sum of a boolean matrix over axis 0).
        fs_d = sum(X > 0)
        self.idfs = np.log(float(N) / fs_d)
        # Method _tfidf can be used on 2D objects if input X is transposed
        # and self.idfs is a column vector
        self.idfs = np.transpose([self.idfs])
        X_tfidf = self._tfidf(X.T).T
        # Flatten idfs back to 1-D for the per-document path in get_x.
        self.idfs = np.concatenate(self.idfs)
        return X_tfidf, Y

    def _tfidf(self, x):
        """Implementation of augumented term frequency to prevent a bias
        towards longer documents.
        """
        # Per-column maximum count; for a 1-D vector this is the largest
        # term count of the single document.
        n = np.max(x, axis=0)
        x_new = (((x * 0.5) / np.array(n, dtype=float)) + 0.5) * self.idfs
        return x_new

    def __str__(self):
        return "TFIDFDecorator()%s" \
            % super(TFIDFDecorator, self).__str__()
class LSIDecorator(SelectorDecorator):
    """Projects feature vectors onto ``k`` latent dimensions with a
    truncated SVD (Latent Semantic Indexing)."""

    def __init__(self, selector, k=500):
        super(LSIDecorator, self).__init__(selector)
        self.k = k

    def get_x(self, document):
        vec = super(LSIDecorator, self).get_x(document)
        return self.svd.transform(vec)[0]

    def build(self, documents):
        X, Y = super(LSIDecorator, self).build(documents)
        # Fixed random_state keeps the decomposition reproducible.
        self.svd = TruncatedSVD(n_components=self.k, random_state=42)
        self.svd.fit(X)
        return self.svd.transform(X), Y

    def __str__(self):
        return "LSIDecorator(k=%s)%s" \
            % (self.k, super(LSIDecorator, self).__str__())
class ChiSquaredDecorator(SelectorDecorator):
    """Keeps only features whose chi-squared statistic against the labels
    reaches ``threshold``; all other columns are dropped."""

    def __init__(self, selector, threshold=10.86):
        super(ChiSquaredDecorator, self).__init__(selector)
        self.threshold = threshold

    def get_x(self, document):
        vec = super(ChiSquaredDecorator, self).get_x(document)
        return np.delete(vec, self.remove_lst)

    def build(self, documents):
        X, Y = super(ChiSquaredDecorator, self).build(documents)
        scores, _pvals = feature_selection.chi2(X, Y)
        # Columns scoring below the threshold are removed everywhere.
        self.remove_lst = [
            idx for idx, score in enumerate(scores)
            if score < self.threshold]
        return np.delete(X, self.remove_lst, axis=1), Y

    def __str__(self):
        return "ChiSquaredDecorator(threshold=%s)%s" \
            % (self.threshold, super(ChiSquaredDecorator, self).__str__())
| mit |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sklearn/qda.py | 3 | 6694 | """
Quadratic Discriminant Analysis
"""
# Author: Matthieu Perrot <matthieu.perrot@gmail.com>
#
# License: BSD Style.
import warnings
import numpy as np
from .base import BaseEstimator, ClassifierMixin
from .utils.fixes import unique
from .utils import check_arrays
__all__ = ['QDA']
class QDA(BaseEstimator, ClassifierMixin):
    """
    Quadratic Discriminant Analysis (QDA)

    A classifier with a quadratic decision boundary, generated
    by fitting class conditional densities to the data
    and using Bayes' rule.

    The model fits a Gaussian density to each class.

    Parameters
    ----------
    priors : array, optional, shape = [n_classes]
        Priors on classes

    Attributes
    ----------
    `means_` : array-like, shape = [n_classes, n_features]
        Class means

    `priors_` : array-like, shape = [n_classes]
        Class priors (sum to 1)

    `covariances_` : list of array-like, shape = [n_features, n_features]
        Covariance matrices of each class (only if ``store_covariances``
        was True at fit time)

    Examples
    --------
    >>> from sklearn.qda import QDA
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = QDA()
    >>> clf.fit(X, y)
    QDA(priors=None)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    sklearn.lda.LDA: Linear discriminant analysis
    """

    def __init__(self, priors=None):
        self.priors = np.asarray(priors) if priors is not None else None

    def fit(self, X, y, store_covariances=False, tol=1.0e-4):
        """
        Fit the QDA model according to the given training data and parameters.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        y : array, shape = [n_samples]
            Target values (integers)
        store_covariances : boolean
            If True the covariance matrices are computed and stored in the
            `self.covariances_` attribute.
        tol : float, optional
            Threshold on singular values of the centered class data used to
            detect rank deficiency (collinear variables).
        """
        X, y = check_arrays(X, y)
        self.classes_, y = unique(y, return_inverse=True)
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')
        if self.priors is None:
            # Empirical priors estimated from the class frequencies.
            self.priors_ = np.bincount(y) / float(n_samples)
        else:
            self.priors_ = self.priors

        cov = None
        if store_covariances:
            cov = []
        means = []
        scalings = []
        rotations = []
        # BUG FIX: the original used `xrange`, which raises NameError on
        # Python 3; `range` behaves identically for this loop on Python 2.
        for ind in range(n_classes):
            Xg = X[y == ind, :]
            meang = Xg.mean(0)
            means.append(meang)
            Xgc = Xg - meang
            # Xgc = U * S * V.T
            U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
            rank = np.sum(S > tol)
            if rank < n_features:
                warnings.warn("Variables are collinear")
            S2 = (S ** 2) / (len(Xg) - 1)
            if store_covariances:
                # cov = V * (S^2 / (n-1)) * V.T
                cov.append(np.dot(S2 * Vt.T, Vt))
            scalings.append(S2)
            rotations.append(Vt.T)
        if store_covariances:
            self.covariances_ = cov
        self.means_ = np.asarray(means)
        self.scalings = np.asarray(scalings)
        self.rotations = rotations
        return self

    @property
    def classes(self):
        # Deprecated alias kept for backward compatibility with 0.13.
        warnings.warn("QDA.classes is deprecated and will be removed in 0.14. "
                      "Use QDA.classes_ instead.", DeprecationWarning,
                      stacklevel=2)
        return self.classes_

    def _decision_function(self, X):
        # Per-class log-posterior up to an additive constant: Mahalanobis
        # term (via the stored SVD rotation/scaling), log-determinant of
        # the class covariance, plus the log prior.
        X = np.asarray(X)
        norm2 = []
        for i in range(len(self.classes_)):
            R = self.rotations[i]
            S = self.scalings[i]
            Xm = X - self.means_[i]
            X2 = np.dot(Xm, R * (S ** (-0.5)))
            norm2.append(np.sum(X2 ** 2, 1))
        norm2 = np.array(norm2).T  # shape = [len(X), n_classes]
        return (-0.5 * (norm2 + np.sum(np.log(self.scalings), 1))
                + np.log(self.priors_))

    def decision_function(self, X):
        """Apply decision function to an array of samples.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples (test vectors).

        Returns
        -------
        C : array, shape = [n_samples, n_classes] or [n_samples,]
            Decision function values related to each class, per sample.
            In the two-class case, the shape is [n_samples,], giving the
            log likelihood ratio of the positive class.
        """
        dec_func = self._decision_function(X)
        # handle special case of two classes
        if len(self.classes_) == 2:
            return dec_func[:, 1] - dec_func[:, 0]
        return dec_func

    def predict(self, X):
        """Perform classification on an array of test vectors X.

        The predicted class C for each sample in X is returned.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]
        """
        d = self._decision_function(X)
        y_pred = self.classes_.take(d.argmax(1))
        return y_pred

    def predict_proba(self, X):
        """Return posterior probabilities of classification.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.

        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior probabilities of classification per class.
        """
        values = self._decision_function(X)
        # compute the likelihood of the underlying gaussian models
        # up to a multiplicative constant. The per-row shift cancels in
        # the normalization below.
        likelihood = np.exp(values - values.min(axis=1)[:, np.newaxis])
        # compute posterior probabilities
        return likelihood / likelihood.sum(axis=1)[:, np.newaxis]

    def predict_log_proba(self, X):
        """Return posterior probabilities of classification.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Array of samples/test vectors.

        Returns
        -------
        C : array, shape = [n_samples, n_classes]
            Posterior log-probabilities of classification per class.
        """
        # XXX : can do better to avoid precision overflows
        probas_ = self.predict_proba(X)
        return np.log(probas_)
| agpl-3.0 |
sunny94/temp | sympy/plotting/tests/test_plot_implicit.py | 17 | 2600 | import warnings
from sympy import (plot_implicit, cos, Symbol, Eq, sin, re, And, Or, exp, I,
tan, pi)
from sympy.plotting.plot import unset_show
from tempfile import NamedTemporaryFile
from sympy.utilities.pytest import skip
from sympy.external import import_module
#Set plots not to show
unset_show()
def tmp_file(name=''):
    """Return a fresh temporary file path ending in '.png'.

    Fixes two defects of the original one-liner: the *name* argument was
    silently ignored (now used as the filename prefix for easier
    debugging), and the NamedTemporaryFile handle was never closed, so the
    returned path's validity depended on garbage collection (and broke on
    platforms that lock open files). The handle is closed immediately and
    deletion is disabled so the path stays usable by the caller.
    """
    f = NamedTemporaryFile(prefix=name, suffix='.png', delete=False)
    f.close()
    return f.name
def plot_and_save(name):
    """Exercise plot_implicit over equations, inequalities and boolean
    combinations, saving every figure to a throwaway PNG file."""
    x = Symbol('x')
    y = Symbol('y')
    z = Symbol('z')
    #implicit plot tests
    plot_implicit(Eq(y, cos(x)), (x, -5, 5), (y, -2, 2)).save(tmp_file(name))
    plot_implicit(Eq(y**2, x**3 - x), (x, -5, 5),
                  (y, -4, 4)).save(tmp_file(name))
    plot_implicit(y > 1 / x, (x, -5, 5),
                  (y, -2, 2)).save(tmp_file(name))
    plot_implicit(y < 1 / tan(x), (x, -5, 5),
                  (y, -2, 2)).save(tmp_file(name))
    plot_implicit(y >= 2 * sin(x) * cos(x), (x, -5, 5),
                  (y, -2, 2)).save(tmp_file(name))
    plot_implicit(y <= x**2, (x, -3, 3),
                  (y, -1, 5)).save(tmp_file(name))
    #Test all input args for plot_implicit
    plot_implicit(Eq(y**2, x**3 - x)).save(tmp_file())
    plot_implicit(Eq(y**2, x**3 - x), adaptive=False).save(tmp_file())
    plot_implicit(Eq(y**2, x**3 - x), adaptive=False, points=500).save(tmp_file())
    plot_implicit(y > x, (x, -5, 5)).save(tmp_file())
    plot_implicit(And(y > exp(x), y > x + 2)).save(tmp_file())
    plot_implicit(Or(y > x, y > -x)).save(tmp_file())
    plot_implicit(x**2 - 1, (x, -5, 5)).save(tmp_file())
    plot_implicit(x**2 - 1).save(tmp_file())
    plot_implicit(y > x, depth=-5).save(tmp_file())
    plot_implicit(y > x, depth=5).save(tmp_file())
    plot_implicit(y > cos(x), adaptive=False).save(tmp_file())
    plot_implicit(y < cos(x), adaptive=False).save(tmp_file())
    plot_implicit(And(y > cos(x), Or(y > x, Eq(y, x)))).save(tmp_file())
    plot_implicit(y - cos(pi / x)).save(tmp_file())
    #Test plots which cannot be rendered using the adaptive algorithm
    #TODO: catch the warning.
    plot_implicit(Eq(y, re(cos(x) + I*sin(x)))).save(tmp_file(name))
    # Requesting a legend for an expression with no labeled objects should
    # emit exactly one UserWarning rather than fail.
    with warnings.catch_warnings(record=True) as w:
        plot_implicit(x**2 - 1, legend='An implicit plot').save(tmp_file())
        assert len(w) == 1
        assert issubclass(w[-1].category, UserWarning)
        assert 'No labeled objects found' in str(w[0].message)
def test_matplotlib():
    """Run the plotting battery only when a usable matplotlib is present."""
    matplotlib = import_module('matplotlib', min_module_version='1.1.0',
                               catch=(RuntimeError,))
    if not matplotlib:
        skip("Matplotlib not the default backend")
    else:
        plot_and_save('test')
| bsd-3-clause |
animeshh/nuclei-analysis | hackrpi/plot_dbscan.py | 2 | 3735 | import numpy as np
from scipy.spatial import distance
from sklearn.cluster import DBSCAN
from sklearn import metrics
#from os import getcwd
##############################################################################
# Generate sample data
#centers = [[1, 1], [-1, -1], [1, -1]]
#X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4)
#fileNum = '01'
#dataDir = getcwd()+ '/../data/path-image-1' + str(fileNum) + '.tif/'
def clabels(featureNum):
    """Return the axis label for nuclei feature index ``featureNum``.

    Parameters
    ----------
    featureNum : int
        Feature index in the range 0..10.

    Returns
    -------
    str
        Human-readable feature name.

    Raises
    ------
    ValueError
        If ``featureNum`` is outside 0..10. (The original if/elif chain
        left ``label`` unbound and crashed with UnboundLocalError.)
    """
    # Index-to-name table replacing the original if/elif chain.
    # TODO(review): indices 2 and 5 both map to "Compactness" and index 10
    # is spelled "Homegeneity" in the original; strings are kept verbatim
    # so existing plots and callers see exactly the same labels.
    names = (
        "Area",                   # 0
        "Perimeter",              # 1
        "Compactness",            # 2
        "Asymmetry",              # 3
        "BoundaryIndex",          # 4
        "Compactness",            # 5
        "Contrast",               # 6
        "Dissimilarity",          # 7
        "Angular Second moment",  # 8
        "Energy",                 # 9
        "Homegeneity",            # 10
    )
    if not 0 <= featureNum < len(names):
        raise ValueError("unknown feature index: %r" % (featureNum,))
    return names[featureNum]
def load_data(fName):
    """Load a whitespace-delimited numeric matrix from file ``fName``.

    The file handle is now closed through a context manager even when
    ``np.loadtxt`` raises, fixing the resource leak of the original
    open/loadtxt/close sequence.
    """
    with open(fName) as fp:
        return np.loadtxt(fp)
def start_dbscan(fi, fo, featureIndexList=[0, 1]):
    # Cluster the feature file `fi` with DBSCAN and save a scatter plot of
    # the first two columns to `fo`. featureIndexList picks the axis labels.
    # NOTE(review): mutable default argument; it is only read here, so the
    # shared default is harmless unless a caller mutates it — confirm.
    ##############################################################################
    # Compute similarities
    X = load_data(fi)
    D = distance.squareform(distance.pdist(X))
    S = 1 - (D / np.max(D))
    #print X
    #print labels_true
    ##############################################################################
    # Compute DBSCAN
    # NOTE(review): S is a similarity matrix, but DBSCAN is fit without
    # metric='precomputed', so each *row* of S is treated as a feature
    # vector — verify this is the intended behavior.
    db = DBSCAN(eps=0.3, min_samples=10).fit(S)
    core_samples = db.core_sample_indices_
    labels = db.labels_
    # Number of clusters in labels, ignoring noise if present.
    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
    print 'Estimated number of clusters: %d' % n_clusters_
    if n_clusters_ == 0:
        return
    #print "Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels)
    #print "Completeness: %0.3f" % metrics.completeness_score(labels_true, labels)
    #print "V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels)
    #print "Adjusted Rand Index: %0.3f" % \
    #    metrics.adjusted_rand_score(labels_true, labels)
    #print "Adjusted Mutual Information: %0.3f" % \
    #    metrics.adjusted_mutual_info_score(labels_true, labels)
    print ("Silhouette Coefficient: %0.3f" %
           metrics.silhouette_score(D, labels, metric='precomputed'))
    ##############################################################################
    # Plot result
    import pylab as pl
    from itertools import cycle
    pl.close('all')
    pl.figure(1)
    pl.clf()
    # Black removed and is used for noise instead.
    colors = cycle('bgrcmybgrcmybgrcmybgrcmy')
    for k, col in zip(set(labels), colors):
        if k == -1:
            # Black used for noise.
            col = 'k'
            markersize = 6
        class_members = [index[0] for index in np.argwhere(labels == k)]
        cluster_core_samples = [index for index in core_samples
                                if labels[index] == k]
        for index in class_members:
            x = X[index]
            # Core points of real clusters are drawn larger than noise
            # and border points.
            if index in core_samples and k != -1:
                markersize = 14
            else:
                markersize = 6
            pl.plot(x[0], x[1], 'o', markerfacecolor=col,
                    markeredgecolor='k', markersize=markersize)
    pl.title('Estimated number of clusters: %d' % n_clusters_)
    #pl.savefig(dataDir + "dbscan/"+fo )
    pl.savefig(fo)
    pl.xlabel(clabels(featureIndexList[0]))
    pl.ylabel(clabels(featureIndexList[1]))
    pl.ion()
#for testing
#start_dbscan("path-image-100.seg.000000.000000.csv","myfilter_test.png")
| mit |
mmottahedi/neuralnilm_prototype | scripts/e470.py | 2 | 7017 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import (standardise, discretize, fdiff, power_and_fdiff,
RandomSegments, RandomSegmentsInMemory,
SameLocation)
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import (MixtureDensityLayer, DeConv1DLayer,
SharedWeightsDenseLayer)
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from neuralnilm.disaggregate import disaggregate
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
447: first attempt at disaggregation
"""
# Experiment name is derived from this script's filename (e.g. "e470").
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
# Output directory for figures / experiment artefacts.
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 1000  # iterations between saved diagnostic plots
N_SEQ_PER_BATCH = 64       # minibatch size (sequences per batch)

# Base configuration for the data source; per-experiment fields
# (target_appliance, seq_length, logger) are filled in by exp_a().
source_dict = dict(
    filename='/data/dk3810/ukdale.h5',
    window=("2013-03-18", None),
    train_buildings=[1, 2, 4],
    validation_buildings=[5],
    n_seq_per_batch=N_SEQ_PER_BATCH,
    standardise_input=True,
    standardise_targets=True,
    independently_center_inputs=True,
    ignore_incomplete=True,
    offset_probability=0.5,
    ignore_offset_activations=True
)

# Base network configuration; exp_a() deep-copies this and adds the
# experiment name, source and layers_config.
net_dict = dict(
    save_plot_interval=SAVE_PLOT_INTERVAL,
#    loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
#    loss_function=lambda x, t: mdn_nll(x, t).mean(),
#    loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
    loss_function=lambda x, t: mse(x, t).mean(),
#    loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
#    loss_function=partial(scaled_cost, loss_func=mse),
#    loss_function=ignore_inactive,
#    loss_function=partial(scaled_cost3, ignore_inactive=False),
#    updates_func=momentum,
    updates_func=clipped_nesterov_momentum,
    updates_kwargs={'clip_range': (0, 10)},
    learning_rate=1e-2,
    # step-wise learning-rate decay schedule (iteration -> new rate)
    learning_rate_changes_by_iteration={
        1000: 1e-3,
        5000: 1e-4
    },
    do_save_activations=True,
    auto_reshape=False,
#    plotter=CentralOutputPlotter
    plotter=Plotter(n_seq_to_plot=32)
)
def exp_a(name, target_appliance, seq_length):
    """Build the conv autoencoder network for one appliance experiment.

    Parameters
    ----------
    name : str
        Full experiment name; also used as the logger name.
    target_appliance : str or list of str
        Appliance label(s) passed to the SameLocation data source.
    seq_length : int
        Length of each input/output sequence.

    Returns
    -------
    Net
        Configured (but untrained) neuralnilm network.
    """
    # `source` is made global so callers (run_experiment machinery) can reach it.
    global source
    source_dict_copy = deepcopy(source_dict)
    source_dict_copy.update(dict(
        target_appliance=target_appliance,
        logger=logging.getLogger(name),
        seq_length=seq_length
    ))
    source = SameLocation(**source_dict_copy)
    net_dict_copy = deepcopy(net_dict)
    net_dict_copy.update(dict(
        experiment_name=name,
        source=source
    ))
    NUM_FILTERS = 4
    # Symmetric conv -> dense bottleneck -> deconv architecture.
    # (seq_length - 3) is the valid-mode conv output length for filter_length=4.
    net_dict_copy['layers_config'] = [
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # (batch, features, time)
        },
        {
            'label': 'conv0',
            'type': Conv1DLayer,  # convolve over the time axis
            'num_filters': NUM_FILTERS,
            'filter_length': 4,
            'stride': 1,
            'nonlinearity': None,
            'border_mode': 'valid'
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # back to (batch, time, features)
        },
        {
            'label': 'dense0',
            'type': DenseLayer,
            'num_units': (seq_length - 3) * NUM_FILTERS,
            'nonlinearity': rectify
        },
        {
            'label': 'dense1',
            'type': DenseLayer,
            'num_units': seq_length,
            'nonlinearity': rectify
        },
        {
            'label': 'dense2',
            'type': DenseLayer,
            'num_units': 128,  # bottleneck layer
            'nonlinearity': rectify
        },
        {
            'label': 'dense3',
            'type': DenseLayer,
            'num_units': seq_length,
            'nonlinearity': rectify
        },
        {
            'type': DenseLayer,
            'num_units': (seq_length - 3) * NUM_FILTERS,
            'nonlinearity': rectify
        },
        {
            'type': ReshapeLayer,
            'shape': (N_SEQ_PER_BATCH, seq_length - 3, NUM_FILTERS)
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # (batch, features, time)
        },
        {
            'type': DeConv1DLayer,
            'num_output_channels': 1,
            'filter_length': 4,
            'stride': 1,
            'nonlinearity': None,
            'border_mode': 'full'  # 'full' restores the original seq_length
        },
        {
            'type': DimshuffleLayer,
            'pattern': (0, 2, 1)  # back to (batch, time, features)
        }
    ]
    net = Net(**net_dict_copy)
    return net
def main():
    """Run the configured appliance experiments (currently only the last one).

    Each tuple is (experiment letter, appliance label(s), seq_length).
    """
    APPLIANCES = [
        ('a', ['fridge freezer', 'fridge', 'freezer'], 800),
        ('b', "'coffee maker'", 512),
        ('c', "'dish washer'", 2000),
        ('d', "'hair dryer'", 256),
        ('e', "'kettle'", 256),
        ('f', "'oven'", 2000),
        ('g', "'toaster'", 256),
        ('h', "'light'", 2000),
        ('i', ['washer dryer', 'washing machine'], 2000)
    ]
    # [-1:] restricts the run to the final (washer dryer) experiment only.
    for experiment, appliance, seq_length in APPLIANCES[-1:]:
        full_exp_name = NAME + experiment
        # init_experiment returns a call string such as "exp_a(...)"; the extra
        # arguments are spliced in before the closing paren and eval'd below.
        # NOTE(review): eval on a constructed string is fragile — presumably
        # safe here because all inputs are hard-coded above, but verify.
        func_call = init_experiment(PATH, 'a', full_exp_name)
        func_call = func_call[:-1] + ", {}, {})".format(appliance, seq_length)
        logger = logging.getLogger(full_exp_name)
        try:
            net = eval(func_call)
            run_experiment(net, epochs=None)
        except KeyboardInterrupt:
            logger.info("KeyboardInterrupt")
            break
        except Exception as exception:
            logger.exception("Exception")
            # raise
        else:
            # free the big objects between experiments
            del net.source
            del net
            gc.collect()
        finally:
            # NOTE(review): this runs after *every* iteration, so logging is
            # shut down after the first experiment — confirm this is intended.
            logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e470.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
| mit |
stefanhenneking/mxnet | example/bayesian-methods/bdk_demo.py | 45 | 15837 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import mxnet as mx
import mxnet.ndarray as nd
import numpy
import logging
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
import argparse
from algos import *
from data_loader import *
from utils import *
class CrossEntropySoftmax(mx.operator.NumpyOp):
    """Custom numpy operator: softmax forward pass, cross-entropy backward.

    Uses the legacy mx.operator.NumpyOp interface; takes 'data' (logits)
    and 'label' (same shape as data, presumably one-hot / soft targets —
    TODO confirm) and outputs the softmax probabilities.
    """

    def __init__(self):
        super(CrossEntropySoftmax, self).__init__(False)

    def list_arguments(self):
        return ['data', 'label']

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        # data, label and output all share the shape of the first input
        data_shape = in_shape[0]
        label_shape = in_shape[0]
        output_shape = in_shape[0]
        return [data_shape, label_shape], [output_shape]

    def forward(self, in_data, out_data):
        x = in_data[0]
        y = out_data[0]
        # subtract the per-row max before exp() for numerical stability
        y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1))).astype('float32')
        y /= y.sum(axis=1).reshape((x.shape[0], 1))

    def backward(self, out_grad, in_data, out_data, in_grad):
        l = in_data[1]
        y = out_data[0]
        dx = in_grad[0]
        # gradient of cross-entropy w.r.t. logits: probabilities minus labels
        dx[:] = (y - l)
class LogSoftmax(mx.operator.NumpyOp):
    """Custom numpy operator computing log-softmax of the input logits.

    Output is log(softmax(data)); backward yields exp(output) - label,
    i.e. the softmax probabilities minus the target distribution.
    """

    def __init__(self):
        super(LogSoftmax, self).__init__(False)

    def list_arguments(self):
        return ['data', 'label']

    def list_outputs(self):
        return ['output']

    def infer_shape(self, in_shape):
        # data, label and output all share the shape of the first input
        data_shape = in_shape[0]
        label_shape = in_shape[0]
        output_shape = in_shape[0]
        return [data_shape, label_shape], [output_shape]

    def forward(self, in_data, out_data):
        x = in_data[0]
        y = out_data[0]
        # log-softmax via the max-subtraction trick for numerical stability
        y[:] = (x - x.max(axis=1, keepdims=True)).astype('float32')
        y -= numpy.log(numpy.exp(y).sum(axis=1, keepdims=True)).astype('float32')
        # y[:] = numpy.exp(x - x.max(axis=1).reshape((x.shape[0], 1)))
        # y /= y.sum(axis=1).reshape((x.shape[0], 1))

    def backward(self, out_grad, in_data, out_data, in_grad):
        l = in_data[1]
        y = out_data[0]
        dx = in_grad[0]
        # exp(log-softmax) recovers the probabilities; gradient is p - label
        dx[:] = (numpy.exp(y) - l).astype('float32')
def classification_student_grad(student_outputs, teacher_pred):
    """Distillation gradient for classification students.

    The gradient w.r.t. the student's (only) output is simply the
    difference between its prediction and the teacher's prediction.
    """
    grad = student_outputs[0] - teacher_pred
    return [grad]
def regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision):
    """Distillation gradients for regression students.

    student_outputs holds [mean, variance]; the exp(-var) factors suggest
    the second output is a *log*-variance — TODO confirm against the
    student network definition. Returns gradients w.r.t. mean and variance.
    """
    student_mean = student_outputs[0]
    student_var = student_outputs[1]
    grad_mean = nd.exp(-student_var) * (student_mean - teacher_pred)
    # gradient w.r.t. the (log-)variance head
    grad_var = (1 - nd.exp(-student_var) * (nd.square(student_mean - teacher_pred)
                                            + 1.0 / teacher_noise_precision)) / 2
    return [grad_mean, grad_var]
def get_mnist_sym(output_op=None, num_hidden=400):
    """Build a 3-layer fully-connected MNIST classifier symbol.

    Parameters
    ----------
    output_op : callable, optional
        Custom output operator (e.g. LogSoftmax instance); defaults to
        the standard SoftmaxOutput.
    num_hidden : int
        Width of the two hidden layers.
    """
    net = mx.symbol.Variable('data')
    net = mx.symbol.FullyConnected(data=net, name='mnist_fc1', num_hidden=num_hidden)
    net = mx.symbol.Activation(data=net, name='mnist_relu1', act_type="relu")
    net = mx.symbol.FullyConnected(data=net, name='mnist_fc2', num_hidden=num_hidden)
    net = mx.symbol.Activation(data=net, name='mnist_relu2', act_type="relu")
    net = mx.symbol.FullyConnected(data=net, name='mnist_fc3', num_hidden=10)
    if output_op is None:
        net = mx.symbol.SoftmaxOutput(data=net, name='softmax')
    else:
        net = output_op(data=net, name='softmax')
    return net
def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
    """Gradient of the negative log posterior for the synthetic example.

    The likelihood is a two-component Gaussian mixture in X centred at
    theta1 and theta1 + theta2 (as the exp terms below show), with
    independent Gaussian priors on theta1 and theta2.

    Writes the gradient into `grad` (allocated on theta's context if not
    supplied) and returns it.
    """
    if grad is None:
        grad = nd.empty(theta.shape, theta.context)
    theta1 = theta.asnumpy()[0]
    theta2 = theta.asnumpy()[1]
    v1 = sigma1 ** 2
    v2 = sigma2 ** 2
    vx = sigmax ** 2
    # mixture likelihood (up to constants) evaluated at each data point
    denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp(
        -(X - theta1 - theta2) ** 2 / (2 * vx))
    grad_npy = numpy.zeros(theta.shape)
    # d/dtheta1 of -log posterior: data term (rescaled for minibatching) + prior term
    grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
                                    + numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (
                                        X - theta1 - theta2) / vx) / denominator).sum() \
                  + theta1 / v1
    # d/dtheta2: only the second mixture component depends on theta2
    grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) * (
        X - theta1 - theta2) / vx) / denominator).sum() \
                  + theta2 / v2
    grad[:] = grad_npy
    return grad
def get_toy_sym(teacher=True, teacher_noise_precision=None):
    """Build the toy-regression network symbol.

    teacher=True: single-hidden-layer regressor with a
    LinearRegressionOutput scaled by the teacher's noise precision.
    teacher=False: student network with two output heads (mean and
    variance), grouped into one symbol.
    """
    if teacher:
        net = mx.symbol.Variable('data')
        net = mx.symbol.FullyConnected(data=net, name='teacher_fc1', num_hidden=100)
        net = mx.symbol.Activation(data=net, name='teacher_relu1', act_type="relu")
        net = mx.symbol.FullyConnected(data=net, name='teacher_fc2', num_hidden=1)
        net = mx.symbol.LinearRegressionOutput(data=net, name='teacher_output',
                                               grad_scale=teacher_noise_precision)
    else:
        net = mx.symbol.Variable('data')
        net = mx.symbol.FullyConnected(data=net, name='student_fc1', num_hidden=100)
        net = mx.symbol.Activation(data=net, name='student_relu1', act_type="relu")
        student_mean = mx.symbol.FullyConnected(data=net, name='student_mean', num_hidden=1)
        student_var = mx.symbol.FullyConnected(data=net, name='student_var', num_hidden=1)
        net = mx.symbol.Group([student_mean, student_var])
    return net
def dev():
    """Return the compute context used throughout this script (GPU 0)."""
    return mx.gpu()
def run_mnist_SGD(training_num=50000):
    """Train the MNIST classifier with plain SGD (baseline)."""
    X, Y, X_test, Y_test = load_mnist(training_num)
    minibatch_size = 100
    net = get_mnist_sym()
    data_shape = (minibatch_size,) + X.shape[1::]
    # pre-allocated input/label buffers on the compute device
    data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
                   'softmax_label': nd.zeros((minibatch_size,), ctx=dev())}
    initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
    exe, exe_params, _ = SGD(sym=net, dev=dev(), data_inputs=data_inputs, X=X, Y=Y,
                             X_test=X_test, Y_test=Y_test,
                             total_iter_num=1000000,
                             initializer=initializer,
                             lr=5E-6, prior_precision=1.0, minibatch_size=100)
def run_mnist_SGLD(training_num=50000):
    """Train the MNIST classifier with Stochastic Gradient Langevin Dynamics."""
    X, Y, X_test, Y_test = load_mnist(training_num)
    minibatch_size = 100
    net = get_mnist_sym()
    data_shape = (minibatch_size,) + X.shape[1::]
    # pre-allocated input/label buffers on the compute device
    data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
                   'softmax_label': nd.zeros((minibatch_size,), ctx=dev())}
    initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
    # keep every 100th sample after a 1000-iteration burn-in
    exe, sample_pool = SGLD(sym=net, dev=dev(), data_inputs=data_inputs, X=X, Y=Y,
                            X_test=X_test, Y_test=Y_test,
                            total_iter_num=1000000,
                            initializer=initializer,
                            learning_rate=4E-6, prior_precision=1.0, minibatch_size=100,
                            thin_interval=100, burn_in_iter_num=1000)
def run_mnist_DistilledSGLD(training_num=50000):
    """Train MNIST teacher (SGLD) + student (distillation), per Bayesian Dark Knowledge."""
    X, Y, X_test, Y_test = load_mnist(training_num)
    minibatch_size = 100
    # hyper-parameters scale with the amount of training data
    if training_num >= 10000:
        num_hidden = 800
        total_iter_num = 1000000
        teacher_learning_rate = 1E-6
        student_learning_rate = 0.0001
        teacher_prior = 1
        student_prior = 0.1
        perturb_deviation = 0.1
    else:
        num_hidden = 400
        total_iter_num = 20000
        teacher_learning_rate = 4E-5
        student_learning_rate = 0.0001
        teacher_prior = 1
        student_prior = 0.1
        perturb_deviation = 0.001
    teacher_net = get_mnist_sym(num_hidden=num_hidden)
    # student outputs log-probabilities via the custom LogSoftmax operator
    logsoftmax = LogSoftmax()
    student_net = get_mnist_sym(output_op=logsoftmax, num_hidden=num_hidden)
    data_shape = (minibatch_size,) + X.shape[1::]
    teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
                           'softmax_label': nd.zeros((minibatch_size,), ctx=dev())}
    # student labels are full 10-way distributions from the teacher
    student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
                           'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev())}
    teacher_initializer = BiasXavier(factor_type="in", magnitude=1)
    student_initializer = BiasXavier(factor_type="in", magnitude=1)
    student_exe, student_params, _ = \
        DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
                      teacher_data_inputs=teacher_data_inputs,
                      student_data_inputs=student_data_inputs,
                      X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=total_iter_num,
                      student_initializer=student_initializer,
                      teacher_initializer=teacher_initializer,
                      student_optimizing_algorithm="adam",
                      teacher_learning_rate=teacher_learning_rate,
                      student_learning_rate=student_learning_rate,
                      teacher_prior_precision=teacher_prior, student_prior_precision=student_prior,
                      perturb_deviation=perturb_deviation, minibatch_size=100, dev=dev())
def run_toy_SGLD():
    """Run SGLD on the toy regression problem."""
    X, Y, X_test, Y_test = load_toy()
    minibatch_size = 1
    teacher_noise_precision = 1.0 / 9.0
    net = get_toy_sym(True, teacher_noise_precision)
    data_shape = (minibatch_size,) + X.shape[1::]
    data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
                   'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev())}
    initializer = mx.init.Uniform(0.07)
    exe, params, _ = \
        SGLD(sym=net, data_inputs=data_inputs,
             X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=50000,
             initializer=initializer,
             learning_rate=1E-4,
             # lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
             prior_precision=0.1,
             burn_in_iter_num=1000,
             thin_interval=10,
             task='regression',
             minibatch_size=minibatch_size, dev=dev())
def run_toy_DistilledSGLD():
    """Run distilled SGLD (teacher + student) on the toy regression problem."""
    X, Y, X_test, Y_test = load_toy()
    minibatch_size = 1
    teacher_noise_precision = 1.0
    teacher_net = get_toy_sym(True, teacher_noise_precision)
    student_net = get_toy_sym(False)
    data_shape = (minibatch_size,) + X.shape[1::]
    teacher_data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
                           'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev())}
    student_data_inputs = {'data': nd.zeros(data_shape, ctx=dev())}
    # 'softmax_label': nd.zeros((minibatch_size, 10), ctx=dev())}
    teacher_initializer = mx.init.Uniform(0.07)
    student_initializer = mx.init.Uniform(0.07)
    # bind the noise precision into the regression gradient function
    student_grad_f = lambda student_outputs, teacher_pred: \
        regression_student_grad(student_outputs, teacher_pred, teacher_noise_precision)
    student_exe, student_params, _ = \
        DistilledSGLD(teacher_sym=teacher_net, student_sym=student_net,
                      teacher_data_inputs=teacher_data_inputs,
                      student_data_inputs=student_data_inputs,
                      X=X, Y=Y, X_test=X_test, Y_test=Y_test, total_iter_num=80000,
                      teacher_initializer=teacher_initializer,
                      student_initializer=student_initializer,
                      teacher_learning_rate=1E-4, student_learning_rate=0.01,
                      # teacher_lr_scheduler=mx.lr_scheduler.FactorScheduler(100000, 0.5),
                      student_lr_scheduler=mx.lr_scheduler.FactorScheduler(8000, 0.8),
                      student_grad_f=student_grad_f,
                      teacher_prior_precision=0.1, student_prior_precision=0.001,
                      perturb_deviation=0.1, minibatch_size=minibatch_size, task='regression',
                      dev=dev())
def run_toy_HMC():
    """Run Hamiltonian Monte Carlo on the toy regression problem (full batch)."""
    X, Y, X_test, Y_test = load_toy()
    minibatch_size = Y.shape[0]  # HMC uses the whole dataset per step
    noise_precision = 1 / 9.0
    net = get_toy_sym(True, noise_precision)
    data_shape = (minibatch_size,) + X.shape[1::]
    data_inputs = {'data': nd.zeros(data_shape, ctx=dev()),
                   'teacher_output_label': nd.zeros((minibatch_size, 1), ctx=dev())}
    initializer = mx.init.Uniform(0.07)
    # L = number of leapfrog steps per HMC proposal
    sample_pool = HMC(net, data_inputs=data_inputs, X=X, Y=Y, X_test=X_test, Y_test=Y_test,
                      sample_num=300000, initializer=initializer, prior_precision=1.0,
                      learning_rate=1E-3, L=10, dev=dev())
def run_synthetic_SGLD():
    """Reproduce the synthetic Gaussian-mixture experiment from the SGLD paper.

    Runs SGLD on the two-parameter posterior (via `synthetic_grad`) with a
    polynomially decaying step size and plots a 2-D histogram of the samples.
    """
    theta1 = 0
    theta2 = 1
    sigma1 = numpy.sqrt(10)
    sigma2 = 1
    sigmax = numpy.sqrt(2)
    X = load_synthetic(theta1=theta1, theta2=theta2, sigmax=sigmax, num=100)
    minibatch_size = 1
    total_iter_num = 1000000
    lr_scheduler = SGLDScheduler(begin_rate=0.01, end_rate=0.0001, total_iter_num=total_iter_num,
                                 factor=0.55)
    optimizer = mx.optimizer.create('sgld',
                                    learning_rate=None,
                                    rescale_grad=1.0,
                                    lr_scheduler=lr_scheduler,
                                    wd=0)
    updater = mx.optimizer.get_updater(optimizer)
    theta = mx.random.normal(0, 1, (2,), mx.cpu())
    grad = nd.empty((2,), mx.cpu())
    samples = numpy.zeros((2, total_iter_num))
    # NOTE(review): `time` is presumably provided by `from utils import *`
    # at the top of the file — confirm.
    start = time.time()
    # BUG FIX: was `xrange`, which only exists on Python 2; this file targets
    # Python 2/3 (print_function future import), so use `range`.
    for i in range(total_iter_num):
        if (i + 1) % 100000 == 0:
            end = time.time()
            print("Iter:%d, Time spent: %f" % (i + 1, end - start))
            start = time.time()
        # one SGLD step on a random single data point, gradient rescaled
        # to the full-dataset scale
        ind = numpy.random.randint(0, X.shape[0])
        synthetic_grad(X[ind], theta, sigma1, sigma2, sigmax, rescale_grad=
                       X.shape[0] / float(minibatch_size), grad=grad)
        updater('theta', grad, theta)
        samples[:, i] = theta.asnumpy()
    plt.hist2d(samples[0, :], samples[1, :], (200, 200), cmap=plt.cm.jet)
    plt.colorbar()
    plt.show()
if __name__ == '__main__':
    # fix seeds so the demos are reproducible
    numpy.random.seed(100)
    mx.random.seed(100)
    parser = argparse.ArgumentParser(
        description="Examples in the paper [NIPS2015]Bayesian Dark Knowledge and "
                    "[ICML2011]Bayesian Learning via Stochastic Gradient Langevin Dynamics")
    parser.add_argument("-d", "--dataset", type=int, default=1,
                        help="Dataset to use. 0 --> TOY, 1 --> MNIST, 2 --> Synthetic Data in "
                             "the SGLD paper")
    parser.add_argument("-l", "--algorithm", type=int, default=2,
                        help="Type of algorithm to use. 0 --> SGD, 1 --> SGLD, other-->DistilledSGLD")
    parser.add_argument("-t", "--training", type=int, default=50000,
                        help="Number of training samples")
    args = parser.parse_args()
    training_num = args.training
    # dispatch on dataset then algorithm (see the --help strings above)
    if args.dataset == 1:
        if 0 == args.algorithm:
            run_mnist_SGD(training_num)
        elif 1 == args.algorithm:
            run_mnist_SGLD(training_num)
        else:
            run_mnist_DistilledSGLD(training_num)
    elif args.dataset == 0:
        if 1 == args.algorithm:
            run_toy_SGLD()
        elif 2 == args.algorithm:
            run_toy_DistilledSGLD()
        elif 3 == args.algorithm:
            run_toy_HMC()
    else:
        # any other dataset value falls through to the synthetic experiment
        run_synthetic_SGLD()
| apache-2.0 |
igsr/igsr_analysis | scripts/VCF/QC/generate_report.py | 1 | 6092 | import argparse
import glob
import re
import pdb
import os
import pandas as pd
import openpyxl
from tabulate import tabulate
def check_variantype(value):
    """argparse ``type=`` validator for the --vt option.

    Parameters
    ----------
    value : str
        Variant type given on the command line.

    Returns
    -------
    str
        `value` unchanged when it is a supported variant type.

    Raises
    ------
    argparse.ArgumentTypeError
        If `value` is neither 'snps' nor 'indels'.
    """
    # membership test instead of a chain of != comparisons
    if value not in ('snps', 'indels'):
        raise argparse.ArgumentTypeError("%s is an invalid variant type" % value)
    return value
# Command-line interface for the benchmarking report generator.
parser = argparse.ArgumentParser(description='Script to generate report on the benchmarking of a VCF')
parser.add_argument('--dirf', required=True, help='File containing Folder/s containing the per-chr folders generated by compare_with_giab.nf')
parser.add_argument('--vt', required=True, help='Type of variant to analyze. Possible values are \'snps\' and \'indels\'', type=check_variantype)
parser.add_argument('--outprefix', required=True, help='Outprefix used for output spreadsheet. Example: out will produce out.dirpath.xlsx')
parser.add_argument('--subset', required=True, help='Generate report with High conf sites or with all sites. Possible values are: \'highconf\' or \'all\'')

args = parser.parse_args()

# regex used to pull the chromosome name out of each results_* directory path
p = re.compile(".*/results_(.*)")
class BcftoolsStats(object):
    '''
    Container for the numbers extracted from a ``bcftools stats`` run on a VCF.
    '''

    def __init__(self, filename=None, summary_numbers=None, ts_tv=None,
                 ts_tv_1stalt=None, no_singleton_snps=None):
        '''
        Constructor

        Parameters
        ----------
        filename : str
            Filename of the VCF that was used to run bcftools stats
        summary_numbers : dict
            Basic stats keyed by their bcftools label, e.g.
            "number of samples:" -> 1, "number of records:" -> 1867316
        ts_tv : float
            ts/tv ratio
        ts_tv_1stalt : float
            ts/tv (1st ALT)
        no_singleton_snps : int
            Number of singleton SNPs
        '''
        self.filename = filename
        self.summary_numbers = summary_numbers
        self.ts_tv = ts_tv
        self.ts_tv_1stalt = ts_tv_1stalt
        self.no_singleton_snps = no_singleton_snps

    def __str__(self):
        # render every attribute as key='value', comma-separated
        parts = ("{0}='{1}'".format(name, val)
                 for name, val in self.__dict__.items())
        return ', '.join(parts)

    def __repr__(self):
        return self.__str__()
def parse_stats_file(f):
    '''
    Parse the output file produced by ``bcftools stats``.

    Parameters
    ----------
    f : str
        Path to the stats file.

    Returns
    -------
    BcftoolsStats object populated with the summary numbers, ts/tv
    ratios and singleton-SNP count found in the file.
    '''
    stats = BcftoolsStats(filename=f)
    summary = {}
    with open(f) as stream:
        for raw_line in stream:
            raw_line = raw_line.rstrip('\n')
            fields = raw_line.split('\t')
            if raw_line.startswith('SN\t'):
                # summary-number rows: label in column 3, value in column 4
                summary[fields[2]] = int(fields[3])
            elif raw_line.startswith('TSTV\t'):
                stats.ts_tv = fields[4]
                stats.ts_tv_1stalt = fields[7]
            elif raw_line.startswith('SiS\t'):
                stats.no_singleton_snps = fields[3]
        stats.summary_numbers = summary
    return stats
#this dict will contain all interesting data. Its format will be:
# {'dirpath/': {20: {'FP': 2023}}}
# 20 is the chrom
data = dict()

with open(args.dirf) as f:
    for dirpath in f:
        dirpath = dirpath.rstrip("\n")
        assert os.path.isdir(dirpath), "Dir '{0}' does not exist".format(dirpath)
        data[dirpath] = {}
        # one results_* folder per chromosome
        for dir in glob.glob(dirpath + "/results_*"):
            # NOTE(review): `dir` and `type` (below) shadow Python builtins
            print("Processing: {0}".format(dir))
            m = p.match(dir)
            if m:
                chrom = m.group(1)
                numbers = dict()
                res = None  # this will be a list with stats for each results_chr*
                # append 'highconf' or 'all' files to res
                if args.subset == "highconf":
                    res = [f for f in glob.glob(dir + "/*.stats") if "highconf" in f]
                elif args.subset == "all":
                    res = [f for f in glob.glob(dir + "/*.stats") if not "highconf" in f]
                else:
                    raise Exception("Value not recognised for subset argument: {0}".format(args.subset))
                for f in res:
                    type = os.path.basename(f).split(".")[0]  # what type of file is this? TP or FP or FN
                    bcfobj = parse_stats_file(f)  # parse this stats file
                    sum_dict = bcfobj.summary_numbers
                    # numbers dict will have the number of variants
                    # for a certain type (TP, FP)
                    if args.vt == 'snps':
                        numbers[type] = sum_dict['number of SNPs:']
                    elif args.vt == 'indels':
                        numbers[type] = sum_dict['number of indels:']
                chr_stripped1 = None
                chr_stripped2 = None
                # normalise chrX to a numeric key (23) so chromosomes sort
                if m.group(1) == 'X' or m.group(1) == 'chrX':
                    chrom = chrom.replace("chrX", "chr23")
                    chrom = chrom.replace("X", "chr23")
                chr_stripped1 = chrom.replace("chr", "")
                if chr_stripped1 != 'All':
                    chr_stripped2 = int(chr_stripped1)
                else:
                    chr_stripped2 = chr_stripped1
                data[dirpath][chr_stripped2] = numbers
            else:
                raise Exception('No chromosome was fetched from dir name')

# one spreadsheet per analysed directory, with TP/FN/FP counts and rates
for dir in data.keys():
    print("Stats for dir: {0}".format(dir))
    data_per_dir = data[dir]
    df = pd.DataFrame.from_dict(data_per_dir, orient='index')
    # cat1 = truth set total (TP+FN); cat2 = call set total (TP+FP)
    df['total_cat1'] = df.TP + df.FN
    df['total_cat2'] = df.TP + df.FP
    df['%_TP'] = round(df.TP * 100 / df.total_cat1, 2)
    df['%_FN'] = round(df.FN * 100 / df.total_cat1, 2)
    df['%_FP'] = round(df.FP * 100 / df.total_cat2, 2)
    df1 = df.sort_index()
    df1 = df1.loc[:, ['TP', '%_TP', 'FN', '%_FN', 'FP', '%_FP', 'total_cat1', 'total_cat2']]
    print(tabulate(df1, headers='keys', tablefmt='psql'))
    outfile = "{0}.{1}.xlsx".format(args.outprefix, os.path.basename(dir))
    writer = pd.ExcelWriter(outfile)
    df1.to_excel(writer, 'Benchmarking')
    writer.save()
| apache-2.0 |
superbobry/hyperopt-sklearn | hpsklearn/tests/test_estimator.py | 4 | 1604 |
try:
import unittest2 as unittest
except:
import unittest
import numpy as np
from hpsklearn.estimator import hyperopt_estimator
from hpsklearn import components
class TestIter(unittest.TestCase):
    """Tests for hyperopt_estimator's iterative fitting interface."""

    def setUp(self):
        # deterministic 2-d data whose label is the sign of the first feature
        np.random.seed(123)
        self.X = np.random.randn(1000, 2)
        self.Y = (self.X[:, 0] > 0).astype('int')

    def test_fit_iter_basic(self):
        """fit_iter yields the shared trials object, one trial per step."""
        model = hyperopt_estimator(verbose=1, trial_timeout=5.0)
        for ii, trials in enumerate(model.fit_iter(self.X, self.Y)):
            assert trials is model.trials
            assert len(trials.trials) == ii
            if ii == 10:
                break

    def test_fit(self):
        """fit() runs exactly max_evals trials."""
        model = hyperopt_estimator(verbose=1, max_evals=5, trial_timeout=5.0)
        model.fit(self.X, self.Y)
        assert len(model.trials.trials) == 5

    def test_fit_biginc(self):
        """max_evals caps the trial count even with a larger fit_increment."""
        model = hyperopt_estimator(verbose=1, max_evals=5, trial_timeout=5.0,
                                   fit_increment=20)
        model.fit(self.X, self.Y)
        # -- make sure we only get 5 even with big fit_increment
        assert len(model.trials.trials) == 5
class TestSpace(unittest.TestCase):
    """Tests for passing a custom search space to hyperopt_estimator."""

    def setUp(self):
        # same deterministic toy classification data as TestIter
        np.random.seed(123)
        self.X = np.random.randn(1000, 2)
        self.Y = (self.X[:, 0] > 0).astype('int')

    def test_smoke(self):
        # -- verify the space argument is accepted and runs
        space = components.generic_space()
        model = hyperopt_estimator(
            verbose=1, max_evals=10, trial_timeout=5, space=space)
        model.fit(self.X, self.Y)
# -- flake8 eof
| bsd-3-clause |
matthew-tucker/mne-python | examples/inverse/plot_label_source_activations.py | 32 | 2269 | """
====================================================
Extracting the time series of activations in a label
====================================================
We first apply a dSPM inverse operator to get signed activations
in a label (with positive and negative values) and we then
compare different strategies to average the times series
in a label. We compare a simple average, with an averaging
using the dipoles normal (flip mode) and then a PCA,
also using a sign flip.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, apply_inverse
print(__doc__)

data_path = sample.data_path()
label = 'Aud-lh'
label_fname = data_path + '/MEG/sample/labels/%s.label' % label
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'

# regularisation: lambda2 = 1 / SNR^2
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)

# Load data
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
src = inverse_operator['src']

# Compute inverse solution
pick_ori = "normal"  # Get signed values to see the effect of sign filp
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
                    pick_ori=pick_ori)

label = mne.read_label(label_fname)

stc_label = stc.in_label(label)
# three label-averaging strategies compared in this example
mean = stc.extract_label_time_course(label, src, mode='mean')
mean_flip = stc.extract_label_time_course(label, src, mode='mean_flip')
pca = stc.extract_label_time_course(label, src, mode='pca_flip')

print("Number of vertices : %d" % len(stc_label.data))

# View source activations: all vertices in grey, summaries in colour
plt.figure()
plt.plot(1e3 * stc_label.times, stc_label.data.T, 'k', linewidth=0.5)
h0, = plt.plot(1e3 * stc_label.times, mean.T, 'r', linewidth=3)
h1, = plt.plot(1e3 * stc_label.times, mean_flip.T, 'g', linewidth=3)
h2, = plt.plot(1e3 * stc_label.times, pca.T, 'b', linewidth=3)
plt.legend([h0, h1, h2], ['mean', 'mean flip', 'PCA flip'])
plt.xlabel('Time (ms)')
plt.ylabel('Source amplitude')
plt.title('Activations in Label : %s' % label)
plt.show()
| bsd-3-clause |
glennq/scikit-learn | examples/cluster/plot_adjusted_for_chance_measures.py | 105 | 4300 | """
==========================================================
Adjustment for chance in clustering performance evaluation
==========================================================
The following plots demonstrate the impact of the number of clusters and
number of samples on various clustering performance evaluation metrics.
Non-adjusted measures such as the V-Measure show a dependency between
the number of clusters and the number of samples: the mean V-Measure
of random labeling increases significantly as the number of clusters is
closer to the total number of samples used to compute the measure.
Adjusted for chance measure such as ARI display some random variations
centered around a mean score of 0.0 for any number of samples and
clusters.
Only adjusted measures can hence safely be used as a consensus index
to evaluate the average stability of clustering algorithms for a given
value of k on various overlapping sub-samples of the dataset.
"""
print(__doc__)
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from time import time
from sklearn import metrics
def uniform_labelings_scores(score_func, n_samples, n_clusters_range,
                             fixed_n_classes=None, n_runs=5, seed=42):
    """Score pairs of random uniform labelings with ``score_func``.

    For every value in ``n_clusters_range``, two labelings with that many
    clusters are drawn uniformly at random and scored, ``n_runs`` times.
    When ``fixed_n_classes`` is given, the first labeling is instead a
    single fixed "ground truth" assignment with that many classes.

    Returns an array of shape (len(n_clusters_range), n_runs).
    """
    draw_labels = np.random.RandomState(seed).randint
    results = np.zeros((len(n_clusters_range), n_runs))

    labels_a = None
    if fixed_n_classes is not None:
        # one fixed reference labeling reused across all runs
        labels_a = draw_labels(low=0, high=fixed_n_classes, size=n_samples)

    for row, k in enumerate(n_clusters_range):
        for run in range(n_runs):
            if fixed_n_classes is None:
                labels_a = draw_labels(low=0, high=k, size=n_samples)
            labels_b = draw_labels(low=0, high=k, size=n_samples)
            results[row, run] = score_func(labels_a, labels_b)
    return results
score_funcs = [
    metrics.adjusted_rand_score,
    metrics.v_measure_score,
    metrics.adjusted_mutual_info_score,
    metrics.mutual_info_score,
]

# 2 independent random clusterings with equal cluster number

n_samples = 100
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
# the builtin int is the documented replacement.
n_clusters_range = np.linspace(2, n_samples, 10).astype(int)

plt.figure(1)

plots = []
names = []
for score_func in score_funcs:
    print("Computing %s for %d values of n_clusters and n_samples=%d"
          % (score_func.__name__, len(n_clusters_range), n_samples))

    t0 = time()
    scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range)
    print("done in %0.3fs" % (time() - t0))
    # plot the median score with error bars of one standard deviation
    plots.append(plt.errorbar(
        n_clusters_range, np.median(scores, axis=1), scores.std(axis=1))[0])
    names.append(score_func.__name__)

plt.title("Clustering measures for 2 random uniform labelings\n"
          "with equal number of clusters")
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.legend(plots, names)
# BUG FIX: the ymin=/ymax= keyword aliases were removed in Matplotlib 3.3;
# positional (bottom, top) arguments work on all versions.
plt.ylim(-0.05, 1.05)


# Random labeling with varying n_clusters against ground class labels
# with fixed number of clusters

n_samples = 1000
n_clusters_range = np.linspace(2, 100, 10).astype(int)
n_classes = 10

plt.figure(2)

plots = []
names = []
for score_func in score_funcs:
    print("Computing %s for %d values of n_clusters and n_samples=%d"
          % (score_func.__name__, len(n_clusters_range), n_samples))

    t0 = time()
    scores = uniform_labelings_scores(score_func, n_samples, n_clusters_range,
                                      fixed_n_classes=n_classes)
    print("done in %0.3fs" % (time() - t0))
    # plot the mean score with error bars of one standard deviation
    plots.append(plt.errorbar(
        n_clusters_range, scores.mean(axis=1), scores.std(axis=1))[0])
    names.append(score_func.__name__)

plt.title("Clustering measures for random uniform labeling\n"
          "against reference assignment with %d classes" % n_classes)
plt.xlabel('Number of clusters (Number of samples is fixed to %d)' % n_samples)
plt.ylabel('Score value')
plt.ylim(-0.05, 1.05)
plt.legend(plots, names)
plt.show()
| bsd-3-clause |
bzero/statsmodels | statsmodels/genmod/_prediction.py | 27 | 9437 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 19 11:29:18 2014
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
from scipy import stats
# this is similar to ContrastResults after t_test, partially copied and adjusted
class PredictionResults(object):
    """Prediction results for the mean of a (generalized) linear model.

    Parameters
    ----------
    predicted_mean : ndarray
        predicted values of the mean
    var_pred_mean : ndarray
        variance of the predicted mean
    var_resid : ndarray, optional
        residual variance; only needed for prediction intervals of new
        observations (``se_obs``), which is not implemented yet
    df : int, optional
        degrees of freedom, required if ``dist == 't'``
    dist : None, 'norm', 't' or distribution instance, optional
        distribution used for tests and confidence intervals; ``None``
        defaults to the normal distribution
    row_labels : sequence, optional
        labels attached to the rows of the summary frame
    linpred : PredictionResults, optional
        prediction results on the scale of the linear predictor, used by
        the 'endpoint' confidence-interval method
    link : link instance, optional
        link function mapping the linear predictor to the mean
    """

    def __init__(self, predicted_mean, var_pred_mean, var_resid=None,
                 df=None, dist=None, row_labels=None, linpred=None, link=None):
        # TODO: is var_resid used? drop from arguments?
        self.predicted_mean = predicted_mean
        self.var_pred_mean = var_pred_mean
        self.df = df
        self.var_resid = var_resid
        self.row_labels = row_labels
        self.linpred = linpred
        self.link = link
        # distribution used for tests and confidence intervals
        if dist is None or dist == 'norm':
            self.dist = stats.norm
            self.dist_args = ()
        elif dist == 't':
            self.dist = stats.t
            self.dist_args = (self.df,)
        else:
            # assume a scipy.stats-like distribution object was passed in
            self.dist = dist
            self.dist_args = ()

    @property
    def se_obs(self):
        # standard error for predicting a new observation; would be
        # np.sqrt(self.var_pred_mean + self.var_resid) once var_resid
        # handling is settled (the old unreachable return was removed)
        raise NotImplementedError

    @property
    def se_mean(self):
        """Standard error of the predicted mean."""
        return np.sqrt(self.var_pred_mean)

    @property
    def tvalues(self):
        """Test statistic for the null hypothesis that the mean is zero."""
        return self.predicted_mean / self.se_mean

    def t_test(self, value=0, alternative='two-sided'):
        '''z- or t-test for hypothesis that mean is equal to value

        Parameters
        ----------
        value : array_like
            value under the null hypothesis
        alternative : string
            'two-sided', 'larger', 'smaller'

        Returns
        -------
        stat : ndarray
            test statistic
        pvalue : ndarray
            p-value of the hypothesis test, the distribution is given by
            the attribute of the instance, specified in `__init__`. Default
            if not specified is the normal distribution.

        Raises
        ------
        ValueError
            if `alternative` is not one of the supported strings
        '''
        # from statsmodels.stats.weightstats
        # assumes symmetric distribution
        stat = (self.predicted_mean - value) / self.se_mean
        if alternative in ['two-sided', '2-sided', '2s']:
            pvalue = self.dist.sf(np.abs(stat), *self.dist_args) * 2
        elif alternative in ['larger', 'l']:
            pvalue = self.dist.sf(stat, *self.dist_args)
        elif alternative in ['smaller', 's']:
            pvalue = self.dist.cdf(stat, *self.dist_args)
        else:
            raise ValueError('invalid alternative')
        return stat, pvalue

    def conf_int(self, method='endpoint', alpha=0.05, **kwds):
        """
        Returns the confidence interval of the predicted mean.

        Parameters
        ----------
        method : {'endpoint', 'delta'}
            'endpoint' transforms the confidence interval of the linear
            predictor through the inverse link; 'delta' applies the delta
            method on the mean scale.  If the link is (numerically) the
            identity, the delta method is used for both choices.
        alpha : float, optional
            The significance level for the confidence interval.
            ie., The default `alpha` = .05 returns a 95% confidence interval.
        kwds : extra keyword arguments
            currently ignored, only for compatibility, consistent signature

        Returns
        -------
        ci : ndarray, (k_constraints, 2)
            The array has the lower and the upper limit of the confidence
            interval in the columns.
        """
        if self.link is None:
            # no link available: treat the prediction as being on the
            # linear scale so the delta method applies directly
            # (previously this path raised AttributeError on None.inverse)
            is_linear = True
        else:
            # probe the link on a few points to detect an identity mapping
            tmp = np.linspace(0, 1, 6)
            is_linear = (self.link.inverse(tmp) == tmp).all()
        if method == 'endpoint' and not is_linear:
            ci_linear = self.linpred.conf_int(alpha=alpha, obs=False)
            ci = self.link.inverse(ci_linear)
        elif method == 'delta' or is_linear:
            se = self.se_mean
            q = self.dist.ppf(1 - alpha / 2., *self.dist_args)
            lower = self.predicted_mean - q * se
            upper = self.predicted_mean + q * se
            ci = np.column_stack((lower, upper))
            # if we want to stack at a new last axis, for lower.ndim > 1
            # np.concatenate((lower[..., None], upper[..., None]), axis=-1)
        else:
            # previously fell through with `ci` undefined -> NameError
            raise ValueError("method should be 'endpoint' or 'delta'")
        return ci

    def summary_frame(self, what='all', alpha=0.05):
        """Return a DataFrame with prediction mean, se and confidence limits."""
        # TODO: finish and cleanup
        import pandas as pd
        from collections import OrderedDict  # stdlib; compat shim not needed
        #ci_obs = self.conf_int(alpha=alpha, obs=True)  # need to split
        ci_mean = self.conf_int(alpha=alpha)
        to_include = OrderedDict()
        to_include['mean'] = self.predicted_mean
        to_include['mean_se'] = self.se_mean
        to_include['mean_ci_lower'] = ci_mean[:, 0]
        to_include['mean_ci_upper'] = ci_mean[:, 1]
        self.table = to_include
        #OrderedDict doesn't work to preserve sequence
        # pandas dict doesn't handle 2d_array
        #data = np.column_stack(list(to_include.values()))
        #names = ....
        res = pd.DataFrame(to_include, index=self.row_labels,
                           columns=to_include.keys())
        return res
def get_prediction_glm(self, exog=None, transform=True, weights=None,
                       row_labels=None, linpred=None, link=None, pred_kwds=None):
    """
    compute prediction results

    Parameters
    ----------
    exog : array-like, optional
        The values for which you want to predict.
    transform : bool, optional
        If the model was fit via a formula, do you want to pass
        exog through the formula. Default is True. E.g., if you fit
        a model y ~ log(x1) + log(x2), and transform is True, then
        you can pass a data structure that contains x1 and x2 in
        their original form. Otherwise, you'd need to log the data
        first.
    weights : array_like, optional
        Weights interpreted as in WLS, used for the variance of the predicted
        residual.
    row_labels : sequence, optional
        labels for the prediction rows; inferred from `exog` or the model
        data if not given
    linpred : PredictionResults, optional
        prediction results on the linear-predictor scale
    link : link instance, optional
        link function of the model family, used for confidence intervals
    pred_kwds : dict, optional
        Some models can take additional arguments or keywords, see the
        predict method of the model for the details.

    Returns
    -------
    prediction_results : instance
        The prediction results instance contains prediction and prediction
        variance and can on demand calculate confidence intervals and summary
        tables for the prediction of the mean and of new observations.
    """
    ### prepare exog and row_labels, based on base Results.predict
    if transform and hasattr(self.model, 'formula') and exog is not None:
        from patsy import dmatrix
        exog = dmatrix(self.model.data.design_info.builder,
                       exog)
    if exog is not None:
        if row_labels is None:
            if hasattr(exog, 'index'):
                row_labels = exog.index
            else:
                row_labels = None
        exog = np.asarray(exog)
        if exog.ndim == 1 and (self.model.exog.ndim == 1 or
                               self.model.exog.shape[1] == 1):
            # single-regressor model: interpret a 1-D exog as observations
            exog = exog[:, None]
        exog = np.atleast_2d(exog)  # needed in count model shape[1]
    else:
        exog = self.model.exog
        if weights is None:
            weights = getattr(self.model, 'weights', None)
        if row_labels is None:
            row_labels = getattr(self.model.data, 'row_labels', None)
    # need to handle other arrays, TODO: is delegating to model possible ?
    if weights is not None:
        weights = np.asarray(weights)
        # NOTE(review): the second condition compares against exog.shape[1]
        # (number of columns) rather than exog.shape[0] (number of rows);
        # this looks like it should be `weights.shape[0] != exog.shape[0]`
        # -- confirm intent before changing, behavior preserved here
        if (weights.size > 1 and
                (weights.ndim != 1 or weights.shape[0] == exog.shape[1])):
            raise ValueError('weights has wrong shape')
    ### end
    if pred_kwds is None:
        # previously the None default crashed on the item assignment below
        pred_kwds = {}
    pred_kwds['linear'] = False
    predicted_mean = self.model.predict(self.params, exog, **pred_kwds)
    covb = self.cov_params()
    link_deriv = self.model.family.link.inverse_deriv(linpred.predicted_mean)
    var_pred_mean = link_deriv**2 * (exog * np.dot(covb, exog.T).T).sum(1)
    # TODO: check that we have correct scale, Refactor scale #???
    if weights is None:
        # unit weights: previously `self.scale / None` raised TypeError
        weights = 1.
    var_resid = self.scale / weights  # self.mse_resid / weights
    # special case for now:
    if self.cov_type == 'fixed scale':
        var_resid = self.cov_kwds['scale'] / weights
    dist = ['norm', 't'][self.use_t]
    return PredictionResults(predicted_mean, var_pred_mean, var_resid,
                             df=self.df_resid, dist=dist,
                             row_labels=row_labels, linpred=linpred, link=link)
def params_transform_univariate(params, cov_params, link=None, transform=None,
                                row_labels=None):
    """results for univariate, nonlinear, monotonicaly transformed parameters

    This provides transformed values, standard errors and confidence interval
    for transformations of parameters, for example in calculating rates with
    `exp(params)` in the case of Poisson or other models with exponential
    mean function.
    """
    from statsmodels.genmod.families import links

    # default transformation is the exponential, i.e. the inverse of Log
    if link is None and transform is None:
        link = links.Log()
    if row_labels is None and hasattr(params, 'index'):
        row_labels = params.index
    params = np.asarray(params)

    # delta method: var(g(b)) ~= g'(b)**2 * var(b), elementwise
    var_params = np.diag(cov_params)
    transformed = link.inverse(params)
    var_transformed = link.inverse_deriv(params) ** 2 * var_params

    dist = stats.norm
    # TODO: need ci for linear prediction, method of `lin_pred
    linpred = PredictionResults(params, var_params, dist=dist,
                                row_labels=row_labels, link=links.identity())
    return PredictionResults(transformed, var_transformed, dist=dist,
                             row_labels=row_labels, linpred=linpred, link=link)
| bsd-3-clause |
grevutiu-gabriel/sympy | examples/intermediate/mplot3d.py | 93 | 1252 | #!/usr/bin/env python
"""Matplotlib 3D plotting example
Demonstrates plotting with matplotlib.
"""
import sys
from sample import sample
from sympy import sin, Symbol
from sympy.external import import_module
def mplot3d(f, var1, var2, show=True):
    """
    Plot a 3d function using matplotlib/Tk.

    ``f`` is a sympy expression; ``var1``/``var2`` are sample ranges of the
    form ``(symbol, start, stop, n_points)`` as consumed by ``sample``.
    If ``show`` is False the figure is built but not displayed.
    """
    import warnings
    # silence a harmless matplotlib/Tk warning emitted on some versions
    warnings.filterwarnings("ignore", "Could not match \S")
    p = import_module('pylab')
    # Try newer version first
    # (the __import__kwargs trick forces a submodule import via fromlist)
    p3 = import_module('mpl_toolkits.mplot3d',
        __import__kwargs={'fromlist': ['something']}) or import_module('matplotlib.axes3d')
    if not p or not p3:
        sys.exit("Matplotlib is required to use mplot3d.")
    # evaluate f on the grid; x, y, z are 2-D arrays of sampled values
    x, y, z = sample(f, var1, var2)
    fig = p.figure()
    ax = p3.Axes3D(fig)
    # ax.plot_surface(x, y, z, rstride=2, cstride=2)
    ax.plot_wireframe(x, y, z)
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    if show:
        p.show()
def main():
    """Render an example saddle surface, z = x**2 - y**2, over [-10, 10]^2."""
    x, y = Symbol('x'), Symbol('y')
    domain_x = (x, -10.0, 10.0, 20)
    domain_y = (y, -10.0, 10.0, 20)
    mplot3d(x**2 - y**2, domain_x, domain_y)
    # other surfaces to try: x**2 + y**2, sin(x) + sin(y)

if __name__ == "__main__":
    main()
| bsd-3-clause |
ssaeger/scikit-learn | sklearn/neighbors/classification.py | 132 | 14388 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
                           SupervisedIntegerMixin, ClassifierMixin):
    """Classifier implementing the k-nearest neighbors vote.
    Read more in the :ref:`User Guide <classification>`.
    Parameters
    ----------
    n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`k_neighbors` queries.
    weights : str or callable
        weight function used in prediction. Possible values:
        - 'uniform' : uniform weights. All points in each neighborhood
        are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
        in this case, closer neighbors of a query point will have a
        greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
        array of distances, and returns an array of the same shape
        containing the weights.
        Uniform weights are used by default.
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:
        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
        based on the values passed to :meth:`fit` method.
        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.
    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree. This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree. The optimal value depends on the
        nature of the problem.
    metric : string or DistanceMetric object (default = 'minkowski')
        the distance metric to use for the tree. The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
        metric. See the documentation of the DistanceMetric class for a
        list of available metrics.
    p : integer, optional (default = 2)
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    metric_params : dict, optional (default = None)
        Additional keyword arguments for the metric function.
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
        Doesn't affect :meth:`fit` method.
    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import KNeighborsClassifier
    >>> neigh = KNeighborsClassifier(n_neighbors=3)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    KNeighborsClassifier(...)
    >>> print(neigh.predict([[1.1]]))
    [0]
    >>> print(neigh.predict_proba([[0.9]]))
    [[ 0.66666667 0.33333333]]
    See also
    --------
    RadiusNeighborsClassifier
    KNeighborsRegressor
    RadiusNeighborsRegressor
    NearestNeighbors
    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.
    .. warning::
       Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances but
       but different labels, the results will depend on the ordering of the
       training data.
    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """
    def __init__(self, n_neighbors=5,
                 weights='uniform', algorithm='auto', leaf_size=30,
                 p=2, metric='minkowski', metric_params=None, n_jobs=1,
                 **kwargs):
        # parameter storage and validation is delegated to NeighborsBase
        self._init_params(n_neighbors=n_neighbors,
                          algorithm=algorithm,
                          leaf_size=leaf_size, metric=metric, p=p,
                          metric_params=metric_params, n_jobs=n_jobs, **kwargs)
        # validates the weights option and stores it
        self.weights = _check_weights(weights)
    def predict(self, X):
        """Predict the class labels for the provided data
        Parameters
        ----------
        X : array-like, shape (n_query, n_features), \
                or (n_query, n_indexed) if metric == 'precomputed'
            Test samples.
        Returns
        -------
        y : array of shape [n_samples] or [n_samples, n_outputs]
            Class labels for each data sample.
        """
        X = check_array(X, accept_sparse='csr')
        neigh_dist, neigh_ind = self.kneighbors(X)
        classes_ = self.classes_
        _y = self._y
        # normalize the single-output case to the multi-output layout so
        # the loop below handles both uniformly
        if not self.outputs_2d_:
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]
        n_outputs = len(classes_)
        n_samples = X.shape[0]
        weights = _get_weights(neigh_dist, self.weights)
        y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
        for k, classes_k in enumerate(classes_):
            # (weighted) majority vote among the k neighbors' labels
            if weights is None:
                mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
            else:
                mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)
            # mode holds encoded label indices; map back to original classes
            mode = np.asarray(mode.ravel(), dtype=np.intp)
            y_pred[:, k] = classes_k.take(mode)
        if not self.outputs_2d_:
            y_pred = y_pred.ravel()
        return y_pred
    def predict_proba(self, X):
        """Return probability estimates for the test data X.
        Parameters
        ----------
        X : array-like, shape (n_query, n_features), \
                or (n_query, n_indexed) if metric == 'precomputed'
            Test samples.
        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            of such arrays if n_outputs > 1.
            The class probabilities of the input samples. Classes are ordered
            by lexicographic order.
        """
        X = check_array(X, accept_sparse='csr')
        neigh_dist, neigh_ind = self.kneighbors(X)
        classes_ = self.classes_
        _y = self._y
        # normalize the single-output case to the multi-output layout
        if not self.outputs_2d_:
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]
        n_samples = X.shape[0]
        weights = _get_weights(neigh_dist, self.weights)
        # uniform weighting: every neighbor contributes one vote
        if weights is None:
            weights = np.ones_like(neigh_ind)
        all_rows = np.arange(X.shape[0])
        probabilities = []
        for k, classes_k in enumerate(classes_):
            pred_labels = _y[:, k][neigh_ind]
            proba_k = np.zeros((n_samples, classes_k.size))
            # a simple ':' index doesn't work right
            for i, idx in enumerate(pred_labels.T):  # loop is O(n_neighbors)
                # accumulate each neighbor's weight into its class column
                proba_k[all_rows, idx] += weights[:, i]
            # normalize 'votes' into real [0,1] probabilities
            normalizer = proba_k.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            proba_k /= normalizer
            probabilities.append(proba_k)
        if not self.outputs_2d_:
            probabilities = probabilities[0]
        return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
                                SupervisedIntegerMixin, ClassifierMixin):
    """Classifier implementing a vote among neighbors within a given radius
    Read more in the :ref:`User Guide <classification>`.
    Parameters
    ----------
    radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth`radius_neighbors`
        queries.
    weights : str or callable
        weight function used in prediction. Possible values:
        - 'uniform' : uniform weights. All points in each neighborhood
        are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
        in this case, closer neighbors of a query point will have a
        greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
        array of distances, and returns an array of the same shape
        containing the weights.
        Uniform weights are used by default.
    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:
        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
        based on the values passed to :meth:`fit` method.
        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.
    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree. This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree. The optimal value depends on the
        nature of the problem.
    metric : string or DistanceMetric object (default='minkowski')
        the distance metric to use for the tree. The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
        metric. See the documentation of the DistanceMetric class for a
        list of available metrics.
    p : integer, optional (default = 2)
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
    outlier_label : int, optional (default = None)
        Label, which is given for outlier samples (samples with no
        neighbors on given radius).
        If set to None, ValueError is raised, when outlier is detected.
    metric_params : dict, optional (default = None)
        Additional keyword arguments for the metric function.
    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import RadiusNeighborsClassifier
    >>> neigh = RadiusNeighborsClassifier(radius=1.0)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    RadiusNeighborsClassifier(...)
    >>> print(neigh.predict([[1.5]]))
    [0]
    See also
    --------
    KNeighborsClassifier
    RadiusNeighborsRegressor
    KNeighborsRegressor
    NearestNeighbors
    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.
    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """
    def __init__(self, radius=1.0, weights='uniform',
                 algorithm='auto', leaf_size=30, p=2, metric='minkowski',
                 outlier_label=None, metric_params=None, **kwargs):
        # parameter storage and validation is delegated to NeighborsBase
        self._init_params(radius=radius,
                          algorithm=algorithm,
                          leaf_size=leaf_size,
                          metric=metric, p=p, metric_params=metric_params,
                          **kwargs)
        self.weights = _check_weights(weights)
        self.outlier_label = outlier_label
    def predict(self, X):
        """Predict the class labels for the provided data
        Parameters
        ----------
        X : array-like, shape (n_query, n_features), \
                or (n_query, n_indexed) if metric == 'precomputed'
            Test samples.
        Returns
        -------
        y : array of shape [n_samples] or [n_samples, n_outputs]
            Class labels for each data sample.
        """
        X = check_array(X, accept_sparse='csr')
        n_samples = X.shape[0]
        neigh_dist, neigh_ind = self.radius_neighbors(X)
        # samples with at least one neighbor vs. samples with none
        inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
        outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]
        classes_ = self.classes_
        _y = self._y
        # normalize the single-output case to the multi-output layout
        if not self.outputs_2d_:
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]
        n_outputs = len(classes_)
        if self.outlier_label is not None:
            # give outliers a dummy finite distance so weighting below
            # does not produce inf/NaN; their labels are overwritten anyway
            neigh_dist[outliers] = 1e-6
        elif outliers:
            raise ValueError('No neighbors found for test samples %r, '
                             'you can try using larger radius, '
                             'give a label for outliers, '
                             'or consider removing them from your dataset.'
                             % outliers)
        weights = _get_weights(neigh_dist, self.weights)
        y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
        for k, classes_k in enumerate(classes_):
            # ragged per-sample label arrays -> object array
            pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
                                   dtype=object)
            # np.intp matches KNeighborsClassifier.predict and avoids the
            # removed np.int alias (NumPy >= 1.24)
            if weights is None:
                mode = np.array([stats.mode(pl)[0]
                                 for pl in pred_labels[inliers]], dtype=np.intp)
            else:
                # index weights with `inliers` too, so labels and weights
                # stay aligned when outlier rows are skipped
                mode = np.array([weighted_mode(pl, w)[0]
                                 for (pl, w)
                                 in zip(pred_labels[inliers], weights[inliers])],
                                dtype=np.intp)
            mode = mode.ravel()
            y_pred[inliers, k] = classes_k.take(mode)
        if outliers:
            y_pred[outliers, :] = self.outlier_label
        if not self.outputs_2d_:
            y_pred = y_pred.ravel()
        return y_pred
| bsd-3-clause |
EnSpec/SpecDAL | specdal/gui/gui.py | 1 | 7156 | import os
import sys
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
import tkinter.simpledialog as tksd
sys.path.insert(0, os.path.abspath("../.."))
import matplotlib
matplotlib.use('TkAgg')
from specdal.spectrum import Spectrum
from specdal.collection import Collection
from viewer import Viewer
from collections import OrderedDict
# ~/data/specdal/aidan_data2/PSR/
class SpecdalGui(tk.Tk):
    """GUI entry point for Specdal"""
    def __init__(self, collections=None):
        # `collections` is an optional iterable of Collection objects to
        # preload into the left-hand list
        tk.Tk.__init__(self)
        # create menubar
        self.config(menu=Menubar(self))
        # create list
        self.collectionList = CollectionList(self, collections)
        self.collectionList.pack(side=tk.LEFT, fill=tk.Y)
        # create viewer
        self.viewer = Viewer(self, self.collectionList.currentCollection,
                             with_toolbar=False)
        self.viewer.pack(side=tk.LEFT, fill=tk.BOTH)
    def read_dir(self):
        """Ask the user for a directory and load it as a new collection."""
        directory = filedialog.askdirectory()
        if not directory:
            # dialog cancelled
            return
        # collection name is auto-generated from the current list size
        self.collectionList.add_collection(
            Collection(name="collection" + str(self.collectionList.listbox.size()), directory=directory))
    def group_by(self, collection=None):
        """Split a collection into groups keyed by filename fields.

        Prompts for a separator and the (comma separated) field indices,
        then adds each resulting group as its own collection.
        """
        separator = tksd.askstring("separator", "Enter separator pattern", initialvalue="_")
        if separator is None:
            return
        indices = tksd.askstring("indices", "Enter indices to group by (comma separated)", initialvalue="0")
        if indices is None:
            return
        # e.g. "0, 2" -> [0, 2]
        indices = list(map(int, indices.replace(" ", "").split(",")))
        if collection is None:
            collection = self.collectionList.currentCollection
        groups = collection.groupby(separator=separator, indices=indices, filler=None)
        for gname, gcoll in groups.items():
            # prefix group names with the parent collection's name
            gcoll.name = collection.name + " (" + gcoll.name + ")"
            self.collectionList.add_collection(gcoll)
class CollectionList(tk.Frame):
    """Stores and manages collections.

    Keeps a name -> Collection mapping in sync with a Tk listbox; the
    listbox order mirrors the dict insertion order.
    """
    def __init__(self, parent, collections=None):
        tk.Frame.__init__(self, parent)
        self.collections = OrderedDict()
        self.currentCollection = None
        # gui
        self.scrollbar = ttk.Scrollbar(self)
        self.listbox = tk.Listbox(self, yscrollcommand=self.scrollbar.set,
                                  width=30)
        self.scrollbar.config(command=self.listbox.yview)
        self.listbox.pack(side=tk.LEFT, fill=tk.Y)
        self.scrollbar.pack(side=tk.LEFT, fill=tk.Y)
        # double-click selects a collection and shows it in the viewer
        self.listbox.bind('<Double-1>', lambda x:
                          self.master.viewer.set_collection(
                              self.set_cur(pos=self.get_selection()[0][0])))
        # load provided collections
        if collections:
            for c in collections:
                self.add_collection(c)
            self.set_cur()
    def set_cur(self, name=None, pos=0):
        """Make the collection `name` (or the one at listbox `pos`) current.

        Returns the new current collection, or None if it cannot be found
        (e.g. the listbox is empty).
        """
        if name is None:
            # TODO: check whether pos is valid
            name = self.listbox.get(pos)
        self.currentCollection = self.get_collection(name)
        return self.currentCollection
    def add_collection(self, collection):
        """Register `collection` and append its name to the listbox."""
        assert isinstance(collection, Collection)
        self.collections[collection.name] = collection
        # add to listbox
        self.listbox.insert(tk.END, collection.name)
    def get_collection(self, name):
        """Return the collection registered under `name`, or None."""
        if name in self.collections:
            return self.collections[name]
    def get_selection(self):
        ''' return indices (tuple) and names (list) '''
        idx = self.listbox.curselection()
        all_names = list(self.collections)
        names = [ all_names[i] for i in idx ]
        return idx, names
    def remove_selection(self):
        """Remove every collection currently selected in the listbox."""
        idx, names = self.get_selection()
        # remove from listbox (reverse order keeps remaining indices valid)
        for i in sorted(idx, reverse=True):
            self.listbox.delete(i)
        # remove from dict
        for name in names:
            # guard: currentCollection may already be None when the last
            # remaining collections are being removed (previously this
            # raised AttributeError on `.name`)
            if (self.currentCollection is not None
                    and self.currentCollection.name == name):
                # the current collection is going away; fall back to the
                # first surviving entry (None if the list is now empty)
                self.set_cur()
            del self.collections[name]
def not_implemented_message(feature_name):
    """Show an informational popup for a menu entry that is not built yet."""
    # import the submodule explicitly: `import tkinter as tk` does not by
    # itself make `tk.messagebox` available; it only worked because
    # tkinter.simpledialog happens to import messagebox as a side effect
    from tkinter import messagebox
    messagebox.showinfo(feature_name, "Not implemented")
class Menubar(tk.Menu):
    """Top menubar wiring menu entries to SpecdalGui/viewer actions."""
    # parent is the SpecdalGui class
    def __init__(self, parent):
        tk.Menu.__init__(self, parent)
        # File
        # most entries are placeholders that pop a "Not implemented" dialog
        fileMenu = tk.Menu(self, tearoff=0)
        fileMenu.add_command(label="open", command=lambda: not_implemented_message("open"))
        fileMenu.add_command(label="read file", command=lambda: not_implemented_message("read file"))
        fileMenu.add_command(label="read directory", command=lambda: self.master.read_dir())
        fileMenu.add_command(label="read csv", command=lambda: not_implemented_message("read csv"))
        fileMenu.add_command(label="save", command=lambda: not_implemented_message("save"))
        fileMenu.add_command(label="save as", command=lambda: not_implemented_message("save as"))
        fileMenu.add_command(label="close", command=lambda: not_implemented_message("close"))
        self.add_cascade(label="File", menu=fileMenu)
        # Edit
        editMenu = tk.Menu(self, tearoff=0)
        editMenu.add_command(label="flag/unflag", command=lambda: self.master.viewer.toggle_flag())
        editMenu.add_command(label="remove collection", command=lambda: self.master.collectionList.remove_selection())
        editMenu.add_command(label="setting", command=lambda: not_implemented_message("setting"))
        self.add_cascade(label="Edit", menu=editMenu)
        # View
        # toggles delegate to the Viewer widget of the parent window
        viewMenu = tk.Menu(self, tearoff=0)
        viewMenu.add_command(label="Collection/Spectra Mode", command=lambda: self.master.viewer.toggle_mode())
        viewMenu.add_command(label="Show/Hide Flagged", command=lambda: self.master.viewer.toggle_show_flagged())
        viewMenu.add_command(label="Mean", command=lambda: self.master.viewer.toggle_mean())
        viewMenu.add_command(label="Median", command=lambda: self.master.viewer.toggle_median())
        viewMenu.add_command(label="Max", command=lambda: self.master.viewer.toggle_max())
        viewMenu.add_command(label="Min", command=lambda: self.master.viewer.toggle_min())
        viewMenu.add_command(label="Std", command=lambda: self.master.viewer.toggle_std())
        self.add_cascade(label="View", menu=viewMenu)
        # Operators
        operatorMenu = tk.Menu(self, tearoff=0)
        operatorMenu.add_command(label="Groupby", command=lambda: self.master.group_by())
        operatorMenu.add_command(label="Stitch", command=lambda: self.master.viewer.stitch())
        operatorMenu.add_command(label="Jump Correct", command=lambda: self.master.viewer.jump_correct())
        self.add_cascade(label="Operator", menu=operatorMenu)
def read_test_data():
    """Load a sample ASD collection and flag its first 30 spectra.

    NOTE(review): hardcoded developer path; also '~' may not be expanded
    by Collection — confirm it calls os.path.expanduser.
    """
    path = '~/data/specdal/aidan_data2/ASD'
    c = Collection("Test Collection", directory=path)
    # flag a handful of spectra so flag-related GUI features have data
    for i in range(30):
        c.flag(c.spectra[i].name)
    return c
def main():
    """Launch the SpecDAL GUI and enter the Tk event loop."""
    SpecdalGui().mainloop()

if __name__ == "__main__":
    main()
| mit |
bsaleil/lc | tools/benchtime.py | 1 | 8755 | #!/usr/bin/python3
#---------------------------------------------------------------------------
#
# Copyright (c) 2015, Baptiste Saleil. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#---------------------------------------------------------------------------
import sys
import os
import glob
import subprocess
from pylab import *
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import rc
from matplotlib.ticker import FuncFormatter
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# -------------------------
# Constants
# -------------------------
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__)) + '/' # Current script path
LC_PATH = SCRIPT_PATH + '../' # Compiler path
LC_EXEC = 'lazy-comp' # Compiler exec name
PDF_OUTPUT = SCRIPT_PATH + 'times.pdf' # PDF output file
BENCH_PATH = LC_PATH + 'benchmarks/*.scm' # Benchmarks path
BAR_COLORS = ["#DDDDDD", "#AAAAAA", "#666666", "#333333", "#000000"] # Bar colors
ITERS = 10 # Number of iterations >=4 (we remove first, min and max)
FONT_SIZE = 9 # Font size used to generate pdf using latex (must match the paper font size)
# -------------------------
# Config
# -------------------------
# Change to lc directory
os.chdir(LC_PATH)
# Get all benchmarks full path sorted by name
files = sorted(glob.glob(BENCH_PATH))
# Used as matplotlib formatter
def to_percent(y, position):
    """Matplotlib tick formatter: render tick value `y` as a percentage.

    `position` is required by the FuncFormatter signature but unused.
    """
    s = str(int(y))
    # The percent symbol needs escaping in latex
    # (truth-test instead of the fragile `is True` identity comparison)
    if matplotlib.rcParams['text.usetex']:
        return s + r'$\%$'
    else:
        return s + '%'
# -------------------------
# Exec benchmarks
# and get times
# -------------------------
# Execute ITERS times the file with given options
# Remove first, min and max times and return the sum
def getTime(file, options):
    """Run the compiler ITERS times on `file` with the given extra options.

    The first (warm-up) run plus the fastest and slowest of the remaining
    runs are discarded; the sum of the surviving times is returned.
    Raises if the GC was triggered, since that would skew the timings.
    """
    # NOTE(review): '--time --verbose-gc' is passed as a single argv entry;
    # confirm the compiler really expects it as one token
    cmd = [LC_PATH + LC_EXEC, file, '--time --verbose-gc'] + list(options)
    times = []
    for _ in range(ITERS):
        output = subprocess.check_output(cmd).decode("utf-8")
        if "GC" in output:  # If gc is triggered, stop the script
            raise Exception('GC is used with benchmark ' + file)
        times.append(float(output.split(':')[1].strip()))
    # drop warm-up run, then the extremes
    times.pop(0)
    times.remove(min(times))
    times.remove(max(times))
    return sum(times)
TIME = []
idx = 0
for file in files:
idx+=1
print('(' + str(idx) + '/' + str(len(files)) + ') ' + file);
# Exec without versioning
print('\t* No versioning...')
time_nv = getTime(file,['--disable-entry-points', '--disable-return-points','--max-versions 0']);
# Exec with versioning only
print('\t* Versioning only...')
time_v = getTime(file,['--disable-entry-points', '--disable-return-points']);
# Exec with versioning and entry points
print('\t* Versioning + entry points...')
time_ve = getTime(file,['--disable-return-points']);
# Exec with versioning and return points
print('\t* Versioning + return points...')
time_vr = getTime(file,['--disable-entry-points']);
# Exec with versioning and entry and return points
print('\t* Versioning + entry points + return points...')
time_ver = getTime(file,[]);
# Exec with versioning and entry and return points and max=5
print('\t* Versioning + entry points + return points + max=5...')
time_vermax = getTime(file,['--max-versions 5']);
TIME.append([file,time_nv,time_v,time_ve,time_vr,time_ver,time_vermax])
# -------------------------
# Draw graph
# -------------------------
# Draw bars on graph with times of times_p at index time_idx (which is one of ve, vr, ver)
# This bar will use the given color and label
# Draw bars on graph with times of times_p at index time_idx (which is one of ve, vr, ver)
# This bar will use the given color and label
def drawBar(times_p,time_idx,color_idx,label):
    # NOTE: reads module-level globals: X (bar positions set by the caller),
    # bar_width and BAR_COLORS; `bar` comes from the pylab star import
    Y = list(map(lambda x: x[time_idx], times_p))
    bar(X, Y, bar_width, facecolor=BAR_COLORS[color_idx], edgecolor='white', label=label)
print('Draw graph...')
# Graph config
nb_items = len(files) + 1 # Number of times to draw (+1 for arithmetic mean)
bar_width = 1 # Define width of a single bar
matplotlib.rcParams.update({'font.size': FONT_SIZE}) # Set font size of all elements of the graph
fig = plt.figure('',figsize=(8,3.4)) # Create new figure
#plt.title('Execution time') # Set title
gca().get_xaxis().set_visible(False) # Hide x values
ylim(0,120) # Set y scale from 0 to 120
xlim(0, nb_items*6) # Set x scale from 0 to nb_items*6 (6 slots per benchmark: 5 bars + 1 gap)
fig.subplots_adjust(bottom=0.4) # Give more space at bottom for benchmark names
# Draw grid
axes = gca()
axes.grid(True, zorder=1, color="#707070")
axes.set_axisbelow(True) # Keep grid under the axes
# Convert times to % times
TIMEP = []
for t in TIME:
    file = t[0]
    time_nv = t[1]
    time_v = t[2]
    time_ve = t[3]
    time_vr = t[4]
    time_ver = t[5]
    time_vermax = t[6]
    # Compute % values relative to the no-versioning time (time_nv = 100% baseline)
    timep_nv = 100
    timep_v = (100*time_v) / time_nv
    timep_ve = (100*time_ve) / time_nv
    timep_vr = (100*time_vr) / time_nv
    timep_ver = (100*time_ver) / time_nv
    timep_vermax = (100*time_vermax) / time_nv
    TIMEP.append([file,timep_nv,timep_v,timep_ve,timep_vr,timep_ver,timep_vermax])
# Sort by timep_ver
TIMEP.sort(key=lambda x: x[5])
# Add arithmetic mean values (appended last so it is drawn as the final group)
name = 'arith. mean'
time_nv = 100
timep_v = sum(list(map(lambda x: x[2], TIMEP)))/len(files)
timep_ve = sum(list(map(lambda x: x[3], TIMEP)))/len(files)
timep_vr = sum(list(map(lambda x: x[4], TIMEP)))/len(files)
timep_ver = sum(list(map(lambda x: x[5], TIMEP)))/len(files)
timep_vermax = sum(list(map(lambda x: x[6], TIMEP)))/len(files)
TIMEP.append([name,time_nv,timep_v,timep_ve,timep_vr,timep_ver,timep_vermax])
print("Means:")
print("Vers. " + str(timep_v))
print("Vers. Entry " + str(timep_ve))
print("Vers. Return " + str(timep_vr))
print("Vers. Entry + Return " + str(timep_ver))
print("Vers. Entry + Return + Max=5 " + str(timep_vermax))
# DRAW V
X = np.arange(0,nb_items*6,6) # [0,6,12,..]
drawBar(TIMEP,2,0,'Vers.')
# DRAW VE
X = np.arange(1,nb_items*6,6) # [1,7,13,..]
drawBar(TIMEP,3,1,'Vers. + Entry')
# DRAW VR
X = np.arange(2,nb_items*6,6) # [2,8,14,..]
drawBar(TIMEP,4,2,'Vers. + Return')
# DRAW VER
X = np.arange(3,nb_items*6,6) # [3,9,15,..]
drawBar(TIMEP,5,3,'Vers. + Entry + Return')
# DRAW VERMAX
X = np.arange(4,nb_items*6,6) # [4,10,16,..]
drawBar(TIMEP,6,4,'Vers. + Entry + Return + Max=5')
# DRAW BENCHMARK NAMES
i = 0
for time in TIMEP[:-1]:
    # Benchmark name ([:-4] strips the file extension), rotated under its group.
    text(i+2.5-((1-bar_width)/2), -3, os.path.basename(time[0])[:-4], rotation=90, ha='center', va='top')
    i+=6
text(i+2.5-((1-bar_width)/2), -3,TIMEP[-1][0], rotation=90, ha='center', va='top') # arithmetic mean time (last one)
legend(bbox_to_anchor=(0., 0., 1., -0.55), prop={'size':FONT_SIZE}, ncol=3, mode="expand", borderaxespad=0.)
# Add '%' symbol to ylabels
formatter = FuncFormatter(to_percent)
plt.gca().yaxis.set_major_formatter(formatter)
## SAVE/SHOW GRAPH
pdf = PdfPages(PDF_OUTPUT)
pdf.savefig(fig)
pdf.close()
print('Saved to ' + PDF_OUTPUT)
print('Done!')
| bsd-3-clause |
bgris/ODL_bgris | lib/python3.5/site-packages/IPython/lib/tests/test_latextools.py | 8 | 3869 | # encoding: utf-8
"""Tests for IPython.utils.path.py"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
try:
from unittest.mock import patch
except ImportError:
from mock import patch
import nose.tools as nt
from IPython.lib import latextools
from IPython.testing.decorators import onlyif_cmds_exist, skipif_not_matplotlib
from IPython.utils.process import FindCmdError
def test_latex_to_png_dvipng_fails_when_no_cmd():
    """
    `latex_to_png_dvipng` should return None when there is no required command
    """
    # nose generator test: one sub-test per required external binary.
    for command in ('latex', 'dvipng'):
        yield check_latex_to_png_dvipng_fails_when_no_cmd, command
def check_latex_to_png_dvipng_fails_when_no_cmd(command):
    """Patch find_cmd so `command` looks missing, then expect a None result."""
    def mock_find_cmd(arg):
        # Simulate the absence of exactly this one external binary.
        if arg == command:
            raise FindCmdError
    with patch.object(latextools, "find_cmd", mock_find_cmd):
        nt.assert_equal(latextools.latex_to_png_dvipng("whatever", True),
                        None)
@onlyif_cmds_exist('latex', 'dvipng')
def test_latex_to_png_dvipng_runs():
    """
    Test that latex_to_png_dvipng just runs without error.
    """
    def mock_kpsewhich(filename):
        # Pretend breqn.sty is not installed.
        nt.assert_equal(filename, "breqn.sty")
        return None
    for (s, wrap) in [(u"$$x^2$$", False), (u"x^2", True)]:
        # Once with the real kpsewhich, once with it mocked out.
        yield (latextools.latex_to_png_dvipng, s, wrap)
        with patch.object(latextools, "kpsewhich", mock_kpsewhich):
            yield (latextools.latex_to_png_dvipng, s, wrap)
@skipif_not_matplotlib
def test_latex_to_png_mpl_runs():
    """
    Test that latex_to_png_mpl just runs without error.
    """
    def mock_kpsewhich(filename):
        # Pretend breqn.sty is not installed.
        nt.assert_equal(filename, "breqn.sty")
        return None
    for (s, wrap) in [("$x^2$", False), ("x^2", True)]:
        # Once with the real kpsewhich, once with it mocked out.
        yield (latextools.latex_to_png_mpl, s, wrap)
        with patch.object(latextools, "kpsewhich", mock_kpsewhich):
            yield (latextools.latex_to_png_mpl, s, wrap)
@skipif_not_matplotlib
def test_latex_to_html():
    """latex_to_html should return an <img> tag with base64-embedded PNG data."""
    img = latextools.latex_to_html("$x^2$")
    # "iVBOR" is the base64 prefix of the PNG magic bytes.
    nt.assert_in("data:image/png;base64,iVBOR", img)
def test_genelatex_no_wrap():
    """
    Test genelatex with wrap=False.
    """
    def mock_kpsewhich(filename):
        # With wrap=False, genelatex must not probe for breqn.sty at all.
        assert False, ("kpsewhich should not be called "
                       "(called with {0})".format(filename))
    with patch.object(latextools, "kpsewhich", mock_kpsewhich):
        nt.assert_equal(
            '\n'.join(latextools.genelatex("body text", False)),
            r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\pagestyle{empty}
\begin{document}
body text
\end{document}''')
def test_genelatex_wrap_with_breqn():
    """
    Test genelatex with wrap=True for the case breqn.sty is installed.
    """
    def mock_kpsewhich(filename):
        # Report breqn.sty as installed; genelatex should then use dmath*.
        nt.assert_equal(filename, "breqn.sty")
        return "path/to/breqn.sty"
    with patch.object(latextools, "kpsewhich", mock_kpsewhich):
        nt.assert_equal(
            '\n'.join(latextools.genelatex("x^2", True)),
            r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\usepackage{breqn}
\pagestyle{empty}
\begin{document}
\begin{dmath*}
x^2
\end{dmath*}
\end{document}''')
def test_genelatex_wrap_without_breqn():
    """
    Test genelatex with wrap=True for the case breqn.sty is not installed.
    """
    def mock_kpsewhich(filename):
        # Report breqn.sty as missing; genelatex should fall back to $$...$$.
        nt.assert_equal(filename, "breqn.sty")
        return None
    with patch.object(latextools, "kpsewhich", mock_kpsewhich):
        nt.assert_equal(
            '\n'.join(latextools.genelatex("x^2", True)),
            r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\pagestyle{empty}
\begin{document}
$$x^2$$
\end{document}''')
| gpl-3.0 |
vaxin/captcha | line.py | 1 | 3107 | import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import random
import util
def _fpart(x):
return x - int(x)
def _rfpart(x):
return 1 - _fpart(x)
def getpixel(img, xy):
    """Return the color tuple at position `xy` = (row, col), or white
    (255., 255., 255.) when the position is outside the image."""
    row, col = xy
    if row >= len(img) or col >= len(img[row]):
        return (255., 255., 255.)
    return img[row][col]
def putpixel(img, xy, color, alpha=1):
    """Paints color over the background at the point xy in img.
    Use alpha for blending. alpha=1 means a completely opaque foreground.
    """
    # Silently ignore writes outside the image bounds.
    if xy[0] >= len(img) or xy[1] >= len(img[xy[0]]):
        return
    # Blend each channel: round(alpha*fg + (1-alpha)*bg), as an int.
    c = tuple(map(lambda bg, fg: int(round(alpha * fg + (1-alpha) * bg)),
                  getpixel(img, xy), color))
    img[xy[0]][xy[1]] = c
def draw_line(img, p1, p2, color):
    """Draws an anti-aliased line in img from p1 to p2 with the given color.

    Implements Xiaolin Wu's line algorithm: both endpoints are plotted with
    coverage-weighted alpha, then each interior step blends the color
    between the two pixels the ideal line passes through.
    """
    x1, y1, x2, y2 = p1 + p2
    dx, dy = x2-x1, y2-y1
    # `p` maps algorithm coordinates back to image coordinates. For steep
    # lines (|dy| >= |dx|) we iterate along y instead of x, so coordinates
    # and deltas are swapped. (Removed: a dead `steep` flag and a first
    # assignment of `p` that were immediately overwritten below.)
    if abs(dx) > abs(dy):
        p = lambda px, py: (px,py)
    else:
        p = lambda px, py: (py, px)
        x1, y1, x2, y2, dx, dy = y1, x1, y2, x2, dy, dx
    if x2 < x1:
        # Always draw left-to-right along the major axis.
        x1, x2, y1, y2 = x2, x1, y2, y1
    grad = dy/dx
    intery = y1 + _rfpart(x1) * grad
    def draw_endpoint(pt):
        # Plot one endpoint with fractional coverage; return its x cell.
        x, y = pt
        xend = round(x)
        yend = y + grad * (xend - x)
        xgap = _rfpart(x + 0.5)
        px, py = int(xend), int(yend)
        putpixel(img, p(px, py), color, _rfpart(yend) * xgap)
        putpixel(img, p(px, py+1), color, _fpart(yend) * xgap)
        return px
    xstart = draw_endpoint(p(*p1)) + 1
    xend = draw_endpoint(p(*p2))
    if xstart > xend:
        xstart, xend = xend, xstart
    for x in range(xstart, xend):
        # Split each column's coverage between the two nearest rows.
        y = int(intery)
        putpixel(img, p(x, y), color, _rfpart(intery))
        putpixel(img, p(x, y+1), color, _fpart(intery))
        intery += grad
def showImg(arr):
    """Display `arr` as a grayscale image (0..255), one array cell per pixel."""
    implot = plt.imshow(arr, cmap=cm.Greys_r, vmin=0, vmax=255)
    # 'nearest' keeps pixels sharp instead of smoothing them.
    implot.set_interpolation('nearest')
    plt.show()
def genImage(size):
    ''' return 2d array with elements like [ 255, 0, 127 ] np.uint8 '''
    # Start from an all-white size x size RGB canvas.
    img = []
    for i in range(size):
        img.append([])
        for j in range(size):
            img[i].append((255., 255., 255.))
    # 2 x or x 2, x ~ (2, size - 2)
    # Randomly pick whether the line starts on the top edge or the left edge,
    # then mirror the start point through the center to get the end point.
    start_x = float(int(random.random() * (size - 2)) + 1)
    start_y = 1
    if random.random() > 0.5:
        start_y = start_x
        start_x = 1
    end_x = size - 1 - start_x
    end_y = size - 1 - start_y
    #print (start_x, start_y), (end_x, end_y)
    if start_x - end_x == 0 and start_y - end_y == 0:
        # Degenerate zero-length line: retry with a fresh random choice.
        return genImage(size)
    if start_x > end_x:
        start_x, start_y, end_x, end_y = end_x, end_y, start_x, start_y
    #start_x, start_y, end_x, end_y = 41.0, 55.0, 45.0, 16.0
    draw_line(img, (start_x, start_y), (end_x, end_y), (0., 0., 0.))
    # Keep only the first (red) channel: the image is effectively grayscale
    # (black anti-aliased line on white).
    res = []
    for x in range(size):
        row = []
        for y in range(size):
            #row.append([ int(one) for one in img[x][y] ])
            row.append(img[x][y][0])
        res.append(row)
    return np.array(res, dtype = np.uint8)
if __name__ == '__main__':
    # Smoke test: generate one 30x30 line image and save it.
    res = genImage(30)
    #import img
    #img.saveImageFromArray(res, 'line.png')
    from PIL import Image
    # NOTE(review): res / 255. yields a float array, so fromarray produces a
    # 32-bit float ('F') image — confirm this is the intended TIFF format.
    img = Image.fromarray(res / 255.)
    img.save('test.tiff')
| mit |
pythonvietnam/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 244 | 2496 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
    """Render confusion matrix `cm` as a heatmap into the current figure.

    NOTE(review): reads the module-level `iris` dataset for the tick labels,
    so it only works for this script's 3-class iris matrix.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(iris.target_names))
    plt.xticks(tick_marks, iris.target_names, rotation=45)
    plt.yticks(tick_marks, iris.target_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
# Show both figures (raw counts and row-normalized rates).
plt.show()
| bsd-3-clause |
jm-begon/scikit-learn | sklearn/feature_extraction/hashing.py | 183 | 6155 | # Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
import numbers
import numpy as np
import scipy.sparse as sp
from . import _hashing
from ..base import BaseEstimator, TransformerMixin
def _iteritems(d):
"""Like d.iteritems, but accepts any collections.Mapping."""
return d.iteritems() if hasattr(d, "iteritems") else d.items()
class FeatureHasher(BaseEstimator, TransformerMixin):
    """Implements feature hashing, aka the hashing trick.
    This class turns sequences of symbolic feature names (strings) into
    scipy.sparse matrices, using a hash function to compute the matrix column
    corresponding to a name. The hash function employed is the signed 32-bit
    version of Murmurhash3.
    Feature names of type byte string are used as-is. Unicode strings are
    converted to UTF-8 first, but no Unicode normalization is done.
    Feature values must be (finite) numbers.
    This class is a low-memory alternative to DictVectorizer and
    CountVectorizer, intended for large-scale (online) learning and situations
    where memory is tight, e.g. when running prediction code on embedded
    devices.
    Read more in the :ref:`User Guide <feature_hashing>`.
    Parameters
    ----------
    n_features : integer, optional
        The number of features (columns) in the output matrices. Small numbers
        of features are likely to cause hash collisions, but large numbers
        will cause larger coefficient dimensions in linear learners.
    dtype : numpy type, optional
        The type of feature values. Passed to scipy.sparse matrix constructors
        as the dtype argument. Do not set this to bool, np.boolean or any
        unsigned integer type.
    input_type : string, optional
        Either "dict" (the default) to accept dictionaries over
        (feature_name, value); "pair" to accept pairs of (feature_name, value);
        or "string" to accept single strings.
        feature_name should be a string, while value should be a number.
        In the case of "string", a value of 1 is implied.
        The feature_name is hashed to find the appropriate column for the
        feature. The value's sign might be flipped in the output (but see
        non_negative, below).
    non_negative : boolean, optional, default False
        Whether output matrices should contain non-negative values only;
        effectively calls abs on the matrix prior to returning it.
        When True, output values can be interpreted as frequencies.
        When False, output values will have expected value zero.
    Examples
    --------
    >>> from sklearn.feature_extraction import FeatureHasher
    >>> h = FeatureHasher(n_features=10)
    >>> D = [{'dog': 1, 'cat':2, 'elephant':4},{'dog': 2, 'run': 5}]
    >>> f = h.transform(D)
    >>> f.toarray()
    array([[ 0.,  0., -4., -1.,  0.,  0.,  0.,  0.,  0.,  2.],
           [ 0.,  0.,  0., -2., -5.,  0.,  0.,  0.,  0.,  0.]])
    See also
    --------
    DictVectorizer : vectorizes string-valued features using a hash table.
    sklearn.preprocessing.OneHotEncoder : handles nominal/categorical features
        encoded as columns of integers.
    """
    def __init__(self, n_features=(2 ** 20), input_type="dict",
                 dtype=np.float64, non_negative=False):
        # Validate eagerly so bad arguments fail at construction time.
        self._validate_params(n_features, input_type)
        self.dtype = dtype
        self.input_type = input_type
        self.n_features = n_features
        self.non_negative = non_negative
    @staticmethod
    def _validate_params(n_features, input_type):
        # strangely, np.int16 instances are not instances of Integral,
        # while np.int64 instances are...
        if not isinstance(n_features, (numbers.Integral, np.integer)):
            raise TypeError("n_features must be integral, got %r (%s)."
                            % (n_features, type(n_features)))
        elif n_features < 1 or n_features >= 2 ** 31:
            # Column indices must fit in a signed 32-bit integer.
            raise ValueError("Invalid number of features (%d)." % n_features)
        if input_type not in ("dict", "pair", "string"):
            raise ValueError("input_type must be 'dict', 'pair' or 'string',"
                             " got %r." % input_type)
    def fit(self, X=None, y=None):
        """No-op.
        This method doesn't do anything. It exists purely for compatibility
        with the scikit-learn transformer API.
        Returns
        -------
        self : FeatureHasher
        """
        # repeat input validation for grid search (which calls set_params)
        self._validate_params(self.n_features, self.input_type)
        return self
    def transform(self, raw_X, y=None):
        """Transform a sequence of instances to a scipy.sparse matrix.
        Parameters
        ----------
        raw_X : iterable over iterable over raw features, length = n_samples
            Samples. Each sample must be iterable an (e.g., a list or tuple)
            containing/generating feature names (and optionally values, see
            the input_type constructor argument) which will be hashed.
            raw_X need not support the len function, so it can be the result
            of a generator; n_samples is determined on the fly.
        y : (ignored)
        Returns
        -------
        X : scipy.sparse matrix, shape = (n_samples, self.n_features)
            Feature matrix, for use with estimators or further transformers.
        """
        raw_X = iter(raw_X)
        # Normalize every sample to an iterable of (feature, value) pairs
        # before handing off to the Cython hashing routine.
        if self.input_type == "dict":
            raw_X = (_iteritems(d) for d in raw_X)
        elif self.input_type == "string":
            raw_X = (((f, 1) for f in x) for x in raw_X)
        indices, indptr, values = \
            _hashing.transform(raw_X, self.n_features, self.dtype)
        n_samples = indptr.shape[0] - 1
        if n_samples == 0:
            raise ValueError("Cannot vectorize empty sequence.")
        X = sp.csr_matrix((values, indices, indptr), dtype=self.dtype,
                          shape=(n_samples, self.n_features))
        X.sum_duplicates()  # also sorts the indices
        if self.non_negative:
            np.abs(X.data, X.data)
        return X
| bsd-3-clause |
Fireblend/scikit-learn | examples/preprocessing/plot_function_transformer.py | 161 | 1949 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principle component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
    """
    This dataset is two lines with a slope ~ 1, where one has
    a y offset of ~100
    """
    offset_line = np.vstack((
        _generate_vector(),
        _generate_vector() + 100,
    )).T
    base_line = np.vstack((
        _generate_vector(),
        _generate_vector(),
    )).T
    features = np.vstack((offset_line, base_line))
    labels = np.hstack((np.zeros(1000), np.ones(1000)))
    return features, labels
def all_but_first_column(X):
    """Drop column 0 of X, keeping all remaining columns."""
    remaining = X[:, 1:]
    return remaining
def drop_first_component(X, y):
    """
    Create a pipeline with PCA and the column selector and use it to
    transform the dataset.
    """
    pipeline = make_pipeline(
        PCA(), FunctionTransformer(all_but_first_column),
    )
    # Fit on a random train split; return the transformed held-out split.
    X_train, X_test, y_train, y_test = train_test_split(X, y)
    pipeline.fit(X_train, y_train)
    return pipeline.transform(X_test), y_test
if __name__ == '__main__':
    X, y = generate_dataset()
    # First figure: the raw two-line dataset, colored by class.
    plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
    plt.show()
    # Second figure: the data after PCA with the first component dropped;
    # all points are plotted on the x-axis (y=0).
    X_transformed, y_transformed = drop_first_component(*generate_dataset())
    plt.scatter(
        X_transformed[:, 0],
        np.zeros(len(X_transformed)),
        c=y_transformed,
        s=50,
    )
    plt.show()
| bsd-3-clause |
xavierwu/scikit-learn | sklearn/linear_model/tests/test_sag.py | 93 | 25649 | # Authors: Danny Sullivan <dbsullivan23@gmail.com>
# Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
# Licence: BSD 3 clause
import math
import numpy as np
import scipy.sparse as sp
from sklearn.linear_model.sag import get_auto_step_size
from sklearn.linear_model.sag_fast import get_max_squared_sum
from sklearn.linear_model import LogisticRegression, Ridge
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils import compute_class_weight
from sklearn.preprocessing import LabelEncoder
from sklearn.datasets import make_blobs
from sklearn.base import clone
# this is used for sag classification
def log_dloss(p, y):
    """Derivative of the logistic loss at margin z = p * y."""
    margin = p * y
    # Saturated regions: avoid overflow in exp and skip the division.
    if margin > 18.0:
        return -y * math.exp(-margin)
    if margin < -18.0:
        return -y
    return -y / (math.exp(margin) + 1.0)
def log_loss(p, y):
    """Mean logistic loss of predictions p against labels y."""
    margins = -y * p
    return np.mean(np.log(1. + np.exp(margins)))
# this is used for sag regression
def squared_dloss(p, y):
    """Derivative of the squared loss: the residual p - y."""
    return -(y - p)
def squared_loss(p, y):
    """Mean squared loss: 0.5 * (p - y)^2 averaged over all samples."""
    residual = p - y
    return np.mean(0.5 * residual * residual)
# function for measuring the log loss
def get_pobj(w, alpha, myX, myy, loss):
    """Primal objective: loss(X w, y) plus the L2 penalty (alpha/2)*||w||^2."""
    w = w.ravel()
    predictions = np.dot(myX, w)
    objective = loss(predictions, myy)
    objective += alpha * w.dot(w) / 2.
    return objective
def sag(X, y, step_size, alpha, n_iter=1, dloss=None, sparse=False,
        sample_weight=None, fit_intercept=True):
    """Dense reference implementation of the SAG solver.

    Stores one full (regularized) gradient per sample in `gradient_memory`
    and descends along the running sum of stored gradients, divided by the
    number of samples seen so far. Returns (weights, intercept).
    """
    n_samples, n_features = X.shape[0], X.shape[1]
    weights = np.zeros(X.shape[1])
    sum_gradient = np.zeros(X.shape[1])
    gradient_memory = np.zeros((n_samples, n_features))
    intercept = 0.0
    intercept_sum_gradient = 0.0
    intercept_gradient_memory = np.zeros(n_samples)
    # Fixed seed so results are reproducible across reference runs.
    rng = np.random.RandomState(77)
    decay = 1.0
    seen = set()
    # sparse data has a fixed decay of .01
    if sparse:
        decay = .01
    for epoch in range(n_iter):
        for k in range(n_samples):
            # Sample uniformly at random (with replacement).
            idx = int(rng.rand(1) * n_samples)
            # idx = k
            entry = X[idx]
            seen.add(idx)
            p = np.dot(entry, weights) + intercept
            gradient = dloss(p, y[idx])
            if sample_weight is not None:
                gradient *= sample_weight[idx]
            # Replace this sample's stored gradient with the fresh one.
            update = entry * gradient + alpha * weights
            sum_gradient += update - gradient_memory[idx]
            gradient_memory[idx] = update
            if fit_intercept:
                intercept_sum_gradient += (gradient -
                                           intercept_gradient_memory[idx])
                intercept_gradient_memory[idx] = gradient
                intercept -= (step_size * intercept_sum_gradient
                              / len(seen) * decay)
            weights -= step_size * sum_gradient / len(seen)
    return weights, intercept
def sag_sparse(X, y, step_size, alpha, n_iter=1,
               dloss=None, sample_weight=None, sparse=False,
               fit_intercept=True):
    """Reference SAG with lazy (just-in-time) weight updates.

    Mirrors the sparse-friendly variant: instead of applying the L2 decay
    to all weights every step, a running scale `wscale` and the cumulative
    sums `c_sum` are kept, and each coordinate is caught up only when it is
    next touched (or at the very end). Returns (weights, intercept).
    """
    if step_size * alpha == 1.:
        raise ZeroDivisionError("Sparse sag does not handle the case "
                                "step_size * alpha == 1")
    n_samples, n_features = X.shape[0], X.shape[1]
    weights = np.zeros(n_features)
    sum_gradient = np.zeros(n_features)
    # last_updated[j]: step counter at which coordinate j was last caught up.
    last_updated = np.zeros(n_features, dtype=np.int)
    gradient_memory = np.zeros(n_samples)
    rng = np.random.RandomState(77)
    intercept = 0.0
    intercept_sum_gradient = 0.0
    wscale = 1.0
    decay = 1.0
    seen = set()
    c_sum = np.zeros(n_iter * n_samples)
    # sparse data has a fixed decay of .01
    if sparse:
        decay = .01
    counter = 0
    for epoch in range(n_iter):
        for k in range(n_samples):
            # idx = k
            idx = int(rng.rand(1) * n_samples)
            entry = X[idx]
            seen.add(idx)
            if counter >= 1:
                # Catch every coordinate up with the deferred updates.
                for j in range(n_features):
                    if last_updated[j] == 0:
                        weights[j] -= c_sum[counter - 1] * sum_gradient[j]
                    else:
                        weights[j] -= ((c_sum[counter - 1] -
                                        c_sum[last_updated[j] - 1]) *
                                       sum_gradient[j])
                    last_updated[j] = counter
            p = (wscale * np.dot(entry, weights)) + intercept
            gradient = dloss(p, y[idx])
            if sample_weight is not None:
                gradient *= sample_weight[idx]
            update = entry * gradient
            sum_gradient += update - (gradient_memory[idx] * entry)
            if fit_intercept:
                intercept_sum_gradient += gradient - gradient_memory[idx]
                intercept -= (step_size * intercept_sum_gradient
                              / len(seen) * decay)
            gradient_memory[idx] = gradient
            # Apply the L2 shrinkage implicitly through the scale factor.
            wscale *= (1.0 - alpha * step_size)
            if counter == 0:
                c_sum[0] = step_size / (wscale * len(seen))
            else:
                c_sum[counter] = (c_sum[counter - 1] +
                                  step_size / (wscale * len(seen)))
            if counter >= 1 and wscale < 1e-9:
                # Numerical rescue: fold the tiny scale back into the
                # weights before it underflows.
                for j in range(n_features):
                    if last_updated[j] == 0:
                        weights[j] -= c_sum[counter] * sum_gradient[j]
                    else:
                        weights[j] -= ((c_sum[counter] -
                                        c_sum[last_updated[j] - 1]) *
                                       sum_gradient[j])
                    last_updated[j] = counter + 1
                c_sum[counter] = 0
                weights *= wscale
                wscale = 1.0
            counter += 1
    # Final catch-up so every coordinate reflects all deferred updates.
    for j in range(n_features):
        if last_updated[j] == 0:
            weights[j] -= c_sum[counter - 1] * sum_gradient[j]
        else:
            weights[j] -= ((c_sum[counter - 1] -
                            c_sum[last_updated[j] - 1]) *
                           sum_gradient[j])
    weights *= wscale
    return weights, intercept
def get_step_size(X, alpha, fit_intercept, classification=True):
    """Reference SAG step size from the max squared row norm of X.

    Uses 4/(L + 4*alpha) for the logistic loss and 1/(L + alpha) for the
    squared loss, where L includes the intercept column when fitted.
    """
    max_row_sq_norm = np.max(np.sum(X * X, axis=1))
    if classification:
        return 4.0 / (max_row_sq_norm + fit_intercept + 4.0 * alpha)
    return 1.0 / (max_row_sq_norm + fit_intercept + alpha)
@ignore_warnings
def test_classifier_matching():
    """Both reference solvers must match LogisticRegression(solver='sag')."""
    n_samples = 20
    X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
                      cluster_std=0.1)
    # Re-encode labels as {-1, +1} as expected by log_dloss.
    y[y == 0] = -1
    alpha = 1.1
    n_iter = 80
    fit_intercept = True
    step_size = get_step_size(X, alpha, fit_intercept)
    clf = LogisticRegression(solver="sag", fit_intercept=fit_intercept,
                             tol=1e-11, C=1. / alpha / n_samples,
                             max_iter=n_iter, random_state=10)
    clf.fit(X, y)
    weights, intercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
                                    dloss=log_dloss,
                                    fit_intercept=fit_intercept)
    weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
                               dloss=log_dloss,
                               fit_intercept=fit_intercept)
    weights = np.atleast_2d(weights)
    intercept = np.atleast_1d(intercept)
    weights2 = np.atleast_2d(weights2)
    intercept2 = np.atleast_1d(intercept2)
    assert_array_almost_equal(weights, clf.coef_, decimal=10)
    assert_array_almost_equal(intercept, clf.intercept_, decimal=10)
    assert_array_almost_equal(weights2, clf.coef_, decimal=10)
    assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_regressor_matching():
    """Both reference solvers must match Ridge(solver='sag') on noiseless data."""
    n_samples = 10
    n_features = 5
    rng = np.random.RandomState(10)
    X = rng.normal(size=(n_samples, n_features))
    true_w = rng.normal(size=n_features)
    y = X.dot(true_w)
    alpha = 1.
    n_iter = 100
    fit_intercept = True
    step_size = get_step_size(X, alpha, fit_intercept, classification=False)
    clf = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
                alpha=alpha * n_samples, max_iter=n_iter)
    clf.fit(X, y)
    weights1, intercept1 = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
                                      dloss=squared_dloss,
                                      fit_intercept=fit_intercept)
    weights2, intercept2 = sag(X, y, step_size, alpha, n_iter=n_iter,
                               dloss=squared_dloss,
                               fit_intercept=fit_intercept)
    assert_array_almost_equal(weights1, clf.coef_, decimal=10)
    assert_array_almost_equal(intercept1, clf.intercept_, decimal=10)
    assert_array_almost_equal(weights2, clf.coef_, decimal=10)
    assert_array_almost_equal(intercept2, clf.intercept_, decimal=10)
@ignore_warnings
def test_sag_pobj_matches_logistic_regression():
    """tests if the sag pobj matches log reg"""
    n_samples = 100
    alpha = 1.0
    max_iter = 20
    X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
                      cluster_std=0.1)
    clf1 = LogisticRegression(solver='sag', fit_intercept=False, tol=.0000001,
                              C=1. / alpha / n_samples, max_iter=max_iter,
                              random_state=10)
    clf2 = clone(clf1)
    # clf3 uses the default solver as an independent reference optimizer.
    clf3 = LogisticRegression(fit_intercept=False, tol=.0000001,
                              C=1. / alpha / n_samples, max_iter=max_iter,
                              random_state=10)
    clf1.fit(X, y)
    clf2.fit(sp.csr_matrix(X), y)
    clf3.fit(X, y)
    # All three should reach (almost) the same primal objective value.
    pobj1 = get_pobj(clf1.coef_, alpha, X, y, log_loss)
    pobj2 = get_pobj(clf2.coef_, alpha, X, y, log_loss)
    pobj3 = get_pobj(clf3.coef_, alpha, X, y, log_loss)
    assert_array_almost_equal(pobj1, pobj2, decimal=4)
    assert_array_almost_equal(pobj2, pobj3, decimal=4)
    assert_array_almost_equal(pobj3, pobj1, decimal=4)
@ignore_warnings
def test_sag_pobj_matches_ridge_regression():
    """tests if the sag pobj matches ridge reg"""
    n_samples = 100
    n_features = 10
    alpha = 1.0
    n_iter = 100
    fit_intercept = False
    rng = np.random.RandomState(10)
    X = rng.normal(size=(n_samples, n_features))
    true_w = rng.normal(size=n_features)
    y = X.dot(true_w)
    clf1 = Ridge(fit_intercept=fit_intercept, tol=.00000000001, solver='sag',
                 alpha=alpha, max_iter=n_iter, random_state=42)
    clf2 = clone(clf1)
    # clf3 uses lsqr as an independent reference solver.
    clf3 = Ridge(fit_intercept=fit_intercept, tol=.00001, solver='lsqr',
                 alpha=alpha, max_iter=n_iter, random_state=42)
    clf1.fit(X, y)
    clf2.fit(sp.csr_matrix(X), y)
    clf3.fit(X, y)
    # All three should reach (almost) the same primal objective value.
    pobj1 = get_pobj(clf1.coef_, alpha, X, y, squared_loss)
    pobj2 = get_pobj(clf2.coef_, alpha, X, y, squared_loss)
    pobj3 = get_pobj(clf3.coef_, alpha, X, y, squared_loss)
    assert_array_almost_equal(pobj1, pobj2, decimal=4)
    assert_array_almost_equal(pobj1, pobj3, decimal=4)
    assert_array_almost_equal(pobj3, pobj2, decimal=4)
@ignore_warnings
def test_sag_regressor_computed_correctly():
    """tests if the sag regressor is computed correctly"""
    alpha = .1
    n_features = 10
    n_samples = 40
    max_iter = 50
    tol = .000001
    fit_intercept = True
    rng = np.random.RandomState(0)
    X = rng.normal(size=(n_samples, n_features))
    w = rng.normal(size=n_features)
    y = np.dot(X, w) + 2.
    step_size = get_step_size(X, alpha, fit_intercept, classification=False)
    clf1 = Ridge(fit_intercept=fit_intercept, tol=tol, solver='sag',
                 alpha=alpha * n_samples, max_iter=max_iter)
    clf2 = clone(clf1)
    clf1.fit(X, y)
    clf2.fit(sp.csr_matrix(X), y)
    spweights1, spintercept1 = sag_sparse(X, y, step_size, alpha,
                                          n_iter=max_iter,
                                          dloss=squared_dloss,
                                          fit_intercept=fit_intercept)
    spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
                                          n_iter=max_iter,
                                          dloss=squared_dloss, sparse=True,
                                          fit_intercept=fit_intercept)
    assert_array_almost_equal(clf1.coef_.ravel(),
                              spweights1.ravel(),
                              decimal=3)
    assert_almost_equal(clf1.intercept_, spintercept1, decimal=1)
    # TODO: uncomment when sparse Ridge with intercept will be fixed (#4710)
    #assert_array_almost_equal(clf2.coef_.ravel(),
    #                          spweights2.ravel(),
    #                          decimal=3)
    #assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_get_auto_step_size():
    """get_auto_step_size must match the closed-form step sizes and
    reject unknown loss names."""
    X = np.array([[1, 2, 3], [2, 3, 4], [2, 3, 2]], dtype=np.float64)
    alpha = 1.2
    fit_intercept = False
    # sum the squares of the second sample because that's the largest
    max_squared_sum = 4 + 9 + 16
    max_squared_sum_ = get_max_squared_sum(X)
    assert_almost_equal(max_squared_sum, max_squared_sum_, decimal=4)
    for fit_intercept in (True, False):
        # Closed-form references: 1/(L+alpha) for squared, 4/(L+4*alpha) for log.
        step_size_sqr = 1.0 / (max_squared_sum + alpha + int(fit_intercept))
        step_size_log = 4.0 / (max_squared_sum + 4.0 * alpha +
                               int(fit_intercept))
        step_size_sqr_ = get_auto_step_size(max_squared_sum_, alpha, "squared",
                                            fit_intercept)
        step_size_log_ = get_auto_step_size(max_squared_sum_, alpha, "log",
                                            fit_intercept)
        assert_almost_equal(step_size_sqr, step_size_sqr_, decimal=4)
        assert_almost_equal(step_size_log, step_size_log_, decimal=4)
    msg = 'Unknown loss function for SAG solver, got wrong instead of'
    assert_raise_message(ValueError, msg, get_auto_step_size,
                         max_squared_sum_, alpha, "wrong", fit_intercept)
def test_get_max_squared_sum():
    """get_max_squared_sum must agree between dense and CSR inputs."""
    n_samples = 100
    n_features = 10
    rng = np.random.RandomState(42)
    X = rng.randn(n_samples, n_features).astype(np.float64)
    # Sparsify roughly half the entries so the CSR path is exercised.
    mask = rng.randn(n_samples, n_features)
    X[mask > 0] = 0.
    X_csr = sp.csr_matrix(X)
    # Zero an explicit entry in both representations.
    X[0, 3] = 0.
    X_csr[0, 3] = 0.
    sum_X = get_max_squared_sum(X)
    sum_X_csr = get_max_squared_sum(X_csr)
    assert_almost_equal(sum_X, sum_X_csr)
@ignore_warnings
def test_sag_regressor():
    """tests if the sag regressor performs well"""
    xmin, xmax = -5, 5
    n_samples = 20
    tol = .001
    max_iter = 20
    alpha = 0.1
    rng = np.random.RandomState(0)
    X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
    # simple linear function without noise
    y = 0.5 * X.ravel()
    clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
                 alpha=alpha * n_samples)
    clf2 = clone(clf1)
    clf1.fit(X, y)
    clf2.fit(sp.csr_matrix(X), y)
    score1 = clf1.score(X, y)
    score2 = clf2.score(X, y)
    assert_greater(score1, 0.99)
    assert_greater(score2, 0.99)
    # simple linear function with noise
    y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
    clf1 = Ridge(tol=tol, solver='sag', max_iter=max_iter,
                 alpha=alpha * n_samples)
    clf2 = clone(clf1)
    clf1.fit(X, y)
    clf2.fit(sp.csr_matrix(X), y)
    score1 = clf1.score(X, y)
    # Removed an accidentally duplicated `score2 = clf2.score(X, y)` line.
    score2 = clf2.score(X, y)
    assert_greater(score1, 0.5)
    assert_greater(score2, 0.5)
@ignore_warnings
def test_sag_classifier_computed_correctly():
    """tests if the binary classifier is computed correctly"""
    alpha = .1
    n_samples = 50
    n_iter = 50
    tol = .00001
    fit_intercept = True
    X, y = make_blobs(n_samples=n_samples, centers=2, random_state=0,
                      cluster_std=0.1)
    step_size = get_step_size(X, alpha, fit_intercept, classification=True)
    classes = np.unique(y)
    # Re-encode labels as {-1, +1} as expected by log_dloss.
    y_tmp = np.ones(n_samples)
    y_tmp[y != classes[1]] = -1
    y = y_tmp
    clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
                              max_iter=n_iter, tol=tol, random_state=77,
                              fit_intercept=fit_intercept)
    clf2 = clone(clf1)
    clf1.fit(X, y)
    clf2.fit(sp.csr_matrix(X), y)
    spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
                                        dloss=log_dloss,
                                        fit_intercept=fit_intercept)
    spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
                                          n_iter=n_iter,
                                          dloss=log_dloss, sparse=True,
                                          fit_intercept=fit_intercept)
    assert_array_almost_equal(clf1.coef_.ravel(),
                              spweights.ravel(),
                              decimal=2)
    assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
    assert_array_almost_equal(clf2.coef_.ravel(),
                              spweights2.ravel(),
                              decimal=2)
    assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_sag_multiclass_computed_correctly():
    """tests if the multiclass classifier is computed correctly"""
    alpha = .1
    n_samples = 20
    tol = .00001
    max_iter = 40
    fit_intercept = True
    X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
                      cluster_std=0.1)
    step_size = get_step_size(X, alpha, fit_intercept, classification=True)
    classes = np.unique(y)

    # Fit the same 3-class problem on dense and on sparse (CSR) input.
    clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
                              max_iter=max_iter, tol=tol, random_state=77,
                              fit_intercept=fit_intercept)
    clf2 = clone(clf1)
    clf1.fit(X, y)
    clf2.fit(sp.csr_matrix(X), y)

    # Rebuild the expected coefficients with the reference python SAG
    # implementation: one binary {-1, +1} problem per class (one-vs-rest).
    coef1 = []
    intercept1 = []
    coef2 = []
    intercept2 = []
    for cl in classes:
        y_encoded = np.ones(n_samples)
        y_encoded[y != cl] = -1

        spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
                                              dloss=log_dloss, n_iter=max_iter,
                                              fit_intercept=fit_intercept)
        spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
                                              n_iter=max_iter,
                                              dloss=log_dloss, sparse=True,
                                              fit_intercept=fit_intercept)
        coef1.append(spweights1)
        intercept1.append(spintercept1)
        coef2.append(spweights2)
        intercept2.append(spintercept2)

    coef1 = np.vstack(coef1)
    intercept1 = np.array(intercept1)
    coef2 = np.vstack(coef2)
    intercept2 = np.array(intercept2)

    # Compare the fitted estimators with the references class by class,
    # with loose tolerances (finite-iteration agreement only).
    for i, cl in enumerate(classes):
        assert_array_almost_equal(clf1.coef_[i].ravel(),
                                  coef1[i].ravel(),
                                  decimal=2)
        assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
        assert_array_almost_equal(clf2.coef_[i].ravel(),
                                  coef2[i].ravel(),
                                  decimal=2)
        assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
@ignore_warnings
def test_classifier_results():
    """tests if classifier results match target"""
    alpha = .1
    n_features = 20
    n_samples = 10
    tol = .01
    max_iter = 200
    random_state = np.random.RandomState(0)
    # Draw the design matrix first, then the true weights, so the RNG
    # stream matches the original fixture exactly.
    X = random_state.normal(size=(n_samples, n_features))
    true_w = random_state.normal(size=n_features)
    y = np.sign(np.dot(X, true_w))

    dense_clf = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
                                   max_iter=max_iter, tol=tol,
                                   random_state=77)
    sparse_clf = clone(dense_clf)
    dense_clf.fit(X, y)
    sparse_clf.fit(sp.csr_matrix(X), y)

    # The problem is linearly separable by construction, so both the dense
    # and the sparse fit must classify the training set perfectly.
    assert_almost_equal(dense_clf.predict(X), y, decimal=12)
    assert_almost_equal(sparse_clf.predict(X), y, decimal=12)
@ignore_warnings
def test_binary_classifier_class_weight():
    """tests binary classifier with classweights for each class"""
    alpha = .1
    n_samples = 50
    n_iter = 20
    tol = .00001
    fit_intercept = True
    X, y = make_blobs(n_samples=n_samples, centers=2, random_state=10,
                      cluster_std=0.1)
    step_size = get_step_size(X, alpha, fit_intercept, classification=True)
    classes = np.unique(y)

    # Re-encode the two classes as {-1, +1} so the class_weight keys below
    # match the labels seen by the estimator.
    y_tmp = np.ones(n_samples)
    y_tmp[y != classes[1]] = -1
    y = y_tmp

    class_weight = {1: .45, -1: .55}
    clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
                              max_iter=n_iter, tol=tol, random_state=77,
                              fit_intercept=fit_intercept,
                              class_weight=class_weight)
    clf2 = clone(clf1)
    clf1.fit(X, y)
    clf2.fit(sp.csr_matrix(X), y)

    # Expand the per-class weights into per-sample weights, which is the
    # form the reference sag_sparse implementation consumes.
    le = LabelEncoder()
    class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
    sample_weight = class_weight_[le.fit_transform(y)]

    # Reference weights: dense update rule and lazy (sparse) update rule.
    spweights, spintercept = sag_sparse(X, y, step_size, alpha, n_iter=n_iter,
                                        dloss=log_dloss,
                                        sample_weight=sample_weight,
                                        fit_intercept=fit_intercept)
    spweights2, spintercept2 = sag_sparse(X, y, step_size, alpha,
                                          n_iter=n_iter,
                                          dloss=log_dloss, sparse=True,
                                          sample_weight=sample_weight,
                                          fit_intercept=fit_intercept)

    assert_array_almost_equal(clf1.coef_.ravel(),
                              spweights.ravel(),
                              decimal=2)
    assert_almost_equal(clf1.intercept_, spintercept, decimal=1)
    assert_array_almost_equal(clf2.coef_.ravel(),
                              spweights2.ravel(),
                              decimal=2)
    assert_almost_equal(clf2.intercept_, spintercept2, decimal=1)
@ignore_warnings
def test_multiclass_classifier_class_weight():
    """tests multiclass with classweights for each class"""
    alpha = .1
    n_samples = 20
    tol = .00001
    max_iter = 50
    class_weight = {0: .45, 1: .55, 2: .75}
    fit_intercept = True
    X, y = make_blobs(n_samples=n_samples, centers=3, random_state=0,
                      cluster_std=0.1)
    step_size = get_step_size(X, alpha, fit_intercept, classification=True)
    classes = np.unique(y)

    clf1 = LogisticRegression(solver='sag', C=1. / alpha / n_samples,
                              max_iter=max_iter, tol=tol, random_state=77,
                              fit_intercept=fit_intercept,
                              class_weight=class_weight)
    clf2 = clone(clf1)
    clf1.fit(X, y)
    clf2.fit(sp.csr_matrix(X), y)

    # Expand per-class weights into per-sample weights for the reference
    # implementation.
    le = LabelEncoder()
    class_weight_ = compute_class_weight(class_weight, np.unique(y), y)
    sample_weight = class_weight_[le.fit_transform(y)]

    # Rebuild the expected coefficients: one binary {-1, +1} problem per
    # class (one-vs-rest), dense and lazy (sparse) update rules.
    coef1 = []
    intercept1 = []
    coef2 = []
    intercept2 = []
    for cl in classes:
        y_encoded = np.ones(n_samples)
        y_encoded[y != cl] = -1

        spweights1, spintercept1 = sag_sparse(X, y_encoded, step_size, alpha,
                                              n_iter=max_iter, dloss=log_dloss,
                                              sample_weight=sample_weight)
        spweights2, spintercept2 = sag_sparse(X, y_encoded, step_size, alpha,
                                              n_iter=max_iter, dloss=log_dloss,
                                              sample_weight=sample_weight,
                                              sparse=True)
        coef1.append(spweights1)
        intercept1.append(spintercept1)
        coef2.append(spweights2)
        intercept2.append(spintercept2)

    coef1 = np.vstack(coef1)
    intercept1 = np.array(intercept1)
    coef2 = np.vstack(coef2)
    intercept2 = np.array(intercept2)

    # Loose per-class comparison (finite-iteration agreement only).
    for i, cl in enumerate(classes):
        assert_array_almost_equal(clf1.coef_[i].ravel(),
                                  coef1[i].ravel(),
                                  decimal=2)
        assert_almost_equal(clf1.intercept_[i], intercept1[i], decimal=1)
        assert_array_almost_equal(clf2.coef_[i].ravel(),
                                  coef2[i].ravel(),
                                  decimal=2)
        assert_almost_equal(clf2.intercept_[i], intercept2[i], decimal=1)
def test_classifier_single_class():
    """tests if ValueError is thrown with only one class"""
    X = [[1, 2], [3, 4]]
    y = [1, 1]
    expected_msg = ("This solver needs samples of at least 2 classes "
                    "in the data")
    clf = LogisticRegression(solver='sag')
    # Fitting on a degenerate single-class target must fail loudly.
    assert_raise_message(ValueError, expected_msg, clf.fit, X, y)
def test_step_size_alpha_error():
    """All-zero data drives step_size * alpha_scaled to 1, which sag rejects."""
    X = [[0, 0], [0, 0]]
    y = [1, -1]
    fit_intercept = False
    alpha = 1.
    expected = ("Current sag implementation does not handle the case"
                " step_size * alpha_scaled == 1")
    # Both sag-based estimators must raise the same explicit error.
    for estimator in (LogisticRegression(solver='sag', C=1. / alpha,
                                         fit_intercept=fit_intercept),
                      Ridge(fit_intercept=fit_intercept, solver='sag',
                            alpha=alpha)):
        assert_raise_message(ZeroDivisionError, expected, estimator.fit, X, y)
| bsd-3-clause |
leesavide/pythonista-docs | Documentation/matplotlib/users/plotting/examples/annotate_simple04.py | 6 | 1048 | import matplotlib.pyplot as plt
plt.figure(1, figsize=(3,3))
ax = plt.subplot(111)

# First annotation: the arrow leaves from the lower-left corner of the
# text box (relpos=(0., 0.)) and bends one way (rad=0.2).
ann = ax.annotate("Test",
                  xy=(0.2, 0.2), xycoords='data',
                  xytext=(0.8, 0.8), textcoords='data',
                  size=20, va="center", ha="center",
                  bbox=dict(boxstyle="round4", fc="w"),
                  arrowprops=dict(arrowstyle="-|>",
                                  connectionstyle="arc3,rad=0.2",
                                  relpos=(0., 0.),
                                  fc="w"),
                  )

# Second annotation: identical text and endpoints, but the arrow starts
# from the lower-right corner (relpos=(1., 0.)) and bends the opposite
# way (rad=-0.2), illustrating the effect of relpos/rad.
ann = ax.annotate("Test",
                  xy=(0.2, 0.2), xycoords='data',
                  xytext=(0.8, 0.8), textcoords='data',
                  size=20, va="center", ha="center",
                  bbox=dict(boxstyle="round4", fc="w"),
                  arrowprops=dict(arrowstyle="-|>",
                                  connectionstyle="arc3,rad=-0.2",
                                  relpos=(1., 0.),
                                  fc="w"),
                  )

plt.show()
| apache-2.0 |
ahoyosid/scikit-learn | sklearn/neighbors/classification.py | 18 | 13871 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
                           SupervisedIntegerMixin, ClassifierMixin):
    """Classifier implementing the k-nearest neighbors vote.

    Parameters
    ----------
    n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`k_neighbors` queries.

    weights : str or callable
        weight function used in prediction.  Possible values:

        - 'uniform' : uniform weights.  All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.

        Uniform weights are used by default.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.

    metric : string or DistanceMetric object (default = 'minkowski')
        the distance metric to use for the tree.  The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
        metric. See the documentation of the DistanceMetric class for a
        list of available metrics.

    p : integer, optional (default = 2)
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    metric_params: dict, optional (default = None)
        additional keyword arguments for the metric function.

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import KNeighborsClassifier
    >>> neigh = KNeighborsClassifier(n_neighbors=3)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    KNeighborsClassifier(...)
    >>> print(neigh.predict([[1.1]]))
    [0]
    >>> print(neigh.predict_proba([[0.9]]))
    [[ 0.66666667  0.33333333]]

    See also
    --------
    RadiusNeighborsClassifier
    KNeighborsRegressor
    RadiusNeighborsRegressor
    NearestNeighbors

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    .. warning::

       Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances but
       but different labels, the results will depend on the ordering of the
       training data.

    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, n_neighbors=5,
                 weights='uniform', algorithm='auto', leaf_size=30,
                 p=2, metric='minkowski', metric_params=None, **kwargs):
        # Tree/metric configuration is stored by the shared NeighborsBase
        # initializer; only the vote weighting is handled here.
        self._init_params(n_neighbors=n_neighbors,
                          algorithm=algorithm,
                          leaf_size=leaf_size, metric=metric, p=p,
                          metric_params=metric_params, **kwargs)
        # Validates that weights is 'uniform', 'distance' or a callable.
        self.weights = _check_weights(weights)

    def predict(self, X):
        """Predict the class labels for the provided data

        Parameters
        ----------
        X : array of shape [n_samples, n_features]
            A 2-D array representing the test points.

        Returns
        -------
        y : array of shape [n_samples] or [n_samples, n_outputs]
            Class labels for each data sample.
        """
        X = check_array(X, accept_sparse='csr')

        neigh_dist, neigh_ind = self.kneighbors(X)

        classes_ = self.classes_
        _y = self._y
        if not self.outputs_2d_:
            # Normalize the single-output case to the multi-output layout
            # so the loop below can treat both uniformly.
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]

        n_outputs = len(classes_)
        n_samples = X.shape[0]
        weights = _get_weights(neigh_dist, self.weights)

        y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
        for k, classes_k in enumerate(classes_):
            # Majority vote among the k neighbors, optionally weighted by
            # the distance-derived weights computed above.
            if weights is None:
                mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
            else:
                mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)

            # mode holds encoded label indices; map back to class values.
            mode = np.asarray(mode.ravel(), dtype=np.intp)
            y_pred[:, k] = classes_k.take(mode)

        if not self.outputs_2d_:
            y_pred = y_pred.ravel()

        return y_pred

    def predict_proba(self, X):
        """Return probability estimates for the test data X.

        Parameters
        ----------
        X : array, shape = (n_samples, n_features)
            A 2-D array representing the test points.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            of such arrays if n_outputs > 1.
            The class probabilities of the input samples. Classes are ordered
            by lexicographic order.
        """
        X = check_array(X, accept_sparse='csr')

        neigh_dist, neigh_ind = self.kneighbors(X)

        classes_ = self.classes_
        _y = self._y
        if not self.outputs_2d_:
            # Normalize the single-output case to the multi-output layout.
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]

        n_samples = X.shape[0]

        weights = _get_weights(neigh_dist, self.weights)
        if weights is None:
            # Uniform weighting: each neighbor contributes one vote.
            weights = np.ones_like(neigh_ind)

        all_rows = np.arange(X.shape[0])
        probabilities = []
        for k, classes_k in enumerate(classes_):
            pred_labels = _y[:, k][neigh_ind]
            proba_k = np.zeros((n_samples, classes_k.size))

            # Accumulate (weighted) votes: for each sample row, add the
            # i-th neighbor's weight to the column of its predicted label.
            # a simple ':' index doesn't work right
            for i, idx in enumerate(pred_labels.T):  # loop is O(n_neighbors)
                proba_k[all_rows, idx] += weights[:, i]

            # normalize 'votes' into real [0,1] probabilities
            normalizer = proba_k.sum(axis=1)[:, np.newaxis]
            # Guard against all-zero rows (avoids division by zero).
            normalizer[normalizer == 0.0] = 1.0
            proba_k /= normalizer

            probabilities.append(proba_k)

        if not self.outputs_2d_:
            probabilities = probabilities[0]

        return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
                                SupervisedIntegerMixin, ClassifierMixin):
    """Classifier implementing a vote among neighbors within a given radius

    Parameters
    ----------
    radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth`radius_neighbors`
        queries.

    weights : str or callable
        weight function used in prediction.  Possible values:

        - 'uniform' : uniform weights.  All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.

        Uniform weights are used by default.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDtree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.

    metric : string or DistanceMetric object (default='minkowski')
        the distance metric to use for the tree.  The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
        metric. See the documentation of the DistanceMetric class for a
        list of available metrics.

    p : integer, optional (default = 2)
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    outlier_label : int, optional (default = None)
        Label, which is given for outlier samples (samples with no
        neighbors on given radius).
        If set to None, ValueError is raised, when outlier is detected.

    metric_params: dict, optional (default = None)
        additional keyword arguments for the metric function.

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import RadiusNeighborsClassifier
    >>> neigh = RadiusNeighborsClassifier(radius=1.0)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    RadiusNeighborsClassifier(...)
    >>> print(neigh.predict([[1.5]]))
    [0]

    See also
    --------
    KNeighborsClassifier
    RadiusNeighborsRegressor
    KNeighborsRegressor
    NearestNeighbors

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, radius=1.0, weights='uniform',
                 algorithm='auto', leaf_size=30, p=2, metric='minkowski',
                 outlier_label=None, metric_params=None, **kwargs):
        # Tree/metric configuration is stored by the shared NeighborsBase
        # initializer; vote weighting and outlier policy are kept here.
        self._init_params(radius=radius,
                          algorithm=algorithm,
                          leaf_size=leaf_size,
                          metric=metric, p=p, metric_params=metric_params,
                          **kwargs)
        self.weights = _check_weights(weights)
        self.outlier_label = outlier_label

    def predict(self, X):
        """Predict the class labels for the provided data

        Parameters
        ----------
        X : array of shape [n_samples, n_features]
            A 2-D array representing the test points.

        Returns
        -------
        y : array of shape [n_samples] or [n_samples, n_outputs]
            Class labels for each data sample.
        """
        X = check_array(X, accept_sparse='csr')
        n_samples = X.shape[0]

        neigh_dist, neigh_ind = self.radius_neighbors(X)
        # Split query points into those with at least one neighbor inside
        # the radius and those with none (outliers).
        inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
        outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]

        classes_ = self.classes_
        _y = self._y
        if not self.outputs_2d_:
            # Normalize the single-output case to the multi-output layout.
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]

        n_outputs = len(classes_)

        if self.outlier_label is not None:
            # Replace the empty distance arrays of outlier rows with a tiny
            # dummy value -- presumably so _get_weights below does not choke
            # on them; their predictions are overwritten at the end anyway.
            # NOTE(review): confirm this interaction with _get_weights.
            neigh_dist[outliers] = 1e-6
        elif outliers:
            raise ValueError('No neighbors found for test samples %r, '
                             'you can try using larger radius, '
                             'give a label for outliers, '
                             'or consider removing them from your dataset.'
                             % outliers)

        weights = _get_weights(neigh_dist, self.weights)

        y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
        for k, classes_k in enumerate(classes_):
            # Per-sample neighbor label lists are ragged (different lengths),
            # hence the object-dtype array.
            pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
                                   dtype=object)
            if weights is None:
                # NOTE(review): np.int is removed in numpy >= 1.24.
                mode = np.array([stats.mode(pl)[0]
                                 for pl in pred_labels[inliers]], dtype=np.int)
            else:
                mode = np.array([weighted_mode(pl, w)[0]
                                 for (pl, w)
                                 in zip(pred_labels[inliers], weights)],
                                dtype=np.int)

            mode = mode.ravel()

            y_pred[inliers, k] = classes_k.take(mode)

        if outliers:
            # Outliers get the user-supplied fallback label in every output.
            y_pred[outliers, :] = self.outlier_label

        if not self.outputs_2d_:
            y_pred = y_pred.ravel()

        return y_pred
| bsd-3-clause |
YinongLong/scikit-learn | examples/mixture/plot_gmm.py | 122 | 3265 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians
obtained with Expectation Maximisation (``GaussianMixture`` class) and
Variational Inference (``BayesianGaussianMixture`` class models with
a Dirichlet process prior).
Both models have access to five components with which to fit the data. Note
that the Expectation Maximisation model will necessarily use all five
components while the Variational Inference model will effectively only use as
many as are needed for a good fit. Here we can see that the Expectation
Maximisation model splits some components arbitrarily, because it is trying to
fit too many components, while the Dirichlet Process model adapts it number of
state automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are less examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Endless palette cycled over mixture components when plotting; shared by
# both subplots so component i gets the same color in each.
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
                              'darkorange'])
def plot_results(X, Y_, means, covariances, index, title):
    """Scatter the clustered samples and draw one covariance ellipse per
    Gaussian component on subplot ``index`` (0 = top row, 1 = bottom row).

    X : (n_samples, 2) data; Y_ : per-sample component assignments;
    means / covariances : per-component Gaussian parameters.
    """
    splot = plt.subplot(2, 1, 1 + index)
    for i, (mean, covar, color) in enumerate(zip(
            means, covariances, color_iter)):
        # Eigen-decomposition of the 2x2 covariance: scaled square roots of
        # the eigenvalues give the ellipse axis lengths, the first
        # eigenvector (normalized to u) gives its orientation.
        v, w = linalg.eigh(covar)
        v = 2. * np.sqrt(2.) * np.sqrt(v)
        u = w[0] / linalg.norm(w[0])
        # as the DP will not use every component it has access to
        # unless it needs it, we shouldn't plot the redundant
        # components.
        if not np.any(Y_ == i):
            continue
        plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)

        # Plot an ellipse to show the Gaussian component
        angle = np.arctan(u[1] / u[0])
        angle = 180. * angle / np.pi  # convert to degrees
        ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
        ell.set_clip_box(splot.bbox)
        ell.set_alpha(0.5)
        splot.add_artist(ell)

    # Fixed limits so the two subplots are directly comparable.
    plt.xlim(-9., 5.)
    plt.ylim(-3., 6.)
    plt.xticks(())
    plt.yticks(())
    plt.title(title)
# Number of samples per component
n_samples = 500

# Generate random sample, two components: the first is an anisotropic
# Gaussian (standard normal sheared by C), the second an isotropic blob
# centred at (-6, 3).
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
          .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]

# Fit a Gaussian mixture with EM using five components
gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
             'Gaussian Mixture')

# Fit a Dirichlet process Gaussian mixture using five components
dpgmm = mixture.BayesianGaussianMixture(n_components=5,
                                        covariance_type='full').fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
             'Bayesian Gaussian Mixture with a Dirichlet process prior')

plt.show()
| bsd-3-clause |
Cignite/primdb | primdb_app/plot/massrange.py | 2 | 1421 | '''
Counting the number of precursor ion masses in a define range from the database data.
'''
import numpy as np  # numpy.numarray was deprecated and removed from numpy
import matplotlib.pyplot as plt
import psycopg2

# Establish connection with the postgres server with the given configuration.
conn = psycopg2.connect(host="localhost", user="primuser", password="web",
                        database="primdb")

# Inclusive mass windows: [200, 400], [400, 800], ...
rangelow = [200, 400, 800, 1200, 1600]
rangehigh = [400, 800, 1200, 1600, 2000]
masscount = []
cur = conn.cursor()
for mlow, mhigh in zip(rangelow, rangehigh):
    # Count precursor-ion masses in [mlow, mhigh].  Bounds are passed as
    # query parameters instead of being interpolated into the SQL string,
    # so the numeric column is compared against numbers (not quoted
    # strings) and the query is injection-safe by construction.
    cur.execute("SELECT monoiso FROM primdb_app_selectedion "
                "WHERE monoiso BETWEEN %s AND %s", (mlow, mhigh))
    masscount.append(cur.rowcount)

# One bar per mass window, annotated with its count.
labels = ["200-400", "400-800", "800-1200", "1200-1600", "1600-2000"]
colors = ['r', 'g', 'y', 'b', 'b']
xlocations = np.arange(len(masscount)) + 0.5
width = 0.7
plt.bar(xlocations, masscount, width=width, color=colors)
plt.xticks(xlocations + width / 2, labels)
plt.xlim(0, xlocations[-1] + width * 2)
plt.ylabel("Count per mass range")
plt.xlabel("M/Z")
for x, y in zip(xlocations, masscount):
    plt.text(x + 0.4, y, '%.2d' % y, ha='center', va='bottom')
# Change the directory according to your application path.
plt.savefig(r'D:/Dropbox/Dropbox/primdb/assets/img/statistics1.png', dpi=100,
            transparent=True)
| agpl-3.0 |
CarlosA-Lopez/Proyecto_Embebidos_Grupo2 | plotly-1.2.9/plotly/matplotlylib/mplexporter/utils.py | 4 | 11384 | """
Utility Routines for Working with Matplotlib Objects
====================================================
"""
import itertools
import io
import base64
import numpy as np
import warnings
import matplotlib
from matplotlib.colors import colorConverter
from matplotlib.path import Path
from matplotlib.markers import MarkerStyle
from matplotlib.transforms import Affine2D
from matplotlib import ticker
def color_to_hex(color):
    """Convert matplotlib color code to hex color code"""
    # Missing colors and fully transparent colors both map to "none".
    if color is None or colorConverter.to_rgba(color)[3] == 0:
        return 'none'
    r, g, b = colorConverter.to_rgb(color)
    return '#{0:02X}{1:02X}{2:02X}'.format(int(255 * r),
                                           int(255 * g),
                                           int(255 * b))
def many_to_one(input_dict):
    """Convert a many-to-one mapping to a one-to-one mapping"""
    flattened = {}
    # Each key of ``input_dict`` is itself an iterable of keys sharing one
    # value; expand every member to its own entry (later groups win on
    # duplicates, matching plain dict construction order).
    for key_group, value in input_dict.items():
        for key in key_group:
            flattened[key] = value
    return flattened
# Every matplotlib linestyle alias mapped to its SVG stroke-dasharray value.
LINESTYLES = many_to_one({('solid', '-', (None, None)): "10,0",
                          ('dashed', '--'): "6,6",
                          ('dotted', ':'): "2,2",
                          ('dashdot', '-.'): "4,4,2,4",
                          ('', ' ', 'None', 'none'): "none"})
def get_dasharray(obj, i=None):
    """Get an SVG dash array for the given matplotlib linestyle

    Parameters
    ----------
    obj : matplotlib object
        The matplotlib line or path object, which must have a get_linestyle()
        method which returns a valid matplotlib line code

    i : integer (optional)
        Index into the linestyle value of *obj* when it holds one style per
        element (left unindexed when None).

    Returns
    -------
    dasharray : string
        The HTML/SVG dasharray code associated with the object.
    """
    if obj.__dict__.get('_dashSeq', None) is not None:
        # A custom dash sequence takes precedence over the named linestyle.
        # NOTE(review): reads the private matplotlib attribute ``_dashSeq``
        # -- fragile across matplotlib versions.
        return ','.join(map(str, obj._dashSeq))
    else:
        ls = obj.get_linestyle()
        if i is not None:
            ls = ls[i]
        dasharray = LINESTYLES.get(ls, None)
        if dasharray is None:
            # Unknown style: warn and fall back to the solid pattern.
            warnings.warn("dash style '{0}' not understood: "
                          "defaulting to solid.".format(ls))
            dasharray = LINESTYLES['-']
        return dasharray
# Matplotlib path codes mapped to their single-character SVG equivalents.
PATH_DICT = {Path.LINETO: 'L',
             Path.MOVETO: 'M',
             Path.CURVE3: 'S',
             Path.CURVE4: 'C',
             Path.CLOSEPOLY: 'Z'}
def SVG_path(path, transform=None, simplify=False):
    """Construct the vertices and SVG codes for the path

    Parameters
    ----------
    path : matplotlib.Path object

    transform : matplotlib transform (optional)
        if specified, the path will be transformed before computing the output.

    Returns
    -------
    vertices : array
        The shape (M, 2) array of vertices of the Path. Note that some Path
        codes require multiple vertices, so the length of these vertices may
        be longer than the list of path codes.
    path_codes : list
        A length N list of single-character path codes, N <= M. Each code is
        a single character, in ['L','M','S','C','Z']. See the standard SVG
        path specification for a description of these.
    """
    if transform is not None:
        path = path.transformed(transform)

    # CLOSEPOLY segments carry no drawable vertices, so pair them with an
    # empty vertex list; other codes keep their vertex arrays.
    vc_tuples = [(vertices if path_code != Path.CLOSEPOLY else [],
                  PATH_DICT[path_code])
                 for (vertices, path_code)
                 in path.iter_segments(simplify=simplify)]

    if not vc_tuples:
        # empty path is a special case
        return np.zeros((0, 2)), []
    else:
        vertices, codes = zip(*vc_tuples)
        # Flatten the ragged per-segment vertex arrays into one (M, 2) array.
        vertices = np.array(list(itertools.chain(*vertices))).reshape(-1, 2)
        return vertices, list(codes)
def get_path_style(path, fill=True):
    """Get the style dictionary for matplotlib path objects"""
    # A missing alpha means fully opaque.
    alpha = path.get_alpha()
    style = {
        'alpha': 1 if alpha is None else alpha,
        'edgecolor': color_to_hex(path.get_edgecolor()),
        'facecolor': (color_to_hex(path.get_facecolor())
                      if fill else 'none'),
        'edgewidth': path.get_linewidth(),
        'dasharray': get_dasharray(path),
        'zorder': path.get_zorder(),
    }
    return style
def get_line_style(line):
    """Get the style dictionary for matplotlib line objects"""
    # A missing alpha means fully opaque.
    alpha = line.get_alpha()
    return {
        'alpha': 1 if alpha is None else alpha,
        'color': color_to_hex(line.get_color()),
        'linewidth': line.get_linewidth(),
        'dasharray': get_dasharray(line),
        'zorder': line.get_zorder(),
    }
def get_marker_style(line):
    """Get the style dictionary for matplotlib marker objects"""
    style = {}
    style['alpha'] = line.get_alpha()
    if style['alpha'] is None:
        # A missing alpha means fully opaque.
        style['alpha'] = 1

    style['facecolor'] = color_to_hex(line.get_markerfacecolor())
    style['edgecolor'] = color_to_hex(line.get_markeredgecolor())
    style['edgewidth'] = line.get_markeredgewidth()

    style['marker'] = line.get_marker()
    markerstyle = MarkerStyle(line.get_marker())
    markersize = line.get_markersize()
    # Scale the unit marker path to the marker size; the negated y factor
    # flips the vertical axis -- presumably because SVG's y axis points
    # down (confirm against the consumer of 'markerpath').
    markertransform = (markerstyle.get_transform()
                       + Affine2D().scale(markersize, -markersize))
    style['markerpath'] = SVG_path(markerstyle.get_path(),
                                   markertransform)
    style['markersize'] = markersize
    style['zorder'] = line.get_zorder()
    return style
def get_text_style(text):
    """Return the text style dict for a text instance"""
    # A missing alpha means fully opaque.
    alpha = text.get_alpha()
    return {
        'alpha': 1 if alpha is None else alpha,
        'fontsize': text.get_size(),
        'color': color_to_hex(text.get_color()),
        'halign': text.get_horizontalalignment(),  # left, center, right
        'valign': text.get_verticalalignment(),  # baseline, center, top
        'rotation': text.get_rotation(),
        'zorder': text.get_zorder(),
    }
def get_axis_properties(axis):
    """Return the property dictionary for a matplotlib.Axis instance"""
    props = {}
    label1On = axis._major_tick_kw.get('label1On', True)

    # Tick label set 1 is the primary (bottom/left) one; when it is hidden
    # the axis is taken to sit on the opposite side.
    if isinstance(axis, matplotlib.axis.XAxis):
        if label1On:
            props['position'] = "bottom"
        else:
            props['position'] = "top"
    elif isinstance(axis, matplotlib.axis.YAxis):
        if label1On:
            props['position'] = "left"
        else:
            props['position'] = "right"
    else:
        raise ValueError("{0} should be an Axis instance".format(axis))

    # Use tick values if appropriate
    locator = axis.get_major_locator()
    props['nticks'] = len(locator())
    if isinstance(locator, ticker.FixedLocator):
        props['tickvalues'] = list(locator())
    else:
        props['tickvalues'] = None

    # Find tick formats: "" suppresses labels entirely, None means "use
    # the default formatting".
    formatter = axis.get_major_formatter()
    if isinstance(formatter, ticker.NullFormatter):
        props['tickformat'] = ""
    elif not any(label.get_visible() for label in axis.get_ticklabels()):
        props['tickformat'] = ""
    else:
        props['tickformat'] = None

    # Get axis scale
    props['scale'] = axis.get_scale()

    # Get major tick label size (assumes that's all we really care about!)
    labels = axis.get_ticklabels()
    if labels:
        props['fontsize'] = labels[0].get_fontsize()
    else:
        props['fontsize'] = None

    # Get associated grid
    props['grid'] = get_grid_style(axis)

    return props
def get_grid_style(axis):
    """Return a dict describing the major grid attached to *axis*."""
    gridlines = axis.get_gridlines()
    if not (axis._gridOnMajor and len(gridlines) > 0):
        return {"gridOn": False}
    # All major gridlines share a style, so the first one is representative.
    first = gridlines[0]
    return dict(gridOn=True,
                color=color_to_hex(first.get_color()),
                dasharray=get_dasharray(first),
                alpha=first.get_alpha())
def get_figure_properties(fig):
    """Return the size (in inches) and resolution of a matplotlib figure."""
    props = {}
    props['figwidth'] = fig.get_figwidth()
    props['figheight'] = fig.get_figheight()
    props['dpi'] = fig.dpi
    return props
def get_axes_properties(ax):
    """Return the property dictionary for a matplotlib.Axes instance."""
    props = {'axesbg': color_to_hex(ax.patch.get_facecolor()),
             'axesbgalpha': ax.patch.get_alpha(),
             'bounds': ax.get_position().bounds,
             'dynamic': ax.get_navigate(),
             'axison': ax.axison,
             'frame_on': ax.get_frame_on(),
             'axes': [get_axis_properties(ax.xaxis),
                      get_axis_properties(ax.yaxis)]}

    for axname in ['x', 'y']:
        axis = getattr(ax, axname + 'axis')
        domain = getattr(ax, 'get_{0}lim'.format(axname))()
        lim = domain
        if isinstance(axis.converter, matplotlib.dates.DateConverter):
            scale = 'date'
            # pandas is optional: only needed to recognize period-converted
            # date axes.
            try:
                import pandas as pd
                from pandas.tseries.converter import PeriodConverter
            except ImportError:
                pd = None

            if (pd is not None and isinstance(axis.converter,
                                              PeriodConverter)):
                _dates = [pd.Period(ordinal=int(d), freq=axis.freq)
                          for d in domain]
                # Tuples are (year, month-1, day, hour, minute, second, ms)
                # -- month is zero-based, presumably for JavaScript Date
                # consumers (confirm against the renderer).
                domain = [(d.year, d.month - 1, d.day,
                           d.hour, d.minute, d.second, 0)
                          for d in _dates]
            else:
                domain = [(d.year, d.month - 1, d.day,
                           d.hour, d.minute, d.second,
                           d.microsecond * 1E-3)
                          for d in matplotlib.dates.num2date(domain)]
        else:
            scale = axis.get_scale()

        if scale not in ['date', 'linear', 'log']:
            raise ValueError("Unknown axis scale: "
                             "{0}".format(axis[axname].get_scale()))

        props[axname + 'scale'] = scale
        props[axname + 'lim'] = lim
        props[axname + 'domain'] = domain

    return props
def iter_all_children(obj, skipContainers=False):
    """
    Returns an iterator over all childen and nested children using
    obj's get_children() method

    if skipContainers is true, only childless objects are returned.
    """
    children = obj.get_children() if hasattr(obj, 'get_children') else []
    if len(children) == 0:
        # Base case: a childless object yields itself.
        yield obj
        return
    for child in children:
        if not skipContainers:
            yield child
        # Depth-first recursion (`yield from` would also work on python 3).
        # NOTE(review): with skipContainers=False a leaf is yielded twice
        # -- once here as a child and once by the base case above; this
        # matches the original traversal and is preserved as-is.
        for descendant in iter_all_children(child, skipContainers):
            yield descendant
def get_legend_properties(ax, legend):
    """Collect the handles, labels and visibility of *legend* on *ax*."""
    handles, labels = ax.get_legend_handles_labels()
    return {'handles': handles,
            'labels': labels,
            'visible': legend.get_visible()}
def image_to_base64(image):
    """
    Convert a matplotlib image to a base64 png representation

    Parameters
    ----------
    image : matplotlib image object
        The image to be converted.

    Returns
    -------
    image_base64 : string
        The UTF8-encoded base64 string representation of the png image.
    """
    ax = image.axes
    binary_buffer = io.BytesIO()

    # image is saved in axes coordinates: we need to temporarily
    # set the correct limits to get the correct image
    lim = ax.axis()
    ax.axis(image.get_extent())
    image.write_png(binary_buffer)
    # Restore the original axis limits before returning.
    ax.axis(lim)

    binary_buffer.seek(0)
    return base64.b64encode(binary_buffer.read()).decode('utf-8')
| unlicense |
lucidfrontier45/scikit-learn | examples/cluster/plot_lena_segmentation.py | 2 | 2410 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogenous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print __doc__  # NOTE: Python 2 print statement -- this example predates py3

# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>, Brian Cheung
# License: BSD

import time

import numpy as np
import scipy as sp
import pylab as pl

from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering

lena = sp.misc.lena()

# Downsample the image by a factor of 4: two successive 2x2 block sums
# (each line halves both dimensions while summing neighboring pixels).
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]

# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)

# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi.
# eps keeps edge weights strictly positive.
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps

# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11

###############################################################################
# Visualize the resulting regions: run both label-assignment strategies
# and draw one contour per region, timing each run.
for assign_labels in ('kmeans', 'discretize'):
    t0 = time.time()
    labels = spectral_clustering(graph, n_clusters=N_REGIONS,
                                 assign_labels=assign_labels,
                                 random_state=1)
    t1 = time.time()
    labels = labels.reshape(lena.shape)
    pl.figure(figsize=(5, 5))
    pl.imshow(lena, cmap=pl.cm.gray)
    for l in range(N_REGIONS):
        # NOTE(review): ``contours`` is not a documented pl.contour kwarg;
        # presumably ignored by old matplotlib -- confirm before reuse.
        pl.contour(labels == l, contours=1,
                   colors=[pl.cm.spectral(l / float(N_REGIONS)), ])
    pl.xticks(())
    pl.yticks(())
    pl.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
pl.show()
| bsd-3-clause |
shenzebang/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)

import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal

n_components, n_features = 512, 100
n_nonzero_coefs = 17

# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
                                   n_components=n_components,
                                   n_features=n_features,
                                   n_nonzero_coefs=n_nonzero_coefs,
                                   random_state=0)

# indices of the true non-zero coefficients
idx, = w.nonzero()

# distort the clean signal with additive gaussian noise
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))

# plot the sparse signal (ground truth)
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])

# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])

# plot the noisy reconstruction (same estimator, noisy target)
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])

# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])

plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
             fontsize=16)
plt.show()
| bsd-3-clause |
potash/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 43 | 39945 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from itertools import product
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import skip_if_32bit
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
# toy sample: two linearly separable clusters in 2-D
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
# held-out points and their expected class labels
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]

# shared RNG so the permutations below are reproducible across test runs
rng = np.random.RandomState(0)

# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]

# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def check_classification_toy(presort, loss):
    # Check classification on a toy dataset for one (presort, loss) combo.
    clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
                                     random_state=1, presort=presort)

    # predicting before fit must raise
    assert_raises(ValueError, clf.predict, T)

    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    assert_equal(10, len(clf.estimators_))

    # training deviance should decrease at some boosting stage
    deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
    assert_true(np.any(deviance_decrease >= 0.0))

    # apply() maps the 6 samples through 10 stages x 1 output class
    leaves = clf.apply(X)
    assert_equal(leaves.shape, (6, 10, 1))
def test_classification_toy():
    # Yield one sub-test per (presort, loss) combination; nested loops
    # enumerate the same pairs, in the same order, as itertools.product.
    for presort in ('auto', True, False):
        for loss in ('deviance', 'exponential'):
            yield check_classification_toy, presort, loss
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
    # Regression losses must be rejected by the classifier...
    for regression_loss in ('ls', 'lad', 'quantile', 'huber'):
        assert_raises(ValueError,
                      GradientBoostingClassifier(loss=regression_loss).fit,
                      X, y)
    # ...and classification losses must be rejected by the regressor.
    for classification_loss in ('deviance', 'exponential'):
        assert_raises(ValueError,
                      GradientBoostingRegressor(loss=classification_loss).fit,
                      X, y)
def check_classification_synthetic(presort, loss):
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.09)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=2,
max_depth=1, loss=loss,
learning_rate=1.0, subsample=0.5,
random_state=0,
presort=presort)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert_less(error_rate, 0.08)
def test_classification_synthetic():
for presort, loss in product(('auto', True, False), ('deviance', 'exponential')):
yield check_classification_synthetic, presort, loss
def check_boston(presort, loss, subsample):
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
ones = np.ones(len(boston.target))
last_y_pred = None
for sample_weight in None, ones, 2 * ones:
clf = GradientBoostingRegressor(n_estimators=100,
loss=loss,
max_depth=4,
subsample=subsample,
min_samples_split=2,
random_state=1,
presort=presort)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
leaves = clf.apply(boston.data)
assert_equal(leaves.shape, (506, 100))
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_less(mse, 6.0)
if last_y_pred is not None:
assert_array_almost_equal(last_y_pred, y_pred)
last_y_pred = y_pred
def test_boston():
for presort, loss, subsample in product(('auto', True, False),
('ls', 'lad', 'huber'),
(1.0, 0.5)):
yield check_boston, presort, loss, subsample
def check_iris(presort, subsample, sample_weight):
# Check consistency on dataset iris.
clf = GradientBoostingClassifier(n_estimators=100,
loss='deviance',
random_state=1,
subsample=subsample,
presort=presort)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
leaves = clf.apply(iris.data)
assert_equal(leaves.shape, (150, 100, 3))
def test_iris():
ones = np.ones(len(iris.target))
for presort, subsample, sample_weight in product(('auto', True, False),
(1.0, 0.5),
(None, ones)):
yield check_iris, presort, subsample, sample_weight
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
# `Bagging Predictors?. Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 2, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state,
noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
clf = GradientBoostingRegressor(presort=presort)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 5.0)
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 1700.0)
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
for presort in True, False:
regression_params['presort'] = presort
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert_less(mse, 0.015)
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
for presort in True, False:
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=2, random_state=1,
presort=presort)
clf.fit(X, y)
assert_true(hasattr(clf, 'feature_importances_'))
# XXX: Remove this test in 0.19 after transform support to estimators
# is removed.
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = (
clf.feature_importances_ > clf.feature_importances_.mean())
assert_array_almost_equal(X_new, X[:, feature_mask])
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert_true(np.all(y_proba >= 0.0))
assert_true(np.all(y_proba <= 1.0))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([[]])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict([rng.rand(2)])
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict([rng.rand(2)]))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert_equal(clf.oob_improvement_.shape[0], 100)
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multilcass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9)
assert_equal(clf.oob_improvement_.shape[0], clf.n_estimators)
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert_equal(est.estimators_[0, 0].max_depth, 1)
for i in range(1, 11):
assert_equal(est.estimators_[-i, 0].max_depth, 2)
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
    """Monitor callback that requests early stopping.

    Parameters
    ----------
    i : int
        Zero-based boosting iteration index supplied by ``fit``.
    est : estimator
        The estimator being fitted (unused).
    locals : dict
        Local variables of the calling ``fit`` (unused).

    Returns
    -------
    bool
        True on the 10th iteration (``i == 9``), telling ``fit`` to
        stop adding stages; False otherwise.
    """
    # idiom fix: the comparison already yields the boolean we want,
    # no need for an explicit if/else returning True/False.
    return i == 9
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
    # The ZeroEstimator init, spelled either as an object or as the
    # string alias 'zero', should give the same regression quality.
    for init_spec in (ZeroEstimator(), 'zero'):
        reg = GradientBoostingRegressor(n_estimators=20, max_depth=1,
                                        random_state=1, init=init_spec)
        reg.fit(boston.data, boston.target)
        mse = mean_squared_error(boston.target, reg.predict(boston.data))
        assert_almost_equal(mse, 33.0, decimal=0)

    # An unrecognized init string must be rejected at fit time.
    bad = GradientBoostingRegressor(n_estimators=20, max_depth=1,
                                    random_state=1, init='foobar')
    assert_raises(ValueError, bad.fit, boston.data, boston.target)
def test_zero_estimator_clf():
    # ZeroEstimator (object form and its 'zero' alias) should train well
    # on the multiclass iris problem.
    X = iris.data
    y = np.array(iris.target)

    for init_spec in (ZeroEstimator(), 'zero'):
        clf = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                         random_state=1, init=init_spec)
        clf.fit(X, y)
        assert_greater(clf.score(X, y), 0.96)

    # Collapse iris into a binary problem and check 'zero' init again.
    nonzero = y != 0
    y[nonzero] = 1
    y[~nonzero] = 0
    clf = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init='zero')
    clf.fit(X, y)
    assert_greater(clf.score(X, y), 0.96)

    # An unrecognized init string must be rejected at fit time.
    bad = GradientBoostingClassifier(n_estimators=20, max_depth=1,
                                     random_state=1, init='foobar')
    assert_raises(ValueError, bad.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # When both are supplied, max_leaf_nodes should take precedence over
    # max_depth for both the regressor and the classifier.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    for GBEstimator in (GradientBoostingRegressor,
                        GradientBoostingClassifier):
        # max_leaf_nodes=k forces growth past the nominal depth limit
        capped = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
        assert_greater(capped.estimators_[0, 0].tree_.max_depth, 1)

        # without max_leaf_nodes the depth limit is honored
        depth_only = GBEstimator(max_depth=1).fit(X, y)
        assert_equal(depth_only.estimators_[0, 0].tree_.max_depth, 1)
def test_warm_start_wo_nestimators_change():
    # Refitting with warm_start and an unchanged n_estimators must not
    # grow the ensemble.  Regression test for #3513.
    X_toy, y_toy = [[0, 1], [2, 3]], [0, 1]
    clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
    for _ in range(2):
        clf.fit(X_toy, y_toy)
        assert_equal(clf.estimators_.shape[0], 10)
def test_probability_exponential():
    # Probability estimates from the exponential loss.
    clf = GradientBoostingClassifier(loss='exponential',
                                     n_estimators=100, random_state=1)

    # asking for probabilities before fitting is an error
    assert_raises(ValueError, clf.predict_proba, T)

    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)

    proba = clf.predict_proba(T)
    # probabilities must lie in [0, 1] ...
    assert_true(np.all(proba >= 0.0))
    assert_true(np.all(proba <= 1.0))

    # ... and match the logistic transform of the decision function
    raw = clf.decision_function(T).ravel()
    assert_array_almost_equal(proba[:, 1], 1.0 / (1.0 + np.exp(-2 * raw)))

    # the argmax over probabilities reproduces the hard predictions
    pred_from_proba = clf.classes_.take(proba.argmax(axis=1), axis=0)
    assert_array_equal(pred_from_proba, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
    # Samples with zero weight must not influence the regression fit.
    X = [[1, 0], [1, 0], [1, 0], [0, 1]]
    y = [0, 0, 1, 0]
    # drop the first two samples from the fit via zero weights
    weights = [0, 0, 1, 1]
    for loss_name in ('huber', 'ls', 'lad', 'quantile'):
        reg = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2,
                                        loss=loss_name)
        reg.fit(X, y, sample_weight=weights)
        assert_greater(reg.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
    """Zero-weighted samples must be ignored when fitting a classifier.

    With the first two samples ignored, class 1 becomes the majority for
    the feature vector [1, 0], so the classifier must predict 1.
    """
    X = [[1, 0],
         [1, 0],
         [1, 0],
         [0, 1]]
    y = [0, 0, 1, 0]
    # ignore the first 2 training samples by setting their weight to 0
    sample_weight = [0, 0, 1, 1]
    for loss in ('deviance', 'exponential'):
        # BUG FIX: the loop variable was previously unused, so only the
        # default loss ('deviance') was ever exercised; pass it through.
        gb = GradientBoostingClassifier(loss=loss, n_estimators=5)
        gb.fit(X, y, sample_weight=sample_weight)
        assert_array_equal(gb.predict([[1, 0]]), [1])
def check_sparse_input(EstimatorClass, X, X_sparse, y):
    """Fit on dense vs. sparse input and check the models agree.

    Three fits are compared: dense data, sparse data with presort
    disabled, and sparse data with presort='auto'.
    """
    dense = EstimatorClass(n_estimators=10, random_state=0,
                           max_depth=2).fit(X, y)
    sparse = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
                            presort=False).fit(X_sparse, y)
    auto = EstimatorClass(n_estimators=10, random_state=0, max_depth=2,
                          presort='auto').fit(X_sparse, y)

    assert_array_almost_equal(sparse.apply(X), dense.apply(X))
    assert_array_almost_equal(sparse.predict(X), dense.predict(X))
    assert_array_almost_equal(sparse.feature_importances_,
                              dense.feature_importances_)

    assert_array_almost_equal(sparse.apply(X), auto.apply(X))
    assert_array_almost_equal(sparse.predict(X), auto.predict(X))
    assert_array_almost_equal(sparse.feature_importances_,
                              auto.feature_importances_)

    # BUG FIX: EstimatorClass is a class, not an instance, so the old
    # isinstance(...) check was always False and the probability
    # comparisons below never ran.  issubclass() is the correct test.
    if issubclass(EstimatorClass, GradientBoostingClassifier):
        assert_array_almost_equal(sparse.predict_proba(X),
                                  dense.predict_proba(X))
        assert_array_almost_equal(sparse.predict_log_proba(X),
                                  dense.predict_log_proba(X))

        assert_array_almost_equal(sparse.predict_proba(X),
                                  auto.predict_proba(X))
        assert_array_almost_equal(sparse.predict_log_proba(X),
                                  auto.predict_log_proba(X))
@skip_if_32bit
def test_sparse_input():
    # Note the swapped unpacking below: the 20-column binary
    # label-indicator matrix is used as a sparse-friendly feature matrix
    # X, while the single generated feature column serves as the target.
    y, X = datasets.make_multilabel_classification(random_state=0,
                                                   n_samples=50,
                                                   n_features=1,
                                                   n_classes=20)
    y = y[:, 0]

    estimator_classes = (GradientBoostingClassifier,
                         GradientBoostingRegressor)
    sparse_formats = (csr_matrix, csc_matrix, coo_matrix)
    for EstimatorClass, to_sparse in product(estimator_classes,
                                             sparse_formats):
        yield check_sparse_input, EstimatorClass, X, to_sparse(X), y
| bsd-3-clause |
rohanp/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 230 | 8281 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <robertlayton@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
                     random_state=None, **kwds):
    """Compute the mean Silhouette Coefficient of all samples.

    The coefficient per sample is ``(b - a) / max(a, b)``, where ``a`` is
    the mean intra-cluster distance and ``b`` the mean distance to the
    nearest cluster the sample is not a part of.  Scores lie in
    ``[-1, 1]``: values near 0 indicate overlapping clusters, negative
    values suggest samples assigned to the wrong cluster.  The score is
    only defined for ``2 <= n_labels <= n_samples - 1``.

    To obtain per-sample values, use :func:`silhouette_samples`.

    Read more in the :ref:`User Guide <silhouette_coefficient>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
            [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.

    labels : array, shape = [n_samples]
        Predicted labels for each sample.

    metric : string, or callable
        The metric to use when calculating distance between instances, as
        accepted by :func:`sklearn.metrics.pairwise.pairwise_distances`.
        If X is the distance array itself, use ``metric="precomputed"``.

    sample_size : int or None
        If not None, compute the coefficient on a random subset of this
        many samples.

    random_state : integer or numpy.RandomState, optional
        Generator used to select the subset when ``sample_size`` is not
        None.  An integer fixes the seed; defaults to the global numpy
        random number generator.

    `**kwds` : optional keyword parameters
        Passed directly to the distance function.

    Returns
    -------
    silhouette : float
        Mean Silhouette Coefficient for all samples.

    References
    ----------
    .. [1] Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
       Interpretation and Validation of Cluster Analysis". Computational
       and Applied Mathematics 20: 53-65.

    .. [2] http://en.wikipedia.org/wiki/Silhouette_(clustering)
    """
    n_labels = np.unique(labels).size
    n_samples = X.shape[0]
    if not 1 < n_labels < n_samples:
        raise ValueError("Number of labels is %d. Valid values are 2 "
                         "to n_samples - 1 (inclusive)" % n_labels)

    if sample_size is not None:
        rng = check_random_state(random_state)
        indices = rng.permutation(n_samples)[:sample_size]
        if metric == "precomputed":
            # subsample rows *and* columns of the distance matrix
            X = X[indices].T[indices].T
        else:
            X = X[indices]
        labels = labels[indices]
    return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
def silhouette_samples(X, labels, metric='euclidean', **kwds):
    """Compute the Silhouette Coefficient for each sample.

    For every sample the coefficient is ``(b - a) / max(a, b)``, with
    ``a`` the mean intra-cluster distance and ``b`` the mean
    nearest-cluster distance.  Values lie in ``[-1, 1]``; scores near 0
    indicate overlapping clusters.  Only defined for
    ``2 <= n_labels <= n_samples - 1``.

    Read more in the :ref:`User Guide <silhouette_coefficient>`.

    Parameters
    ----------
    X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
            [n_samples_a, n_features] otherwise
        Array of pairwise distances between samples, or a feature array.

    labels : array, shape = [n_samples]
        label values for each sample

    metric : string, or callable
        Metric accepted by
        :func:`sklearn.metrics.pairwise.pairwise_distances`, or
        "precomputed" if X is already a distance matrix.

    `**kwds` : optional keyword parameters
        Passed directly to the distance function.

    Returns
    -------
    silhouette : array, shape = [n_samples]
        Silhouette Coefficient for each samples.

    References
    ----------
    .. [1] Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
       Interpretation and Validation of Cluster Analysis". Computational
       and Applied Mathematics 20: 53-65.

    .. [2] http://en.wikipedia.org/wiki/Silhouette_(clustering)
    """
    distances = pairwise_distances(X, metric=metric, **kwds)
    n_samples = labels.shape[0]
    intra = np.array([_intra_cluster_distance(distances[i], labels, i)
                      for i in range(n_samples)])
    nearest = np.array([_nearest_cluster_distance(distances[i], labels, i)
                        for i in range(n_samples)])
    return (nearest - intra) / np.maximum(intra, nearest)
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
| bsd-3-clause |
yanlend/scikit-learn | setup.py | 76 | 9370 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = 'amueller@ais.uni-bonn.de'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
    """distutils `clean` command extended to scrub compiled artifacts."""

    description = "Remove build artifacts from the source tree"

    # suffixes of compiled extension / bytecode files to delete
    _ARTIFACT_SUFFIXES = ('.so', '.pyd', '.dll', '.pyc')

    def run(self):
        Clean.run(self)
        # drop the top-level build directory entirely
        if os.path.exists('build'):
            shutil.rmtree('build')
        # then purge compiled files and bytecode caches under sklearn/
        for dirpath, dirnames, filenames in os.walk('sklearn'):
            for filename in filenames:
                if filename.endswith(self._ARTIFACT_SUFFIXES):
                    os.unlink(os.path.join(dirpath, filename))
            for dirname in dirnames:
                if dirname == '__pycache__':
                    shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils Configuration used by setup()."""
    # NOTE(review): presumably a stale MANIFEST from an earlier build
    # would otherwise be reused by distutils -- confirm.
    if os.path.exists('MANIFEST'):
        os.remove('MANIFEST')

    from numpy.distutils.misc_util import Configuration

    config = Configuration(None, parent_package, top_path)

    # Avoid non-useful msg:
    # "Ignoring attempt to set 'name' (from ... "
    config.set_options(ignore_setup_xxx_py=True,
                       assume_default_configuration=True,
                       delegate_options_to_subpackages=True,
                       quiet=True)

    # recursively picks up all compiled sub-packages under sklearn/
    config.add_subpackage('sklearn')

    return config
# Minimum versions of the numerical dependencies, enforced by
# get_scipy_status() / get_numpy_status() below.
scipy_min_version = '0.9'
numpy_min_version = '1.6.1'
def get_scipy_status():
    """
    Return a dict describing the installed SciPy: 'up_to_date' is True
    when SciPy is importable and at least scipy_min_version; 'version'
    is the installed version string ('' when SciPy is missing).
    """
    status = {'up_to_date': False, 'version': ""}
    try:
        import scipy
    except ImportError:
        return status
    status['version'] = scipy.__version__
    status['up_to_date'] = (parse_version(scipy.__version__)
                            >= parse_version(scipy_min_version))
    return status
def get_numpy_status():
    """
    Return a dict describing the installed NumPy: 'up_to_date' is True
    when NumPy is importable and at least numpy_min_version; 'version'
    is the installed version string ('' when NumPy is missing).
    """
    status = {'up_to_date': False, 'version': ""}
    try:
        import numpy
    except ImportError:
        return status
    status['version'] = numpy.__version__
    status['up_to_date'] = (parse_version(numpy.__version__)
                            >= parse_version(numpy_min_version))
    return status
def setup_package():
    """Assemble package metadata, validate NumPy/SciPy, and run setup()."""
    metadata = dict(name=DISTNAME,
                    maintainer=MAINTAINER,
                    maintainer_email=MAINTAINER_EMAIL,
                    description=DESCRIPTION,
                    license=LICENSE,
                    url=URL,
                    version=VERSION,
                    download_url=DOWNLOAD_URL,
                    long_description=LONG_DESCRIPTION,
                    classifiers=['Intended Audience :: Science/Research',
                                 'Intended Audience :: Developers',
                                 'License :: OSI Approved',
                                 'Programming Language :: C',
                                 'Programming Language :: Python',
                                 'Topic :: Software Development',
                                 'Topic :: Scientific/Engineering',
                                 'Operating System :: Microsoft :: Windows',
                                 'Operating System :: POSIX',
                                 'Operating System :: Unix',
                                 'Operating System :: MacOS',
                                 'Programming Language :: Python :: 2',
                                 'Programming Language :: Python :: 2.6',
                                 'Programming Language :: Python :: 2.7',
                                 'Programming Language :: Python :: 3',
                                 'Programming Language :: Python :: 3.3',
                                 'Programming Language :: Python :: 3.4',
                                 ],
                    cmdclass=cmdclass,
                    **extra_setuptools_args)

    if (len(sys.argv) >= 2
            and ('--help' in sys.argv[1:] or sys.argv[1]
                 in ('--help-commands', 'egg_info', '--version', 'clean'))):
        # For these actions, NumPy is not required.
        #
        # They are required to succeed without Numpy for example when
        # pip is used to install Scikit-learn when Numpy is not yet present in
        # the system.
        try:
            from setuptools import setup
        except ImportError:
            from distutils.core import setup

        metadata['version'] = VERSION
    else:
        # A full build needs NumPy and SciPy; fail early with clear messages.
        numpy_status = get_numpy_status()
        numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
            numpy_min_version)
        scipy_status = get_scipy_status()
        scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
            scipy_min_version)

        instructions = ("Installation instructions are available on the "
                        "scikit-learn website: "
                        "http://scikit-learn.org/stable/install.html\n")

        if numpy_status['up_to_date'] is False:
            if numpy_status['version']:
                raise ImportError("Your installation of Numerical Python "
                                  "(NumPy) {0} is out-of-date.\n{1}{2}"
                                  .format(numpy_status['version'],
                                          numpy_req_str, instructions))
            else:
                raise ImportError("Numerical Python (NumPy) is not "
                                  "installed.\n{0}{1}"
                                  .format(numpy_req_str, instructions))
        if scipy_status['up_to_date'] is False:
            if scipy_status['version']:
                raise ImportError("Your installation of Scientific Python "
                                  "(SciPy) {0} is out-of-date.\n{1}{2}"
                                  .format(scipy_status['version'],
                                          scipy_req_str, instructions))
            else:
                raise ImportError("Scientific Python (SciPy) is not "
                                  "installed.\n{0}{1}"
                                  .format(scipy_req_str, instructions))

        # numpy.distutils drives the compiled-extension build
        from numpy.distutils.core import setup

        metadata['configuration'] = configuration

    setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
aswolf/xmeos | xmeos/test/test_models_composite.py | 1 | 40896 | import numpy as np
import xmeos
from xmeos import models
from xmeos.models import core
import pytest
import matplotlib.pyplot as plt
import matplotlib as mpl
from abc import ABCMeta, abstractmethod
import copy
import test_models
#====================================================================
# Define "slow" tests
# - indicated by @slow decorator
# - slow tests are run only if using --runslow cmd line arg
#====================================================================
slow = pytest.mark.skipif(
not pytest.config.getoption("--runslow"),
reason="need --runslow option to run"
)
#====================================================================
class TestMieGruneisenEos(test_models.BaseTestEos):
    """Consistency tests for the Mie-Grueneisen EOS model.

    Each public test exercises a thermal sub-model ('Debye', 'Einstein',
    'ConstHeatCap') along an isothermal ('T') or isentropic ('S')
    compression path; the shared _calc_test_* helpers perform the
    numerical consistency checks.

    Fixes relative to the previous version:
    * np.str (alias removed in NumPy >= 1.24) replaced by builtin str;
    * NotImplementedError message in _calc_test_press referenced an
      undefined name, raising NameError instead;
    * copy-pasted assertion message corrected in the entropy-path check.
    """

    def load_eos(self, kind_thermal='Debye', kind_gamma='GammaPowLaw',
                 kind_compress='Vinet', compress_path_const='T', natom=1):
        # Fresh model instance for the requested sub-model kinds.
        eos_mod = models.MieGruneisenEos(
            kind_thermal=kind_thermal, kind_gamma=kind_gamma,
            kind_compress=kind_compress,
            compress_path_const=compress_path_const, natom=natom)
        return eos_mod

    def test_heat_capacity_T(self):
        self._calc_test_heat_capacity(compress_path_const='T',
                                      kind_thermal='Debye')
        self._calc_test_heat_capacity(compress_path_const='T',
                                      kind_thermal='Einstein')
        self._calc_test_heat_capacity(compress_path_const='T',
                                      kind_thermal='ConstHeatCap')

    def test_heat_capacity_S(self):
        self._calc_test_heat_capacity(compress_path_const='S',
                                      kind_thermal='Debye')
        self._calc_test_heat_capacity(compress_path_const='S',
                                      kind_thermal='Einstein')
        self._calc_test_heat_capacity(compress_path_const='S',
                                      kind_thermal='ConstHeatCap')

    def _calc_test_heat_capacity(self, kind_thermal='Debye',
                                 kind_gamma='GammaPowLaw',
                                 kind_compress='Vinet',
                                 compress_path_const='T', natom=1):
        # Verify Cv == d(E_thermal)/dT by numerical differentiation.
        TOL = 1e-3
        Nsamp = 10001
        eos_mod = self.load_eos(kind_thermal=kind_thermal,
                                kind_gamma=kind_gamma,
                                kind_compress=kind_compress,
                                compress_path_const=compress_path_const,
                                natom=natom)
        Tmod_a = np.linspace(300.0, 3000.0, Nsamp)
        V0, = eos_mod.get_param_values(param_names=['V0'])
        # Vmod_a = V0*(0.6+.5*np.random.rand(Nsamp))
        Vmod = V0*0.9
        thermal_energy_a = eos_mod.thermal_energy(Vmod, Tmod_a)
        heat_capacity_a = eos_mod.heat_capacity(Vmod, Tmod_a)
        abs_err, rel_err, range_err = self.numerical_deriv(
            Tmod_a, thermal_energy_a, heat_capacity_a, scale=1)
        # smoke-check that the thermal calculator exposes its Cv limit
        Cvlimfac = eos_mod.calculators['thermal']._get_Cv_limit()
        assert rel_err < TOL, 'rel-error in Cv, ' + str(rel_err) + \
            ', must be less than TOL, ' + str(TOL)

    def test_vol_T(self):
        self._calc_test_vol(compress_path_const='T', kind_thermal='Debye')
        self._calc_test_vol(compress_path_const='T', kind_thermal='Einstein')
        self._calc_test_vol(compress_path_const='T',
                            kind_thermal='ConstHeatCap')

    def test_vol_S(self):
        self._calc_test_vol(compress_path_const='S', kind_thermal='Debye')
        self._calc_test_vol(compress_path_const='S', kind_thermal='Einstein')
        self._calc_test_vol(compress_path_const='S',
                            kind_thermal='ConstHeatCap')

    def _calc_test_vol(self, kind_thermal='Debye', kind_gamma='GammaPowLaw',
                       kind_compress='Vinet', compress_path_const='T',
                       natom=1):
        # Round-trip volume -> pressure -> volume at random temperatures.
        TOL = 1e-3
        Nsamp = 101
        eos_mod = self.load_eos(kind_thermal=kind_thermal,
                                kind_gamma=kind_gamma,
                                kind_compress=kind_compress,
                                compress_path_const=compress_path_const,
                                natom=natom)
        V0, = eos_mod.get_param_values(param_names='V0')
        Vmod_a = np.linspace(.7, 1.2, Nsamp)*V0
        T = 1000 + 2000*np.random.rand(Nsamp)
        P_a = eos_mod.press(Vmod_a, T)
        Vinfer_a = eos_mod.volume(P_a, T)
        rel_err = np.abs((Vinfer_a - Vmod_a)/V0)
        assert np.all(rel_err < TOL), \
            'relative error in volume, ' + str(rel_err) + \
            ', must be less than TOL, ' + str(TOL)

    def test_press_T(self):
        self._calc_test_press(compress_path_const='T', kind_thermal='Debye')
        self._calc_test_press(compress_path_const='T', kind_thermal='Einstein')

    @pytest.mark.xfail
    def test_press_T_ConstHeatCap(self):
        print()
        print('ConstHeatCap is not thermodynamically consistent with '
              'arbitrary GammaMod and cannot pass the isothermal press test.')
        print()
        self._calc_test_press(compress_path_const='T',
                              kind_thermal='ConstHeatCap')

    def test_press_S(self):
        self._calc_test_press(compress_path_const='S', kind_thermal='Debye')
        self._calc_test_press(compress_path_const='S', kind_thermal='Einstein')
        self._calc_test_press(compress_path_const='S',
                              kind_thermal='ConstHeatCap')

    def _calc_test_press(self, kind_thermal='Debye', kind_gamma='GammaPowLaw',
                         kind_compress='Vinet', compress_path_const='T',
                         natom=1):
        # Verify P == -dF/dV (isothermal) or P == -dE/dV (isentropic).
        TOL = 1e-3
        Nsamp = 10001
        eos_mod = self.load_eos(kind_thermal=kind_thermal,
                                kind_gamma=kind_gamma,
                                kind_compress=kind_compress,
                                compress_path_const=compress_path_const,
                                natom=natom)
        V0, = eos_mod.get_param_values(param_names='V0')
        Vmod_a = np.linspace(.7, 1.2, Nsamp)*V0
        T = 4000
        dV = Vmod_a[1] - Vmod_a[0]
        Tref_path, theta_ref = eos_mod.ref_temp_path(Vmod_a)
        if compress_path_const == 'T':
            P_a = eos_mod.press(Vmod_a, T)
            F_a = eos_mod.helmholtz_energy(Vmod_a, T)
            abs_err, rel_err, range_err = self.numerical_deriv(
                Vmod_a, F_a, P_a, scale=-core.CONSTS['PV_ratio'])
        elif compress_path_const == 'S':
            P_a = eos_mod.press(Vmod_a, Tref_path)
            E_a = eos_mod.internal_energy(Vmod_a, Tref_path)
            abs_err, rel_err, range_err = self.numerical_deriv(
                Vmod_a, E_a, P_a, scale=-core.CONSTS['PV_ratio'])
        else:
            # BUG FIX: previously referenced the undefined name
            # `path_const`, raising NameError instead of the intended
            # NotImplementedError.
            raise NotImplementedError(
                'path_const ' + compress_path_const +
                ' is not valid for CompressEos.')
        assert range_err < TOL, 'range error in Press, ' + str(range_err) + \
            ', must be less than TOL, ' + str(TOL)

    def test_thermal_press_T(self):
        self._calc_test_thermal_press(compress_path_const='T',
                                      kind_thermal='Debye')
        self._calc_test_thermal_press(compress_path_const='T',
                                      kind_thermal='Einstein')
        self._calc_test_thermal_press(compress_path_const='T',
                                      kind_thermal='ConstHeatCap')

    def test_thermal_press_S(self):
        self._calc_test_thermal_press(compress_path_const='S',
                                      kind_thermal='Debye')
        self._calc_test_thermal_press(compress_path_const='S',
                                      kind_thermal='Einstein')
        self._calc_test_thermal_press(compress_path_const='S',
                                      kind_thermal='ConstHeatCap')

    def _calc_test_thermal_press(self, kind_thermal='Debye',
                                 kind_gamma='GammaPowLaw',
                                 kind_compress='Vinet',
                                 compress_path_const='S', natom=1):
        # Along the reference temperature path the thermal pressure
        # contribution must vanish.
        TOL = 1e-3
        Nsamp = 10001
        eos_mod = self.load_eos(kind_thermal=kind_thermal,
                                kind_gamma=kind_gamma,
                                kind_compress=kind_compress,
                                compress_path_const=compress_path_const,
                                natom=natom)
        refstate_calc = eos_mod.calculators['refstate']
        T0 = refstate_calc.ref_temp()
        V0 = eos_mod.get_param_values(param_names='V0')
        Vmod_a = np.linspace(.7, 1.2, Nsamp)*V0
        dV = Vmod_a[1] - Vmod_a[0]
        Tref_path, theta_ref = eos_mod.ref_temp_path(Vmod_a)
        P_therm = eos_mod.thermal_press(Vmod_a, Tref_path)
        assert np.all(np.abs(P_therm) < TOL), 'Thermal press should be zero'

    def test_thermal_energy_T(self):
        self._calc_test_thermal_energy(compress_path_const='T',
                                       kind_thermal='Debye')
        self._calc_test_thermal_energy(compress_path_const='T',
                                       kind_thermal='Einstein')
        self._calc_test_thermal_energy(compress_path_const='T',
                                       kind_thermal='ConstHeatCap')

    def test_thermal_energy_S(self):
        self._calc_test_thermal_energy(compress_path_const='S',
                                       kind_thermal='Debye')
        self._calc_test_thermal_energy(compress_path_const='S',
                                       kind_thermal='Einstein')
        self._calc_test_thermal_energy(compress_path_const='S',
                                       kind_thermal='ConstHeatCap')

    def _calc_test_thermal_energy(self, kind_thermal='Debye',
                                  kind_gamma='GammaPowLaw',
                                  kind_compress='Vinet',
                                  compress_path_const='S', natom=1):
        # Along the reference temperature path the thermal energy
        # contribution must vanish.
        TOL = 1e-3
        Nsamp = 10001
        eos_mod = self.load_eos(kind_thermal=kind_thermal,
                                kind_gamma=kind_gamma,
                                kind_compress=kind_compress,
                                compress_path_const=compress_path_const,
                                natom=natom)
        refstate_calc = eos_mod.calculators['refstate']
        T0 = refstate_calc.ref_temp()
        V0 = eos_mod.get_param_values(param_names='V0')
        Vmod_a = np.linspace(.7, 1.2, Nsamp)*V0
        dV = Vmod_a[1] - Vmod_a[0]
        Tref_path, theta_ref = eos_mod.ref_temp_path(Vmod_a)
        E_therm = eos_mod.thermal_energy(Vmod_a, Tref_path)
        assert np.all(np.abs(E_therm) < TOL), 'Thermal energy should be zero'

    def test_ref_entropy_path_S(self):
        self._calc_test_ref_entropy_path(compress_path_const='S',
                                         kind_thermal='Debye')
        self._calc_test_ref_entropy_path(compress_path_const='S',
                                         kind_thermal='Einstein')
        self._calc_test_ref_entropy_path(compress_path_const='S',
                                         kind_thermal='ConstHeatCap')

    def test_ref_entropy_path_T(self):
        self._calc_test_ref_entropy_path(compress_path_const='T',
                                         kind_thermal='Debye')
        self._calc_test_ref_entropy_path(compress_path_const='T',
                                         kind_thermal='Einstein')
        self._calc_test_ref_entropy_path(compress_path_const='T',
                                         kind_thermal='ConstHeatCap')

    def _calc_test_ref_entropy_path(self, kind_thermal='Debye',
                                    kind_gamma='GammaPowLaw',
                                    kind_compress='Vinet',
                                    compress_path_const='S', natom=1):
        # Entropy along the reference temperature path must equal S0.
        TOL = 1e-3
        Nsamp = 10001
        eos_mod = self.load_eos(kind_thermal=kind_thermal,
                                kind_gamma=kind_gamma,
                                kind_compress=kind_compress,
                                compress_path_const=compress_path_const,
                                natom=natom)
        refstate_calc = eos_mod.calculators['refstate']
        T0 = refstate_calc.ref_temp()
        V0, S0 = eos_mod.get_param_values(param_names=['V0', 'S0'])
        Vmod_a = np.linspace(.7, 1.2, Nsamp)*V0
        dV = Vmod_a[1] - Vmod_a[0]
        Tref_path, theta_ref = eos_mod.ref_temp_path(Vmod_a)
        Sref_path = eos_mod.entropy(Vmod_a, Tref_path)
        # FIX: the old message ('Thermal energy should be zero') was
        # copy-pasted from the energy test and did not describe this check.
        assert np.all(np.abs(Sref_path - S0) < TOL), \
            'Entropy along the reference path should equal S0'

    def test_ref_temp_path_T(self):
        self._calc_test_ref_temp_path(compress_path_const='T',
                                      kind_thermal='Debye')
        self._calc_test_ref_temp_path(compress_path_const='T',
                                      kind_thermal='Einstein')
        self._calc_test_ref_temp_path(compress_path_const='T',
                                      kind_thermal='ConstHeatCap')

    def test_ref_temp_path_S(self):
        self._calc_test_ref_temp_path(compress_path_const='S',
                                      kind_thermal='Debye')
        self._calc_test_ref_temp_path(compress_path_const='S',
                                      kind_thermal='Einstein')
        self._calc_test_ref_temp_path(compress_path_const='S',
                                      kind_thermal='ConstHeatCap')

    def _calc_test_ref_temp_path(self, kind_thermal='Debye',
                                 kind_gamma='GammaPowLaw',
                                 kind_compress='Vinet',
                                 compress_path_const='T', natom=1):
        # The reference temperature path is constant (== T0) for
        # isothermal models and follows the gamma-derived adiabat for
        # isentropic ones.  (Removed an unused TOL local.)
        Nsamp = 10001
        eos_mod = self.load_eos(kind_thermal=kind_thermal,
                                kind_gamma=kind_gamma,
                                kind_compress=kind_compress,
                                compress_path_const=compress_path_const,
                                natom=natom)
        refstate_calc = eos_mod.calculators['refstate']
        T0 = refstate_calc.ref_temp()
        V0 = eos_mod.get_param_values(param_names='V0')
        Vmod_a = np.linspace(.7, 1.2, Nsamp)*V0
        Tref_path, theta_ref = eos_mod.ref_temp_path(Vmod_a)
        if compress_path_const == 'T':
            assert np.all(Tref_path == T0), 'Thermal path should be constant'
        if compress_path_const == 'S':
            gamma_calc = eos_mod.calculators['gamma']
            Tpath_a = gamma_calc._calc_temp(Vmod_a, T0=T0)
            assert np.all(Tref_path == Tpath_a), \
                'Thermal path should be along gamma-derived path'
#====================================================================
class TestRTPolyEos(test_models.BaseTestEos):
    """Tests for the Rosenfeld-Tarazona polynomial EOS model.

    Fix relative to the previous version: np.str (alias removed in
    NumPy >= 1.24) replaced by the builtin str in assertion messages.
    """

    def load_eos(self, kind_compress='Vinet', compress_order=3,
                 compress_path_const='T', kind_RTpoly='V', RTpoly_order=5,
                 natom=1):
        # NOTE(review): compress_order is accepted here but not forwarded
        # to models.RTPolyEos -- confirm whether it should be passed on.
        eos_mod = models.RTPolyEos(
            kind_compress=kind_compress,
            compress_path_const=compress_path_const,
            kind_RTpoly=kind_RTpoly, RTpoly_order=RTpoly_order, natom=natom)
        return eos_mod

    def test_RTcoefs(self, kind_compress='Vinet', compress_order=3,
                     compress_path_const='T', kind_RTpoly='V',
                     RTpoly_order=5, natom=1):
        # The analytic RT coefficient derivatives must match numerical
        # differentiation of the coefficients themselves.
        TOL = 1e-3
        Nsamp = 10001
        eos_mod = self.load_eos(kind_compress=kind_compress,
                                compress_order=compress_order,
                                compress_path_const=compress_path_const,
                                kind_RTpoly=kind_RTpoly,
                                RTpoly_order=RTpoly_order,
                                natom=natom)
        V0, = eos_mod.get_param_values(param_names='V0')
        Vmod_a = np.linspace(.5, 1.2, Nsamp)*V0
        dV = Vmod_a[1] - Vmod_a[0]
        acoef_a, bcoef_a = eos_mod.calc_RTcoefs(Vmod_a)
        acoef_deriv_a, bcoef_deriv_a = eos_mod.calc_RTcoefs_deriv(Vmod_a)
        a_abs_err, a_rel_err, a_range_err = self.numerical_deriv(
            Vmod_a, acoef_a, acoef_deriv_a, scale=1)
        b_abs_err, b_rel_err, b_range_err = self.numerical_deriv(
            Vmod_a, bcoef_a, bcoef_deriv_a, scale=1)
        assert a_range_err < TOL, 'range error in acoef, ' + \
            str(a_range_err) + ', must be less than TOL, ' + str(TOL)
        assert b_range_err < TOL, 'range error in bcoef, ' + \
            str(b_range_err) + ', must be less than TOL, ' + str(TOL)

    def test_heat_capacity_T(self):
        self._calc_test_heat_capacity(compress_path_const='T', RTpoly_order=5)

    def _calc_test_heat_capacity(self, kind_compress='Vinet',
                                 compress_order=3, compress_path_const='T',
                                 kind_RTpoly='V', RTpoly_order=5, natom=1):
        # Verify Cv == d(E_thermal)/dT by numerical differentiation.
        TOL = 1e-3
        Nsamp = 10001
        eos_mod = self.load_eos(kind_compress=kind_compress,
                                compress_order=compress_order,
                                compress_path_const=compress_path_const,
                                kind_RTpoly=kind_RTpoly,
                                RTpoly_order=RTpoly_order,
                                natom=natom)
        Tmod_a = np.linspace(300.0, 3000.0, Nsamp)
        V0, = eos_mod.get_param_values(param_names=['V0'])
        # Vmod = V0*(0.6+.5*np.random.rand(Nsamp))
        Vmod = V0*0.7
        thermal_energy_a = eos_mod.thermal_energy(Vmod, Tmod_a)
        heat_capacity_a = eos_mod.heat_capacity(Vmod, Tmod_a)
        abs_err, rel_err, range_err = self.numerical_deriv(
            Tmod_a, thermal_energy_a, heat_capacity_a, scale=1)
        # smoke-check that the thermal calculator exposes its Cv limit
        Cvlimfac = eos_mod.calculators['thermal']._get_Cv_limit()
        assert rel_err < TOL, 'rel-error in Cv, ' + str(rel_err) + \
            ', must be less than TOL, ' + str(TOL)
#====================================================================
class TestRTPressEos(test_models.BaseTestEos):
    """Tests for the RTPressEos model.

    Fixes relative to the original:
      * np.str (removed in NumPy 2.0) replaced with builtin str throughout.
      * _calc_test_press referenced the undefined name `path_const` in its
        error branch, and its assertion message reported range_err while
        asserting on abs_err.
    """

    def load_eos(self, kind_compress='Vinet', compress_path_const='T',
                 kind_gamma='GammaFiniteStrain', kind_RTpoly='V',
                 RTpoly_order=5, natom=1, kind_electronic='None',
                 apply_electronic=False):
        """Construct an RTPressEos model for testing."""
        eos_mod = models.RTPressEos(
            kind_compress=kind_compress,
            compress_path_const=compress_path_const,
            kind_gamma=kind_gamma, kind_RTpoly=kind_RTpoly,
            apply_electronic=apply_electronic, kind_electronic=kind_electronic,
            RTpoly_order=RTpoly_order, natom=natom)
        return eos_mod

    def test_apply_elec(self, kind_compress='Vinet', compress_path_const='T',
                        kind_gamma='GammaFiniteStrain', kind_RTpoly='V',
                        RTpoly_order=5, natom=1):
        """The electronic term must add positive entropy at high temperature."""
        Nsamp = 10001
        eos_mod = self.load_eos(kind_compress=kind_compress,
                                compress_path_const=compress_path_const,
                                kind_gamma=kind_gamma, kind_RTpoly=kind_RTpoly,
                                RTpoly_order=RTpoly_order, natom=natom)
        eos_mod_elec = self.load_eos(kind_compress=kind_compress,
                                     compress_path_const=compress_path_const,
                                     kind_gamma=kind_gamma,
                                     kind_RTpoly=kind_RTpoly,
                                     RTpoly_order=RTpoly_order, natom=natom,
                                     kind_electronic='CvPowLaw',
                                     apply_electronic=True)
        V0, = eos_mod.get_param_values(param_names='V0')
        Vmod_a = np.linspace(.5, 1.2, Nsamp)*V0
        T = 8000
        dS_elec = eos_mod_elec.entropy(Vmod_a, T) - eos_mod.entropy(Vmod_a, T)
        assert np.all(dS_elec > 0), (
            'Electronic contribution to entropy must be positive at high temp.'
        )

    def test_RTcoefs(self, kind_compress='Vinet', compress_path_const='T',
                     kind_gamma='GammaFiniteStrain',
                     RTpoly_order=5, natom=1):
        """Run coefficient-derivative checks for each RTpoly flavor."""
        self.calc_test_RTcoefs(kind_compress=kind_compress,
                               compress_path_const=compress_path_const,
                               kind_gamma=kind_gamma, kind_RTpoly='V',
                               RTpoly_order=RTpoly_order, natom=natom)
        self.calc_test_RTcoefs(kind_compress=kind_compress,
                               compress_path_const=compress_path_const,
                               kind_gamma=kind_gamma, kind_RTpoly='logV',
                               RTpoly_order=RTpoly_order, natom=natom)
        self.calc_test_RTcoefs(kind_compress=kind_compress,
                               compress_path_const=compress_path_const,
                               kind_gamma=kind_gamma, kind_RTpoly='V',
                               RTpoly_order=RTpoly_order, natom=natom,
                               kind_electronic='CvPowLaw',
                               apply_electronic=True)

    def calc_test_RTcoefs(self, kind_compress='Vinet',
                          compress_path_const='T',
                          kind_gamma='GammaFiniteStrain', kind_RTpoly='V',
                          RTpoly_order=5, natom=1, kind_electronic='None',
                          apply_electronic=False):
        """Analytic bcoef derivative must match the numerical derivative."""
        TOL = 1e-3
        Nsamp = 10001
        eos_mod = self.load_eos(kind_compress=kind_compress,
                                compress_path_const=compress_path_const,
                                kind_gamma=kind_gamma, kind_RTpoly=kind_RTpoly,
                                RTpoly_order=RTpoly_order, natom=natom,
                                kind_electronic=kind_electronic,
                                apply_electronic=apply_electronic)
        V0, = eos_mod.get_param_values(param_names='V0')
        Vmod_a = np.linspace(.5, 1.2, Nsamp)*V0
        bcoef_a = eos_mod.calc_RTcoefs(Vmod_a)
        bcoef_deriv_a = eos_mod.calc_RTcoefs_deriv(Vmod_a)
        b_abs_err, b_rel_err, b_range_err = self.numerical_deriv(
            Vmod_a, bcoef_a, bcoef_deriv_a, scale=1)
        assert b_range_err < TOL, 'range error in bcoef, ' + \
            str(b_range_err) + ', must be less than TOL, ' + str(TOL)

    def test_heat_capacity_T(self):
        self._calc_test_heat_capacity(compress_path_const='T', RTpoly_order=5)
        self._calc_test_heat_capacity(compress_path_const='T', RTpoly_order=5,
                                      kind_electronic='CvPowLaw',
                                      apply_electronic=True)

    def _calc_test_heat_capacity(self, kind_compress='Vinet',
                                 compress_path_const='T',
                                 kind_gamma='GammaFiniteStrain',
                                 kind_RTpoly='V', RTpoly_order=5, natom=1,
                                 kind_electronic='None',
                                 apply_electronic=False):
        """Cv must equal the numerical T-derivative of the thermal energy."""
        TOL = 1e-3
        Nsamp = 10001
        eos_mod = self.load_eos(kind_compress=kind_compress,
                                compress_path_const=compress_path_const,
                                kind_gamma=kind_gamma, kind_RTpoly=kind_RTpoly,
                                RTpoly_order=RTpoly_order, natom=natom,
                                kind_electronic=kind_electronic,
                                apply_electronic=apply_electronic)
        Tmod_a = np.linspace(3000.0, 8000.0, Nsamp)
        V0, = eos_mod.get_param_values(param_names=['V0'])
        Vmod = V0*0.7
        thermal_energy_a = eos_mod.thermal_energy(Vmod, Tmod_a)
        heat_capacity_a = eos_mod.heat_capacity(Vmod, Tmod_a)
        abs_err, rel_err, range_err = self.numerical_deriv(
            Tmod_a, thermal_energy_a, heat_capacity_a, scale=1)
        assert rel_err < TOL, 'rel-error in Cv, ' + str(rel_err) + \
            ', must be less than TOL, ' + str(TOL)

    def test_gamma(self, kind_compress='Vinet', compress_path_const='T',
                   kind_gamma='GammaFiniteStrain', kind_RTpoly='logV',
                   RTpoly_order=5, natom=1):
        """Thermal pressure implied by gamma must match numerical dP/dT."""
        TOL = 1e-3
        Nsamp = 10001
        eos_mod = self.load_eos(kind_compress=kind_compress,
                                compress_path_const=compress_path_const,
                                kind_gamma=kind_gamma, kind_RTpoly=kind_RTpoly,
                                RTpoly_order=RTpoly_order, natom=natom)
        V0 = eos_mod.get_params()['V0']
        Vmod_a = np.linspace(.7, 1.2, Nsamp)*V0
        T = 2000
        gamma_a = eos_mod.gamma(Vmod_a, T)
        CV_a = eos_mod.heat_capacity(Vmod_a, T)
        KT_a = eos_mod.bulk_mod(Vmod_a, T)
        # alpha = 1/V*dVdT_P = -1/V*dVdP_T*dPdT_V = -1/K_T*dPdT_V
        alpha_a = models.CONSTS['PV_ratio']*gamma_a/Vmod_a*CV_a/KT_a
        dPdT_a = alpha_a*KT_a
        dT = 10
        dPdT_num = (eos_mod.press(Vmod_a, T+dT/2) -
                    eos_mod.press(Vmod_a, T-dT/2))/dT
        range_err = np.max(np.abs(
            (dPdT_a-dPdT_num)/(np.max(dPdT_a)-np.min(dPdT_a))
        ))
        assert range_err < TOL, 'Thermal press calculated from gamma does not match numerical value'

    def _test_press_T(self):
        # Leading underscore keeps this out of the collected test suite.
        self._calc_test_press(kind_RTpoly='V')
        self._calc_test_press(kind_RTpoly='logV')
        self._calc_test_press(kind_RTpoly='V', kind_compress='BirchMurn3')
        self._calc_test_press(kind_RTpoly='logV', kind_compress='BirchMurn3')
        self._calc_test_press(kind_RTpoly='V', kind_gamma='GammaPowLaw')
        self._calc_test_press(kind_RTpoly='logV', kind_gamma='GammaPowLaw')
        self._calc_test_press(kind_RTpoly='V', kind_gamma='GammaPowLaw',
                              kind_electronic='CvPowLaw', apply_electronic=True)

    def _calc_test_press(self, kind_compress='Vinet',
                         compress_path_const='T',
                         kind_gamma='GammaFiniteStrain',
                         kind_RTpoly='V', RTpoly_order=5, natom=1,
                         kind_electronic='None',
                         apply_electronic=False):
        """Pressure must match the numerical V-derivative of the potential."""
        TOL = 1e-3
        Nsamp = 10001
        eos_mod = self.load_eos(kind_compress=kind_compress,
                                compress_path_const=compress_path_const,
                                kind_gamma=kind_gamma, kind_RTpoly=kind_RTpoly,
                                RTpoly_order=RTpoly_order, natom=natom,
                                kind_electronic=kind_electronic,
                                apply_electronic=apply_electronic)
        refstate_calc = eos_mod.calculators['refstate']
        V0 = refstate_calc.ref_volume()
        Vmod_a = np.linspace(.7, 1.2, Nsamp)*V0
        T = 7000
        Tref_path = eos_mod.ref_temp_adiabat(Vmod_a)
        if compress_path_const == 'T':
            # Isothermal path: P = -dF/dV at constant T.
            P_a = eos_mod.press(Vmod_a, T)
            F_a = eos_mod.helmholtz_energy(Vmod_a, T)
            abs_err, rel_err, range_err = self.numerical_deriv(
                Vmod_a, F_a, P_a, scale=-core.CONSTS['PV_ratio'])
        elif compress_path_const == 'S':
            # Adiabatic path: P = -dE/dV at constant S.
            P_a = eos_mod.press(Vmod_a, Tref_path)
            E_a = eos_mod.internal_energy(Vmod_a, Tref_path)
            abs_err, rel_err, range_err = self.numerical_deriv(
                Vmod_a, E_a, P_a, scale=-core.CONSTS['PV_ratio'])
        else:
            # BUGFIX: previously referenced the undefined name `path_const`,
            # which raised NameError instead of the intended error.
            raise NotImplementedError(
                'path_const ' + compress_path_const +
                ' is not valid for CompressEos.')
        # BUGFIX: message previously reported range_err while asserting on
        # abs_err.
        assert abs_err < TOL, ('abs error in Press, ' + str(abs_err) +
                               ', must be less than TOL, ' + str(TOL))

    def test_press_simple(self, kind_compress='Vinet',
                          compress_path_const='T',
                          kind_gamma='GammaFiniteStrain',
                          kind_RTpoly='V', RTpoly_order=5, natom=1,
                          kind_electronic='CvPowLaw', apply_electronic=True):
        """P = -dF/dV along an isotherm, including electronic terms."""
        TOL = 1e-3
        Nsamp = 10001
        eos_mod = self.load_eos(kind_compress=kind_compress,
                                compress_path_const=compress_path_const,
                                kind_gamma=kind_gamma, kind_RTpoly=kind_RTpoly,
                                RTpoly_order=RTpoly_order, natom=natom,
                                kind_electronic=kind_electronic,
                                apply_electronic=apply_electronic)
        refstate_calc = eos_mod.calculators['refstate']
        V0 = refstate_calc.ref_volume()
        Vmod_a = np.linspace(.7, 1.2, Nsamp)*V0
        T = 8000
        P_a = eos_mod.press(Vmod_a, T)
        F_a = eos_mod.helmholtz_energy(Vmod_a, T)
        abs_err, rel_err, range_err = self.numerical_deriv(
            Vmod_a, F_a, P_a, scale=-core.CONSTS['PV_ratio'])
        assert abs_err < TOL, ('abs error in Press, ' + str(abs_err) +
                               ', must be less than TOL, ' + str(TOL))

    def test_adiabatic_path(self):
        self._calc_test_adiabatic_path()
        self._calc_test_adiabatic_path(kind_electronic='CvPowLaw',
                                       apply_electronic=True)

    def _calc_test_adiabatic_path(self, kind_compress='Vinet',
                                  compress_path_const='T',
                                  kind_gamma='GammaFiniteStrain',
                                  kind_RTpoly='V', RTpoly_order=5, natom=1,
                                  kind_electronic='None',
                                  apply_electronic=False):
        """Entropy must be constant along a computed adiabat."""
        TOL = 1e-3
        eos_mod = self.load_eos(kind_compress=kind_compress,
                                compress_path_const=compress_path_const,
                                kind_gamma=kind_gamma, kind_RTpoly=kind_RTpoly,
                                RTpoly_order=RTpoly_order, natom=natom,
                                kind_electronic=kind_electronic,
                                apply_electronic=apply_electronic)
        P_a = np.linspace(0, 150, 101)
        Tfoot = 10000
        V_ad, T_ad = eos_mod.adiabatic_path(Tfoot, P_a)
        S = eos_mod.entropy(V_ad, T_ad)
        dS = S - np.mean(S)
        assert np.all(np.abs(dS) < TOL), (
            'The entropy must be constant along an adiabat to within TOL.'
        )

    def test_adiabatic_path_grid(self):
        self._calc_test_adiabatic_path_grid()
        self._calc_test_adiabatic_path_grid(kind_electronic='CvPowLaw',
                                            apply_electronic=True)

    def _calc_test_adiabatic_path_grid(self, kind_compress='Vinet',
                                       compress_path_const='T',
                                       kind_gamma='GammaFiniteStrain',
                                       kind_RTpoly='V', RTpoly_order=5, natom=1,
                                       kind_electronic='None',
                                       apply_electronic=False):
        """Entropy must be constant along every adiabat in a Tfoot grid."""
        TOL = 1e-3
        eos_mod = self.load_eos(kind_compress=kind_compress,
                                compress_path_const=compress_path_const,
                                kind_gamma=kind_gamma, kind_RTpoly=kind_RTpoly,
                                RTpoly_order=RTpoly_order, natom=natom,
                                kind_electronic=kind_electronic,
                                apply_electronic=apply_electronic)
        Pgrid = np.linspace(0, 150, 31)
        Tfoot_grid = np.array([3000, 4000, 5000, 6000, 8000, 10000])
        V_ad_grid, T_ad_grid = eos_mod.adiabatic_path_grid(Tfoot_grid, Pgrid)
        S_ad_grid = np.zeros(V_ad_grid.shape)
        for ind, (iV_ad, iT_ad) in enumerate(zip(V_ad_grid, T_ad_grid)):
            S_ad_grid[ind] = eos_mod.entropy(iV_ad, iT_ad)
        dS_ad_grid = S_ad_grid - np.tile(
            np.mean(S_ad_grid, axis=1)[:, np.newaxis], (1, Pgrid.size))
        assert np.all(np.abs(dS_ad_grid) < TOL), (
            'The entropy must be constant along an adiabat to within TOL.'
        )
#====================================================================
#====================================================================
# NOTE: The block below is legacy Python-2-era test code (print statements,
# raw_input) kept for reference only; it is commented out and never collected.
# class TestRosenfeldTaranzonaPoly(BaseTestThermalMod):
# def init_params(self,eos_d):
#
# core.set_consts( [], [], eos_d )
#
# # Set model parameter values
# mexp = 3.0/5
# T0 = 4000.0
# V0_ccperg = 0.408031 # cc/g
# K0 = 13.6262
# KP0= 7.66573
# E0 = 0.0
# # nfac = 5.0
# # mass = (24.31+28.09+3*16.0) # g/(mol atom)
# # V0 = V0_ccperg
#
# # NOTE that units are all per atom
# # requires conversion from values reported in Spera2011
# lognfac = 0.0
# mass = (24.31+28.09+3*16.0)/5.0 # g/(mol atom)
# Vconv_fac = mass*eos_d['const_d']['ang3percc']/eos_d['const_d']['Nmol']
# V0 = V0_ccperg*Vconv_fac
#
#
# param_key_a = ['mexp','lognfac','T0','V0','K0','KP0','E0','mass']
# param_val_a = np.array([mexp,lognfac,T0,V0,K0,KP0,E0,mass])
# core.set_params( param_key_a, param_val_a, eos_d )
#
# # Set parameter values from Spera et al. (2011)
# # for MgSiO3 melt using (Oganov potential)
#
# # Must convert energy units from kJ/g to eV/atom
# energy_conv_fac = mass/eos_d['const_d']['kJ_molpereV']
# core.set_consts( ['energy_conv_fac'], [energy_conv_fac],
# eos_d )
#
# # change coefficients to relative
# # acoef_a = energy_conv_fac*\
# # np.array([127.116,-3503.98,20724.4,-60212.0,86060.5,-48520.4])
# # bcoef_a = energy_conv_fac*\
# # np.array([-0.371466,7.09542,-45.7362,139.020,-201.487,112.513])
# Vconv_a = (1.0/Vconv_fac)**np.arange(6)
#
#
# unit_conv = energy_conv_fac*Vconv_a
#
# # Reported vol-dependent polynomial coefficients for a and b
# # in Spera2011
# acoef_unscl_a = np.array([127.116,-3503.98,20724.4,-60212.0,\
# 86060.5,-48520.4])
# bcoef_unscl_a = np.array([-0.371466,7.09542,-45.7362,139.020,\
# -201.487,112.513])
#
# # Convert units and transfer to normalized version of RT model
# acoef_a = unit_conv*(acoef_unscl_a+bcoef_unscl_a*T0**mexp)
# bcoef_a = unit_conv*bcoef_unscl_a*T0**mexp
#
# core.set_array_params( 'acoef', acoef_a, eos_d )
# core.set_array_params( 'bcoef', bcoef_a, eos_d )
#
# self.load_eos_mod( eos_d )
#
# # from IPython import embed; embed(); import ipdb; ipdb.set_trace()
# return eos_d
#
# def test_RT_potenergy_curves_Spera2011(self):
# Nsamp = 101
# eos_d = self.init_params({})
#
# param_d = eos_d['param_d']
# Vgrid_a = np.linspace(0.5,1.1,Nsamp)*param_d['V0']
# Tgrid_a = np.linspace(100.0**(5./3),180.0**(5./3),11)
#
# full_mod = eos_d['modtype_d']['FullMod']
# thermal_mod = eos_d['modtype_d']['ThermalMod']
#
# energy_conv_fac, = core.get_consts(['energy_conv_fac'],eos_d)
#
# potenergy_mod_a = []
#
# for iV in Vgrid_a:
# ipotenergy_a = thermal_mod.calc_energy_pot(iV,Tgrid_a,eos_d)
# potenergy_mod_a.append(ipotenergy_a)
#
# # energy_mod_a = np.array( energy_mod_a )
# potenergy_mod_a = np.array( potenergy_mod_a )
#
# plt.ion()
# plt.figure()
# plt.plot(Tgrid_a**(3./5), potenergy_mod_a.T/energy_conv_fac,'-')
# plt.xlim(100,180)
# plt.ylim(-102,-95)
#
# print 'Compare this plot with Spera2011 Fig 1b (Oganov potential):'
# print 'Do the figures agree (y/n or k for keyboard)?'
# s = raw_input('--> ')
# if s=='k':
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
#
# assert s=='y', 'Figure must match published figure'
# pass
#
# def test_energy_curves_Spera2011(self):
# Nsamp = 101
# eos_d = self.init_params({})
#
# param_d = eos_d['param_d']
# Vgrid_a = np.linspace(0.4,1.1,Nsamp)*param_d['V0']
# Tgrid_a = np.array([2500,3000,3500,4000,4500,5000])
#
# full_mod = eos_d['modtype_d']['FullMod']
#
# energy_conv_fac, = core.get_consts(['energy_conv_fac'],eos_d)
#
# energy_mod_a = []
# press_mod_a = []
#
# for iT in Tgrid_a:
# ienergy_a = full_mod.energy(Vgrid_a,iT,eos_d)
# ipress_a = full_mod.press(Vgrid_a,iT,eos_d)
# energy_mod_a.append(ienergy_a)
# press_mod_a.append(ipress_a)
#
# # energy_mod_a = np.array( energy_mod_a )
# energy_mod_a = np.array( energy_mod_a )
# press_mod_a = np.array( press_mod_a )
#
# plt.ion()
# plt.figure()
# plt.plot(press_mod_a.T, energy_mod_a.T/energy_conv_fac,'-')
# plt.legend(Tgrid_a,loc='lower right')
# plt.xlim(-5,165)
# plt.ylim(-100.5,-92)
#
# # from IPython import embed; embed(); import ipdb; ipdb.set_trace()
#
# print 'Compare this plot with Spera2011 Fig 2b (Oganov potential):'
# print 'Do the figures agree (y/n or k for keyboard)?'
# s = raw_input('--> ')
# if s=='k':
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
#
# assert s=='y', 'Figure must match published figure'
# pass
#
# def test_heat_capacity_curves_Spera2011(self):
# Nsamp = 101
# eos_d = self.init_params({})
#
# param_d = eos_d['param_d']
# Vgrid_a = np.linspace(0.4,1.2,Nsamp)*param_d['V0']
# Tgrid_a = np.array([2500,3000,3500,4000,4500,5000])
#
# full_mod = eos_d['modtype_d']['FullMod']
# thermal_mod = eos_d['modtype_d']['ThermalMod']
#
# heat_capacity_mod_a = []
# energy_conv_fac, = core.get_consts(['energy_conv_fac'],eos_d)
#
# energy_mod_a = []
# press_mod_a = []
#
# for iT in Tgrid_a:
# iheat_capacity_a = thermal_mod.heat_capacity(Vgrid_a,iT,eos_d)
# ienergy_a = full_mod.energy(Vgrid_a,iT,eos_d)
# ipress_a = full_mod.press(Vgrid_a,iT,eos_d)
#
# heat_capacity_mod_a.append(iheat_capacity_a)
# energy_mod_a.append(ienergy_a)
# press_mod_a.append(ipress_a)
#
#
# # energy_mod_a = np.array( energy_mod_a )
# heat_capacity_mod_a = np.array( heat_capacity_mod_a )
# energy_mod_a = np.array( energy_mod_a )
# press_mod_a = np.array( press_mod_a )
#
# plt.ion()
# plt.figure()
# plt.plot(press_mod_a.T,1e3*heat_capacity_mod_a.T/energy_conv_fac,'-')
# plt.legend(Tgrid_a,loc='lower right')
# # plt.ylim(1.2,1.9)
# plt.xlim(-5,240)
#
# print 'Compare this plot with Spera2011 Fig 2b (Oganov potential):'
# print 'Do the figures agree (y/n or k for keyboard)?'
# s = raw_input('--> ')
# if s=='k':
# from IPython import embed; embed(); import ipdb; ipdb.set_trace()
#
# assert s=='y', 'Figure must match published figure'
# pass
#====================================================================
| mit |
ageron/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 39 | 32726 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.deprecation import deprecated
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([(k,
out_el_shape(v, n_classes[k]
if n_classes is not None and
k in n_classes else None))
for k, v in list(y_shape.items())])
return input_shape, output_shape, batch_size
def _data_type_filter(x, y):
  """Filter data types into acceptable format."""
  # Extraction order matters: dask first, then pandas, mirroring the
  # availability flags set at import time.
  has_labels = y is not None
  if HAS_DASK:
    x = extract_dask_data(x)
    if has_labels:
      y = extract_dask_labels(y)
  if HAS_PANDAS:
    x = extract_pandas_data(x)
    if has_labels:
      y = extract_pandas_labels(y)
  return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_train_data_feeder(x,
                            y,
                            n_classes,
                            batch_size=None,
                            shuffle=True,
                            epochs=None):
  """Create data feeder, to sample inputs from dataset.

  If `x` and `y` are iterators, use `StreamingDataFeeder`.

  Args:
    x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
      supports iterables.
    y: numpy, pandas or Dask array or dictionary of aforementioned. Also
      supports
      iterables.
    n_classes: number of classes. Must be None or same type as y. In case, `y`
      is `dict`
      (or iterable which returns dict) such that `n_classes[key] = n_classes for
      y[key]`
    batch_size: size to split data into parts. Must be >= 1.
    shuffle: Whether to shuffle the inputs.
    epochs: Number of epochs to run.

  Returns:
    DataFeeder object that returns training data.

  Raises:
    ValueError: if one of `x` and `y` is iterable and the other is not.
  """
  x, y = _data_type_filter(x, y)
  # Default feeder; upgraded to DaskDataFeeder when both inputs are dask.
  data_feeder_cls = DataFeeder
  if HAS_DASK:
    # pylint: disable=g-import-not-at-top
    import dask.dataframe as dd
    x_is_dask = isinstance(x, (dd.Series, dd.DataFrame))
    y_is_dask_or_none = y is None or isinstance(y, (dd.Series, dd.DataFrame))
    if x_is_dask and y_is_dask_or_none:
      data_feeder_cls = DaskDataFeeder
  if _is_iterable(x):
    if y is not None and not _is_iterable(y):
      raise ValueError('Both x and y should be iterators for '
                       'streaming learning to work.')
    return StreamingDataFeeder(x, y, n_classes, batch_size)
  return data_feeder_cls(
      x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_predict_data_feeder(x, batch_size=None):
  """Returns an iterable for feeding into predict step.

  Args:
    x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
      iterable.
    batch_size: Size of batches to split data into. If `None`, returns one
      batch of full size.

  Returns:
    List or iterator (or dictionary thereof) of parts of data to predict on.

  Raises:
    ValueError: if `batch_size` <= 0.
  """
  if HAS_DASK:
    x = extract_dask_data(x)
  if HAS_PANDAS:
    x = extract_pandas_data(x)
  if _is_iterable(x):
    return _batch_data(x, batch_size)
  # 1-D feature arrays are treated as a single feature column.
  if len(x.shape) == 1:
    x = np.reshape(x, (-1, 1))
  if batch_size is None:
    return [x]
  if batch_size <= 0:
    raise ValueError('Invalid batch_size %d.' % batch_size)
  num_batches = int(math.ceil(float(len(x)) / batch_size))
  return [x[b * batch_size:(b + 1) * batch_size] for b in xrange(num_batches)]
@deprecated(None, 'Please use tensorflow/transform or tf.data.')
def setup_processor_data_feeder(x):
  """Sets up processor iterable.

  Args:
    x: numpy, pandas or iterable.

  Returns:
    Iterable of data to process.
  """
  # Pandas containers are unwrapped to a plain matrix; anything else passes
  # through untouched.
  return extract_pandas_matrix(x) if HAS_PANDAS else x
@deprecated(None, 'Please convert numpy dtypes explicitly.')
def check_array(array, dtype):
  """Checks array on dtype and converts it if different.

  Args:
    array: Input array.
    dtype: Expected dtype.

  Returns:
    Original array or converted.
  """
  # skip check if array is instance of other classes, e.g. h5py.Dataset
  # to avoid copying array and loading whole data into memory
  if isinstance(array, (np.ndarray, list)):
    # np.asarray only copies when a conversion is actually required.
    # The previous np.array(..., copy=False) spelling raises under NumPy 2.0
    # whenever a copy is unavoidable; asarray is the portable equivalent.
    array = np.asarray(array, dtype=dtype, order=None)
  return array
def _access(data, iloc):
  """Accesses an element from collection, using integer location based indexing.

  Args:
    data: array-like. The collection to access.
    iloc: `int` or `list` of `int`s. Location(s) to access in `collection`.

  Returns:
    The element of `data` found at location(s) `iloc`.
  """
  if HAS_PANDAS:
    import pandas as pd  # pylint: disable=g-import-not-at-top
    # Pandas containers need positional (.iloc) indexing to ignore labels.
    if isinstance(data, (pd.Series, pd.DataFrame)):
      return data.iloc[iloc]
  return data[iloc]
def _check_dtype(dtype):
  """Warns when `dtype` is float64 and returns it unchanged.

  float64 inputs are passed through (not cast); the warning nudges callers
  toward float32, which most models expect.
  """
  if dtypes.as_dtype(dtype) == dtypes.float64:
    logging.warn(
        'float64 is not supported by many models, consider casting to float32.')
  return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
  @deprecated(None, 'Please use tensorflow/transform or tf.data.')
  def __init__(self,
               x,
               y,
               n_classes,
               batch_size=None,
               shuffle=True,
               random_state=None,
               epochs=None):
    """Initializes a DataFeeder instance.

    Args:
      x: One feature sample which can either Nd numpy matrix of shape
        `[n_samples, n_features, ...]` or dictionary of Nd numpy matrix.
      y: label vector, either floats for regression or class id for
        classification. If matrix, will consider as a sequence of labels.
        Can be `None` for unsupervised setting. Also supports dictionary of
        labels.
      n_classes: Number of classes, 0 and 1 are considered regression, `None`
        will pass through the input labels without one-hot conversion. Also, if
        `y` is `dict`, then `n_classes` must be `dict` such that
        `n_classes[key] = n_classes for label y[key]`, `None` otherwise.
      batch_size: Mini-batch size to accumulate samples in one mini batch.
      shuffle: Whether to shuffle `x`.
      random_state: Numpy `RandomState` object to reproduce sampling.
      epochs: Number of times to iterate over input data before raising
        `StopIteration` exception.

    Attributes:
      x: Input features (ndarray or dictionary of ndarrays).
      y: Input label (ndarray or dictionary of ndarrays).
      n_classes: Number of classes (if `None`, pass through indices without
        one-hot conversion).
      batch_size: Mini-batch size to accumulate.
      input_shape: Shape of the input (or dictionary of shapes).
      output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of shapes).
      output_dtype: DType of output (or dictionary of shapes.
    """
    x_is_dict, y_is_dict = isinstance(
        x, dict), y is not None and isinstance(y, dict)
    if isinstance(y, list):
      y = np.array(y)
    # check_array normalizes ndarray/list inputs; other containers (e.g.
    # h5py datasets) pass through untouched.
    self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
                   ]) if x_is_dict else check_array(x, x.dtype)
    self._y = None if y is None else (dict(
        [(k, check_array(v, v.dtype)) for k, v in list(y.items())])
                                      if y_is_dict else check_array(y, y.dtype))
    # self.n_classes is not None means we're converting raw target indices
    # to one-hot.
    if n_classes is not None:
      if not y_is_dict:
        # int64 indices for multi-class one-hot; float32 for regression.
        y_dtype = (
            np.int64 if n_classes is not None and n_classes > 1 else np.float32)
        self._y = (None if y is None else check_array(y, dtype=y_dtype))
    self.n_classes = n_classes
    self.max_epochs = epochs
    x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
                   ]) if x_is_dict else self._x.shape
    y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
                   ]) if y_is_dict else None if y is None else self._y.shape
    self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
        x_shape, y_shape, n_classes, batch_size)
    # Input dtype matches dtype of x.
    self._input_dtype = (
        dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())])
        if x_is_dict else _check_dtype(self._x.dtype))
    # self._output_dtype == np.float32 when y is None
    self._output_dtype = (
        dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())])
        if y_is_dict else (_check_dtype(self._y.dtype)
                           if y is not None else np.float32))
    # self.n_classes is None means we're passing in raw target indices
    if n_classes is not None and y_is_dict:
      for key in list(n_classes.keys()):
        if key in self._output_dtype:
          self._output_dtype[key] = np.float32
    self._shuffle = shuffle
    # Fixed seed by default so sampling is reproducible across runs.
    self.random_state = np.random.RandomState(
        42) if random_state is None else random_state
    if x_is_dict:
      num_samples = list(self._x.values())[0].shape[0]
    elif tensor_util.is_tensor(self._x):
      num_samples = self._x.shape[
          0].value  # shape will be a Dimension, extract an int
    else:
      num_samples = self._x.shape[0]
    if self._shuffle:
      self.indices = self.random_state.permutation(num_samples)
    else:
      self.indices = np.array(range(num_samples))
    self.offset = 0
    self.epoch = 0
    self._epoch_placeholder = None
  @property
  def x(self):
    """Input features (ndarray or dict of ndarrays)."""
    return self._x
  @property
  def y(self):
    """Input labels (ndarray, dict of ndarrays, or None)."""
    return self._y
  @property
  def shuffle(self):
    """Whether sample indices are shuffled."""
    return self._shuffle
  @property
  def input_dtype(self):
    """DType of the input features (or dict of dtypes)."""
    return self._input_dtype
  @property
  def output_dtype(self):
    """DType of the output labels (or dict of dtypes)."""
    return self._output_dtype
  @property
  def batch_size(self):
    """Mini-batch size used when sampling."""
    return self._batch_size
  def make_epoch_variable(self):
    """Adds a placeholder variable for the epoch to the graph.

    Returns:
      The epoch placeholder.
    """
    # Shape [1] so the current epoch is fed as a single-element vector.
    self._epoch_placeholder = array_ops.placeholder(
        dtypes.int32, [1], name='epoch')
    return self._epoch_placeholder
def input_builder(self):
  """Builds input and output placeholders in the graph.

  Shapes come from `self.input_shape` / `self.output_shape` computed at
  construction time; the leading (batch) dimension is left open as `None`.

  Returns:
    Two placeholders for inputs and outputs.
  """

  def _make_placeholder(shape, dtype, prefix):
    # No shape means no placeholder at all (e.g. unsupervised `y`).
    if shape is None:
      return None
    if isinstance(shape, dict):
      # One placeholder per feature key; each named `<prefix>_<key>`.
      return {
          key: array_ops.placeholder(
              dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
              name=prefix + '_' + key)
          for key in shape
      }
    return array_ops.placeholder(
        dtypes.as_dtype(dtype), [None] + shape[1:], name=prefix)

  self._input_placeholder = _make_placeholder(self.input_shape,
                                              self._input_dtype, 'input')
  self._output_placeholder = _make_placeholder(self.output_shape,
                                               self._output_dtype, 'output')
  return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
  """Sets externally created placeholders for this data feeder.

  Args:
    input_placeholder: Placeholder for `x` variable. Should match shape
      of the examples in the x dataset.
    output_placeholder: Placeholder for `y` variable. Should match
      shape of the examples in the y dataset. Can be `None`.
  """
  self._input_placeholder, self._output_placeholder = (
      input_placeholder, output_placeholder)
def get_feed_params(self):
  """Function returns a `dict` with data feed params while training.

  Returns:
    A `dict` with data feed params while training: the current epoch,
    the current offset into the data, and the mini-batch size.
  """
  return dict(
      epoch=self.epoch,
      offset=self.offset,
      batch_size=self._batch_size)
def get_feed_dict_fn(self):
  """Returns a function that samples data into given placeholders.

  Returns:
    A function that when called samples a random subset of batch size
    from `x` and `y`.
  """
  # Resolve once whether features/targets are dict-valued; the closures
  # below branch on these flags on every call.
  x_is_dict, y_is_dict = isinstance(
      self._x, dict), self._y is not None and isinstance(self._y, dict)

  # Assign input features from random indices.
  def extract(data, indices):
    # 1-D data is reshaped to a column vector so downstream code always
    # sees a 2-D batch.
    return (np.array(_access(data, indices)).reshape((indices.shape[0], 1))
            if len(data.shape) == 1 else _access(data, indices))

  # assign labels from random indices
  def assign_label(data, shape, dtype, n_classes, indices):
    # NOTE: mutates `shape[0]` in place to the actual batch length
    # (the last batch of an epoch may be smaller than batch_size).
    shape[0] = indices.shape[0]
    out = np.zeros(shape, dtype=dtype)
    # `xrange` — presumably from six.moves, imported elsewhere in this
    # module; confirm for Python 3 compatibility.
    for i in xrange(out.shape[0]):
      sample = indices[i]
      # self.n_classes is None means we're passing in raw target indices
      if n_classes is None:
        out[i] = _access(data, sample)
      else:
        if n_classes > 1:
          # One-hot encode: 2-D output gets a single 1.0 at the class
          # column; higher-rank output one-hot encodes each element.
          if len(shape) == 2:
            out.itemset((i, int(_access(data, sample))), 1.0)
          else:
            for idx, value in enumerate(_access(data, sample)):
              out.itemset(tuple([i, idx, value]), 1.0)
        else:
          out[i] = _access(data, sample)
    return out

  def _feed_dict_fn():
    """Function that samples data into given placeholders."""
    # `self.max_epochs` is set outside this block (presumably by the
    # trainer); `None` means run indefinitely — confirm.
    if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
      raise StopIteration
    assert self._input_placeholder is not None
    feed_dict = {}
    if self._epoch_placeholder is not None:
      feed_dict[self._epoch_placeholder.name] = [self.epoch]
    # Take next batch of indices.
    x_len = list(
        self._x.values())[0].shape[0] if x_is_dict else self._x.shape[0]
    end = min(x_len, self.offset + self._batch_size)
    batch_indices = self.indices[self.offset:end]
    # adding input placeholder
    feed_dict.update(
        dict([(self._input_placeholder[k].name, extract(v, batch_indices))
              for k, v in list(self._x.items())]) if x_is_dict else {
                  self._input_placeholder.name:
                      extract(self._x, batch_indices)
              })
    # move offset and reset it if necessary; a new permutation is drawn
    # at each epoch boundary when shuffling is enabled.
    self.offset += self._batch_size
    if self.offset >= x_len:
      self.indices = self.random_state.permutation(
          x_len) if self._shuffle else np.array(range(x_len))
      self.offset = 0
      self.epoch += 1
    # return early if there are no labels
    if self._output_placeholder is None:
      return feed_dict
    # adding output placeholders
    if y_is_dict:
      for k, v in list(self._y.items()):
        # Missing keys in self.n_classes mean "raw targets, no one-hot".
        n_classes = (self.n_classes[k] if k in self.n_classes else
                     None) if self.n_classes is not None else None
        shape, dtype = self.output_shape[k], self._output_dtype[k]
        feed_dict.update({
            self._output_placeholder[k].name:
                assign_label(v, shape, dtype, n_classes, batch_indices)
        })
    else:
      shape, dtype, n_classes = (self.output_shape, self._output_dtype,
                                 self.n_classes)
      feed_dict.update({
          self._output_placeholder.name:
              assign_label(self._y, shape, dtype, n_classes, batch_indices)
      })
    return feed_dict

  return _feed_dict_fn
class StreamingDataFeeder(DataFeeder):
  """Data feeder for TF trainer that reads data from iterator.

  THIS CLASS IS DEPRECATED. See
  [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
  for general migration instructions.

  Streaming data feeder allows to read data as it comes it from disk or
  somewhere else. It's custom to have this iterators rotate infinetly over
  the dataset, to allow control of how much to learn on the trainer side.
  """

  def __init__(self, x, y, n_classes, batch_size):
    """Initializes a StreamingDataFeeder instance.

    Args:
      x: iterator each element of which returns one feature sample. Sample can
        be a Nd numpy matrix or dictionary of Nd numpy matrices.
      y: iterator each element of which returns one label sample. Sample can be
        a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many
        classes regression values.
      n_classes: indicator of how many classes the corresponding label sample
        has for the purposes of one-hot conversion of label. In case where `y`
        is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
        of how many classes there are in each label in `y`. If key is
        present in `y` and missing in `n_classes`, the value is assumed `None`
        and no one-hot conversion will be applied to the label with that key.
      batch_size: Mini batch size to accumulate samples in one batch. If set
        `None`, then assumes that iterator to return already batched element.

    Attributes:
      x: input features (or dictionary of input features).
      y: input label (or dictionary of output features).
      n_classes: number of classes.
      batch_size: mini batch size to accumulate.
      input_shape: shape of the input (can be dictionary depending on `x`).
      output_shape: shape of the output (can be dictionary depending on `y`).
      input_dtype: dtype of input (can be dictionary depending on `x`).
      output_dtype: dtype of output (can be dictionary depending on `y`).
    """
    # pylint: disable=invalid-name,super-init-not-called
    # Peek at the first element to learn shapes/dtypes, then chain it back
    # in front of the iterator so no sample is lost.
    x_first_el = six.next(x)
    self._x = itertools.chain([x_first_el], x)
    if y is not None:
      y_first_el = six.next(y)
      self._y = itertools.chain([y_first_el], y)
    else:
      y_first_el = None
      self._y = None
    self.n_classes = n_classes
    x_is_dict = isinstance(x_first_el, dict)
    y_is_dict = y is not None and isinstance(y_first_el, dict)
    if y_is_dict and n_classes is not None:
      assert isinstance(n_classes, dict)
    # extract shapes for first_elements; a leading 1 stands in for the
    # batch dimension expected by _get_in_out_shape.
    if x_is_dict:
      x_first_el_shape = dict(
          [(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
    else:
      x_first_el_shape = [1] + list(x_first_el.shape)
    if y_is_dict:
      y_first_el_shape = dict(
          [(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
    elif y is None:
      y_first_el_shape = None
    else:
      y_first_el_shape = (
          [1] + list(y_first_el[0].shape
                     if isinstance(y_first_el, list) else y_first_el.shape))
    self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
        x_first_el_shape, y_first_el_shape, n_classes, batch_size)
    # Input dtype of x_first_el.
    if x_is_dict:
      self._input_dtype = dict(
          [(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
    else:
      self._input_dtype = _check_dtype(x_first_el.dtype)

    # Output dtype of y_first_el; recurses into lists and falls back to the
    # numpy dtype of a scalar element.
    def check_y_dtype(el):
      if isinstance(el, np.ndarray):
        return el.dtype
      elif isinstance(el, list):
        return check_y_dtype(el[0])
      else:
        return _check_dtype(np.dtype(type(el)))

    # Output types are floats, due to both softmaxes and regression req.
    # NOTE(review): if `n_classes` is a dict while `y` is None, the
    # `n_classes > 0` comparison would raise on Python 3 — presumably
    # `n_classes` is always a plain int in that case; confirm.
    if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
      self._output_dtype = np.float32
    elif y_is_dict:
      self._output_dtype = dict(
          [(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
    elif y is None:
      self._output_dtype = None
    else:
      self._output_dtype = check_y_dtype(y_first_el)

  def get_feed_params(self):
    """Function returns a `dict` with data feed params while training.

    Returns:
      A `dict` with data feed params while training.
    """
    return {'batch_size': self._batch_size}

  def get_feed_dict_fn(self):
    """Returns a function, that will sample data and provide it to placeholders.

    Returns:
      A function that when called samples a random subset of batch size
      from x and y.
    """
    self.stopped = False

    def _feed_dict_fn():
      """Samples data and provides it to placeholders.

      Returns:
        `dict` of input and output tensors.
      """

      def init_array(shape, dtype):
        """Initialize array of given shape or dict of shapes and dtype."""
        if shape is None:
          return None
        elif isinstance(shape, dict):
          return dict(
              [(k, np.zeros(shape[k], dtype[k])) for k in list(shape.keys())])
        else:
          return np.zeros(shape, dtype=dtype)

      def put_data_array(dest, index, source=None, n_classes=None):
        """Puts data array into container."""
        if source is None:
          # Source exhausted: truncate the pre-allocated batch buffer.
          dest = dest[:index]
        elif n_classes is not None and n_classes > 1:
          # One-hot encode the class index into the output buffer.
          if len(self.output_shape) == 2:
            dest.itemset((index, source), 1.0)
          else:
            for idx, value in enumerate(source):
              dest.itemset(tuple([index, idx, value]), 1.0)
        else:
          if len(dest.shape) > 1:
            dest[index, :] = source
          else:
            dest[index] = source[0] if isinstance(source, list) else source
        return dest

      def put_data_array_or_dict(holder, index, data=None, n_classes=None):
        """Puts data array or data dictionary into container."""
        if holder is None:
          return None
        if isinstance(holder, dict):
          if data is None:
            data = {k: None for k in holder.keys()}
          assert isinstance(data, dict)
          for k in holder.keys():
            num_classes = n_classes[k] if (n_classes is not None and
                                           k in n_classes) else None
            holder[k] = put_data_array(holder[k], index, data[k], num_classes)
        else:
          holder = put_data_array(holder, index, data, n_classes)
        return holder

      if self.stopped:
        raise StopIteration
      inp = init_array(self.input_shape, self._input_dtype)
      out = init_array(self.output_shape, self._output_dtype)
      for i in xrange(self._batch_size):
        # Add handling when queue ends.
        try:
          next_inp = six.next(self._x)
          inp = put_data_array_or_dict(inp, i, next_inp, None)
        except StopIteration:
          # Iterator exhausted mid-batch: emit the partial batch now and
          # signal StopIteration on the next call.
          self.stopped = True
          if i == 0:
            raise
          inp = put_data_array_or_dict(inp, i, None, None)
          out = put_data_array_or_dict(out, i, None, None)
          break
        if self._y is not None:
          next_out = six.next(self._y)
          out = put_data_array_or_dict(out, i, next_out, self.n_classes)
      # creating feed_dict
      if isinstance(inp, dict):
        feed_dict = dict([(self._input_placeholder[k].name, inp[k])
                          for k in list(self._input_placeholder.keys())])
      else:
        feed_dict = {self._input_placeholder.name: inp}
      if self._y is not None:
        if isinstance(out, dict):
          feed_dict.update(
              dict([(self._output_placeholder[k].name, out[k])
                    for k in list(self._output_placeholder.keys())]))
        else:
          feed_dict.update({self._output_placeholder.name: out})
      return feed_dict

    return _feed_dict_fn
class DaskDataFeeder(object):
  """Data feeder for that reads data from dask.Series and dask.DataFrame.

  THIS CLASS IS DEPRECATED. See
  [contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
  for general migration instructions.

  Numpy arrays can be serialized to disk and it's possible to do random seeks
  into them. DaskDataFeeder will remove requirement to have full dataset in the
  memory and still do random seeks for sampling of batches.
  """

  @deprecated(None, 'Please feed input to tf.data to support dask.')
  def __init__(self,
               x,
               y,
               n_classes,
               batch_size,
               shuffle=True,
               random_state=None,
               epochs=None):
    """Initializes a DaskDataFeeder instance.

    Args:
      x: iterator that returns for each element, returns features.
      y: iterator that returns for each element, returns 1 or many classes /
        regression values.
      n_classes: indicator of how many classes the label has.
      batch_size: Mini batch size to accumulate.
      shuffle: Whether to shuffle the inputs.
      random_state: random state for RNG. Note that it will mutate so use a
        int value for this if you want consistent sized batches.
      epochs: Number of epochs to run.

    Attributes:
      x: input features.
      y: input label.
      n_classes: number of classes.
      batch_size: mini batch size to accumulate.
      input_shape: shape of the input.
      output_shape: shape of the output.
      input_dtype: dtype of input.
      output_dtype: dtype of output.

    Raises:
      ValueError: if `x` or `y` are `dict`, as they are not supported currently.
    """
    if isinstance(x, dict) or isinstance(y, dict):
      raise ValueError(
          'DaskDataFeeder does not support dictionaries at the moment.')
    # pylint: disable=invalid-name,super-init-not-called
    import dask.dataframe as dd  # pylint: disable=g-import-not-at-top
    # TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
    self._x = x
    self._y = y
    # save column names
    self._x_columns = list(x.columns)
    if isinstance(y.columns[0], str):
      self._y_columns = list(y.columns)
    else:
      # deal with cases where two DFs have overlapped default numeric colnames
      # NOTE(review): in this branch `self._y_columns` is an int label, not a
      # list — the indexing below (`dtypes[self._y_columns]`) relies on that.
      self._y_columns = len(self._x_columns) + 1
      self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
    # TODO(terrytangyuan): deal with unsupervised cases
    # combine into a data frame
    self.df = dd.multi.concat([self._x, self._y], axis=1)
    self.n_classes = n_classes
    x_count = x.count().compute()[0]
    x_shape = (x_count, len(self._x.columns))
    y_shape = (x_count, len(self._y.columns))
    # TODO(terrytangyuan): Add support for shuffle and epochs.
    self._shuffle = shuffle
    self.epochs = epochs
    self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
        x_shape, y_shape, n_classes, batch_size)
    # Fraction of the whole frame sampled per batch by random_split below.
    self.sample_fraction = self._batch_size / float(x_count)
    self._input_dtype = _check_dtype(self._x.dtypes[0])
    self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
    if random_state is None:
      self.random_state = 66
    else:
      self.random_state = random_state

  def get_feed_params(self):
    """Function returns a `dict` with data feed params while training.

    Returns:
      A `dict` with data feed params while training.
    """
    return {'batch_size': self._batch_size}

  def get_feed_dict_fn(self, input_placeholder, output_placeholder):
    """Returns a function, that will sample data and provide it to placeholders.

    Args:
      input_placeholder: tf.placeholder for input features mini batch.
      output_placeholder: tf.placeholder for output labels.

    Returns:
      A function that when called samples a random subset of batch size
      from x and y.
    """

    def _feed_dict_fn():
      """Samples data and provides it to placeholders."""
      # TODO(ipolosukhin): option for with/without replacement (dev version of
      # dask)
      sample = self.df.random_split(
          [self.sample_fraction, 1 - self.sample_fraction],
          random_state=self.random_state)
      # `extract_pandas_matrix` / `HAS_PANDAS` are defined elsewhere in this
      # module — presumably pandas-to-numpy conversion helpers; confirm.
      inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
      out = extract_pandas_matrix(sample[0][self._y_columns].compute())
      # convert to correct dtype
      inp = np.array(inp, dtype=self._input_dtype)
      # one-hot encode out for each class for cross entropy loss
      if HAS_PANDAS:
        import pandas as pd  # pylint: disable=g-import-not-at-top
        if not isinstance(out, pd.Series):
          out = out.flatten()
      out_max = self._y.max().compute().values[0]
      encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
      encoded_out[np.arange(out.size), out] = 1
      return {input_placeholder.name: inp, output_placeholder.name: encoded_out}

    return _feed_dict_fn
| apache-2.0 |
rahulremanan/python_tutorial | NLP/00-Multivariate_LSTM/src/binary_classification.py | 1 | 12229 | '''
Created on 06 lug 2017
@author: mantica
https://github.com/Azure/lstms_for_predictive_maintenance/blob/master/Deep%20Learning%20Basics%20for%20Predictive%20Maintenance.ipynb
https://ti.arc.nasa.gov/tech/dash/pcoe/prognostic-data-repository/#turbofan
Binary classification: Predict if an asset will fail within certain time frame (e.g. cycles)
'''
import keras
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
# Setting seed for reproducibility
np.random.seed(1234)
# NOTE(review): assigning a Python variable named PYTHONHASHSEED has no
# effect — it must be set in the environment before the interpreter starts.
PYTHONHASHSEED = 0
from sklearn import preprocessing
from sklearn.metrics import confusion_matrix, recall_score, precision_score
from keras.models import Sequential,load_model
from keras.layers import Dense, Dropout, LSTM
# define path to save model
model_path = '../../Output/binary_model.h5'
##################################
# Data Ingestion
##################################
# read training data - It is the aircraft engine run-to-failure data.
train_df = pd.read_csv('../../Dataset/PM_train.txt', sep=" ", header=None)
# The trailing separator in the raw file yields two empty columns; drop them.
train_df.drop(train_df.columns[[26, 27]], axis=1, inplace=True)
train_df.columns = ['id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',
                    's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',
                    's15', 's16', 's17', 's18', 's19', 's20', 's21']
train_df = train_df.sort_values(['id','cycle'])
# read test data - It is the aircraft engine operating data without failure events recorded.
test_df = pd.read_csv('../../Dataset/PM_test.txt', sep=" ", header=None)
test_df.drop(test_df.columns[[26, 27]], axis=1, inplace=True)
test_df.columns = ['id', 'cycle', 'setting1', 'setting2', 'setting3', 's1', 's2', 's3',
                   's4', 's5', 's6', 's7', 's8', 's9', 's10', 's11', 's12', 's13', 's14',
                   's15', 's16', 's17', 's18', 's19', 's20', 's21']
# read ground truth data - It contains the information of true remaining cycles for each engine in the testing data.
truth_df = pd.read_csv('../../Dataset/PM_truth.txt', sep=" ", header=None)
truth_df.drop(truth_df.columns[[1]], axis=1, inplace=True)
##################################
# Data Preprocessing
##################################
#######
# TRAIN
#######
# Data Labeling - generate column RUL(Remaining Usefull Life or Time to Failure)
# RUL = (last observed cycle for this engine) - (current cycle)
rul = pd.DataFrame(train_df.groupby('id')['cycle'].max()).reset_index()
rul.columns = ['id', 'max']
train_df = train_df.merge(rul, on=['id'], how='left')
train_df['RUL'] = train_df['max'] - train_df['cycle']
train_df.drop('max', axis=1, inplace=True)
# generate label columns for training data
# we will only make use of "label1" for binary classification,
# while trying to answer the question: is a specific engine going to fail within w1 cycles?
w1 = 30
w0 = 15
train_df['label1'] = np.where(train_df['RUL'] <= w1, 1, 0 )
train_df['label2'] = train_df['label1']
train_df.loc[train_df['RUL'] <= w0, 'label2'] = 2
# MinMax normalization (from 0 to 1)
# `cycle` is kept unscaled as the time index; a scaled copy goes in `cycle_norm`.
train_df['cycle_norm'] = train_df['cycle']
cols_normalize = train_df.columns.difference(['id','cycle','RUL','label1','label2'])
min_max_scaler = preprocessing.MinMaxScaler()
norm_train_df = pd.DataFrame(min_max_scaler.fit_transform(train_df[cols_normalize]),
                             columns=cols_normalize,
                             index=train_df.index)
join_df = train_df[train_df.columns.difference(cols_normalize)].join(norm_train_df)
train_df = join_df.reindex(columns = train_df.columns)
######
# TEST
######
# MinMax normalization (from 0 to 1)
# NOTE: transform (not fit_transform) — the scaler fitted on train is reused.
test_df['cycle_norm'] = test_df['cycle']
norm_test_df = pd.DataFrame(min_max_scaler.transform(test_df[cols_normalize]),
                            columns=cols_normalize,
                            index=test_df.index)
test_join_df = test_df[test_df.columns.difference(cols_normalize)].join(norm_test_df)
test_df = test_join_df.reindex(columns = test_df.columns)
test_df = test_df.reset_index(drop=True)
# We use the ground truth dataset to generate labels for the test data.
# generate column max for test data
rul = pd.DataFrame(test_df.groupby('id')['cycle'].max()).reset_index()
rul.columns = ['id', 'max']
truth_df.columns = ['more']
truth_df['id'] = truth_df.index + 1
truth_df['max'] = rul['max'] + truth_df['more']
truth_df.drop('more', axis=1, inplace=True)
# generate RUL for test data
test_df = test_df.merge(truth_df, on=['id'], how='left')
test_df['RUL'] = test_df['max'] - test_df['cycle']
test_df.drop('max', axis=1, inplace=True)
# generate label columns w0 and w1 for test data
test_df['label1'] = np.where(test_df['RUL'] <= w1, 1, 0 )
test_df['label2'] = test_df['label1']
test_df.loc[test_df['RUL'] <= w0, 'label2'] = 2
##################################
# LSTM
##################################
# pick a large window size of 50 cycles
sequence_length = 50
# function to reshape features into (samples, time steps, features)
def gen_sequence(id_df, seq_length, seq_cols):
    """Yield sliding windows of length `seq_length` over one engine's rows.

    Only sequences that meet the window-length are considered, no padding is
    used. This means for testing we need to drop those which are below the
    window-length. An alternative would be to pad sequences so that we can
    use shorter ones.

    Args:
        id_df: DataFrame holding the rows of a single engine id, in cycle order.
        seq_length: window length (number of time steps per sequence).
        seq_cols: list of feature column names to extract.

    Yields:
        numpy arrays of shape (seq_length, len(seq_cols)).
    """
    values = id_df[seq_cols].values
    n_rows = values.shape[0]
    # For n_rows=192 and seq_length=50 this yields windows
    # [0:50], [1:51], ..., [141:191] — the last full window ends one row
    # short of the final row, matching the original zip(range, range) form.
    for start in range(n_rows - seq_length):
        yield values[start:start + seq_length, :]
# pick the feature columns
sensor_cols = ['s' + str(i) for i in range(1,22)]
sequence_cols = ['setting1', 'setting2', 'setting3', 'cycle_norm']
sequence_cols.extend(sensor_cols)
# generator for the sequences (one generator per engine id)
seq_gen = (list(gen_sequence(train_df[train_df['id']==id], sequence_length, sequence_cols))
           for id in train_df['id'].unique())
# generate sequences and convert to numpy array
# resulting shape: (num_sequences, sequence_length, num_features)
seq_array = np.concatenate(list(seq_gen)).astype(np.float32)
seq_array.shape
# function to generate labels
def gen_labels(id_df, seq_length, label):
    """Return the labels aligned with the sequences of one engine id.

    The first `seq_length` labels are removed because, for one id, the first
    sequence of `seq_length` rows has as its target the label that follows
    the window (the previous ones are discarded). Every subsequent sequence
    is then associated, step by step, with one label as target.

    Args:
        id_df: DataFrame holding the rows of a single engine id.
        seq_length: window length used by `gen_sequence`.
        label: list with the label column name(s), e.g. ['label1'].

    Returns:
        2-D numpy array of shape (num_rows - seq_length, len(label)).
    """
    labels = id_df[label].values
    return labels[seq_length:, :]
# generate labels
label_gen = [gen_labels(train_df[train_df['id']==id], sequence_length, ['label1'])
             for id in train_df['id'].unique()]
label_array = np.concatenate(label_gen).astype(np.float32)
label_array.shape
# Next, we build a deep network.
# The first layer is an LSTM layer with 100 units followed by another LSTM layer with 50 units.
# Dropout is also applied after each LSTM layer to control overfitting.
# Final layer is a Dense output layer with single unit and sigmoid activation since this is a binary classification problem.
# build the network
nb_features = seq_array.shape[2]
nb_out = label_array.shape[1]
model = Sequential()
model.add(LSTM(
         input_shape=(sequence_length, nb_features),
         units=100,
         return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(
          units=50,
          return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(units=nb_out, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
# fit the network
# Early stopping on validation loss plus checkpointing of the best model.
history = model.fit(seq_array, label_array, epochs=100, batch_size=200, validation_split=0.05, verbose=2,
          callbacks = [keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0, mode='min'),
                       keras.callbacks.ModelCheckpoint(model_path,monitor='val_loss', save_best_only=True, mode='min', verbose=0)]
          )
# list all data in history
print(history.history.keys())
# summarize history for Accuracy
# NOTE(review): the history keys 'acc'/'val_acc' are Keras-1/2 era names;
# newer Keras uses 'accuracy'/'val_accuracy' — confirm against the installed
# version.
fig_acc = plt.figure(figsize=(10, 10))
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
fig_acc.savefig("../../Output/model_accuracy.png")
# summarize history for Loss
fig_acc = plt.figure(figsize=(10, 10))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
fig_acc.savefig("../../Output/model_loss.png")
# training metrics
scores = model.evaluate(seq_array, label_array, verbose=1, batch_size=200)
print('Accurracy: {}'.format(scores[1]))
# make predictions and compute confusion matrix
# NOTE(review): Sequential.predict_classes was removed in TF 2.6+ — confirm
# the Keras version this script targets.
y_pred = model.predict_classes(seq_array,verbose=1, batch_size=200)
y_true = label_array
test_set = pd.DataFrame(y_pred)
test_set.to_csv('../../Output/binary_submit_train.csv', index = None)
print('Confusion matrix\n- x-axis is true labels.\n- y-axis is predicted labels')
cm = confusion_matrix(y_true, y_pred)
print(cm)
# compute precision and recall
precision = precision_score(y_true, y_pred)
recall = recall_score(y_true, y_pred)
print( 'precision = ', precision, '\n', 'recall = ', recall)
##################################
# EVALUATE ON TEST DATA
##################################
# We pick the last sequence for each id in the test data
seq_array_test_last = [test_df[test_df['id']==id][sequence_cols].values[-sequence_length:]
                       for id in test_df['id'].unique() if len(test_df[test_df['id']==id]) >= sequence_length]
seq_array_test_last = np.asarray(seq_array_test_last).astype(np.float32)
print("seq_array_test_last")
print(seq_array_test_last)
print(seq_array_test_last.shape)
# Similarly, we pick the labels
#print("y_mask")
# keep only the labels of sequences that are at least 50 cycles long
y_mask = [len(test_df[test_df['id']==id]) >= sequence_length for id in test_df['id'].unique()]
print("y_mask")
print(y_mask)
label_array_test_last = test_df.groupby('id')['label1'].nth(-1)[y_mask].values
label_array_test_last = label_array_test_last.reshape(label_array_test_last.shape[0],1).astype(np.float32)
print(label_array_test_last.shape)
print("label_array_test_last")
print(label_array_test_last)
# if best iteration's model was saved then load and use it
if os.path.isfile(model_path):
    estimator = load_model(model_path)
# test metrics
scores_test = estimator.evaluate(seq_array_test_last, label_array_test_last, verbose=2)
print('Accurracy: {}'.format(scores_test[1]))
# make predictions and compute confusion matrix
y_pred_test = estimator.predict_classes(seq_array_test_last)
y_true_test = label_array_test_last
test_set = pd.DataFrame(y_pred_test)
test_set.to_csv('../../Output/binary_submit_test.csv', index = None)
print('Confusion matrix\n- x-axis is true labels.\n- y-axis is predicted labels')
cm = confusion_matrix(y_true_test, y_pred_test)
print(cm)
# compute precision and recall
precision_test = precision_score(y_true_test, y_pred_test)
recall_test = recall_score(y_true_test, y_pred_test)
f1_test = 2 * (precision_test * recall_test) / (precision_test + recall_test)
print( 'Precision: ', precision_test, '\n', 'Recall: ', recall_test,'\n', 'F1-score:', f1_test )
# Plot in blue color the predicted data and in green color the
# actual data to verify visually the accuracy of the model.
fig_verify = plt.figure(figsize=(100, 50))
plt.plot(y_pred_test, color="blue")
plt.plot(y_true_test, color="green")
plt.title('prediction')
plt.ylabel('value')
plt.xlabel('row')
plt.legend(['predicted', 'actual data'], loc='upper left')
plt.show()
fig_verify.savefig("../../Output/model_verify.png") | mit |
henrykironde/scikit-learn | examples/semi_supervised/plot_label_propagation_digits.py | 268 | 2723 | """
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model will
be trained using all points, but only 30 will be labeled. Results
in the form of a confusion matrix and a series of metrics over each
class will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
# Use only the first 330 (shuffled) digit samples.
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# shuffle everything around
# Unlabeled points are marked with -1, the convention LabelSpreading expects.
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
      (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
    image = images[image_index]
    sub = f.add_subplot(2, 5, index + 1)
    sub.imshow(image, cmap=plt.cm.gray_r)
    plt.xticks([])
    plt.yticks([])
    sub.set_title('predict: %i\ntrue: %i' % (
        lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
| bsd-3-clause |
perimosocordiae/scipy | doc/source/tutorial/stats/plots/kde_plot3.py | 12 | 1249 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
rng = np.random.default_rng()
x1 = rng.normal(size=200)  # random data, normal distribution
xs = np.linspace(x1.min()-1, x1.max()+1, 200)
# Compare the two built-in bandwidth selection rules on the same sample.
kde1 = stats.gaussian_kde(x1)
kde2 = stats.gaussian_kde(x1, bw_method='silverman')
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_subplot(211)
ax1.plot(x1, np.zeros(x1.shape), 'b+', ms=12)  # rug plot
ax1.plot(xs, kde1(xs), 'k-', label="Scott's Rule")
ax1.plot(xs, kde2(xs), 'b-', label="Silverman's Rule")
ax1.plot(xs, stats.norm.pdf(xs), 'r--', label="True PDF")
ax1.set_xlabel('x')
ax1.set_ylabel('Density')
ax1.set_title("Normal (top) and Student's T$_{df=5}$ (bottom) distributions")
ax1.legend(loc=1)
# Repeat for a heavier-tailed sample (Student's t, df=5).
x2 = stats.t.rvs(5, size=200, random_state=rng)  # random data, T distribution
xs = np.linspace(x2.min() - 1, x2.max() + 1, 200)
kde3 = stats.gaussian_kde(x2)
kde4 = stats.gaussian_kde(x2, bw_method='silverman')
ax2 = fig.add_subplot(212)
ax2.plot(x2, np.zeros(x2.shape), 'b+', ms=12)  # rug plot
ax2.plot(xs, kde3(xs), 'k-', label="Scott's Rule")
ax2.plot(xs, kde4(xs), 'b-', label="Silverman's Rule")
ax2.plot(xs, stats.t.pdf(xs, 5), 'r--', label="True PDF")
ax2.set_xlabel('x')
ax2.set_ylabel('Density')
plt.show()
| bsd-3-clause |
jdrudolph/scikit-bio | skbio/stats/distance/_bioenv.py | 12 | 9577 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from itertools import combinations
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist
from scipy.stats import spearmanr
from skbio.stats.distance import DistanceMatrix
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def bioenv(distance_matrix, data_frame, columns=None):
    """Find subset of variables maximally correlated with distances.
    Finds subsets of variables whose Euclidean distances (after scaling the
    variables; see Notes section below for details) are maximally
    rank-correlated with the distance matrix. For example, the distance matrix
    might contain distances between communities, and the variables might be
    numeric environmental variables (e.g., pH). Correlation between the
    community distance matrix and Euclidean environmental distance matrix is
    computed using Spearman's rank correlation coefficient (:math:`\\rho`).
    Subsets of environmental variables range in size from 1 to the total number
    of variables (inclusive). For example, if there are 3 variables, the "best"
    variable subsets will be computed for subset sizes 1, 2, and 3.
    The "best" subset is chosen by computing the correlation between the
    community distance matrix and all possible Euclidean environmental distance
    matrices at the given subset size. The combination of environmental
    variables with maximum correlation is chosen as the "best" subset.
    Parameters
    ----------
    distance_matrix : DistanceMatrix
        Distance matrix containing distances between objects (e.g., distances
        between samples of microbial communities).
    data_frame : pandas.DataFrame
        Contains columns of variables (e.g., numeric environmental variables
        such as pH) associated with the objects in `distance_matrix`. Must be
        indexed by the IDs in `distance_matrix` (i.e., the row labels must be
        distance matrix IDs), but the order of IDs between `distance_matrix`
        and `data_frame` need not be the same. All IDs in the distance matrix
        must be present in `data_frame`. Extra IDs in `data_frame` are allowed
        (they are ignored in the calculations).
    columns : iterable of strs, optional
        Column names in `data_frame` to include as variables in the
        calculations. If not provided, defaults to all columns in `data_frame`.
        The values in each column must be numeric or convertible to a numeric
        type.
    Returns
    -------
    pandas.DataFrame
        Data frame containing the "best" subset of variables at each subset
        size, as well as the correlation coefficient of each.
    Raises
    ------
    TypeError
        If invalid input types are provided, or if one or more specified
        columns in `data_frame` are not numeric.
    ValueError
        If column name(s) or `distance_matrix` IDs cannot be found in
        `data_frame`, if there is missing data (``NaN``) in the environmental
        variables, or if the environmental variables cannot be scaled (e.g.,
        due to zero variance).
    See Also
    --------
    scipy.stats.spearmanr
    Notes
    -----
    See [1]_ for the original method reference (originally called BIO-ENV).
    The general algorithm and interface are similar to ``vegan::bioenv``,
    available in R's vegan package [2]_. This method can also be found in
    PRIMER-E [3]_ (originally called BIO-ENV, but is now called BEST).
    .. warning:: This method can take a *long* time to run if a large number of
       variables are specified, as all possible subsets are evaluated at each
       subset size.
    The variables are scaled before computing the Euclidean distance: each
    column is centered and then scaled by its standard deviation.
    References
    ----------
    .. [1] Clarke, K. R & Ainsworth, M. 1993. "A method of linking multivariate
       community structure to environmental variables". Marine Ecology Progress
       Series, 92, 205-219.
    .. [2] http://cran.r-project.org/web/packages/vegan/index.html
    .. [3] http://www.primer-e.com/primer.htm
    Examples
    --------
    Import the functionality we'll use in the following examples:
    >>> import pandas as pd
    >>> from skbio import DistanceMatrix
    >>> from skbio.stats.distance import bioenv
    Load a 4x4 community distance matrix:
    >>> dm = DistanceMatrix([[0.0, 0.5, 0.25, 0.75],
    ...                      [0.5, 0.0, 0.1, 0.42],
    ...                      [0.25, 0.1, 0.0, 0.33],
    ...                      [0.75, 0.42, 0.33, 0.0]],
    ...                     ['A', 'B', 'C', 'D'])
    Load a ``pandas.DataFrame`` with two environmental variables, pH and
    elevation:
    >>> df = pd.DataFrame([[7.0, 400],
    ...                    [8.0, 530],
    ...                    [7.5, 450],
    ...                    [8.5, 810]],
    ...                   index=['A','B','C','D'],
    ...                   columns=['pH', 'Elevation'])
    Note that the data frame is indexed with the same IDs (``'A'``, ``'B'``,
    ``'C'``, and ``'D'``) that are in the distance matrix. This is necessary in
    order to link the environmental variables (metadata) to each of the objects
    in the distance matrix. In this example, the IDs appear in the same order
    in both the distance matrix and data frame, but this is not necessary.
    Find the best subsets of environmental variables that are correlated with
    community distances:
    >>> bioenv(dm, df) # doctest: +NORMALIZE_WHITESPACE
                       size  correlation
    vars
    pH                    1     0.771517
    pH, Elevation         2     0.714286
    We see that in this simple example, pH alone is maximally rank-correlated
    with the community distances (:math:`\\rho=0.771517`).
    """
    # --- Input validation ---
    if not isinstance(distance_matrix, DistanceMatrix):
        raise TypeError("Must provide a DistanceMatrix as input.")
    if not isinstance(data_frame, pd.DataFrame):
        raise TypeError("Must provide a pandas.DataFrame as input.")
    if columns is None:
        columns = data_frame.columns.values.tolist()
    if len(set(columns)) != len(columns):
        raise ValueError("Duplicate column names are not supported.")
    if len(columns) < 1:
        raise ValueError("Must provide at least one column.")
    for column in columns:
        if column not in data_frame:
            raise ValueError("Column '%s' not in data frame." % column)
    # Subset and order the vars data frame to match the IDs in the distance
    # matrix, only keeping the specified columns.
    # NOTE(review): on older pandas, missing IDs yield NaN rows here (caught
    # by the check below); modern pandas raises KeyError instead - confirm
    # the supported pandas version for this error path.
    vars_df = data_frame.loc[distance_matrix.ids, columns]
    if vars_df.isnull().any().any():
        raise ValueError("One or more IDs in the distance matrix are not "
                         "in the data frame, or there is missing data in the "
                         "data frame.")
    try:
        vars_df = vars_df.astype(float)
    except ValueError:
        raise TypeError("All specified columns in the data frame must be "
                        "numeric.")
    # Scale the vars and extract the underlying numpy array from the data
    # frame. We mainly do this for performance as we'll be taking subsets of
    # columns within a tight loop and using a numpy array ends up being ~2x
    # faster.
    vars_array = _scale(vars_df).values
    dm_flat = distance_matrix.condensed_form()
    num_vars = len(columns)
    var_idxs = np.arange(num_vars)
    # For each subset size, store the best combination of variables:
    # (string identifying best vars, subset size, rho)
    # A structured array lets us build the result DataFrame in one shot below.
    max_rhos = np.empty(num_vars, dtype=[('vars', object),
                                         ('size', int),
                                         ('correlation', float)])
    for subset_size in range(1, num_vars + 1):
        max_rho = None
        # Exhaustively evaluate every variable combination of this size.
        for subset_idxs in combinations(var_idxs, subset_size):
            # Compute Euclidean distances using the current subset of
            # variables. pdist returns the distances in condensed form.
            vars_dm_flat = pdist(vars_array[:, subset_idxs],
                                 metric='euclidean')
            rho = spearmanr(dm_flat, vars_dm_flat)[0]
            # If there are ties for the best rho at a given subset size, choose
            # the first one in order to match vegan::bioenv's behavior.
            if max_rho is None or rho > max_rho[0]:
                max_rho = (rho, subset_idxs)
        vars_label = ', '.join([columns[i] for i in max_rho[1]])
        max_rhos[subset_size - 1] = (vars_label, subset_size, max_rho[0])
    return pd.DataFrame.from_records(max_rhos, index='vars')
def _scale(df):
"""Center and scale each column in a data frame.
Each column is centered (by subtracting the mean) and then scaled by its
standard deviation.
"""
# Modified from http://stackoverflow.com/a/18005745
df = df.copy()
df -= df.mean()
df /= df.std()
if df.isnull().any().any():
raise ValueError("Column(s) in the data frame could not be scaled, "
"likely because the column(s) had no variance.")
return df
| bsd-3-clause |
mjgrav2001/scikit-learn | setup.py | 143 | 7364 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# License: 3-clause BSD
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = 'amueller@ais.uni-bonn.de'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
    """Custom ``clean`` command that also removes in-place build artifacts.
    On top of the stock distutils clean behaviour, this deletes the
    ``build/`` directory and any compiled extensions or stale bytecode left
    inside the ``sklearn`` source tree.
    """
    description = "Remove build artifacts from the source tree"
    def run(self):
        Clean.run(self)
        if os.path.exists('build'):
            shutil.rmtree('build')
        for dirpath, dirnames, filenames in os.walk('sklearn'):
            for filename in filenames:
                # Compiled extensions (.so/.pyd/.dll) and bytecode (.pyc).
                # str.endswith accepts a tuple of suffixes, which replaces the
                # previous chain of four endswith() calls.
                if filename.endswith(('.so', '.pyd', '.dll', '.pyc')):
                    os.unlink(os.path.join(dirpath, filename))
            for dirname in dirnames:
                if dirname == '__pycache__':
                    shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the scikit-learn package.
    Removes a stale ``MANIFEST`` (left over from previous sdist runs) so it
    cannot shadow ``MANIFEST.in``, then registers the ``sklearn``
    subpackage, whose own ``setup.py`` files are discovered recursively.
    """
    if os.path.exists('MANIFEST'):
        os.remove('MANIFEST')
    # Imported lazily: numpy may legitimately be absent for help/clean
    # commands (see setup_package below).
    from numpy.distutils.misc_util import Configuration
    config = Configuration(None, parent_package, top_path)
    # Avoid non-useful msg:
    # "Ignoring attempt to set 'name' (from ... "
    config.set_options(ignore_setup_xxx_py=True,
                       assume_default_configuration=True,
                       delegate_options_to_subpackages=True,
                       quiet=True)
    config.add_subpackage('sklearn')
    return config
def is_scipy_installed():
    """Return ``True`` when scipy can be imported, ``False`` otherwise."""
    try:
        import scipy  # noqa
    except ImportError:
        return False
    else:
        return True
def is_numpy_installed():
    """Return ``True`` when numpy can be imported, ``False`` otherwise."""
    try:
        import numpy  # noqa
    except ImportError:
        return False
    else:
        return True
def setup_package():
    """Assemble package metadata and invoke the appropriate ``setup()``.
    For informational commands (help, version, egg_info, clean) plain
    setuptools/distutils is used so the command succeeds even before NumPy
    is installed; for actual builds, NumPy and SciPy are required and
    numpy.distutils drives the compilation via ``configuration``.
    """
    metadata = dict(name=DISTNAME,
                    maintainer=MAINTAINER,
                    maintainer_email=MAINTAINER_EMAIL,
                    description=DESCRIPTION,
                    license=LICENSE,
                    url=URL,
                    version=VERSION,
                    download_url=DOWNLOAD_URL,
                    long_description=LONG_DESCRIPTION,
                    classifiers=['Intended Audience :: Science/Research',
                                 'Intended Audience :: Developers',
                                 'License :: OSI Approved',
                                 'Programming Language :: C',
                                 'Programming Language :: Python',
                                 'Topic :: Software Development',
                                 'Topic :: Scientific/Engineering',
                                 'Operating System :: Microsoft :: Windows',
                                 'Operating System :: POSIX',
                                 'Operating System :: Unix',
                                 'Operating System :: MacOS',
                                 'Programming Language :: Python :: 2',
                                 'Programming Language :: Python :: 2.6',
                                 'Programming Language :: Python :: 2.7',
                                 'Programming Language :: Python :: 3',
                                 'Programming Language :: Python :: 3.3',
                                 'Programming Language :: Python :: 3.4',
                                 ],
                    cmdclass=cmdclass,
                    **extra_setuptools_args)
    if (len(sys.argv) >= 2
            and ('--help' in sys.argv[1:] or sys.argv[1]
                 in ('--help-commands', 'egg_info', '--version', 'clean'))):
        # For these actions, NumPy is not required.
        #
        # They are required to succeed without Numpy for example when
        # pip is used to install Scikit-learn when Numpy is not yet present in
        # the system.
        try:
            from setuptools import setup
        except ImportError:
            from distutils.core import setup
        metadata['version'] = VERSION
    else:
        # Full build path: fail early with an actionable message if the
        # compiled-extension prerequisites are missing.
        if is_numpy_installed() is False:
            raise ImportError("Numerical Python (NumPy) is not installed.\n"
                              "scikit-learn requires NumPy.\n"
                              "Installation instructions are available on scikit-learn website: "
                              "http://scikit-learn.org/stable/install.html\n")
        if is_scipy_installed() is False:
            raise ImportError("Scientific Python (SciPy) is not installed.\n"
                              "scikit-learn requires SciPy.\n"
                              "Installation instructions are available on scikit-learn website: "
                              "http://scikit-learn.org/stable/install.html\n")
        from numpy.distutils.core import setup
        metadata['configuration'] = configuration
    setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
jorge2703/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 155 | 8058 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
    """fit_transform and fit().transform() agree (up to sign) across solvers
    and kernels, including a user-supplied callable kernel; inverse_transform
    restores the input shape when it is fitted."""
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    X_pred = rng.random_sample((2, 4))
    def histogram(x, y, **kwargs):
        # Histogram kernel implemented as a callable.
        assert_equal(kwargs, {})  # no kernel_params that we didn't ask for
        return np.minimum(x, y).sum()
    for eigen_solver in ("auto", "dense", "arpack"):
        for kernel in ("linear", "rbf", "poly", histogram):
            # histogram kernel produces singular matrix inside linalg.solve
            # XXX use a least-squares approximation?
            inv = not callable(kernel)
            # transform fit data
            kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
                             fit_inverse_transform=inv)
            X_fit_transformed = kpca.fit_transform(X_fit)
            X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
            # Compare absolute values: component signs are arbitrary.
            assert_array_almost_equal(np.abs(X_fit_transformed),
                                      np.abs(X_fit_transformed2))
            # non-regression test: previously, gamma would be 0 by default,
            # forcing all eigenvalues to 0 under the poly kernel
            assert_not_equal(X_fit_transformed, [])
            # transform new data
            X_pred_transformed = kpca.transform(X_pred)
            assert_equal(X_pred_transformed.shape[1],
                         X_fit_transformed.shape[1])
            # inverse transform
            if inv:
                X_pred2 = kpca.inverse_transform(X_pred_transformed)
                assert_equal(X_pred2.shape, X_pred.shape)
def test_invalid_parameters():
    """fit_inverse_transform is incompatible with a precomputed kernel."""
    bad_kwargs = dict(fit_inverse_transform=True, kernel='precomputed')
    assert_raises(ValueError, KernelPCA, 10, **bad_kwargs)
def test_kernel_pca_sparse():
    """KernelPCA accepts sparse (CSR) input: fit_transform and
    fit().transform() agree up to sign, for several solvers and kernels.
    inverse_transform is not exercised (fit_inverse_transform=False)."""
    rng = np.random.RandomState(0)
    X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
    X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
    for eigen_solver in ("auto", "arpack"):
        for kernel in ("linear", "rbf", "poly"):
            # transform fit data
            kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
                             fit_inverse_transform=False)
            X_fit_transformed = kpca.fit_transform(X_fit)
            X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
            assert_array_almost_equal(np.abs(X_fit_transformed),
                                      np.abs(X_fit_transformed2))
            # transform new data
            X_pred_transformed = kpca.transform(X_pred)
            assert_equal(X_pred_transformed.shape[1],
                         X_fit_transformed.shape[1])
            # inverse transform
            # X_pred2 = kpca.inverse_transform(X_pred_transformed)
            # assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
    """With a linear kernel, KernelPCA matches plain PCA up to sign."""
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    X_pred = rng.random_sample((2, 4))
    # for a linear kernel, kernel PCA should find the same projection as PCA
    # modulo the sign (direction)
    # fit only the first four components: fifth is near zero eigenvalue, so
    # can be trimmed due to roundoff error
    assert_array_almost_equal(
        np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
        np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
    """The transformed output has exactly n_components columns."""
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    X_pred = rng.random_sample((2, 4))
    for eigen_solver in ("dense", "arpack"):
        for n_components in [1, 2, 4]:
            model = KernelPCA(n_components=n_components,
                              eigen_solver=eigen_solver)
            transformed = model.fit(X_fit).transform(X_pred)
            assert_equal(transformed.shape, (2, n_components))
def test_remove_zero_eig():
    """Near-zero eigenvalues are dropped by default and when
    remove_zero_eig=True, but kept when n_components is fixed."""
    # Nearly-identical rows: all eigenvalues are (numerically) zero.
    X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
    # n_components=None (default) => remove_zero_eig is True
    kpca = KernelPCA()
    Xt = kpca.fit_transform(X)
    assert_equal(Xt.shape, (3, 0))
    kpca = KernelPCA(n_components=2)
    Xt = kpca.fit_transform(X)
    assert_equal(Xt.shape, (3, 2))
    kpca = KernelPCA(n_components=2, remove_zero_eig=True)
    Xt = kpca.fit_transform(X)
    assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
    """A precomputed Gram matrix (X X^T) gives the same projections (up to
    sign) as letting KernelPCA compute the default kernel itself."""
    rng = np.random.RandomState(0)
    X_fit = rng.random_sample((5, 4))
    X_pred = rng.random_sample((2, 4))
    for eigen_solver in ("dense", "arpack"):
        X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
            fit(X_fit).transform(X_pred)
        X_kpca2 = KernelPCA(
            4, eigen_solver=eigen_solver, kernel='precomputed').fit(
                np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
        X_kpca_train = KernelPCA(
            4, eigen_solver=eigen_solver,
            kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
        X_kpca_train2 = KernelPCA(
            4, eigen_solver=eigen_solver, kernel='precomputed').fit(
                np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
        assert_array_almost_equal(np.abs(X_kpca),
                                  np.abs(X_kpca2))
        assert_array_almost_equal(np.abs(X_kpca_train),
                                  np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
    """Fitting with an unknown kernel name must raise ValueError."""
    data = np.random.RandomState(0).random_sample((2, 4))
    model = KernelPCA(kernel="tototiti")
    assert_raises(ValueError, model.fit, data)
def test_gridsearch_pipeline():
    """Grid-searching kernel_pca__gamma through a Pipeline finds a gamma
    that makes the circles data linearly separable (best score 1)."""
    # Test if we can do a grid-search to find parameters to separate
    # circles with a perceptron model.
    X, y = make_circles(n_samples=400, factor=.3, noise=.05,
                        random_state=0)
    kpca = KernelPCA(kernel="rbf", n_components=2)
    pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
    param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
    grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
    grid_search.fit(X, y)
    assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
    """Same as test_gridsearch_pipeline, but the RBF kernel matrix is
    precomputed and the Perceptron's n_iter is grid-searched instead."""
    # Test if we can do a grid-search to find parameters to separate
    # circles with a perceptron model using a precomputed kernel.
    X, y = make_circles(n_samples=400, factor=.3, noise=.05,
                        random_state=0)
    kpca = KernelPCA(kernel="precomputed", n_components=2)
    pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
    param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
    grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
    X_kernel = rbf_kernel(X, gamma=2.)
    grid_search.fit(X_kernel, y)
    assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
    """RBF kernel PCA makes the (not linearly separable) nested-circles
    dataset perfectly separable in its first two components."""
    # Test the linear separability of the first 2D KPCA transform
    X, y = make_circles(n_samples=400, factor=.3, noise=.05,
                        random_state=0)
    # 2D nested circles are not linearly separable
    train_score = Perceptron().fit(X, y).score(X, y)
    assert_less(train_score, 0.8)
    # Project the circles data into the first 2 components of a RBF Kernel
    # PCA model.
    # Note that the gamma value is data dependent. If this test breaks
    # and the gamma value has to be updated, the Kernel PCA example will
    # have to be updated too.
    kpca = KernelPCA(kernel="rbf", n_components=2,
                     fit_inverse_transform=True, gamma=2.)
    X_kpca = kpca.fit_transform(X)
    # The data is perfectly linearly separable in that space
    train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
    assert_equal(train_score, 1.0)
| bsd-3-clause |
tafdata/cardinal | app/organizer/jyoriku.py | 1 | 3647 | import numpy as np
import mojimoji
import pandas as pd
from django.db.models.aggregates import Count
from django.db.models import Max
from django.core.exceptions import ObjectDoesNotExist
# Models
from competitions.models import Comp, Event, EventStatus, GR as GRecord
from organizer.models import Entry
from organizer.templatetags.organizer_tags import format_mark
from organizer.templatetags.organizer_filters import zen_to_han, sex_to_ja, race_section_to_ja
"""
上陸連携 ツール
"""
class JyorikuTool:
    """Export start lists for one competition.
    Wraps a ``Comp`` object and converts its ``Entry`` records into pandas
    DataFrames: a Cardinal-system backup layout, or the layout expected by
    the external "Jyoriku" system.
    """
    def __init__(self, comp):
        self.comp = comp  # Comp object whose entries are exported
    """
    Cardinal System: BackUp用CSV
    """
    def start_list_cardinal(self):
        """Return one DataFrame row per Entry of ``self.comp``."""
        self.columns = ['section', 'sex', 'round', 'group', 'order_lane', 'event', 'bib', 'name', 'kana', 'grade', 'club', 'jaaf_branch', 'PB', 'entry_status']
        # Fetch Entry objects in the export order.
        entries = Entry.objects.filter(event_status__comp=self.comp).order_by('event_status__event__name', '-event_status__section', 'event_status__event__sex', 'group', 'order_lane')
        rows = []
        for entry in entries:
            # Sex code -> Japanese label
            if entry.sex == 'M':
                sex = "男"
            elif entry.sex == 'W':
                sex = "女"
            else:
                sex = ""
            # Optional fields fall back to the empty string.
            grade = entry.grade if entry.grade else ""
            club = entry.club if entry.club else ""
            pb = entry.personal_best if entry.personal_best else ""
            rows.append(pd.Series([
                entry.event_status.section,
                sex,
                entry.event_status.match_round,
                entry.group,
                entry.order_lane,
                entry.event_status.event.name,
                entry.bib,
                entry.name_family + "\u3000" + entry.name_first,
                mojimoji.zen_to_han(entry.kana_family + "\u3000" + entry.kana_first),
                grade,
                club,
                entry.jaaf_branch,
                pb,
                entry.entry_status,
            ], index=self.columns))
        # Build the frame in one pass: per-row DataFrame.append was removed
        # in pandas 2.0 (and was O(n^2) anyway).
        df = pd.DataFrame(rows, columns=self.columns)
        # d-type conversion
        df[["group", "order_lane"]] = df[["group", "order_lane"]].astype(int)
        return df
    """
    上陸用スタートリスト
    """
    def start_list_jyoriku(self):
        """Return the start list reshaped into the Jyoriku column layout."""
        df = self.start_list_cardinal()
        # Drop rows with a negative group number.
        df = df.drop(df[df['group'] < 0].index)
        print(df.head(20))
        # Convert to the Jyoriku layout.
        ## Blank filler columns.
        df["space1"] = ["" for i in range(len(df))]
        df["space2"] = ["" for i in range(len(df))]
        ## Section: 'VS' becomes '対校'; then prefix with the sex label.
        ## (.ix was removed from pandas; .loc is the supported replacement.)
        for i in df[df["section"] == 'VS'].index:
            df.loc[i, "section"] = '対校'
        df["section"] = df["sex"] + df["section"]
        ## Bib numbers: strip '-' and keep at most five characters.
        for i in df[df['bib'].str.find('-') >= 0].index:
            df.loc[i, 'bib'] = str(df.loc[i, 'bib']).replace('-', '')[:5]
        ## Club name in kana (left blank).
        df["club_kana"] = ["" for i in range(len(df))]
        # Select the required columns in the required order.
        df = df.loc[:, ['section', 'space1', 'event', 'group', 'order_lane', 'space2', 'bib', 'name', 'kana', 'grade', 'club', 'club_kana', 'jaaf_branch']]
        return df
| mit |
jzt5132/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points: two Gaussian blobs shifted to (-2,-2)/(2,2)
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane: w0*x + w1*y + b = 0  =>  y = -w0/w1*x - b/w1
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
# support vectors are drawn as larger hollow circles
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
            s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
timqian/sms-tools | lectures/8-Sound-transformations/plots-code/hps-morph-total.py | 24 | 3956 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/transformations/'))
import hpsModel as HPS
import hpsTransformations as HPST
import harmonicTransformations as HT
import utilFunctions as UF
# NOTE(review): HT is imported but never used in this script.
# HPS analysis parameters for sound 1 (violin): window, FFT size, threshold,
# harmonic count/f0 range, and stochastic decimation factor.
inputFile1='../../../sounds/violin-B3.wav'
window1='blackman'
M1=1001
N1=1024
t1=-100
minSineDur1=0.05
nH=40
minf01=200
maxf01=300
f0et1=10
harmDevSlope1=0.01
stocf=0.2
# HPS analysis parameters for sound 2 (soprano).
inputFile2='../../../sounds/soprano-E4.wav'
window2='blackman'
M2=901
N2=1024
t2=-100
minSineDur2=0.05
minf02=250
maxf02=500
f0et2=10
harmDevSlope2=0.01
# Synthesis FFT size and hop size.
Ns = 512
H = 128
(fs1, x1) = UF.wavread(inputFile1)
(fs2, x2) = UF.wavread(inputFile2)
w1 = get_window(window1, M1)
w2 = get_window(window2, M2)
# Harmonic + stochastic analysis of both sounds.
hfreq1, hmag1, hphase1, stocEnv1 = HPS.hpsModelAnal(x1, fs1, w1, N1, H, t1, nH, minf01, maxf01, f0et1, harmDevSlope1, minSineDur1, Ns, stocf)
hfreq2, hmag2, hphase2, stocEnv2 = HPS.hpsModelAnal(x2, fs2, w2, N2, H, t2, nH, minf02, maxf02, f0et2, harmDevSlope2, minSineDur2, Ns, stocf)
# Morph interpolation envelopes (0 = sound 1, 1 = sound 2) for harmonic
# frequencies, harmonic magnitudes, and the stochastic envelope.
hfreqIntp = np.array([0, 0, .1, 0, .9, 1, 1, 1])
hmagIntp = np.array([0, 0, .1, 0, .9, 1, 1, 1])
stocIntp = np.array([0, 0, .1, 0, .9, 1, 1, 1])
yhfreq, yhmag, ystocEnv = HPST.hpsMorph(hfreq1, hmag1, stocEnv1, hfreq2, hmag2, stocEnv2, hfreqIntp, hmagIntp, stocIntp)
# Resynthesize the morph (no phases) and write it to disk.
y, yh, yst = HPS.hpsModelSynth(yhfreq, yhmag, np.array([]), ystocEnv, Ns, H, fs1)
UF.wavwrite(y,fs1, 'hps-morph-total.wav')
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 15000.0
# plot spectrogram stochastic component of sound 1
plt.subplot(3,1,1)
numFrames = int(stocEnv1[:,0].size)
sizeEnv = int(stocEnv1[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs1)
binFreq = (.5*fs1)*np.arange(sizeEnv*maxplotfreq/(.5*fs1))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(stocEnv1[:,:sizeEnv*maxplotfreq/(.5*fs1)+1]))
plt.autoscale(tight=True)
# plot harmonic on top of stochastic spectrogram of sound 1
harms = hfreq1*np.less(hfreq1,maxplotfreq)
harms[harms==0] = np.nan  # NaNs break the line so inactive harmonics vanish
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs1)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.autoscale(tight=True)
plt.title('x1 (violin-B3.wav): harmonics + stochastic spectrogram')
# plot spectrogram stochastic component of sound 2
plt.subplot(3,1,2)
numFrames = int(stocEnv2[:,0].size)
sizeEnv = int(stocEnv2[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs2)
binFreq = (.5*fs2)*np.arange(sizeEnv*maxplotfreq/(.5*fs2))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(stocEnv2[:,:sizeEnv*maxplotfreq/(.5*fs2)+1]))
plt.autoscale(tight=True)
# plot harmonic on top of stochastic spectrogram of sound 2
harms = hfreq2*np.less(hfreq2,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs2)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.autoscale(tight=True)
plt.title('x2 (soprano-E4.wav): harmonics + stochastic spectrogram')
# plot spectrogram of transformed stochastic compoment
plt.subplot(3,1,3)
numFrames = int(ystocEnv[:,0].size)
sizeEnv = int(ystocEnv[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs1)
binFreq = (.5*fs1)*np.arange(sizeEnv*maxplotfreq/(.5*fs1))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(ystocEnv[:,:sizeEnv*maxplotfreq/(.5*fs1)+1]))
plt.autoscale(tight=True)
# plot transformed harmonic on top of stochastic spectrogram
harms = yhfreq*np.less(yhfreq,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs1)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.autoscale(tight=True)
plt.title('y: harmonics + stochastic spectrogram')
plt.tight_layout()
plt.savefig('hps-morph-total.png')
plt.show()
| agpl-3.0 |
SuperDARNCanada/placeholderOS | tools/testing_utils/filter_testing/filter_rawrf.py | 2 | 1174 | #
# Filter written rawrf data using remai filter.
#
# Then beamform and produce output_samples_iq
import json
import matplotlib
from scipy.fftpack import fft
import math
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import sys
import collections
import os
import deepdish
import argparse
import random
import traceback
sys.path.append(os.environ["BOREALISPATH"])
borealis_path = os.environ['BOREALISPATH']
config_file = borealis_path + '/config.ini'
def testing_parser():
    """Build the command-line argument parser for this script.
    :returns: parser, the argument parser for the testing script.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "filename",
        help="The name of the rawrf file to filter and create output_samples from.")
    return arg_parser
def main():
    """Parse the rawrf filename from the command line and load its contents."""
    parser = testing_parser()
    args = parser.parse_args()
    rawrf_file = args.filename
    # Second-to-last dotted component identifies the data type, e.g.
    # '<timestamp>.rawrf.hdf5' -> 'rawrf'.  NOTE(review): a bare 'name.rawrf'
    # filename would fail this check - confirm the expected naming scheme.
    data_file_ext = rawrf_file.split('.')[-2]
    if data_file_ext != 'rawrf':
        raise Exception('Please provide a rawrf file.')
    data = deepdish.io.load(rawrf_file)
    # order the keys and get the first record.
    # NOTE(review): the filtering/beamforming described in the file header is
    # not implemented yet - 'data' is loaded but never used.
if __name__ == '__main__':
main() | gpl-3.0 |
wazeerzulfikar/scikit-learn | examples/model_selection/plot_roc.py | 102 | 5056 | """
=======================================
Receiver Operating Characteristic (ROC)
=======================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
Multiclass settings
-------------------
ROC curves are typically used in binary classification to study the output of
a classifier. In order to extend ROC curve and ROC area to multi-class
or multi-label classification, it is necessary to binarize the output. One ROC
curve can be drawn per label, but one can also draw a ROC curve by considering
each element of the label indicator matrix as a binary prediction
(micro-averaging).
Another evaluation measure for multi-class classification is
macro-averaging, which gives equal weight to the classification of each
label.
.. note::
See also :func:`sklearn.metrics.roc_auc_score`,
:ref:`sphx_glr_auto_examples_model_selection_plot_roc_crossval.py`.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle

from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier

# Import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target

# Binarize the output (one indicator column per class) so per-class ROC
# curves can be computed in a one-vs-rest fashion
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]

# Add noisy features to make the problem harder
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]

# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                    random_state=0)

# Learn to predict each class against the other
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
                                 random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)

# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])

# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])

##############################################################################
# Plot of a ROC curve for a specific class
plt.figure()
lw = 2
plt.plot(fpr[2], tpr[2], color='darkorange',
         lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()

##############################################################################
# Plot ROC curves for the multiclass problem

# Compute macro-average ROC curve and ROC area

# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))

# Then interpolate all ROC curves at this points
# BUGFIX: `scipy.interp` was only a deprecated alias of `numpy.interp`
# (removed from modern SciPy releases); call numpy directly instead.
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
    mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])

# Finally average it and compute AUC
mean_tpr /= n_classes

fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])

# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
         label='micro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["micro"]),
         color='deeppink', linestyle=':', linewidth=4)

plt.plot(fpr["macro"], tpr["macro"],
         label='macro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["macro"]),
         color='navy', linestyle=':', linewidth=4)

colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
    plt.plot(fpr[i], tpr[i], color=color, lw=lw,
             label='ROC curve of class {0} (area = {1:0.2f})'
             ''.format(i, roc_auc[i]))

plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
FordyceLab/AcqPack | acqpack/autosampler.py | 1 | 5202 | import numpy as np
import pandas as pd
import utils as ut
class Autosampler:
    """
    A high-level wrapper that coordinates XY and Z axes to create an autosampler.
    Incorporates a deck.

    Coordinate frames are stored as columns of a 2-row DataFrame (index
    'trans', 'position_table'); each frame carries a 4x4 xyzw affine
    transform to hardware coordinates and an optional position table.
    NOTE(review): this module uses Python 2 syntax (print statement,
    raw_input) -- it will not run under Python 3 as-is.
    """
    def __init__(self, z, xy):
        """
        :param z: Z-axis controller; must expose home()/goto()/where()/exit()
        :param xy: XY-stage controller; must expose home_xy()/goto_xy()/where_xy()/exit()
        """
        # TODO: ditch frames; just have position_tables, each of which stores should transforms be a property of the position table?
        self.frames = pd.DataFrame(index=['trans', 'position_table'])
        self.add_frame('hardware')  # identity transform: hardware frame maps to itself
        self.zh_travel = 0  # default hardware travel height; 0 means "home Z before XY moves"
        self.Z = z  # must be initialized first!!! (avoid collisions)
        self.XY = xy

    def add_frame(self, name, trans=np.eye(4,4), position_table=None):
        """
        Adds coordinate frame. Frame requires affine transform to hardware coordinates; position_table optional.

        :param name: (str) the name to be given to the frame (e.g. hardware)
        :param trans: (np.ndarray <- str) xyzw affine transform matrix; if string, tries to load delimited file
        :param position_table: (None | pd.DataFrame <- str) position_table; if string, tries to load delimited file
        """
        # string arguments are treated as file paths and loaded from disk
        if isinstance(trans, str):
            trans = ut.read_delim_pd(trans).select_dtypes(['number']).values
        if isinstance(position_table, str):
            position_table = ut.read_delim_pd(position_table)

        assert(isinstance(trans, np.ndarray))  # trans: numpy array of shape (4,4)
        assert(trans.shape==(4,4))  # check size
        assert(np.array_equal(np.linalg.norm(trans[:-1,:-1]),
                              np.linalg.norm(np.eye(3,3))))  # Frob norm rotation invariant (no scaling)
        assert(trans[-1,-1] != 0)  # cannot be singular matrix

        # position_table: DataFrame with x,y,z OR None
        if isinstance(position_table, pd.DataFrame):
            assert(set(list('xyz')).issubset(position_table.columns))  # contains 'x','y','z' columns
        else:
            assert(position_table is None)

        # NOTE(review): attribute-style assignment into a DataFrame column
        # (a Series) relies on the 'trans'/'position_table' index labels;
        # pandas may treat this as chained assignment -- confirm the values
        # actually stick on the pandas version in use.
        self.frames[name] = None
        self.frames[name].trans = trans
        self.frames[name].position_table = position_table

    # ref_frame gives reference frame; its offset is ignored
    def add_plate(self, name, filepath, ref_frame='deck'):
        """
        TODO: UNDER DEVELOPMENT

        Registers a plate as a new frame: takes the current position as the
        plate origin, copies ref_frame's rotation, and loads the plate's
        position table interactively from disk.
        """
        # move to bottom of first well, i.e. plate origin (using GUI)
        # store offset (translation)
        offset = self.where()  # hardware_xyz - plate_xyz (0,0,0)

        # determine transform
        trans = self.frames[ref_frame].copy()
        trans[-1] = offset

        # add position_table - either:
        # A) add from file
        # NOTE(review): the 'filepath' parameter is ignored -- it is
        # immediately overwritten by the raw_input() prompt below.
        while True:
            filepath = raw_input('Enter filepath to plate position_table:')
            try:
                position_table = ut.read_delim_pd(filepath)
                break
            except IOError:
                print 'No file:', filepath

        # add frame
        self.add_frame(name, trans, position_table)

    def where(self, frame=None):
        """
        Retrieves current hardware (x,y,z). If frame is specified, transforms hardware coordinates into
        frame's coordinates.

        :param frame: (str) name of frame to specify transform (optional)
        :return: (tup) current position
        """
        where = self.XY.where_xy() + self.Z.where()  # tuple concatenation: (x, y) + (z,)
        if frame is not None:
            # append w=1 and apply the inverse affine to map hardware -> frame
            where += (1,)
            x, y, z, _ = tuple(np.dot(where, np.linalg.inv(self.frames[frame].trans)))
            where = x, y, z
        return where

    def home(self):
        """
        Homes Z axis, then XY axes.
        """
        # Z first so the probe is raised before the stage moves (avoid collisions)
        self.Z.home()
        self.XY.home_xy()

    # TODO: if no columns specified, transform provided XYZ to hardware coordinates.
    # TODO: default frame?
    def goto(self, frame, lookup_columns, lookup_values, zh_travel=0):
        """
        Finds lookup_values in lookup_columns of frame's position_list; retrieves corresponding X,Y,Z.
        Transforms X,Y,Z to hardware X,Y,Z by frame's transform.
        Moves to hardware X,Y,Z, taking into account zh_travel.

        :param frame: (str) frame that specifies position_list and transform
        :param lookup_columns: (str | list) column(s) to search in position_table
        :param lookup_values: (val | list) values(s) to find in lookup_columns
        :param zh_travel: (float) hardware height at which to travel
        """
        trans, position_table = self.frames[frame]
        if lookup_columns=='xyz':
            # values are already frame-space coordinates; just homogenize
            lookup_values = tuple(lookup_values) + (1,)
            xh, yh, zh, _ = np.dot(lookup_values, trans)
        else:
            xyz = tuple(ut.lookup(position_table, lookup_columns, lookup_values)[['x', 'y', 'z']].iloc[0])
            xyzw = xyz + (1,)  # concatenate for translation
            xh, yh, zh, _ = np.dot(xyzw, trans)  # get hardware coordinates

        # raise Z to a safe travel height (explicit arg beats instance default;
        # otherwise fully home) before the lateral move, then descend
        if zh_travel>0:
            self.Z.goto(zh_travel)
        elif self.zh_travel>0:
            self.Z.goto(self.zh_travel)
        else:
            self.Z.home()
        self.XY.goto_xy(xh, yh)
        self.Z.goto(zh)

    def exit(self):
        """
        Send exit command to XY and Z
        """
        self.XY.exit()
        self.Z.exit()
| mit |
mantidproject/mantid | scripts/SCD_Reduction/SCDCalibratePanels2PanelDiagnostics.py | 3 | 15813 | #!/usr/bin/env python
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
"""
This module provides a new set of visualization tools to analyze the calibration
results from SCDCalibratePanels2 (on a per bank base).
"""
import os
import logging
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.colors import LogNorm, SymLogNorm
from mpl_toolkits.axes_grid1.inset_locator import InsetPosition
from typing import List, Tuple, Dict, Union
from mantid.dataobjects import PeaksWorkspace
from mantid.simpleapi import mtd
def bank_boxplot(
    pltdata_eng: Dict[str, np.ndarray],
    pltdata_cal: Dict[str, np.ndarray],
    figsize: np.ndarray,
    figname: str = None,
    saveto: str = ".",
    use_logscale: bool = True,
    show_plots: bool = True,
) -> None:
    """
    Generate scatter plots of chi2 computed based on the
    given calibrated instrument.
    Both Chi2(q) and Chi2(d) are used to evaluate the Chi2
    results on each bank.

    The figure has two stacked axes (Chi2(Q) on top, Chi2(d) below); each
    shows one notched box per bank, engineering data in dotted blue and
    calibrated data in solid red.

    @param pltdata_eng: plot data collected at engineering position.
    @param pltdata_cal: plot data collected at calibrated position.
    @param figsize: figure size, e.g. (12, 4)
    @param figname: figure name.
    @param saveto: directory to save the output
    @param use_logscale: use log scale to plot Chi2, default is True.
    @param show_plots: show plots interactively
    """
    # prep data: per-bank chi2 lists and the numeric bank ids used as x positions
    chi2qs_eng, chi2ds_eng, _, bn_num_eng = get_boxplot_data(pltdata_eng)
    chi2qs_cal, chi2ds_cal, _, bn_num_cal = get_boxplot_data(pltdata_cal)
    #
    fig = plt.figure(figsize=figsize)
    # -------
    # qsample
    # -------
    ax_qs = fig.add_subplot(2, 1, 1)
    #
    box_eng = ax_qs.boxplot(chi2qs_eng, positions=bn_num_eng, notch=True, showfliers=False)
    # boxplot() returns a dict of artist lists (boxes, whiskers, caps, ...);
    # recolor every artist group to mark the engineering series
    for _, lines in box_eng.items():
        for line in lines:
            line.set_color('b')
            line.set_linestyle(":")
    #
    box_cal = ax_qs.boxplot(chi2qs_cal, positions=bn_num_cal, notch=True, showfliers=False)
    for _, lines in box_cal.items():
        for line in lines:
            line.set_color('r')
    #
    ax_qs.set_ylabel(r'$\chi^2(Q)$')
    ax_qs.set_title(r'Goodness of fit')
    if use_logscale:
        ax_qs.set_yscale("log")
    ax_qs.legend(
        [box_eng["boxes"][0], box_cal["boxes"][0]],
        ["engineering", "calibration"],
        loc='lower right',
    )
    # --------
    # dspacing
    # --------
    ax_ds = fig.add_subplot(2, 1, 2)
    box_eng = ax_ds.boxplot(chi2ds_eng, positions=bn_num_eng, notch=True, showfliers=False)
    for _, lines in box_eng.items():
        for line in lines:
            line.set_color('b')
            line.set_linestyle(":")
    box_cal = ax_ds.boxplot(chi2ds_cal, positions=bn_num_cal, notch=True, showfliers=False)
    for _, lines in box_cal.items():
        for line in lines:
            line.set_color('r')
    ax_ds.set_xlabel(r'[bank no.]')
    ax_ds.set_ylabel(r'$\chi^2(d)$')
    if use_logscale:
        ax_ds.set_yscale("log")
    ax_ds.legend(
        [box_eng["boxes"][0], box_cal["boxes"][0]],
        ["engineering", "calibration"],
        loc='lower right',
    )
    # save figure
    # NOTE: save first, then display to avoid weird corruption of written figure
    fig.savefig(os.path.join(saveto, figname))
    # display
    if show_plots:
        fig.show()
    # notify users
    logging.info(f"Figure {figname} is saved to {saveto}.")
def get_boxplot_data(pltdata: Dict[str, np.ndarray]) -> Tuple[list, list, np.ndarray, np.ndarray]:
    """
    Helper function to organize the plot data for box plot.

    Groups the per-peak chi2 arrays by bank so each bank gets one box in
    the boxplot.

    @param pltdata: input dict containing plot data
    @return chi2qs: list of per-bank chi2(Q) arrays
            chi2ds: list of per-bank chi2(d) arrays
            bn_str: np.ndarray (1, N_bank) sorted unique bank names
            bn_num: np.ndarray (1, N_bank) numeric bank ids
    """
    bn_str = np.unique(pltdata["banknames"])
    # "bankNN" -> NN, used as x positions in the boxplot
    bn_num = np.array([int(name.replace("bank", "")) for name in bn_str])
    chi2qs = []
    chi2ds = []
    for name in bn_str:
        mask = np.where(pltdata["banknames"] == name)
        chi2qs.append(pltdata["chi2_qsample"][mask])
        chi2ds.append(pltdata["chi2_dspacing"][mask])
    return chi2qs, chi2ds, bn_str, bn_num
def bank_overlay(
    pltdata_eng: Dict[str, np.ndarray],
    pltdata_cal: Dict[str, np.ndarray],
    figsize: np.ndarray,
    generate_report: bool = True,
    figname: str = None,
    saveto: str = ".",
    use_logscale: bool = True,
    show_plots: bool = True,
) -> None:
    """
    Plot the Chi2_QSample directly on top of bank to visualize how calibration is affecting
    different regions of the bank.

    One figure per bank: engineering chi2(Q), calibrated chi2(Q), and their
    difference, scattered over the bank's (col, row) detector grid. Either
    all figures go into a single PDF report or one image file per bank.

    @param pltdata_eng: plot data collected at engineering position.
    @param pltdata_cal: plot data collected at calibrated position.
    @param figsize: figure size, e.g. (14, 10)
    @param generate_report: combine all figures into one PDF file
    @param figname: figure name.
    @param saveto: directory to save the output
    @param use_logscale: use log scale to plot Chi2, default is True.
    @param show_plots: show plots interactively
    """
    # prep
    if generate_report:
        pp = PdfPages(os.path.join(saveto, figname))
    else:
        # per-bank files: bank name is prepended via figname.format(bn) below
        figname = os.path.join(saveto, "{}_" + figname)
    # compute the delta
    pltdata_delta = calc_overlay_delta(
        pltdata_eng=pltdata_eng,
        pltdata_cal=pltdata_cal,
    )
    # get per bank data
    bn_str = np.unique(pltdata_eng["banknames"])
    for bn in bn_str:
        # engineering
        chi2qs_eng = pltdata_eng["chi2_qsample"][np.where(pltdata_eng["banknames"] == bn)]
        row_eng = pltdata_eng["rows"][np.where(pltdata_eng["banknames"] == bn)]
        col_eng = pltdata_eng["cols"][np.where(pltdata_eng["banknames"] == bn)]
        # calibration
        chi2qs_cal = pltdata_cal["chi2_qsample"][np.where(pltdata_cal["banknames"] == bn)]
        row_cal = pltdata_cal["rows"][np.where(pltdata_cal["banknames"] == bn)]
        col_cal = pltdata_cal["cols"][np.where(pltdata_cal["banknames"] == bn)]
        # shared color range so eng/cal panels are directly comparable
        chi2qs_min = min(chi2qs_eng.min(), chi2qs_cal.min())
        chi2qs_max = max(chi2qs_eng.max(), chi2qs_cal.max())
        # delta
        chi2qs_delta = pltdata_delta["chi2_qsample"][np.where(pltdata_delta["banknames"] == bn)]
        row_delta = pltdata_delta["rows"][np.where(pltdata_delta["banknames"] == bn)]
        col_delta = pltdata_delta["cols"][np.where(pltdata_delta["banknames"] == bn)]
        # symmetric range (widened 10x) so the diverging colormap is centered at 0
        delta_range = max(abs(chi2qs_delta.min()), abs(chi2qs_delta.max())) * 10
        # plotting: 5 columns = eng | cal | colorbar | delta | delta colorbar
        fig, (ax_eng, ax_cal, cax, ax_delta, cax_delta) = plt.subplots(
            ncols=5,
            figsize=figsize,
            gridspec_kw={"width_ratios": [1, 1, 0.05, 1, 0.05]},
        )
        fig.suptitle(bn)
        ax_eng.set_title(r"$\chi^2(Q)_{eng}$")
        ax_cal.set_title(r"$\chi^2(Q)_{cal}$")
        ax_delta.set_title(r"$\chi^2(Q)_{cal} - \chi^2(Q)_{eng}$")
        fig.subplots_adjust(wspace=0.3)
        if use_logscale:
            # SymLogNorm handles the delta panel, which spans negative values
            view_eng = ax_eng.scatter(col_eng, row_eng, c=chi2qs_eng, vmin=chi2qs_min, vmax=chi2qs_max, norm=LogNorm())
            _ = ax_cal.scatter(col_cal, row_cal, c=chi2qs_cal, vmin=chi2qs_min, vmax=chi2qs_max, norm=LogNorm())
            view_delta = ax_delta.scatter(col_delta,
                                          row_delta,
                                          c=chi2qs_delta,
                                          vmin=-delta_range,
                                          vmax=delta_range,
                                          norm=SymLogNorm(linthresh=abs(chi2qs_delta.min()) / 10),
                                          cmap="bwr")
        else:
            view_eng = ax_eng.scatter(col_eng, row_eng, c=chi2qs_eng, vmin=chi2qs_min, vmax=chi2qs_max)
            _ = ax_cal.scatter(col_cal, row_cal, c=chi2qs_cal, vmin=chi2qs_min, vmax=chi2qs_max)
            view_delta = ax_delta.scatter(col_delta, row_delta, c=chi2qs_delta, vmin=-delta_range, vmax=delta_range, cmap="bwr")
        # colorbar 1 (shared by eng and cal panels), pinned next to ax_cal
        ip = InsetPosition(ax_cal, [1.05, 0, 0.05, 1])
        cax.set_axes_locator(ip)
        fig.colorbar(view_eng, cax=cax, ax=[ax_eng, ax_cal])
        # colorbar 2 (delta panel only)
        ip2 = InsetPosition(ax_delta, [1.05, 0, 0.05, 1])
        cax_delta.set_axes_locator(ip2)
        fig.colorbar(view_delta, cax=cax_delta, ax=[ax_delta])
        # save the image
        if generate_report:
            fig.savefig(pp, format="pdf")
        else:
            fig.savefig(figname.format(bn))
        # display
        if show_plots:
            fig.show()
    # close file handle if necessary
    if generate_report:
        pp.close()
def calc_overlay_delta(
    pltdata_eng: Dict[str, np.ndarray],
    pltdata_cal: Dict[str, np.ndarray],
) -> Dict[str, np.ndarray]:
    """
    Return a dictionary containing the pair wise relative difference.

    Only detectors present in both datasets are kept; bank/col/row metadata
    is taken from the engineering dataset, and chi2_qsample holds
    (calibrated - engineering) per shared detector.

    @param pltdata_eng: plot data collected at engineering position.
    @param pltdata_cal: plot data collected at calibrated position.
    @returns: dict
    """
    delta = {}
    # detectors present in both datasets (sorted by detid)
    common_detids = np.intersect1d(pltdata_eng["detid"], pltdata_cal["detid"])
    # first matching row index in each table, per shared detector
    eng_rows = [np.where(pltdata_eng["detid"] == detid)[0][0] for detid in common_detids]
    cal_rows = [np.where(pltdata_cal["detid"] == detid)[0][0] for detid in common_detids]
    # carry over detector metadata from the engineering table
    for key in ("banknames", "cols", "rows"):
        delta[key] = np.array([pltdata_eng[key][idx] for idx in eng_rows])
    # pair up chi2 values and take calibrated minus engineering
    eng_chi2 = pltdata_eng["chi2_qsample"]
    cal_chi2 = pltdata_cal["chi2_qsample"]
    delta["chi2_qsample"] = np.array(
        [cal_chi2[j] - eng_chi2[i] for i, j in zip(eng_rows, cal_rows)]
    )
    return delta
def get_plot_data(
    pws: PeaksWorkspace,
    banknames: List[str],
) -> Dict[str, np.ndarray]:
    """
    Gather data for Panel diagnostics.

    @param pws: target PeaksWorkspace, must have an instrument attached
    @param banknames: list of bank names to gather
    @return: dict with per-peak arrays "banknames", "cols", "rows", "detid",
             "chi2_qsample" and "chi2_dspacing"; peaks on other banks are
             excluded.
    """
    oriented_lattice = pws.sample().getOrientedLattice()
    banks, cols, rows, detids = [], [], [], []
    chi2_qsample, chi2_dspacing = [], []
    # PERF: the previous implementation scanned the whole peak table once per
    # output column (6+ passes, each re-calling pws.row(i)/pws.getPeak(i));
    # a single pass over the peaks collects identical values.
    for i in range(pws.getNumberPeaks()):
        row = pws.row(i)
        if row["BankName"] not in banknames:
            continue
        banks.append(row["BankName"])
        cols.append(row["Col"])
        rows.append(row["Row"])
        detids.append(row["DetID"])
        peak = pws.getPeak(i)
        hkl = peak.getIntHKL()
        # chi2(Q): elementwise squared error between measured and ideal
        # Q_sample, normalized by |Q_ideal|^2, summed over the 3 components.
        # NOTE(review): getQSampleFrame() is assumed to convert cleanly to a
        # length-3 numpy array (as in the original arithmetic) -- confirm.
        qs = np.array(peak.getQSampleFrame())
        qs0 = np.array(oriented_lattice.qFromHKL(hkl))
        chi2_qsample.append(((qs - qs0) ** 2 / np.linalg.norm(qs0) ** 2).sum())
        # chi2(d): squared relative error of the measured d-spacing
        d0 = oriented_lattice.d(hkl)
        chi2_dspacing.append((peak.getDSpacing() / d0 - 1) ** 2)
    return {
        "banknames": np.array(banks),
        "cols": np.array(cols),
        "rows": np.array(rows),
        "detid": np.array(detids),
        "chi2_qsample": np.array(chi2_qsample),
        "chi2_dspacing": np.array(chi2_dspacing),
    }
def SCDCalibratePanels2PanelDiagnosticsPlot(
    peaksWorkspace_engineering: Union[PeaksWorkspace, str],
    peaksWorkspace_calibrated: Union[PeaksWorkspace, str],
    banknames: Union[str, List[str]] = None,
    mode: str = "boxplot",
    generate_report: bool = True,
    use_logscale: bool = True,
    config: Dict[str, str] = None,
    showPlots: bool = True,
) -> List[Dict[str, np.ndarray]]:
    """
    Visualization of the diagnostic for SCDCalibratePanels2 on a per
    panel (bank) basis.

    @param peaksWorkspace_engineering: peaks workspace (or its ADS name) with engineering instrument applied
    @param peaksWorkspace_calibrated: peaks workspace (or its ADS name) with calibrated instrument applied
    @param banknames: bank(s) for diagnostics; None means every bank present,
                      a string is parsed as a comma-separated list
    @param mode: plotting mode [boxplot, overlay]
    @param generate_report: toggle on report generation, default is True.
    @param use_logscale: use log scale to plot Chi2, default is True.
    @param config: plot configuration dictionary; missing keys default to
                   {"prefix": "fig", "type": "png", "saveto": "."}
    @param showPlots: open diagnostics plots after generating them.
    @return: [plot data at engineering position, plot data at calibrated position]
    @raises ValueError: if mode is neither "boxplot" nor "overlay"
    """
    # parse input: accept either a workspace handle or its name in the ADS
    tmp_eng = mtd[peaksWorkspace_engineering] if isinstance(peaksWorkspace_engineering, str) else peaksWorkspace_engineering
    # BUGFIX: log via the resolved workspace handle -- the raw argument may be
    # a plain str, which has no name() method. (Also fixes the "negineering"
    # typo in the message.)
    logging.info(f"Peaksworkspace at engineering position: {tmp_eng.name()}.")
    tmp_cal = mtd[peaksWorkspace_calibrated] if isinstance(peaksWorkspace_calibrated, str) else peaksWorkspace_calibrated
    logging.info(f"Peaksworkspace at calibrated position: {tmp_cal.name()}.")
    # BUGFIX: the default used to be a mutable dict literal that this function
    # mutates (e.g. config['type'] = 'pdf'), leaking state across calls and
    # into the caller's dict. Work on a local copy with defaults filled in.
    config = {} if config is None else dict(config)
    config.setdefault("prefix", "fig")
    config.setdefault("type", "png")
    config.setdefault("saveto", ".")
    # some hidden keywords
    if "figsize" not in config:
        config["figsize"] = (12, 4) if mode == "boxplot" else (14, 10)
    # process all banks if banknames is None
    if banknames is None:
        # NOTE: SCDCalibratePanels will not change the assigned bank name,
        # therefore only need to query the bankname from tmp_cal
        banknames = set([tmp_cal.row(i)["BankName"] for i in range(tmp_cal.getNumberPeaks())])
    elif isinstance(banknames, str):
        banknames = [me.strip() for me in banknames.split(",")]
    # prepare plotting data
    # NOTE: take advantage of the table structure
    logging.info("Prepare plotting data")
    pltdata_eng = get_plot_data(tmp_eng, banknames)
    pltdata_cal = get_plot_data(tmp_cal, banknames)
    # generate plots
    logging.info("Generating plots")
    #
    if mode.lower() == "boxplot":
        logging.info("Mode -> boxplot for chi2")
        # call the plotter
        bank_boxplot(
            pltdata_eng=pltdata_eng,
            pltdata_cal=pltdata_cal,
            figsize=np.array(config["figsize"]),
            figname=f"{config['prefix']}.{config['type']}",
            saveto=config["saveto"],
            use_logscale=use_logscale,
            show_plots=showPlots,
        )
    elif mode.lower() == "overlay":
        if generate_report:
            # a multi-page report requires the PDF backend
            logging.warning("Requested report, use PDF as output format.")
            config['type'] = 'pdf'
        logging.info("Mode -> overlay chi2 on bank")
        bank_overlay(
            pltdata_eng=pltdata_eng,
            pltdata_cal=pltdata_cal,
            figsize=np.array(config["figsize"]),
            generate_report=generate_report,
            figname=f"{config['prefix']}.{config['type']}",
            saveto=config["saveto"],
            use_logscale=use_logscale,
            show_plots=showPlots,
        )
    else:
        # plain string -- no placeholders, so no f-string needed
        raise ValueError("Unsupported mode detected, only support boxplot and overlay.")
    # close backend handle
    if not showPlots:
        plt.close("all")
    return [pltdata_eng, pltdata_cal]
# Library module: importing it is the supported usage; direct execution only warns.
if __name__ == "__main__":
    logging.warning("This module cannot be run as a script.")
| gpl-3.0 |
andrewnc/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) is compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)

# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause

# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor

# Create the dataset: noisy sum of two sine waves sampled on [0, 6]
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])

# Fit regression model: a single depth-4 tree vs a 300-round boosted
# ensemble of the same base tree
regr_1 = DecisionTreeRegressor(max_depth=4)

regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
                           n_estimators=300, random_state=rng)

regr_1.fit(X, y)
regr_2.fit(X, y)

# Predict (on the training grid, for plotting the fitted curves)
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)

# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
lazywei/scikit-learn | examples/manifold/plot_mds.py | 261 | 2616 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Licence: BSD
print(__doc__)
import numpy as np

from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection

from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA

n_samples = 20
seed = np.random.RandomState(seed=3)
# BUGFIX: np.float was a deprecated alias of the builtin float and was
# removed in NumPy 1.24; use float directly.
X_true = seed.randint(0, 20, 2 * n_samples).astype(float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()

similarities = euclidean_distances(X_true)

# Add noise to the similarities (symmetric, zero diagonal)
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise

mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
                   dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_

nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
                    dissimilarity="precomputed", random_state=seed, n_jobs=1,
                    n_init=1)
npos = nmds.fit_transform(similarities, init=pos)

# Rescale the data so both embeddings match the true points' overall scale
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())

# Rotate the data into a common orientation via PCA
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)

pos = clf.fit_transform(pos)

npos = clf.fit_transform(npos)

fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')

similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0

# Plot the edges
# (removed an unused "start_idx, end_idx = np.where(pos)" assignment --
# the segments below are built over all point pairs)
#a sequence of (*line0*, *line1*, *line2*), where::
#            linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
            for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
                    zorder=0, cmap=plt.cm.hot_r,
                    norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)

plt.show()
| bsd-3-clause |
yyjiang/scikit-learn | sklearn/tests/test_calibration.py | 213 | 12219 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
assert_warns_message)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
def test_calibration():
    """Test calibration objects with isotonic and sigmoid.

    Checks that calibrating a MultinomialNB improves the Brier score on a
    held-out set (dense and sparse inputs), that sigmoid calibration is
    invariant (or anti-symmetric) under label relabeling, and that invalid
    configurations raise.
    """
    n_samples = 100
    X, y = make_classification(n_samples=2 * n_samples, n_features=6,
                               random_state=42)
    sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
    X -= X.min()  # MultinomialNB only allows positive X
    # split train and test
    X_train, y_train, sw_train = \
        X[:n_samples], y[:n_samples], sample_weight[:n_samples]
    X_test, y_test = X[n_samples:], y[n_samples:]
    # Naive-Bayes
    clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
    prob_pos_clf = clf.predict_proba(X_test)[:, 1]
    # cv larger than the number of samples must be rejected
    pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
    assert_raises(ValueError, pc_clf.fit, X, y)
    # Naive Bayes with calibration
    for this_X_train, this_X_test in [(X_train, X_test),
                                      (sparse.csr_matrix(X_train),
                                       sparse.csr_matrix(X_test))]:
        for method in ['isotonic', 'sigmoid']:
            pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
            # Note that this fit overwrites the fit on the entire training
            # set
            pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
            prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
            # Check that brier score has improved after calibration
            assert_greater(brier_score_loss(y_test, prob_pos_clf),
                           brier_score_loss(y_test, prob_pos_pc_clf))
            # Check invariance against relabeling [0, 1] -> [1, 2]
            pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
            prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
            assert_array_almost_equal(prob_pos_pc_clf,
                                      prob_pos_pc_clf_relabeled)
            # Check invariance against relabeling [0, 1] -> [-1, 1]
            pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
            prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
            assert_array_almost_equal(prob_pos_pc_clf,
                                      prob_pos_pc_clf_relabeled)
            # Check invariance against relabeling [0, 1] -> [1, 0]
            # (label swap: sigmoid probabilities must mirror exactly)
            pc_clf.fit(this_X_train, (y_train + 1) % 2,
                       sample_weight=sw_train)
            prob_pos_pc_clf_relabeled = \
                pc_clf.predict_proba(this_X_test)[:, 1]
            if method == "sigmoid":
                assert_array_almost_equal(prob_pos_pc_clf,
                                          1 - prob_pos_pc_clf_relabeled)
            else:
                # Isotonic calibration is not invariant against relabeling
                # but should improve in both cases
                assert_greater(brier_score_loss(y_test, prob_pos_clf),
                               brier_score_loss((y_test + 1) % 2,
                                                prob_pos_pc_clf_relabeled))
    # check that calibration can also deal with regressors that have
    # a decision_function
    clf_base_regressor = CalibratedClassifierCV(Ridge())
    clf_base_regressor.fit(X_train, y_train)
    clf_base_regressor.predict(X_test)
    # Check failure cases:
    # only "isotonic" and "sigmoid" should be accepted as methods
    clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
    assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
    # base-estimators should provide either decision_function or
    # predict_proba (most regressors, for instance, should fail)
    clf_base_regressor = \
        CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
    assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight_warning():
    """Check sample_weight handling when the base estimator ignores weights.

    LinearSVC does not accept sample_weight: fitting must warn, yet the
    weights must still be applied to the calibration step (so weighted and
    unweighted fits produce different probabilities).
    """
    n_samples = 100
    X, y = make_classification(n_samples=2 * n_samples, n_features=6,
                               random_state=42)
    sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
    X_train, y_train, sw_train = \
        X[:n_samples], y[:n_samples], sample_weight[:n_samples]
    X_test = X[n_samples:]
    for method in ['sigmoid', 'isotonic']:
        base_estimator = LinearSVC(random_state=42)
        calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
        # LinearSVC does not currently support sample weights but they
        # can still be used for the calibration step (with a warning)
        msg = "LinearSVC does not support sample_weight."
        assert_warns_message(
            UserWarning, msg,
            calibrated_clf.fit, X_train, y_train, sample_weight=sw_train)
        probs_with_sw = calibrated_clf.predict_proba(X_test)
        # As the weights are used for the calibration, they should still yield
        # a different predictions
        calibrated_clf.fit(X_train, y_train)
        probs_without_sw = calibrated_clf.predict_proba(X_test)
        diff = np.linalg.norm(probs_with_sw - probs_without_sw)
        assert_greater(diff, 0.1)
def test_calibration_multiclass():
    """Test calibration for multiclass.

    Verifies that calibrated multiclass probabilities sum to one and that
    calibration lowers log-loss versus a naive softmax of the OvR decision
    function (LinearSVC) and versus raw RandomForest probabilities.
    """
    # test multi-class setting with classifier that implements
    # only decision function
    clf = LinearSVC()
    X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
                          centers=3, cluster_std=3.0)
    # Use categorical labels to check that CalibratedClassifierCV supports
    # them correctly
    target_names = np.array(['a', 'b', 'c'])
    y = target_names[y_idx]
    X_train, y_train = X[::2], y[::2]
    X_test, y_test = X[1::2], y[1::2]
    clf.fit(X_train, y_train)
    for method in ['isotonic', 'sigmoid']:
        cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
        cal_clf.fit(X_train, y_train)
        probas = cal_clf.predict_proba(X_test)
        # per-sample probabilities must be normalized
        assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
        # Check that log-loss of calibrated classifier is smaller than
        # log-loss of naively turned OvR decision function to probabilities
        # via softmax
        def softmax(y_pred):
            e = np.exp(-y_pred)
            return e / e.sum(axis=1).reshape(-1, 1)
        uncalibrated_log_loss = \
            log_loss(y_test, softmax(clf.decision_function(X_test)))
        calibrated_log_loss = log_loss(y_test, probas)
        assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
    # Test that calibration of a multiclass classifier decreases log-loss
    # for RandomForestClassifier
    X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
                      cluster_std=3.0)
    X_train, y_train = X[::2], y[::2]
    X_test, y_test = X[1::2], y[1::2]
    clf = RandomForestClassifier(n_estimators=10, random_state=42)
    clf.fit(X_train, y_train)
    clf_probs = clf.predict_proba(X_test)
    loss = log_loss(y_test, clf_probs)
    for method in ['isotonic', 'sigmoid']:
        cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
        cal_clf.fit(X_train, y_train)
        cal_clf_probs = cal_clf.predict_proba(X_test)
        cal_loss = log_loss(y_test, cal_clf_probs)
        assert_greater(loss, cal_loss)
def test_calibration_prefit():
    """Test calibration for prefitted classifiers"""
    n_samples = 50
    X, y = make_classification(n_samples=3 * n_samples, n_features=6,
                               random_state=42)
    sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
    X -= X.min()  # MultinomialNB only allows positive X
    # split train and test: thirds for train / calibration / evaluation
    X_train, y_train, sw_train = \
        X[:n_samples], y[:n_samples], sample_weight[:n_samples]
    X_calib, y_calib, sw_calib = \
        X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
        sample_weight[n_samples:2 * n_samples]
    X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
    # Naive-Bayes fitted once; cv="prefit" below must not re-fit it.
    clf = MultinomialNB()
    clf.fit(X_train, y_train, sw_train)
    prob_pos_clf = clf.predict_proba(X_test)[:, 1]
    # Naive Bayes with calibration; exercise dense and sparse inputs,
    # both calibration methods, and with/without sample weights.
    for this_X_calib, this_X_test in [(X_calib, X_test),
                                      (sparse.csr_matrix(X_calib),
                                       sparse.csr_matrix(X_test))]:
        for method in ['isotonic', 'sigmoid']:
            pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
            for sw in [sw_calib, None]:
                pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
                y_prob = pc_clf.predict_proba(this_X_test)
                y_pred = pc_clf.predict(this_X_test)
                prob_pos_pc_clf = y_prob[:, 1]
                # predict() must agree with argmax over predict_proba().
                assert_array_equal(y_pred,
                                   np.array([0, 1])[np.argmax(y_prob, axis=1)])
                # Calibration must improve the Brier score.
                assert_greater(brier_score_loss(y_test, prob_pos_clf),
                               brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
    """Check Platt sigmoid calibration against LibSVM reference values."""
    decisions = np.array([5, -4, 1.0])
    labels = np.array([1, -1, -1])
    # Reference (A, B) parameters computed with a Python port of the
    # C++ implementation shipped in LibSVM.
    ref_ab = np.array([-0.20261354391187855, 0.65236314980010512])
    assert_array_almost_equal(ref_ab,
                              _sigmoid_calibration(decisions, labels), 3)
    expected_prob = 1. / (1. + np.exp(ref_ab[0] * decisions + ref_ab[1]))
    fitted_prob = _SigmoidCalibration().fit(decisions, labels).predict(decisions)
    assert_array_almost_equal(expected_prob, fitted_prob, 6)
    # _SigmoidCalibration().fit only accepts 1d arrays or 2d column arrays;
    # a 2-row matrix must raise.
    assert_raises(ValueError, _SigmoidCalibration().fit,
                  np.vstack((decisions, decisions)), labels)
def test_calibration_curve():
    """Exercise calibration_curve on a tiny hand-built problem."""
    labels = np.array([0, 0, 0, 1, 1, 1])
    scores = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
    frac_pos, mean_pred = calibration_curve(labels, scores, n_bins=2)
    # Scores scaled outside [0, 1] must give the same curve once normalized.
    frac_pos_norm, mean_pred_norm = \
        calibration_curve(labels, scores * 2, n_bins=2, normalize=True)
    assert_equal(len(frac_pos), len(mean_pred))
    assert_equal(len(frac_pos), 2)
    assert_almost_equal(frac_pos, [0, 1])
    assert_almost_equal(mean_pred, [0.1, 0.9])
    assert_almost_equal(frac_pos, frac_pos_norm)
    assert_almost_equal(mean_pred, mean_pred_norm)
    # Probabilities outside [0, 1] should not be accepted when normalize
    # is set to False.
    assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
                  normalize=False)
def test_calibration_nan_imputer():
    """A pipeline with an Imputer lets calibration cope with NaN inputs."""
    X, y = make_classification(n_samples=10, n_features=2,
                               n_informative=2, n_redundant=0,
                               random_state=42)
    # Inject a missing value; the Imputer stage must absorb it.
    X[0, 0] = np.nan
    pipeline = Pipeline(
        [('imputer', Imputer()),
         ('rf', RandomForestClassifier(n_estimators=1))])
    calibrated = CalibratedClassifierCV(pipeline, cv=2, method='isotonic')
    calibrated.fit(X, y)
    calibrated.predict(X)
| bsd-3-clause |
TimBizeps/BachelorAP | V504_Thermische Elektronenemission/auswertung3.py | 1 | 1423 | import matplotlib as mpl
# Analysis script for experiment V504 (thermionic emission): fits a line to
# ln(I) vs. corrected anode voltage and derives the cathode temperature T
# from the slope via T = -e / (k * a).
mpl.use('pgf')  # must be set before pyplot is imported
import numpy as np
import scipy.constants as const
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from uncertainties import ufloat
import uncertainties.unumpy as unp
from uncertainties.unumpy import (nominal_values as noms, std_devs as stds)
mpl.rcParams.update({
    'font.family': 'serif',
    'text.usetex': True,
    'pgf.rcfonts': False,
    'pgf.texsystem': 'lualatex',
    'pgf.preamble': r'\usepackage{unicode-math}\usepackage{siunitx}'
})
# Columns: voltage U6 [V], current I6 [nA].
U6, I6 = np.genfromtxt('messwerte4.txt', unpack=True)
I6A = I6/1000000000  # nA -> A
U6tat = U6 - 1000000*I6A  # 1 MOhm internal resistance of the meter
I6log = np.log(I6A)
e = const.e
k = const.k
# Linear model for the fit: ln(I) = a*U + b.
def f(x, a, b):
    return a*x+b
params, covariance = curve_fit(f, U6tat, I6log)
errors = np.sqrt(np.diag(covariance))
print('a =', params[0], '±', errors[0])
print('b =', params[1], '±', errors[1])
a = ufloat(params[0], errors[0])
# Slope of ln(I) vs U gives the temperature via T = -e/(k*a).
T = -e/(k*a)
np.savetxt("Parameter2.txt", np.column_stack([params, errors]))
print('T =', noms(T), '±', stds(T))  # in Kelvin
x_plot = np.linspace(-0.05, 1)
plt.plot(x_plot, f(x_plot, *params), 'b-', label='Fit', linewidth=1)
plt.plot(U6tat, I6log, 'rx', label='Messwerte', linewidth=1)
# NOTE(review): the x data is U6tat (linear volts), but the x label claims
# ln(U/V) — confirm which is intended.
plt.xlabel(r'$ln \left( \frac{U}{\si{\volt}} \right)$')
plt.ylabel(r'$ln \left( \frac{I}{\si{\nano\ampere}} \right)$')
plt.xlim(-0.05, 1)
plt.grid()
plt.legend(loc="best")
plt.tight_layout()
plt.savefig("Plot3.pdf")
| gpl-3.0 |
SeyVu/subscription_renewal | support_functions.py | 1 | 2424 | #########################################################################################################
# Description: Collection of support functions that'll be used often
#
#########################################################################################################
import numpy as np
import pandas as pd
import random
import os
#########################################################################################################
__author__ = 'DataCentric1'
__pass__ = 1
__fail__ = 0
#########################################################################################################
# Class to specify color and text formatting for prints
class Color:
    """ANSI escape codes for colored / formatted terminal output.

    Usage: print(Color.RED + Color.BOLD + "text" + Color.END)
    Always terminate with END to reset the terminal state.
    """
    def __init__(self):
        pass
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'
# Returns number of lines in a file in a memory / time efficient way
def file_len(fname):
    """Return the number of lines in *fname*.

    Iterates the file lazily, so memory use stays O(1) regardless of size.
    Bug fix: the counter previously started at -1, so an empty file
    returned -1 instead of 0.
    """
    count = 0
    with open(fname) as f:
        # enumerate(..., 1) leaves `count` equal to the last line number.
        for count, _line in enumerate(f, 1):
            pass
    return count
# Save numpy array from .npy file to txt file
def save_npy_array_to_txt(npy_fname, txt_fname):
    """Load the array stored in *npy_fname* and dump it to *txt_fname* as text."""
    array = np.load(npy_fname)
    np.savetxt(txt_fname, array, fmt='%s')
    return __pass__
# Save numpy array from .npy file to csv file. TODO - Double check fn
def save_npy_array_to_csv(npy_fname, csv_fname):
    """Save a 2-D numpy array stored in *npy_fname* to *csv_fname* as CSV.

    Improvements over the original:
    - generalized to any number of columns (was hard-coded to two);
    - removed the Python-2-only debug ``print`` statements, which also
      makes the function Python-3 compatible;
    - delegates the row/comma formatting to ``np.savetxt``.
    """
    data = np.load(npy_fname)
    np.savetxt(csv_fname, data, fmt='%s', delimiter=',')
    return __pass__
# Returns random floating point value within the range specified
def random_float(low, high):
    """Return a uniformly distributed random float in [low, high)."""
    span = high - low
    return low + random.random() * span
# Returns all elements in the list with format 0.2f
def format_float_0_2f(list_name):
    """Render every element of *list_name* with 0.2f precision, e.g. "[1.00, 2.50]"."""
    body = ", ".join("%.2f" % value for value in list_name)
    return "[%s]" % body
# Load Model Data for a CSV
def load_model_data(data_csv='dummy.csv'):
    """
    Reads a CSV file, Returns a Pandas Data Frame
    :param data_csv: path to a comma-separated file
    :return data: pandas.DataFrame with the file contents
    :raises ValueError: if *data_csv* does not exist
    """
    if os.path.isfile(data_csv):
        return pd.read_csv(data_csv, sep=',')
    # Bug fix: the message used logging-style lazy args
    # (ValueError('... %s ...', path)) which are never interpolated;
    # format the path into the message explicitly.
    raise ValueError('Input file %s not available' % data_csv)
| mit |
adbroido/LRTanalysis | code/sortgmls.py | 2 | 3782 | import numpy as np
import igraph
import glob
import os
import pickle
import pandas as pd
""" Assorted functions to check whether a graph (as an igraph object) has
certain properties. All are meant to be called directly.
"""
# filepath to error file
errorfp = 'gmlerror.txt'
def weighted(g, fp=''):
    """ Check whether the graph g is weighted. The built-in igraph check only
    looks for a type label of 'weight'. Sometimes gmls will have 'value' instead
    so this makes sure to check for both. If 'value' is used, this gml is noted
    in an error file so we can fix it later.

    Input:
        g       igraph object, graph to be checked
        fp      string, filepath to gml file. To be used in error file
                if necessary.
    Output:
        df_entry    int, 0 means unweighted, 1 means weighted
    """
    df_entry = 0
    # A graph only counts as weighted if there is more than one distinct
    # weight value; constant weights are treated as unweighted.
    if 'weight' in g.es.attributes():
        if len(np.unique(g.es['weight'])) >1:
            df_entry = 1
    elif 'value' in g.es.attributes():
        errormessage = "%s is weighted but has attribute 'value' instead of 'weight'\n" %fp
        # only add this line to error file if we haven't already noted this
        known = False
        # 'a+' so the file is created on first use; seek(0) to scan existing
        # entries before appending.
        f = open(errorfp, 'a+')
        f.seek(0)
        for line in f:
            if line == errormessage:
                known = True
        if not known:
            f.write(errormessage)
        f.close()
        if len(np.unique(g.es['value'])) >1:
            df_entry = 1
    return df_entry
def multigraph(g):
    """ Check whether the graph g is a multigraph (has repeated edges).
    Multiplex graphs of any kind are also included here; they are
    separated out later.

    Input:
        g           igraph object, graph to be checked
    Output:
        df_entry    int, 0 means not multigraph, 1 means multigraph
    """
    return 1 if g.has_multiple() else 0
def directed(g):
    """ Check whether the graph g is directed.

    Input:
        g           igraph object, graph to be checked
    Output:
        df_entry    int, 0 means undirected, 1 means directed
    """
    return 1 if g.is_directed() else 0
def multiplex(g):
    """ Check whether the graph g is multiplex by looking for edge
    attributes beyond the weight-like ones ('weight' / 'value').

    Input:
        g           igraph object, graph to be checked
    Output:
        df_entry    int, 0 means not multiplex, 1 means multiplex
    """
    extra_attributes = set(g.es.attributes()) - {'weight', 'value'}
    return 1 if extra_attributes else 0
def bipartite(g, fp=None):
    """ Check whether the graph g is bipartite.

    Input:
        g           igraph object, graph to be checked
        fp          string, path to gml file
    Output:
        df_entry    int or string, 0 means not bipartite, 1 means bipartite
                    'error' means the gml file is not structured correctly
    """
    if g.is_bipartite():
        # igraph only proves 2-colorability; the gml must also carry an
        # explicit vertex 'type' attribute with both types present.
        if 'type' in g.vs.attributes():
            if len(set(g.vs['type'])) > 1:
                df_entry = 1
            else:
                # A single type value means the attribute is degenerate.
                df_entry = 0
        else:
            if fp:
                # Note the malformed gml in the error file (deduplicated by
                # scanning existing lines first).
                errormessage = "%s is bipartite and has no attribute 'type'\n" %fp
                known = False
                f = open(errorfp, 'a+')
                f.seek(0)
                for line in f:
                    if line == errormessage:
                        known = True
                if not known:
                    f.write(errormessage)
                f.close()
                df_entry = 'error'
            else:
                # Without a filepath we cannot log the problem; treat as
                # not bipartite.
                df_entry = 0
    else:
        df_entry = 0
    return df_entry
| gpl-3.0 |
InstaSketch/image-picker | imagePicker/image_query/management/commands/query.py | 1 | 2326 | import os
import io
import cProfile
import cv2
import requests
import pstats
import numpy as np
from matplotlib import pyplot as plt
from django.core.management.base import BaseCommand, CommandError
from image_api.imageloader import Imageloader
from image_query import query
class Command(BaseCommand):
    """Django management command: query an image against the local index,
    profile the search, and plot the query image next to the top matches
    fetched from the CDN."""
    help = 'Query Image on CDN and Draw the Results'

    def add_arguments(self, parser):
        # -i/--image: path to the query image; -l/--limit: max results shown.
        parser.add_argument(
            '-i',
            '--image',
            type=str,
            dest="img",
            help='The path to image')
        parser.add_argument(
            '-l',
            '--limit',
            default=40,
            type=int,
            dest="limit",
            nargs='?',
            help='Set output limits')

    def handle(self, *args, **options):
        if os.path.exists(options['img']):
            print('Searching image on local database')
            search = query.Search()
            # Profile the search call and dump cumulative timings to stdout.
            pr = cProfile.Profile()
            pr.enable()
            results = search.search_image(cv2.imread(options['img']), bow_hist=None, color_hist=None, metric='chisqr_alt')
            pr.disable()
            s = io.StringIO()
            ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
            ps.print_stats()
            print(s.getvalue())
            # Rank by (element[2], element[1]) ascending and keep the best.
            top = sorted(results, key=lambda element: (element[2], element[1]))[:options['limit']]
            print(top)
            print('Generating Results from CDN')
            plt.figure()
            plt.gray()
            # Slot 1 of a 5x9 grid holds the query image itself.
            plt.subplot(5,9,1)
            img = cv2.imread(options['img'])
            # OpenCV loads BGR; convert for matplotlib display.
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            plt.imshow(img)
            plt.axis('off')
            loader = Imageloader()
            j = 0
            for img_path, _, _ in top:
                # Fetch each match from the CDN and decode it in memory.
                i = requests.get(loader.get_url(img_path))
                # NOTE(review): np.fromstring is deprecated in newer numpy —
                # consider np.frombuffer.
                nparr = np.fromstring(i.content, np.uint8)
                img = cv2.imdecode(nparr, 1)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                plt.gray()
                # Matches start at grid slot 5, leaving a visual gap after
                # the query image.
                plt.subplot(5,9,j+5)
                j+=1
                plt.imshow(img)
                plt.axis('off')
            plt.show()
            self.stdout.write("Exiting...\n", ending='')
        else:
            self.stderr.write("Error Input Image\n", ending='')
| apache-2.0 |
xyguo/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
    """Yield one nose check per combination of loss, sparse/dense X,
    two-class/multi-class y, and intercept settings.

    Bug fix: the yielded lambda previously captured the loop variables by
    reference (late binding), so every generated check could end up running
    with the last iteration's parameters. Binding them as default arguments
    freezes the values at creation time.
    """
    losses = ['squared_hinge', 'log']
    Xs = {'sparse': sparse_X, 'dense': dense_X}
    Ys = {'two-classes': Y1, 'multi-class': Y2}
    intercepts = {'no-intercept': {'fit_intercept': False},
                  'fit-intercept': {'fit_intercept': True,
                                    'intercept_scaling': 10}}

    for loss in losses:
        for X_label, X in Xs.items():
            for Y_label, Y in Ys.items():
                for intercept_label, intercept_params in intercepts.items():
                    # Default arguments capture the current values, not the
                    # loop variables themselves.
                    check = lambda X=X, Y=Y, loss=loss, \
                        params=intercept_params: \
                        check_l1_min_c(X, Y, loss, **params)
                    check.description = ('Test l1_min_c loss=%r %s %s %s' %
                                         (loss, X_label, Y_label,
                                          intercept_label))
                    yield check
def test_l2_deprecation():
    # The legacy loss name "l2" must behave like "squared_hinge" and emit
    # a DeprecationWarning; clear the warning registry first so the
    # warning is not suppressed by an earlier test.
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        assert_equal(l1_min_c(dense_X, Y1, "l2"),
                     l1_min_c(dense_X, Y1, "squared_hinge"))
        assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
    # Verify l1_min_c's defining property: at C == min_c the L1-penalized
    # model is entirely null (all-zero coefficients and intercept), while
    # any C slightly above min_c yields at least one non-zero parameter.
    min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
    # Pick the estimator matching the requested loss.
    clf = {
        'log': LogisticRegression(penalty='l1'),
        'squared_hinge': LinearSVC(loss='squared_hinge',
                                   penalty='l1', dual=False),
    }[loss]
    clf.fit_intercept = fit_intercept
    clf.intercept_scaling = intercept_scaling

    # At exactly min_c the model must stay null.
    clf.C = min_c
    clf.fit(X, y)
    assert_true((np.asarray(clf.coef_) == 0).all())
    assert_true((np.asarray(clf.intercept_) == 0).all())

    # 1% above min_c, something must become non-zero.
    clf.C = min_c * 1.01
    clf.fit(X, y)
    assert_true((np.asarray(clf.coef_) != 0).any() or
                (np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
    # Degenerate all-zero X makes the minimal C undefined; must raise.
    X = [[0, 0], [0, 0]]
    y = [0, 1]
    l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
    # 'l1' is not a supported loss name for l1_min_c; must raise.
    l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
YinongLong/scikit-learn | examples/calibration/plot_compare_calibration.py | 82 | 5012 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be one-
sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subseting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve as
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import numpy as np
# Seed immediately after importing numpy so all downstream randomness
# (dataset generation, estimators) is reproducible.
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
                                    n_informative=2, n_redundant=2)
train_samples = 100  # Samples used for training the models
# Tiny train split; the large remainder is used to estimate calibration.
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
# Diagonal = perfect calibration reference line.
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
                  (gnb, 'Naive Bayes'),
                  (svc, 'Support Vector Classification'),
                  (rfc, 'Random Forest')]:
    clf.fit(X_train, y_train)
    if hasattr(clf, "predict_proba"):
        prob_pos = clf.predict_proba(X_test)[:, 1]
    else:  # use decision function
        prob_pos = clf.decision_function(X_test)
        # Min-max scale decision values into [0, 1] so they are comparable
        # to probabilities.
        prob_pos = \
            (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
    fraction_of_positives, mean_predicted_value = \
        calibration_curve(y_test, prob_pos, n_bins=10)
    ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
             label="%s" % (name, ))
    ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
             histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
| bsd-3-clause |
OpringaoDoTurno/airflow | tests/operators/hive_operator.py | 40 | 14061 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import datetime
import os
import unittest
import mock
import nose
import six
from airflow import DAG, configuration, operators
configuration.load_test_config()
DEFAULT_DATE = datetime.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
if 'AIRFLOW_RUNALL_TESTS' in os.environ:
import airflow.hooks.hive_hooks
import airflow.operators.presto_to_mysql
    class HiveServer2Test(unittest.TestCase):
        """Integration-style tests for HiveServer2Hook (only collected when
        AIRFLOW_RUNALL_TESTS is set; they need a live Hive instance)."""
        def setUp(self):
            configuration.load_test_config()
            self.nondefault_schema = "nondefault"

        def test_select_conn(self):
            from airflow.hooks.hive_hooks import HiveServer2Hook
            sql = "select 1"
            hook = HiveServer2Hook()
            hook.get_records(sql)

        def test_multi_statements(self):
            from airflow.hooks.hive_hooks import HiveServer2Hook
            sqls = [
                "CREATE TABLE IF NOT EXISTS test_multi_statements (i INT)",
                "DROP TABLE test_multi_statements",
            ]
            hook = HiveServer2Hook()
            hook.get_records(sqls)

        def test_get_metastore_databases(self):
            # Metastore access is only exercised on Python 2 here.
            if six.PY2:
                from airflow.hooks.hive_hooks import HiveMetastoreHook
                hook = HiveMetastoreHook()
                hook.get_databases()

        def test_to_csv(self):
            from airflow.hooks.hive_hooks import HiveServer2Hook
            sql = "select 1"
            hook = HiveServer2Hook()
            hook.to_csv(hql=sql, csv_filepath="/tmp/test_to_csv")

        def connect_mock(self, host, port,
                         auth_mechanism, kerberos_service_name,
                         user, database):
            # Replacement for the real connect(); only verifies the schema
            # is forwarded as the database argument.
            self.assertEqual(database, self.nondefault_schema)

        # NOTE(review): 'HiveServer2Hook.connect' is not a resolvable patch
        # target string for mock.patch — confirm the full dotted path.
        @mock.patch('HiveServer2Hook.connect', return_value="foo")
        def test_select_conn_with_schema(self, connect_mock):
            from airflow.hooks.hive_hooks import HiveServer2Hook
            # Configure
            hook = HiveServer2Hook()
            # Run
            hook.get_conn(self.nondefault_schema)
            # Verify
            self.assertTrue(connect_mock.called)
            (args, kwargs) = connect_mock.call_args_list[0]
            self.assertEqual(self.nondefault_schema, kwargs['database'])

        def test_get_results_with_schema(self):
            from airflow.hooks.hive_hooks import HiveServer2Hook
            from unittest.mock import MagicMock
            # Configure
            sql = "select 1"
            schema = "notdefault"
            hook = HiveServer2Hook()
            # NOTE(review): cursor_mock / get_conn_mock are referenced inside
            # their own constructor calls before they are bound — this raises
            # NameError if the test ever runs.
            cursor_mock = MagicMock(
                __enter__=cursor_mock,
                __exit__=None,
                execute=None,
                fetchall=[],
            )
            get_conn_mock = MagicMock(
                __enter__=get_conn_mock,
                __exit__=None,
                cursor=cursor_mock,
            )
            hook.get_conn = get_conn_mock
            # Run
            hook.get_results(sql, schema)
            # Verify
            get_conn_mock.assert_called_with(self.nondefault_schema)

        @mock.patch('HiveServer2Hook.get_results', return_value={'data': []})
        def test_get_records_with_schema(self, get_results_mock):
            from airflow.hooks.hive_hooks import HiveServer2Hook
            # Configure
            sql = "select 1"
            hook = HiveServer2Hook()
            # Run
            hook.get_records(sql, self.nondefault_schema)
            # Verify
            # NOTE(review): self.connect_mock is never assigned as an
            # attribute — this lookup would fail at runtime.
            self.assertTrue(self.connect_mock.called)
            (args, kwargs) = self.connect_mock.call_args_list[0]
            self.assertEqual(sql, args[0])
            self.assertEqual(self.nondefault_schema, kwargs['schema'])

        @mock.patch('HiveServer2Hook.get_results', return_value={'data': []})
        def test_get_pandas_df_with_schema(self, get_results_mock):
            from airflow.hooks.hive_hooks import HiveServer2Hook
            # Configure
            sql = "select 1"
            hook = HiveServer2Hook()
            # Run
            hook.get_pandas_df(sql, self.nondefault_schema)
            # Verify
            # NOTE(review): same unassigned self.connect_mock issue as above.
            self.assertTrue(self.connect_mock.called)
            (args, kwargs) = self.connect_mock.call_args_list[0]
            self.assertEqual(sql, args[0])
            self.assertEqual(self.nondefault_schema, kwargs['schema'])
    class HivePrestoTest(unittest.TestCase):
        """End-to-end operator/sensor tests against live Hive + Presto
        services (only collected when AIRFLOW_RUNALL_TESTS is set)."""
        def setUp(self):
            configuration.load_test_config()
            args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
            dag = DAG('test_dag_id', default_args=args)
            self.dag = dag
            # Shared HQL fixture: (re)builds a partitioned copy of
            # static_babynames for the default execution date.
            self.hql = """
            USE airflow;
            DROP TABLE IF EXISTS static_babynames_partitioned;
            CREATE TABLE IF NOT EXISTS static_babynames_partitioned (
                state string,
                year string,
                name string,
                gender string,
                num int)
            PARTITIONED BY (ds string);
            INSERT OVERWRITE TABLE static_babynames_partitioned
            PARTITION(ds='{{ ds }}')
            SELECT state, year, name, gender, num FROM static_babynames;
            """

        def test_hive(self):
            import airflow.operators.hive_operator
            t = operators.hive_operator.HiveOperator(
                task_id='basic_hql', hql=self.hql, dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_hive_queues(self):
            # Same HQL, but routed through explicit MapReduce queue settings.
            import airflow.operators.hive_operator
            t = operators.hive_operator.HiveOperator(
                task_id='test_hive_queues', hql=self.hql,
                mapred_queue='default', mapred_queue_priority='HIGH',
                mapred_job_name='airflow.test_hive_queues',
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_hive_dryrun(self):
            import airflow.operators.hive_operator
            t = operators.hive_operator.HiveOperator(
                task_id='dry_run_basic_hql', hql=self.hql, dag=self.dag)
            t.dry_run()

        def test_beeline(self):
            import airflow.operators.hive_operator
            t = operators.hive_operator.HiveOperator(
                task_id='beeline_hql', hive_cli_conn_id='beeline_default',
                hql=self.hql, dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_presto(self):
            sql = """
            SELECT count(1) FROM airflow.static_babynames_partitioned;
            """
            import airflow.operators.presto_check_operator
            t = operators.presto_check_operator.PrestoCheckOperator(
                task_id='presto_check', sql=sql, dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_presto_to_mysql(self):
            import airflow.operators.presto_to_mysql
            t = operators.presto_to_mysql.PrestoToMySqlTransfer(
                task_id='presto_to_mysql_check',
                sql="""
                SELECT name, count(*) as ccount
                FROM airflow.static_babynames
                GROUP BY name
                """,
                mysql_table='test_static_babynames',
                mysql_preoperator='TRUNCATE TABLE test_static_babynames;',
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_hdfs_sensor(self):
            t = operators.sensors.HdfsSensor(
                task_id='hdfs_sensor_check',
                filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames',
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_webhdfs_sensor(self):
            t = operators.sensors.WebHdfsSensor(
                task_id='webhdfs_sensor_check',
                filepath='hdfs://user/hive/warehouse/airflow.db/static_babynames',
                timeout=120,
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_sql_sensor(self):
            t = operators.sensors.SqlSensor(
                task_id='hdfs_sensor_check',
                conn_id='presto_default',
                sql="SELECT 'x' FROM airflow.static_babynames LIMIT 1;",
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_hive_stats(self):
            import airflow.operators.hive_stats_operator
            t = operators.hive_stats_operator.HiveStatsCollectionOperator(
                task_id='hive_stats_check',
                table="airflow.static_babynames_partitioned",
                partition={'ds': DEFAULT_DATE_DS},
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_named_hive_partition_sensor(self):
            t = operators.sensors.NamedHivePartitionSensor(
                task_id='hive_partition_check',
                partition_names=[
                    "airflow.static_babynames_partitioned/ds={{ds}}"
                ],
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_named_hive_partition_sensor_succeeds_on_multiple_partitions(self):
            # Duplicated partition names must not break the sensor.
            t = operators.sensors.NamedHivePartitionSensor(
                task_id='hive_partition_check',
                partition_names=[
                    "airflow.static_babynames_partitioned/ds={{ds}}",
                    "airflow.static_babynames_partitioned/ds={{ds}}"
                ],
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_named_hive_partition_sensor_parses_partitions_with_periods(self):
            t = operators.sensors.NamedHivePartitionSensor.parse_partition_name(
                partition="schema.table/part1=this.can.be.an.issue/part2=ok")
            self.assertEqual(t[0], "schema")
            self.assertEqual(t[1], "table")
            # NOTE(review): the parsed input ends in 'part2=ok', so this
            # expected value ('part2=this_should_be_ok') cannot match —
            # either the input or the expectation is wrong.
            self.assertEqual(t[2], "part1=this.can.be.an.issue/part2=this_should_be_ok")

        @nose.tools.raises(airflow.exceptions.AirflowSensorTimeout)
        def test_named_hive_partition_sensor_times_out_on_nonexistent_partition(self):
            # One missing partition must keep the sensor poking until the
            # 1-second timeout fires.
            t = operators.sensors.NamedHivePartitionSensor(
                task_id='hive_partition_check',
                partition_names=[
                    "airflow.static_babynames_partitioned/ds={{ds}}",
                    "airflow.static_babynames_partitioned/ds=nonexistent"
                ],
                poke_interval=0.1,
                timeout=1,
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_hive_partition_sensor(self):
            t = operators.sensors.HivePartitionSensor(
                task_id='hive_partition_check',
                table='airflow.static_babynames_partitioned',
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_hive_metastore_sql_sensor(self):
            t = operators.sensors.MetastorePartitionSensor(
                task_id='hive_partition_check',
                table='airflow.static_babynames_partitioned',
                partition_name='ds={}'.format(DEFAULT_DATE_DS),
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_hive2samba(self):
            import airflow.operators.hive_to_samba_operator
            t = operators.hive_to_samba_operator.Hive2SambaOperator(
                task_id='hive2samba_check',
                samba_conn_id='tableau_samba',
                hql="SELECT * FROM airflow.static_babynames LIMIT 10000",
                destination_filepath='test_airflow.csv',
                dag=self.dag)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)

        def test_hive_to_mysql(self):
            import airflow.operators.hive_to_mysql
            t = operators.hive_to_mysql.HiveToMySqlTransfer(
                mysql_conn_id='airflow_db',
                task_id='hive_to_mysql_check',
                create=True,
                sql="""
                SELECT name
                FROM airflow.static_babynames
                LIMIT 100
                """,
                mysql_table='test_static_babynames',
                mysql_preoperator=[
                    'DROP TABLE IF EXISTS test_static_babynames;',
                    'CREATE TABLE test_static_babynames (name VARCHAR(500))',
                ],
                dag=self.dag)
            # Clear any prior task-instance state before running.
            t.clear(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
            t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
                  ignore_ti_state=True)
| apache-2.0 |
depet/scikit-learn | examples/svm/plot_rbf_parameters.py | 6 | 4080 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters `gamma`
and `C` of the rbf kernel SVM.
Intuitively, the `gamma` parameter defines how far the influence
of a single training example reaches, with low values meaning 'far'
and high values meaning 'close'.
The `C` parameter trades off misclassification of training examples
against simplicity of the decision surface. A low C makes
the decision surface smooth, while a high C aims at classifying
all training examples correctly.
Two plots are generated. The first is a visualization of the
decision function for a variety of parameter values, and the second
is a heatmap of the classifier's cross-validation accuracy as
a function of `C` and `gamma`.
'''
print(__doc__)
import numpy as np
# NOTE(review): pylab and sklearn.cross_validation/grid_search are legacy
# APIs — newer sklearn/matplotlib releases moved or removed them.
import pylab as pl
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedKFold
from sklearn.grid_search import GridSearchCV
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
Y = iris.target
# dataset for decision function visualization: first two features only,
# restricted to classes 1 and 2 and relabeled to {0, 1}.
X_2d = X[:, :2]
X_2d = X_2d[Y > 0]
Y_2d = Y[Y > 0]
Y_2d -= 1
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifier
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = 10.0 ** np.arange(-2, 9)
gamma_range = 10.0 ** np.arange(-5, 4)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedKFold(y=Y, n_folds=3)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, Y)
print("The best classifier is: ", grid.best_estimator_)
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1, 1e2, 1e4]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
    for gamma in gamma_2d_range:
        clf = SVC(C=C, gamma=gamma)
        clf.fit(X_2d, Y_2d)
        classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
pl.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-5, 5, 200), np.linspace(-5, 5, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
    # evaluate decision function in a grid
    Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # visualize decision function for these parameters
    pl.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
    pl.title("gamma 10^%d, C 10^%d" % (np.log10(gamma), np.log10(C)),
             size='medium')
    # visualize parameter's effect on decision function
    pl.pcolormesh(xx, yy, -Z, cmap=pl.cm.jet)
    pl.scatter(X_2d[:, 0], X_2d[:, 1], c=Y_2d, cmap=pl.cm.jet)
    pl.xticks(())
    pl.yticks(())
    pl.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
score_dict = grid.grid_scores_
# We extract just the scores
scores = [x[1] for x in score_dict]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# draw heatmap of accuracy as a function of gamma and C
pl.figure(figsize=(8, 6))
pl.subplots_adjust(left=0.05, right=0.95, bottom=0.15, top=0.95)
pl.imshow(scores, interpolation='nearest', cmap=pl.cm.spectral)
pl.xlabel('gamma')
pl.ylabel('C')
pl.colorbar()
pl.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
pl.yticks(np.arange(len(C_range)), C_range)
pl.show()
| bsd-3-clause |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/tests/scalar/test_interval.py | 7 | 3606 | from __future__ import division
import pytest
from pandas import Interval
import pandas.util.testing as tm
class TestInterval(object):
    """Unit tests for the scalar pandas ``Interval`` type.

    Covers construction defaults, repr/str, membership (``in``),
    equality, ordering, hashing, and the arithmetic operators
    (+, -, *, /) including in-place and reflected variants.
    """

    def setup_method(self, method):
        # Fresh right-closed interval (0, 1] before every test.
        self.interval = Interval(0, 1)

    def test_properties(self):
        # Default closedness is 'right'; mid is the arithmetic midpoint.
        assert self.interval.closed == 'right'
        assert self.interval.left == 0
        assert self.interval.right == 1
        assert self.interval.mid == 0.5

    def test_repr(self):
        # repr is eval-able-looking; str uses mathematical bracket notation.
        assert repr(self.interval) == "Interval(0, 1, closed='right')"
        assert str(self.interval) == "(0, 1]"

        interval_left = Interval(0, 1, closed='left')
        assert repr(interval_left) == "Interval(0, 1, closed='left')"
        assert str(interval_left) == "[0, 1)"

    def test_contains(self):
        assert 0.5 in self.interval
        assert 1 in self.interval
        assert 0 not in self.interval
        # Interval-in-interval containment is undefined for scalars.
        pytest.raises(TypeError, lambda: self.interval in self.interval)

        interval = Interval(0, 1, closed='both')
        assert 0 in interval
        assert 1 in interval

        interval = Interval(0, 1, closed='neither')
        assert 0 not in interval
        assert 0.5 in interval
        assert 1 not in interval

    def test_equal(self):
        # Equality requires matching endpoints AND matching closedness.
        assert Interval(0, 1) == Interval(0, 1, closed='right')
        assert Interval(0, 1) != Interval(0, 1, closed='left')
        assert Interval(0, 1) != 0

    def test_comparison(self):
        # Ordering against a plain scalar is a TypeError.
        with tm.assert_raises_regex(TypeError, 'unorderable types'):
            Interval(0, 1) < 2

        assert Interval(0, 1) < Interval(1, 2)
        assert Interval(0, 1) < Interval(0, 2)
        assert Interval(0, 1) < Interval(0.5, 1.5)
        assert Interval(0, 1) <= Interval(0, 1)
        assert Interval(0, 1) > Interval(-1, 2)
        assert Interval(0, 1) >= Interval(0, 1)

    def test_hash(self):
        # Intervals are immutable, hence hashable; should not raise.
        hash(self.interval)

    def test_math_add(self):
        # Adding a scalar shifts both endpoints; interval+interval is invalid.
        expected = Interval(1, 2)
        actual = self.interval + 1
        assert expected == actual

        expected = Interval(1, 2)
        actual = 1 + self.interval
        assert expected == actual

        actual = self.interval
        actual += 1
        assert expected == actual

        with pytest.raises(TypeError):
            self.interval + Interval(1, 2)

        with pytest.raises(TypeError):
            self.interval + 'foo'

    def test_math_sub(self):
        # Subtracting a scalar shifts both endpoints left.
        expected = Interval(-1, 0)
        actual = self.interval - 1
        assert expected == actual

        actual = self.interval
        actual -= 1
        assert expected == actual

        with pytest.raises(TypeError):
            self.interval - Interval(1, 2)

        with pytest.raises(TypeError):
            self.interval - 'foo'

    def test_math_mult(self):
        # Multiplying by a scalar scales both endpoints.
        expected = Interval(0, 2)
        actual = self.interval * 2
        assert expected == actual

        expected = Interval(0, 2)
        actual = 2 * self.interval
        assert expected == actual

        actual = self.interval
        actual *= 2
        assert expected == actual

        with pytest.raises(TypeError):
            self.interval * Interval(1, 2)

        with pytest.raises(TypeError):
            self.interval * 'foo'

    def test_math_div(self):
        # Dividing by a scalar scales both endpoints down.
        expected = Interval(0, 0.5)
        actual = self.interval / 2.0
        assert expected == actual

        actual = self.interval
        actual /= 2.0
        assert expected == actual

        with pytest.raises(TypeError):
            self.interval / Interval(1, 2)

        with pytest.raises(TypeError):
            self.interval / 'foo'
| mit |
amiremadmarvasti/cuda-convnet2 | convdata.py | 174 | 14675 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from python_util.data import *
import numpy.random as nr
import numpy as n
import random as r
from time import time
from threading import Thread
from math import sqrt
import sys
#from matplotlib import pylab as pl
from PIL import Image
from StringIO import StringIO
from time import time
import itertools as it
class JPEGBatchLoaderThread(Thread):
    """Worker thread that decodes one batch of JPEG images in the background.

    The decoded batch dictionary is appended to `list_out`; the data
    provider joins the thread and picks the result up from that list.
    """
    def __init__(self, dp, batch_num, label_offset, list_out):
        Thread.__init__(self)
        self.list_out = list_out
        self.label_offset = label_offset
        self.dp = dp                  # owning data provider (holds decode parameters)
        self.batch_num = batch_num

    @staticmethod
    def load_jpeg_batch(rawdics, dp, label_offset):
        """Decode raw JPEG strings into flat image/label matrices.

        Returns a dict with 'data' (cases x pixels), 'labvec' (one label
        sampled per case) and 'labmat' (multi-hot label matrix); each is
        tiled dp.data_mult times to support multiview testing.
        """
        if type(rawdics) != list:
            rawdics = [rawdics]
        nc_total = sum(len(r['data']) for r in rawdics)

        jpeg_strs = list(it.chain.from_iterable(rd['data'] for rd in rawdics))
        labels = list(it.chain.from_iterable(rd['labels'] for rd in rawdics))

        img_mat = n.empty((nc_total * dp.data_mult, dp.inner_pixels * dp.num_colors), dtype=n.float32)
        lab_mat = n.zeros((nc_total, dp.get_num_classes()), dtype=n.float32)
        # JPEG decoding, cropping and (optional) mirroring happen in native code.
        dp.convnet.libmodel.decodeJpeg(jpeg_strs, img_mat, dp.img_size, dp.inner_size, dp.test, dp.multiview)
        # A case may carry several labels; labvec keeps one picked at random
        # (-1 + offset when a case has no labels at all).
        lab_vec = n.tile(n.asarray([(l[nr.randint(len(l))] if len(l) > 0 else -1) + label_offset for l in labels], dtype=n.single).reshape((nc_total, 1)), (dp.data_mult, 1))
        for c in xrange(nc_total):
            lab_mat[c, [z + label_offset for z in labels[c]]] = 1
        lab_mat = n.tile(lab_mat, (dp.data_mult, 1))

        return {'data': img_mat[:nc_total * dp.data_mult, :],
                'labvec': lab_vec[:nc_total * dp.data_mult, :],
                'labmat': lab_mat[:nc_total * dp.data_mult, :]}

    def run(self):
        # Fetch the raw batch from the provider, decode it, publish the result.
        rawdics = self.dp.get_batch(self.batch_num)
        p = JPEGBatchLoaderThread.load_jpeg_batch(rawdics,
                                                  self.dp,
                                                  self.label_offset)
        self.list_out.append(p)
class ColorNoiseMakerThread(Thread):
    """Background worker that samples PCA-based color noise.

    Draws `num_noise` standard-normal 3-vectors, scales them by the PCA
    standard deviations, projects them onto the PCA eigenvectors, and
    appends the resulting (num_noise, 3) single-precision-derived array
    to `list_out`.
    """

    def __init__(self, pca_stdevs, pca_vecs, num_noise, list_out):
        Thread.__init__(self)
        self.pca_stdevs = pca_stdevs
        self.pca_vecs = pca_vecs
        self.num_noise = num_noise
        self.list_out = list_out

    def run(self):
        samples = nr.randn(self.num_noise, 3).astype(n.single)
        scaled = samples * self.pca_stdevs.T
        self.list_out.append(n.dot(scaled, self.pca_vecs.T))
class ImageDataProvider(LabeledDataProvider):
    """Data provider serving JPEG-encoded image batches.

    Decoding/cropping runs asynchronously in a JPEGBatchLoaderThread and
    optional PCA color noise is produced by a ColorNoiseMakerThread, so
    the consumer (GPU training loop) rarely waits on the CPU pipeline.
    Batches are double-buffered (self.data has two slots) so the matrix
    returned last is not overwritten while still in use.
    """
    def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
        LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
        self.data_mean = self.batch_meta['data_mean'].astype(n.single)
        # Per-channel PCA of pixel colors: eigenvectors + stdevs, used for noise.
        self.color_eig = self.batch_meta['color_pca'][1].astype(n.single)
        self.color_stdevs = n.c_[self.batch_meta['color_pca'][0].astype(n.single)]
        self.color_noise_coeff = dp_params['color_noise']
        self.num_colors = 3
        self.img_size = int(sqrt(self.batch_meta['num_vis'] / self.num_colors))
        self.mini = dp_params['minibatch_size']
        # Crops are inner_size x inner_size taken from img_size x img_size images.
        self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.img_size
        self.inner_pixels = self.inner_size ** 2
        self.border_size = (self.img_size - self.inner_size) / 2
        self.multiview = dp_params['multiview_test'] and test
        self.num_views = 5 * 2  # 4 corners + center, each plus its mirror
        self.data_mult = self.num_views if self.multiview else 1
        self.batch_size = self.batch_meta['batch_size']
        self.label_offset = 0 if 'label_offset' not in self.batch_meta else self.batch_meta['label_offset']
        self.scalar_mean = dp_params['scalar_mean']
        # Maintain pointers to previously-returned data matrices so they don't get garbage collected.
        self.data = [None, None]  # These are pointers to previously-returned data matrices
        self.loader_thread, self.color_noise_thread = None, None
        self.convnet = dp_params['convnet']
        self.num_noise = self.batch_size
        self.batches_generated, self.loaders_started = 0, 0
        # Mean image restricted to the central crop, flattened to one row.
        self.data_mean_crop = self.data_mean.reshape((self.num_colors, self.img_size, self.img_size))[:, self.border_size:self.border_size + self.inner_size, self.border_size:self.border_size + self.inner_size].reshape((1, 3 * self.inner_size ** 2))
        if self.scalar_mean >= 0:
            # A non-negative scalar_mean overrides the per-pixel mean image.
            self.data_mean_crop = self.scalar_mean

    def showimg(self, img):
        """Debug helper: display one flattened RGB image with pylab."""
        from matplotlib import pylab as pl
        pixels = img.shape[0] / 3
        size = int(sqrt(pixels))
        img = img.reshape((3, size, size)).swapaxes(0, 2).swapaxes(0, 1)
        pl.imshow(img, interpolation='nearest')
        pl.show()

    def get_data_dims(self, idx=0):
        # idx 0: image pixels; idx 2: multi-hot label matrix; otherwise scalar label.
        if idx == 0:
            return self.inner_size ** 2 * 3
        if idx == 2:
            return self.get_num_classes()
        return 1

    def start_loader(self, batch_idx):
        """Kick off background decoding of the given batch index."""
        self.load_data = []
        self.loader_thread = JPEGBatchLoaderThread(self,
                                                   self.batch_range[batch_idx],
                                                   self.label_offset,
                                                   self.load_data)
        self.loader_thread.start()

    def start_color_noise_maker(self):
        """Kick off background sampling of PCA color noise; returns the output list."""
        color_noise_list = []
        self.color_noise_thread = ColorNoiseMakerThread(self.color_stdevs, self.color_eig, self.num_noise, color_noise_list)
        self.color_noise_thread.start()
        return color_noise_list

    def set_labels(self, datadic):
        # Labels are produced by the loader thread; nothing to do here.
        pass

    def get_data_from_loader(self):
        """Join the loader thread, store its batch, and prefetch the next one."""
        if self.loader_thread is None:
            # First call: load synchronously, then start prefetching.
            self.start_loader(self.batch_idx)
            self.loader_thread.join()
            self.data[self.d_idx] = self.load_data[0]

            self.start_loader(self.get_next_batch_idx())
        else:
            # Set the argument to join to 0 to re-enable batch reuse
            self.loader_thread.join()
            if not self.loader_thread.is_alive():
                self.data[self.d_idx] = self.load_data[0]
                self.start_loader(self.get_next_batch_idx())
            #else:
            #    print "Re-using batch"
        self.advance_batch()

    def add_color_noise(self):
        # At this point the data already has 0 mean.
        # So I'm going to add noise to it, but I'm also going to scale down
        # the original data. This is so that the overall scale of the training
        # data doesn't become too different from the test data.

        s = self.data[self.d_idx]['data'].shape
        cropped_size = self.get_data_dims(0) / 3
        ncases = s[0]

        if self.color_noise_thread is None:
            # First call: sample synchronously, then start prefetching noise.
            self.color_noise_list = self.start_color_noise_maker()
            self.color_noise_thread.join()
            self.color_noise = self.color_noise_list[0]
            self.color_noise_list = self.start_color_noise_maker()
        else:
            self.color_noise_thread.join(0)
            if not self.color_noise_thread.is_alive():
                self.color_noise = self.color_noise_list[0]
                self.color_noise_list = self.start_color_noise_maker()

        # Reshape so each row is one color channel of one case, add noise per channel.
        self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases * 3, cropped_size))
        self.color_noise = self.color_noise[:ncases, :].reshape((3 * ncases, 1))
        self.data[self.d_idx]['data'] += self.color_noise * self.color_noise_coeff
        self.data[self.d_idx]['data'] = self.data[self.d_idx]['data'].reshape((ncases, 3 * cropped_size))
        self.data[self.d_idx]['data'] *= 1.0 / (1.0 + self.color_noise_coeff)  # <--- NOTE: This is the slow line, 0.25sec. Down from 0.75sec when I used division.

    def get_next_batch(self):
        """Return (epoch, batchnum, [data.T, labvec.T, labmat.T]) for the next batch."""
        self.d_idx = self.batches_generated % 2  # alternate between the two buffers
        epoch, batchnum = self.curr_epoch, self.curr_batchnum

        self.get_data_from_loader()

        # Subtract mean
        self.data[self.d_idx]['data'] -= self.data_mean_crop

        if self.color_noise_coeff > 0 and not self.test:
            self.add_color_noise()
        self.batches_generated += 1

        return epoch, batchnum, [self.data[self.d_idx]['data'].T, self.data[self.d_idx]['labvec'].T, self.data[self.d_idx]['labmat'].T]

    # Takes as input an array returned by get_next_batch
    # Returns a (numCases, imgSize, imgSize, 3) array which can be
    # fed to pylab for plotting.
    # This is used by shownet.py to plot test case predictions.
    def get_plottable_data(self, data, add_mean=True):
        # NOTE(review): both branches of this conditional are identical —
        # looks like leftover from an edit; confirm the intended f_contiguous case.
        mean = self.data_mean_crop.reshape((data.shape[0], 1)) if data.flags.f_contiguous or self.scalar_mean else self.data_mean_crop.reshape((data.shape[0], 1))
        return n.require((data + (mean if add_mean else 0)).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1, 3).swapaxes(1, 2) / 255.0, dtype=n.single)
class CIFARDataProvider(LabeledDataProvider):
    """Data provider for CIFAR-style pickled batches, kept fully in memory.

    All batches in batch_range are unpickled up front; cropping (random
    during training, center crop or 9-view grid during testing) is done
    on the fly into a pair of reusable buffers.
    """
    def __init__(self, data_dir, batch_range=None, init_epoch=1, init_batchnum=None, dp_params=None, test=False):
        LabeledDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
        self.img_size = 32
        self.num_colors = 3
        self.inner_size = dp_params['inner_size'] if dp_params['inner_size'] > 0 else self.batch_meta['img_size']
        self.border_size = (self.img_size - self.inner_size) / 2
        self.multiview = dp_params['multiview_test'] and test
        self.num_views = 9  # 3x3 grid of crop positions for multiview testing
        self.scalar_mean = dp_params['scalar_mean']
        self.data_mult = self.num_views if self.multiview else 1
        self.data_dic = []
        for i in batch_range:
            # Load each batch, coerce labels to float32 and tile them for multiview.
            self.data_dic += [unpickle(self.get_data_file_name(i))]
            self.data_dic[-1]["labels"] = n.require(self.data_dic[-1]['labels'], dtype=n.single)
            self.data_dic[-1]["labels"] = n.require(n.tile(self.data_dic[-1]["labels"].reshape((1, n.prod(self.data_dic[-1]["labels"].shape))), (1, self.data_mult)), requirements='C')
            self.data_dic[-1]['data'] = n.require(self.data_dic[-1]['data'] - self.scalar_mean, dtype=n.single, requirements='C')

        # Two crop buffers so consecutive batches don't clobber each other.
        self.cropped_data = [n.zeros((self.get_data_dims(), self.data_dic[0]['data'].shape[1] * self.data_mult), dtype=n.single) for x in xrange(2)]

        self.batches_generated = 0
        # Mean image restricted to the central crop, as a column vector.
        self.data_mean = self.batch_meta['data_mean'].reshape((self.num_colors, self.img_size, self.img_size))[:, self.border_size:self.border_size + self.inner_size, self.border_size:self.border_size + self.inner_size].reshape((self.get_data_dims(), 1))

    def get_next_batch(self):
        """Return (epoch, batchnum, [cropped_data, labels]) for the next batch."""
        epoch, batchnum = self.curr_epoch, self.curr_batchnum
        self.advance_batch()
        bidx = batchnum - self.batch_range[0]

        cropped = self.cropped_data[self.batches_generated % 2]

        self.__trim_borders(self.data_dic[bidx]['data'], cropped)
        cropped -= self.data_mean
        self.batches_generated += 1
        return epoch, batchnum, [cropped, self.data_dic[bidx]['labels']]

    def get_data_dims(self, idx=0):
        # idx 0: pixels of the cropped image; otherwise scalar label.
        return self.inner_size ** 2 * self.num_colors if idx == 0 else 1

    # Takes as input an array returned by get_next_batch
    # Returns a (numCases, imgSize, imgSize, 3) array which can be
    # fed to pylab for plotting.
    # This is used by shownet.py to plot test case predictions.
    def get_plottable_data(self, data):
        return n.require((data + self.data_mean).T.reshape(data.shape[1], 3, self.inner_size, self.inner_size).swapaxes(1, 3).swapaxes(1, 2) / 255.0, dtype=n.single)

    def __trim_borders(self, x, target):
        """Fill `target` with crops of `x` (random when training, fixed when testing)."""
        y = x.reshape(self.num_colors, self.img_size, self.img_size, x.shape[1])

        if self.test:  # don't need to loop over cases
            if self.multiview:
                # 3x3 grid of deterministic crop positions.
                start_positions = [(0, 0), (0, self.border_size), (0, self.border_size * 2),
                                   (self.border_size, 0), (self.border_size, self.border_size), (self.border_size, self.border_size * 2),
                                   (self.border_size * 2, 0), (self.border_size * 2, self.border_size), (self.border_size * 2, self.border_size * 2)]
                end_positions = [(sy + self.inner_size, sx + self.inner_size) for (sy, sx) in start_positions]
                for i in xrange(self.num_views):
                    target[:, i * x.shape[1]:(i + 1) * x.shape[1]] = y[:, start_positions[i][0]:end_positions[i][0], start_positions[i][1]:end_positions[i][1], :].reshape((self.get_data_dims(), x.shape[1]))
            else:
                pic = y[:, self.border_size:self.border_size + self.inner_size, self.border_size:self.border_size + self.inner_size, :]  # just take the center for now
                target[:, :] = pic.reshape((self.get_data_dims(), x.shape[1]))
        else:
            for c in xrange(x.shape[1]):  # loop over cases
                # Random crop position per case (data augmentation).
                startY, startX = nr.randint(0, self.border_size * 2 + 1), nr.randint(0, self.border_size * 2 + 1)
                endY, endX = startY + self.inner_size, startX + self.inner_size
                pic = y[:, startY:endY, startX:endX, c]
                if nr.randint(2) == 0:  # also flip the image with 50% probability
                    pic = pic[:, :, ::-1]
                target[:, c] = pic.reshape((self.get_data_dims(),))
class DummyConvNetLogRegDataProvider(LabeledDummyDataProvider):
    """Random-data provider used for smoke tests / gradient checking."""
    def __init__(self, data_dim):
        LabeledDummyDataProvider.__init__(self, data_dim)
        # Assume 3-channel square images of data_dim total values.
        self.img_size = int(sqrt(data_dim / 3))

    def get_next_batch(self):
        """Repackage the dummy (data, labels) pair into the convnet's list format."""
        epoch, batchnum, dic = LabeledDummyDataProvider.get_next_batch(self)
        dic = {'data': dic[0], 'labels': dic[1]}
        print dic['data'].shape, dic['labels'].shape
        return epoch, batchnum, [dic['data'], dic['labels']]

    # Returns the dimensionality of the two data matrices returned by get_next_batch
    def get_data_dims(self, idx=0):
        return self.batch_meta['num_vis'] if idx == 0 else 1
| apache-2.0 |
kristoforcarlson/nest-simulator-fork | pynest/examples/intrinsic_currents_subthreshold.py | 9 | 7172 | # -*- coding: utf-8 -*-
#
# intrinsic_currents_subthreshold.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Intrinsic currents subthreshold
-------------------------------
This example illustrates how to record from a model with multiple
intrinsic currents and visualize the results. This is illustrated
using the `ht_neuron` which has four intrinsic currents: I_NaP,
I_KNa, I_T, and I_h. It is a slightly simplified implementation of
neuron model proposed in Hill and Tononi (2005) **Modeling Sleep and Wakefulness in the
Thalamocortical System** *J Neurophysiol* 93:1671 http://dx.doi.org/10.1152/jn.00915.2004 .
The neuron is driven by DC current, which is alternated
between depolarizing and hyperpolarizing. Hyperpolarization
intervals become increasingly longer.
See also: intrinsic_currents_spiking.py
'''
'''
We import all necessary modules for simulation, analysis and
plotting. Additionally, we set the verbosity using `set_verbosity` to
suppress info messages. We also reset the kernel to be sure to start
with a clean NEST.
'''
import nest
import numpy as np
import matplotlib.pyplot as plt
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
'''
We define simulation parameters:
- The length of depolarization intervals
- The length of hyperpolarization intervals
- The amplitude for de- and hyperpolarizing currents
- The end of the time window to plot
'''
n_blocks = 5
t_block = 20.
t_dep = [t_block] * n_blocks
t_hyp = [t_block * 2**n for n in range(n_blocks)]
I_dep = 10.
I_hyp = -5.
t_end = 500.
'''
We create the one neuron instance and the DC current generator
and store the returned handles.
'''
nrn = nest.Create('ht_neuron')
dc = nest.Create('dc_generator')
'''
We create a multimeter to record
- membrane potential `V_m`
- threshold value `Theta`
- intrinsic currents `I_NaP`, `I_KNa`, `I_T`, `I_h`
by passing these names in the `record_from` list.
To find out which quantities can be recorded from a given neuron,
run::
nest.GetDefaults('ht_neuron')['recordables']
The result will contain an entry like::
<SLILiteral: V_m>
for each recordable quantity. You need to pass the value of the `SLILiteral`,
in this case `V_m` in the `record_from` list.
We want to record values with 0.1 ms resolution, so we set the
recording interval as well; the default recording resolution is 1 ms.
'''
# create multimeter and configure it to record all information
# we want at 0.1ms resolution
mm = nest.Create('multimeter',
params={'interval': 0.1,
'record_from': ['V_m', 'Theta',
'I_NaP', 'I_KNa', 'I_T', 'I_h']}
)
'''
We connect the DC generator and the multimeter to the neuron.
Note that the multimeter, just like the voltmeter is connected
to the neuron, not the neuron to the multimeter.
'''
nest.Connect(dc, nrn)
nest.Connect(mm, nrn)
'''
We are ready to simulate. We alternate between driving the neuron
with depolarizing and hyperpolarizing currents. Before each simulation
interval, we set the amplitude of the DC generator to the correct value.
'''
for t_sim_dep, t_sim_hyp in zip(t_dep, t_hyp):
nest.SetStatus(dc, {'amplitude': I_dep})
nest.Simulate(t_sim_dep)
nest.SetStatus(dc, {'amplitude': I_hyp})
nest.Simulate(t_sim_hyp)
'''
We now fetch the data recorded by the multimeter. The data are
returned as a dictionary with entry ``'times'`` containing timestamps
for all recorded data, plus one entry per recorded quantity.
All data is contained in the ``'events'`` entry of the status dictionary
returned by the multimeter. Because all NEST function return arrays,
we need to pick out element ``0`` from the result of `GetStatus`.
'''
data = nest.GetStatus(mm)[0]['events']
t = data['times']
'''
The next step is to plot the results. We create a new figure, add a
single subplot and plot at first membrane potential and threshold.
'''
# Plot membrane potential and threshold against time on a shared voltage axis.
fig = plt.figure()
Vax = fig.add_subplot(111)
Vax.plot(t, data['V_m'], 'b-', lw=2, label=r'$V_m$')
Vax.plot(t, data['Theta'], 'g-', lw=2, label=r'$\Theta$')
Vax.set_ylim(-80., 0.)
Vax.set_ylabel('Voltage [mV]')  # fixed broken label: was 'Voltageinf [mV]'
Vax.set_xlabel('Time [ms]')
'''
To plot the input current, we need to create an input
current trace. We construct it from the durations of the de- and
hyperpolarizing inputs and add the delay in the connection between
DC generator and neuron:
1. We find the delay by checking the status of the dc->nrn connection.
1. We find the resolution of the simulation from the kernel status.
1. Each current interval begins one time step after the previous interval,
is delayed by the delay and effective for the given duration.
1. We build the time axis incrementally. We only add the delay when adding
the first time point after t=0. All subsequent points are then automatically
shifted by the delay.
'''
delay = nest.GetStatus(nest.GetConnections(dc, nrn))[0]['delay']
dt = nest.GetKernelStatus('resolution')
t_dc, I_dc = [0], [0]
for td, th in zip(t_dep, t_hyp):
t_prev = t_dc[-1]
t_start_dep = t_prev + dt if t_prev > 0 else t_prev + dt + delay
t_end_dep = t_start_dep + td
t_start_hyp = t_end_dep + dt
t_end_hyp = t_start_hyp + th
t_dc.extend([t_start_dep, t_end_dep, t_start_hyp, t_end_hyp])
I_dc.extend([I_dep, I_dep, I_hyp, I_hyp])
'''
The following function turns a name such as I_NaP into proper TeX code
$I_{\mathrm{NaP}}$ for a pretty label.
'''
def texify_name(name):
    """Return TeX math markup for a quantity name, e.g. 'I_NaP' -> $I_{\\mathrm{NaP}}$.

    Defined with `def` instead of a name-bound lambda (PEP 8 E731); the
    star-unpacking keeps the original's tolerance of extra '_' parts.
    """
    return r'${}_{{\mathrm{{{}}}}}$'.format(*name.split('_'))
'''
Next, we add a right vertical axis and plot the currents with respect
to that axis.
'''
Iax = Vax.twinx()
Iax.plot(t_dc, I_dc, 'k-', lw=2, label=texify_name('I_DC'))
for iname, color in (('I_h', 'maroon'), ('I_T', 'orange'),
('I_NaP', 'crimson'), ('I_KNa', 'aqua')):
Iax.plot(t, data[iname], color=color, lw=2, label=texify_name(iname))
Iax.set_xlim(0, t_end)
Iax.set_ylim(-10., 15.)
Iax.set_ylabel('Current [pA]')
Iax.set_title('ht_neuron driven by DC current')
'''
We need to make a little extra effort to combine lines from the two axis
into one legend.
'''
lines_V, labels_V = Vax.get_legend_handles_labels()
lines_I, labels_I = Iax.get_legend_handles_labels()
try:
Iax.legend(lines_V + lines_I, labels_V + labels_I, fontsize='small')
except TypeError:
Iax.legend(lines_V + lines_I, labels_V + labels_I) # work-around for older Matplotlib versions
'''
Note that I_KNa is not activated in this example because the neuron does
not spike. I_T has only a very small amplitude.
'''
| gpl-2.0 |
konstantint/matplotlib-venn | tests/region_test.py | 1 | 4302 | '''
Venn diagram plotting routines.
Test module (meant to be used via py.test).
Tests of the classes and methods in _regions.py
Copyright 2014, Konstantin Tretyakov.
http://kt.era.ee/
Licensed under MIT license.
'''
import pytest
import os
import numpy as np
from tests.utils import exec_ipynb
from matplotlib_venn._region import VennCircleRegion, VennArcgonRegion, VennRegionException
from matplotlib_venn._math import tol
def test_circle_region():
    """Exercise VennCircleRegion.subtract_and_intersect_circle.

    Covers: invalid construction, non-intersecting and identical circles,
    hole-punching (unsupported), circles tangent from the inside and the
    outside, and genuinely intersecting circles (checked in both
    directions). Invariant checked throughout: area(subtraction) +
    area(intersection) == area(original region), within tolerance.
    """
    # A negative radius must be rejected.
    with pytest.raises(VennRegionException):
        vcr = VennCircleRegion((0, 0), -1)

    vcr = VennCircleRegion((0, 0), 10)
    assert abs(vcr.size() - np.pi * 100) <= tol

    # Interact with non-intersecting circle
    sr, ir = vcr.subtract_and_intersect_circle((11, 1), 1)
    assert sr == vcr
    assert ir.is_empty()

    # Interact with self
    sr, ir = vcr.subtract_and_intersect_circle((0, 0), 10)
    assert sr.is_empty()
    assert ir == vcr

    # Interact with a circle that makes a hole
    with pytest.raises(VennRegionException):
        sr, ir = vcr.subtract_and_intersect_circle((0, 8.9), 1)

    # Interact with a circle that touches the side from the inside
    for (a, r) in [(0, 1), (90, 2), (180, 3), (290, 0.01), (42, 9.99), (-0.1, 9.999), (180.1, 0.001)]:
        cx = np.cos(a * np.pi / 180.0) * (10 - r)
        cy = np.sin(a * np.pi / 180.0) * (10 - r)
        #print "Next test case", a, r, cx, cy, r
        TEST_TOLERANCE = tol if r > 0.001 and r < 9.999 else 1e-4  # For tricky circles the numeric errors for arc lengths are just too big here
        sr, ir = vcr.subtract_and_intersect_circle((cx, cy), r)
        sr.verify()
        ir.verify()
        assert len(sr.arcs) == 2 and len(ir.arcs) == 2
        # NOTE(review): this inner loop reuses `a`, shadowing the angle above.
        for a in sr.arcs:
            assert abs(a.length_degrees() - 360) < TEST_TOLERANCE
        assert abs(ir.arcs[0].length_degrees() - 0) < TEST_TOLERANCE
        assert abs(ir.arcs[1].length_degrees() - 360) < TEST_TOLERANCE
        assert abs(sr.size() + np.pi * r ** 2 - vcr.size()) < tol
        assert abs(ir.size() - np.pi * r ** 2) < tol

    # Interact with a circle that touches the side from the outside
    for (a, r) in [(0, 1), (90, 2), (180, 3), (290, 0.01), (42, 9.99), (-0.1, 9.999), (180.1, 0.001)]:
        cx = np.cos(a * np.pi / 180.0) * (10 + r)
        cy = np.sin(a * np.pi / 180.0) * (10 + r)
        sr, ir = vcr.subtract_and_intersect_circle((cx, cy), r)
        # Depending on numeric roundoff we may get either an self and VennEmptyRegion or two arc regions. In any case the sizes should match
        assert abs(sr.size() + ir.size() - vcr.size()) < tol
        if (sr == vcr):
            assert ir.is_empty()
        else:
            sr.verify()
            ir.verify()
            assert len(sr.arcs) == 2 and len(ir.arcs) == 2
            assert abs(sr.arcs[0].length_degrees() - 0) < tol
            assert abs(sr.arcs[1].length_degrees() - 360) < tol
            assert abs(ir.arcs[0].length_degrees() - 0) < tol
            assert abs(ir.arcs[1].length_degrees() - 0) < tol

    # Interact with some cases of intersecting circles
    for (a, r) in [(0, 1), (90, 2), (180, 3), (290, 0.01), (42, 9.99), (-0.1, 9.999), (180.1, 0.001)]:
        cx = np.cos(a * np.pi / 180.0) * 10
        cy = np.sin(a * np.pi / 180.0) * 10
        sr, ir = vcr.subtract_and_intersect_circle((cx, cy), r)
        sr.verify()
        ir.verify()
        assert len(sr.arcs) == 2 and len(ir.arcs) == 2
        assert abs(sr.size() + ir.size() - vcr.size()) < tol
        assert sr.size() > 0
        assert ir.size() > 0

        # Do intersection the other way
        vcr2 = VennCircleRegion([cx, cy], r)
        sr2, ir2 = vcr2.subtract_and_intersect_circle(vcr.center, vcr.radius)
        sr2.verify()
        ir2.verify()
        assert len(sr2.arcs) == 2 and len(ir2.arcs) == 2
        assert abs(sr2.size() + ir2.size() - vcr2.size()) < tol
        assert sr2.size() > 0
        assert ir2.size() > 0

        # NOTE(review): this compares each arc with itself, which is trivially
        # true — possibly ir2.arcs[i] was intended; TODO confirm.
        for i in range(2):
            assert ir.arcs[i].approximately_equal(ir.arcs[i])
def test_region_visual():
    """Smoke test: execute the region_visual notebook next to this file."""
    notebook_path = os.path.join(os.path.dirname(__file__), "region_visual.ipynb")
    exec_ipynb(notebook_path)
def test_region_label_visual():
    """Smoke test: execute the region_label_visual notebook next to this file."""
    notebook_path = os.path.join(os.path.dirname(__file__), "region_label_visual.ipynb")
    exec_ipynb(notebook_path)
| mit |
huzq/scikit-learn | examples/preprocessing/plot_scaling_importance.py | 34 | 5381 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Importance of Feature Scaling
=========================================================
Feature scaling through standardization (or Z-score normalization)
can be an important preprocessing step for many machine learning
algorithms. Standardization involves rescaling the features such
that they have the properties of a standard normal distribution
with a mean of zero and a standard deviation of one.
While many algorithms (such as SVM, K-nearest neighbors, and logistic
regression) require features to be normalized, intuitively we can
think of Principal Component Analysis (PCA) as being a prime example
of when normalization is important. In PCA we are interested in the
components that maximize the variance. If one component (e.g. human
height) varies less than another (e.g. weight) because of their
respective scales (meters vs. kilos), PCA might determine that the
direction of maximal variance more closely corresponds with the
'weight' axis, if those features are not scaled. As a change in
height of one meter can be considered much more important than the
change in weight of one kilogram, this is clearly incorrect.
To illustrate this, PCA is performed comparing the use of data with
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` applied,
to unscaled data. The results are visualized and a clear difference noted.
The 1st principal component in the unscaled set can be seen. It can be seen
that feature #13 dominates the direction, being a whole two orders of
magnitude above the other features. This is contrasted when observing
the principal component for the scaled version of the data. In the scaled
version, the orders of magnitude are roughly the same across all the features.
The dataset used is the Wine Dataset available at UCI. This dataset
has continuous features that are heterogeneous in scale due to differing
properties that they measure (i.e alcohol content, and malic acid).
The transformed data is then used to train a naive Bayes classifier, and a
clear difference in prediction accuracies is observed wherein the dataset
which is scaled before PCA vastly outperforms the unscaled version.
"""
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.naive_bayes import GaussianNB
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.datasets import load_wine
from sklearn.pipeline import make_pipeline
print(__doc__)
# Code source: Tyler Lanigan <tylerlanigan@gmail.com>
# Sebastian Raschka <mail@sebastianraschka.com>
# License: BSD 3 clause
RANDOM_STATE = 42
FIG_SIZE = (10, 7)

# Wine dataset: 13 heterogeneous physico-chemical features, 3 classes.
features, target = load_wine(return_X_y=True)

# Make a train/test split using 30% test size
X_train, X_test, y_train, y_test = train_test_split(features, target,
                                                    test_size=0.30,
                                                    random_state=RANDOM_STATE)

# Fit to data and predict using pipelined GNB and PCA (no scaling).
unscaled_clf = make_pipeline(PCA(n_components=2), GaussianNB())
unscaled_clf.fit(X_train, y_train)
pred_test = unscaled_clf.predict(X_test)

# Fit to data and predict using pipelined scaling, GNB and PCA.
std_clf = make_pipeline(StandardScaler(), PCA(n_components=2), GaussianNB())
std_clf.fit(X_train, y_train)
pred_test_std = std_clf.predict(X_test)

# Show prediction accuracies in scaled and unscaled data.
print('\nPrediction accuracy for the normal test dataset with PCA')
print('{:.2%}\n'.format(metrics.accuracy_score(y_test, pred_test)))

print('\nPrediction accuracy for the standardized test dataset with PCA')
print('{:.2%}\n'.format(metrics.accuracy_score(y_test, pred_test_std)))

# Extract PCA from pipeline
pca = unscaled_clf.named_steps['pca']
pca_std = std_clf.named_steps['pca']

# Show first principal components
print('\nPC 1 without scaling:\n', pca.components_[0])
print('\nPC 1 with scaling:\n', pca_std.components_[0])

# Use PCA without and with scale on X_train data for visualization.
X_train_transformed = pca.transform(X_train)
scaler = std_clf.named_steps['standardscaler']
X_train_std_transformed = pca_std.transform(scaler.transform(X_train))

# visualize standardized vs. untouched dataset with PCA performed
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=FIG_SIZE)

# One scatter call per class, with a distinct color/marker per class.
for l, c, m in zip(range(0, 3), ('blue', 'red', 'green'), ('^', 's', 'o')):
    ax1.scatter(X_train_transformed[y_train == l, 0],
                X_train_transformed[y_train == l, 1],
                color=c,
                label='class %s' % l,
                alpha=0.5,
                marker=m
                )

for l, c, m in zip(range(0, 3), ('blue', 'red', 'green'), ('^', 's', 'o')):
    ax2.scatter(X_train_std_transformed[y_train == l, 0],
                X_train_std_transformed[y_train == l, 1],
                color=c,
                label='class %s' % l,
                alpha=0.5,
                marker=m
                )

ax1.set_title('Training dataset after PCA')
ax2.set_title('Standardized training dataset after PCA')

for ax in (ax1, ax2):
    ax.set_xlabel('1st principal component')
    ax.set_ylabel('2nd principal component')
    ax.legend(loc='upper right')
    ax.grid()

plt.tight_layout()
plt.show()
| bsd-3-clause |
bartslinger/paparazzi | sw/misc/attitude_reference/pat/utils.py | 42 | 6283 | #
# Copyright 2013-2014 Antoine Drouin (poinix@gmail.com)
#
# This file is part of PAT.
#
# PAT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PAT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PAT. If not, see <http://www.gnu.org/licenses/>.
#
"""
Utility functions
"""
import math
import numpy as np
import numpy.linalg as linalg
import pdb
"""
Unit conversions
"""
def rad_of_deg(d):
    """Convert degrees to radians."""
    turns = d / 180.
    return turns * math.pi
def sqrad_of_sqdeg(d):
    """Convert square degrees to square radians (e.g. angular variances).

    BUG FIX: the original divided by (180. * math.pi) ** 2, which is not
    the inverse of sqdeg_of_sqrad() below (that multiplies by
    (180. / math.pi) ** 2).  The correct scale factor is (pi / 180)**2,
    i.e. division by (180 / pi)**2, so the two functions round-trip.
    """
    return d / (180. / math.pi) ** 2
def deg_of_rad(r):
    """Convert radians to degrees."""
    scaled = r * 180.
    return scaled / math.pi
def sqdeg_of_sqrad(r):
    """Convert square radians to square degrees (e.g. angular variances)."""
    factor = (180. / math.pi) ** 2
    return r * factor
def rps_of_rpm(r):
    """Convert revolutions per minute to radians per second."""
    rad_per_minute = r * 2. * math.pi
    return rad_per_minute / 60.
def rpm_of_rps(r):
    """Convert radians per second to revolutions per minute."""
    revs_per_second = r / 2. / math.pi
    return revs_per_second * 60.
# http://en.wikipedia.org/wiki/Nautical_mile
def m_of_NM(nm):
    """Convert nautical miles to meters (1 NM = 1852 m exactly)."""
    return nm * 1852.
def NM_of_m(m):
    """Convert meters to nautical miles (1 NM = 1852 m exactly)."""
    return m / 1852.
# http://en.wikipedia.org/wiki/Knot_(speed)
def mps_of_kt(kt):
    """Convert knots to meters per second (1 kt ~= 0.514444 m/s)."""
    return kt * 0.514444
def kt_of_mps(mps):
    """Convert meters per second to knots (1 kt ~= 0.514444 m/s)."""
    return mps / 0.514444
# http://en.wikipedia.org/wiki/Foot_(unit)
def m_of_ft(ft):
    """Convert feet to meters (1 ft = 0.3048 m exactly)."""
    return ft * 0.3048
def ft_of_m(m):
    """Convert meters to feet (1 ft = 0.3048 m exactly)."""
    return m / 0.3048
# feet per minute to/from meters per second
def ftpm_of_mps(mps):
    """Convert meters per second to feet per minute.

    Uses the historical rounded factor 3.28084 ft/m (not exactly 1/0.3048).
    """
    meters_per_minute = mps * 60.
    return meters_per_minute * 3.28084
def mps_of_ftpm(ftpm):
    """Convert feet per minute to meters per second.

    Uses the historical rounded factor 3.28084 ft/m (not exactly 1/0.3048).
    """
    feet_per_second = ftpm / 60.
    return feet_per_second / 3.28084
"""
Clipping
"""
def norm_angle_0_2pi(a):
    """Normalize the angle a (radians) into the interval (0, 2*pi].

    PERF FIX: the original repeatedly added/subtracted 2*pi, which is
    O(|a|); modular arithmetic makes this O(1) for any magnitude.
    Note the range is half-open from below: 0 maps to 2*pi, matching the
    original loop's behavior.
    """
    two_pi = 2. * math.pi
    a %= two_pi          # Python's % with a positive divisor yields [0, 2*pi)
    if a <= 0.:          # fold 0 (and -0.0) up to 2*pi
        a += two_pi
    return a
def norm_angle_mpi_pi(a):
    """Normalize the angle a (radians) into the interval (-pi, pi].

    PERF FIX: the original looped in steps of 2*pi, which is O(|a|);
    this closed form is O(1).  `(pi - a) % (2*pi)` lies in [0, 2*pi),
    so `pi - ...` lies in (-pi, pi], exactly the original loop's range
    (both +pi and -pi map to +pi).
    """
    return math.pi - (math.pi - a) % (2. * math.pi)
#
def saturate(_v, _min, _max):
    """Clamp the scalar _v to the interval [_min, _max].

    NOTE: this module later redefines `saturate` with a vector signature,
    which shadows this scalar version for `import *` users.
    """
    if _min <= _v <= _max:
        return _v
    return _min if _v < _min else _max
"""
Plotting
"""
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# Keyword arguments applied to every axis title set by decorate().
my_title_spec = {'color': 'k', 'fontsize': 20}
def save_if(filename):
    """Save the current matplotlib figure to `filename` (80 dpi) when
    `filename` is truthy; do nothing otherwise."""
    if filename: matplotlib.pyplot.savefig(filename, dpi=80)
def prepare_fig(fig=None, window_title=None, figsize=(20.48, 10.24), margins=None):
    """Create a new matplotlib figure, or reuse `fig` if one is given.

    margins: optional (left, bottom, right, top, wspace, hspace) tuple
             forwarded to subplots_adjust.
    window_title: optional title for the backend window.
    Returns the figure.
    """
    if fig is None:
        fig = plt.figure(figsize=figsize)
    # else:
    #    plt.figure(fig.number)
    if margins:
        left, bottom, right, top, wspace, hspace = margins
        fig.subplots_adjust(left=left, right=right, bottom=bottom, top=top,
                            hspace=hspace, wspace=wspace)
    if window_title:
        fig.canvas.set_window_title(window_title)
    return fig
def decorate(ax, title=None, xlab=None, ylab=None, legend=None, xlim=None, ylim=None):
    """Apply the module's common axis decorations to `ax`: a faint grid,
    then (each only if provided) axis labels, a title styled with
    my_title_spec, a legend, and explicit x/y limits."""
    ax.xaxis.grid(color='k', linestyle='-', linewidth=0.2)
    ax.yaxis.grid(color='k', linestyle='-', linewidth=0.2)
    if xlab:
        ax.xaxis.set_label_text(xlab)
    if ylab:
        ax.yaxis.set_label_text(ylab)
    if title:
        ax.set_title(title, my_title_spec)
    # legend/xlim/ylim use `is not None` so empty sequences still apply.
    if legend is not None:
        ax.legend(legend, loc='best')
    if xlim is not None:
        ax.set_xlim(xlim[0], xlim[1])
    if ylim is not None:
        ax.set_ylim(ylim[0], ylim[1])
def ensure_ylim(ax, yspan):
    """Widen ax's y limits symmetrically about their midpoint so that the
    visible range spans at least `yspan`; leave wider ranges untouched."""
    ylo, yhi = ax.get_ylim()
    if yhi - ylo < yspan:
        center = (ylo + yhi) / 2
        half = yspan / 2
        ax.set_ylim(center - half, center + half)
def write_text(nrows, ncols, plot_number, text, colspan=1, loc=None, filename=None):
    """Render lines of `text` inside one (possibly column-spanning) cell of
    an nrows x ncols grid, with the axes hidden.

    plot_number: 1-based cell index, row-major (as for plt.subplot).
    loc:         per-line [x, y] anchor in the cell's 0-10 data space;
                 defaults to [[0.5, 9.7]].
    filename:    when truthy, the figure is saved via save_if().

    BUG FIX: the original used the mutable default `loc=[[0.5, 9.7]]`,
    shared across calls; a None sentinel avoids that.
    """
    if loc is None:
        loc = [[0.5, 9.7]]
    gs = gridspec.GridSpec(nrows, ncols)
    row, col = divmod(plot_number - 1, ncols)
    ax = plt.subplot(gs[row, col:col + colspan])
    plt.axis([0, 10, 0, 10])
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # loc must provide an anchor for every line of text (IndexError otherwise,
    # as in the original).
    for i, line in enumerate(text):
        x, y = loc[i]
        plt.text(x, y, line, ha='left', va='top')
    save_if(filename)
def plot_in_grid(time, plots, ncol, figure=None, window_title="None", legend=None, filename=None,
                 margins=(0.04, 0.08, 0.93, 0.96, 0.20, 0.34)):
    """Plot several time series in a grid of subplots, ncol per row.

    plots:  iterable of (title, ylab, data) triplets, all sharing `time`.
    legend: optional legend entries applied to every subplot.
    Returns the matplotlib figure (created via prepare_fig if needed);
    saves it through save_if(filename).
    """
    nrow = math.ceil(len(plots) / float(ncol))
    figsize = (10.24 * ncol, 2.56 * nrow)
    figure = prepare_fig(figure, window_title, figsize=figsize, margins=margins)
    for i, (title, ylab, data) in enumerate(plots):
        axis = figure.add_subplot(nrow, ncol, i + 1)
        axis.plot(time, data)
        decorate(axis, title=title, ylab=ylab)
        if legend is not None:
            axis.legend(legend, loc='best')
    save_if(filename)
    return figure
"""
Misc
"""
def num_jacobian(X, U, P, dyn, eps=0.1):
    """Numerically estimate the Jacobians of a dynamics function.

    dyn is called as dyn(X, t, U, P) with t = 0.  Central finite
    differences with step `eps` are used for every component.

    GENERALIZATION: the step size used to be hard-coded to 0.1; it is now
    the keyword parameter `eps` with the same default, so existing callers
    are unaffected.

    Returns (A, B): A = d(dyn)/dX with shape (len(X), len(X)),
                    B = d(dyn)/dU with shape (len(X), len(U)).
    """
    s_size = len(X)
    i_size = len(U)
    # State Jacobian: perturb one state component at a time.
    A = np.zeros((s_size, s_size))
    for i, dx in enumerate(np.diag([eps] * s_size)):
        A[:, i] = (dyn(X + dx / 2, 0, U, P) - dyn(X - dx / 2, 0, U, P)) / eps
    # Input Jacobian: perturb one input component at a time.
    B = np.zeros((s_size, i_size))
    for i, du in enumerate(np.diag([eps] * i_size)):
        B[:, i] = (dyn(X, 0, U + du / 2, P) - dyn(X, 0, U - du / 2, P)) / eps
    return A, B
def saturate(V, Sats):
    """Clamp each component of the vector V to the interval given by the
    matching row of Sats (column 0: lower bound, column 1: upper bound).

    Returns a fresh array; V itself is not modified.
    NOTE: this redefines (shadows) the scalar saturate() declared earlier
    in this module.
    """
    clipped = np.array(V)
    for i, (lo, hi) in enumerate(Sats):
        if clipped[i] < lo:
            clipped[i] = lo
        elif clipped[i] > hi:
            clipped[i] = hi
    return clipped
def print_lti_dynamics(A, B, txt=None, print_original_form=False, print_modal_form=False):
    """Print a summary of the LTI system xdot = A.x + B.u.

    Always prints the eigenvalues ("modes") of A; optionally prints the
    raw A/B matrices and, per mode, its eigenvector.
    Python 2 print statements: this module predates a py3 conversion.
    """
    if txt:
        print txt
    if print_original_form:
        print "A\n", A
        print "B\n", B
    # w: eigenvalues, M: matrix whose columns are the eigenvectors.
    w, M = np.linalg.eig(A)
    print "modes \n", w
    if print_modal_form:
        # print "eigen vectors\n", M
        # invM = np.linalg.inv(M)
        # print "invM\n", invM
        # Amod = np.dot(np.dot(invM, A), M)
        # print "Amod\n", Amod
        for i in range(len(w)):
            print w[i], "->", M[:, i]
| gpl-2.0 |
ashhher3/scikit-learn | sklearn/tree/tests/test_tree.py | 9 | 46546 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.random import sample_without_replacement
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
# Split criteria exercised for classifiers and regressors.
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )

# Estimator registries keyed by a human-readable name (used in messages).
CLF_TREES = {
    "DecisionTreeClassifier": DecisionTreeClassifier,
    "Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
                                              splitter="presort-best"),
    "ExtraTreeClassifier": ExtraTreeClassifier,
}

REG_TREES = {
    "DecisionTreeRegressor": DecisionTreeRegressor,
    "Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
                                             splitter="presort-best"),
    "ExtraTreeRegressor": ExtraTreeRegressor,
}

ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)

# Names of the estimators whose splitter supports sparse input.
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
                if Tree().splitter in SPARSE_SPLITTERS]
# Hand-crafted dataset: 23 samples x 14 features, used by the
# "clf_small"/"reg_small" entries of DATASETS below.
X_small = np.array([
    [0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
    [0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
    [-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
    [-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
    [-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
    [-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
    [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
    [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
    [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
    [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
    [2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
    [2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
    [2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
    [1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
    [3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
    [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
    [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
    [2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
    [2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
    [2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
    [2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
    [1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
    [3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])

# Binary labels and regression targets matching X_small row-for-row.
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
           0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
               0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]

# toy sample: six 2-feature points with labels -1/1, plus a probe set T
# whose expected predictions are true_result.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]

# also load the boston dataset
# and randomly permute it (reusing the same RandomState)
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]

# digits dataset, permuted the same way
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]

random_state = check_random_state(0)

# Multi-label problem with an indicator-matrix target.
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
    random_state=0, return_indicator=True, n_samples=30, n_features=10)

# Mostly-zero dense matrix (feeds the sparse-format DATASETS entries).
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)

# Name -> {"X": ..., "y": ...} registry used by the parametrized checks.
DATASETS = {
    "iris": {"X": iris.data, "y": iris.target},
    "boston": {"X": boston.data, "y": boston.target},
    "digits": {"X": digits.data, "y": digits.target},
    "toy": {"X": X, "y": y},
    "clf_small": {"X": X_small, "y": y_small},
    "reg_small": {"X": X_small, "y": y_small_reg},
    "multilabel": {"X": X_multilabel, "y": y_multilabel},
    "sparse-pos": {"X": X_sparse_pos, "y": y_random},
    "sparse-neg": {"X": - X_sparse_pos, "y": y_random},
    "sparse-mix": {"X": X_sparse_mix, "y": y_random},
    "zeros": {"X": np.zeros((20, 3)), "y": y_random}
}

# Attach a CSC copy of every X for the sparse-input tests.
for name in DATASETS:
    DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    """Assert that two fitted Tree structures `d` and `s` are identical.

    `message` prefixes every failure report.  Split attributes (feature,
    threshold) are compared on internal nodes only; values on leaves only.
    """
    assert_equal(s.node_count, d.node_count,
                 "{0}: inequal number of node ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
    assert_array_equal(d.children_right, s.children_right,
                       message + ": inequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": inequal children_left")
    # Leaves are the nodes whose children_right is TREE_LEAF.
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)
    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": inequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": inequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": inequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": inequal n_node_samples")
    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": inequal impurity")
    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": inequal value")
def test_classification_toy():
    """Check classification on a toy dataset."""
    # Exercise both the default settings and a single-feature-per-split fit.
    for name, Tree in CLF_TREES.items():
        for params in ({"random_state": 0},
                       {"max_features": 1, "random_state": 1}):
            clf = Tree(**params)
            clf.fit(X, y)
            assert_array_equal(clf.predict(T), true_result,
                               "Failed with {0}".format(name))
def test_weighted_classification_toy():
    """Check classification on a weighted toy dataset."""
    for name, Tree in CLF_TREES.items():
        clf = Tree(random_state=0)
        # Any uniform weighting must reproduce the unweighted predictions.
        for weights in (np.ones(len(X)), np.ones(len(X)) * 0.5):
            clf.fit(X, y, sample_weight=weights)
            assert_array_equal(clf.predict(T), true_result,
                               "Failed with {0}".format(name))
def test_regression_toy():
    """Check regression on a toy dataset."""
    for name, Tree in REG_TREES.items():
        reg = Tree(random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
        # BUG FIX: the original fitted a second model (max_features=1) into
        # `clf` but then asserted on `reg` again, so the restricted model
        # was never actually tested.
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
def test_xor():
    """Check on a XOR problem"""
    # 10x10 grid labelled in an XOR pattern of two 5x5 quadrants.
    y = np.zeros((10, 10))
    y[:5, :5] = 1
    y[5:, 5:] = 1
    gridx, gridy = np.indices(y.shape)
    X = np.vstack([gridx.ravel(), gridy.ravel()]).T
    y = y.ravel()
    # Both the default fit and the max_features=1 fit must be perfect.
    for name, Tree in CLF_TREES.items():
        for params in ({"random_state": 0},
                       {"random_state": 0, "max_features": 1}):
            clf = Tree(**params)
            clf.fit(X, y)
            assert_equal(clf.score(X, y), 1.0,
                         "Failed with {0}".format(name))
def test_iris():
    """Check consistency on dataset iris."""
    # (max_features, minimum acceptable training accuracy); None means all
    # features, which should fit iris almost perfectly.
    cases = ((None, 0.9), (2, 0.5))
    for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
        for max_features, lower_bound in cases:
            clf = Tree(criterion=criterion, max_features=max_features,
                       random_state=0)
            clf.fit(iris.data, iris.target)
            score = accuracy_score(clf.predict(iris.data), iris.target)
            assert_greater(score, lower_bound,
                           "Failed with {0}, criterion = {1} and score = {2}"
                           "".format(name, criterion, score))
def test_boston():
    """Check consistency on dataset boston house prices."""
    # (max_features, maximum acceptable training MSE); using fewer features
    # reduces the learning ability of the tree but speeds up training.
    cases = ((None, 1), (6, 2))
    for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
        for max_features, upper_bound in cases:
            reg = Tree(criterion=criterion, max_features=max_features,
                       random_state=0)
            reg.fit(boston.data, boston.target)
            score = mean_squared_error(boston.target, reg.predict(boston.data))
            assert_less(score, upper_bound,
                        "Failed with {0}, criterion = {1} and score = {2}"
                        "".format(name, criterion, score))
def test_probability():
    """Predict probabilities using DecisionTreeClassifier."""
    for name, Tree in CLF_TREES.items():
        clf = Tree(max_depth=1, max_features=1, random_state=42)
        clf.fit(iris.data, iris.target)
        prob_predict = clf.predict_proba(iris.data)
        # Each row of the probability matrix must sum to one.
        assert_array_almost_equal(np.sum(prob_predict, 1),
                                  np.ones(iris.data.shape[0]),
                                  err_msg="Failed with {0}".format(name))
        # The argmax of the probabilities must agree with the hard prediction.
        assert_array_equal(np.argmax(prob_predict, 1),
                           clf.predict(iris.data),
                           err_msg="Failed with {0}".format(name))
        # predict_log_proba must be the log of predict_proba.
        assert_almost_equal(clf.predict_proba(iris.data),
                            np.exp(clf.predict_log_proba(iris.data)), 8,
                            err_msg="Failed with {0}".format(name))
def test_arrayrepr():
    """Check the array representation."""
    # Check resize: 10000 distinct targets force the internal node arrays
    # to grow repeatedly while the tree is built.
    X = np.arange(10000)[:, np.newaxis]
    y = np.arange(10000)
    for name, Tree in REG_TREES.items():
        reg = Tree(max_depth=None, random_state=0)
        reg.fit(X, y)
def test_pure_set():
    """Check when y is pure (a single constant label/target)."""
    X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
    y = [1, 1, 1, 1, 1, 1]
    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(random_state=0)
        clf.fit(X, y)
        assert_array_equal(clf.predict(X), y,
                           err_msg="Failed with {0}".format(name))
    for name, TreeRegressor in REG_TREES.items():
        reg = TreeRegressor(random_state=0)
        reg.fit(X, y)
        # BUG FIX: the original asserted on `clf` (the last classifier from
        # the previous loop) instead of the regressor under test.
        assert_almost_equal(reg.predict(X), y,
                            err_msg="Failed with {0}".format(name))
def test_numerical_stability():
    """Check numerical stability."""
    X = np.array([
        [152.08097839, 140.40744019, 129.75102234, 159.90493774],
        [142.50700378, 135.81935120, 117.82884979, 162.75781250],
        [127.28772736, 140.40744019, 129.75102234, 159.90493774],
        [132.37025452, 143.71923828, 138.35694885, 157.84558105],
        [103.10237122, 143.71928406, 138.35696411, 157.84559631],
        [127.71276855, 143.71923828, 138.35694885, 157.84558105],
        [120.91514587, 140.40744019, 129.75102234, 159.90493774]])
    y = np.array(
        [1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
    # errstate(all="raise") turns any floating-point warning into an error,
    # so every sign combination of (X, y) must fit without incident.
    with np.errstate(all="raise"):
        for name, Tree in REG_TREES.items():
            reg = Tree(random_state=0)
            for X_signed, y_signed in product((X, -X), (y, -y)):
                reg.fit(X_signed, y_signed)
def test_importances():
    """Check variable importances."""
    X, y = datasets.make_classification(n_samples=2000,
                                        n_features=10,
                                        n_informative=3,
                                        n_redundant=0,
                                        n_repeated=0,
                                        shuffle=False,
                                        random_state=0)
    for name, Tree in CLF_TREES.items():
        clf = Tree(random_state=0)
        clf.fit(X, y)
        importances = clf.feature_importances_
        # With exactly 3 informative features, exactly 3 importances should
        # stand clearly above the noise floor of 0.1.
        n_important = np.sum(importances > 0.1)
        assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
        assert_equal(n_important, 3, "Failed with {0}".format(name))
        # Feature selection by importance must keep some, but not all, columns.
        X_new = clf.transform(X, threshold="mean")
        assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
        assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
    # Check on iris that importances are the same for all builders
    # (depth-first vs best-first via max_leaf_nodes).
    clf = DecisionTreeClassifier(random_state=0)
    clf.fit(iris.data, iris.target)
    clf2 = DecisionTreeClassifier(random_state=0,
                                  max_leaf_nodes=len(iris.data))
    clf2.fit(iris.data, iris.target)
    assert_array_equal(clf.feature_importances_,
                       clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
    """Check if variable importance before fit raises ValueError. """
    clf = DecisionTreeClassifier()
    # Accessing the property on an unfitted estimator must raise.
    clf.feature_importances_
def test_importances_gini_equal_mse():
    """Check that gini is equivalent to mse for binary output variable"""
    X, y = datasets.make_classification(n_samples=2000,
                                        n_features=10,
                                        n_informative=3,
                                        n_redundant=0,
                                        n_repeated=0,
                                        shuffle=False,
                                        random_state=0)

    # The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occurs at
    # high tree depth, we restrict this maximal depth.
    clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
                                 random_state=0).fit(X, y)
    reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
                                random_state=0).fit(X, y)

    # Both trees must agree on importances and on the full tree structure.
    assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
    assert_array_equal(clf.tree_.feature, reg.tree_.feature)
    assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
    assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
    assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
    """Check max_features."""
    # "auto" resolves to all features for regressors ...
    for name, TreeRegressor in REG_TREES.items():
        reg = TreeRegressor(max_features="auto")
        reg.fit(boston.data, boston.target)
        assert_equal(reg.max_features_, boston.data.shape[1])

    # ... and to sqrt(n_features) for classifiers (sqrt(4) == 2 on iris).
    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(max_features="auto")
        clf.fit(iris.data, iris.target)
        assert_equal(clf.max_features_, 2)

    n_features = iris.data.shape[1]
    # (max_features argument, expected resolved max_features_) pairs.
    valid_cases = [
        ("sqrt", int(np.sqrt(n_features))),
        ("log2", int(np.log2(n_features))),
        (1, 1),
        (3, 3),
        (0.01, 1),
        (0.5, int(0.5 * n_features)),
        (1.0, n_features),
        (None, n_features),
    ]
    for name, TreeEstimator in ALL_TREES.items():
        for max_features, expected in valid_cases:
            est = TreeEstimator(max_features=max_features)
            est.fit(iris.data, iris.target)
            assert_equal(est.max_features_, expected)

        # use values of max_features that are invalid
        for bad_value in (10, -1, 0.0, 1.5, "foobar"):
            est = TreeEstimator(max_features=bad_value)
            assert_raises(ValueError, est.fit, X, y)
def test_error():
    """Test that it gives proper exception on deficient input."""
    for name, TreeEstimator in CLF_TREES.items():
        # predict before fit
        est = TreeEstimator()
        assert_raises(Exception, est.predict_proba, X)

        est.fit(X, y)
        X2 = [-2, -1, 1]  # wrong feature shape for sample
        assert_raises(ValueError, est.predict_proba, X2)

    for name, TreeEstimator in ALL_TREES.items():
        # Invalid values for parameters
        assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
        assert_raises(ValueError,
                      TreeEstimator(min_weight_fraction_leaf=-1).fit,
                      X, y)
        assert_raises(ValueError,
                      TreeEstimator(min_weight_fraction_leaf=0.51).fit,
                      X, y)
        assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
                      X, y)
        assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
        assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)

        # Wrong dimensions: y shorter than X must be rejected.
        est = TreeEstimator()
        y2 = y[:-1]
        assert_raises(ValueError, est.fit, X, y2)

        # Test with arrays that are non-contiguous.
        Xf = np.asfortranarray(X)
        est = TreeEstimator()
        est.fit(Xf, y)
        assert_almost_equal(est.predict(T), true_result)

        # predict before fitting
        est = TreeEstimator()
        assert_raises(Exception, est.predict, T)

        # predict on vector with different dims
        est.fit(X, y)
        t = np.asarray(T)
        assert_raises(ValueError, est.predict, t[:, 1:])

        # wrong sample shape
        Xt = np.array(X).T

        est = TreeEstimator()
        est.fit(np.dot(X, Xt), y)
        assert_raises(ValueError, est.predict, X)

        clf = TreeEstimator()
        clf.fit(X, y)
        assert_raises(ValueError, clf.predict, Xt)
def test_min_samples_leaf():
    """Test if leaves contain more than leaf_count training examples"""
    X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
    y = iris.target

    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    for max_leaf_nodes in (None, 1000):
        for name, TreeEstimator in ALL_TREES.items():
            est = TreeEstimator(min_samples_leaf=5,
                                max_leaf_nodes=max_leaf_nodes,
                                random_state=0)
            est.fit(X, y)
            out = est.tree_.apply(X)
            node_counts = np.bincount(out)
            # drop inner nodes (those no training sample lands on)
            leaf_count = node_counts[node_counts != 0]
            # every leaf must have received at least min_samples_leaf samples
            assert_greater(np.min(leaf_count), 4,
                           "Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
    """Test if leaves contain at least min_weight_fraction_leaf of the
    training set

    NOTE(review): the `datasets` parameter (a DATASETS key) shadows the
    `sklearn.datasets` module imported at module level.
    """
    if sparse:
        X = DATASETS[datasets]["X_sparse"].astype(np.float32)
    else:
        X = DATASETS[datasets]["X"].astype(np.float32)
    y = DATASETS[datasets]["y"]

    weights = rng.rand(X.shape[0])
    total_weight = np.sum(weights)

    TreeEstimator = ALL_TREES[name]

    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
        est = TreeEstimator(min_weight_fraction_leaf=frac,
                            max_leaf_nodes=max_leaf_nodes,
                            random_state=0)
        est.fit(X, y, sample_weight=weights)

        if sparse:
            out = est.tree_.apply(X.tocsr())
        else:
            out = est.tree_.apply(X)

        node_weights = np.bincount(out, weights=weights)
        # drop inner nodes
        leaf_weights = node_weights[node_weights != 0]
        # every leaf must carry at least the requested weight fraction
        assert_greater_equal(
            np.min(leaf_weights),
            total_weight * est.min_weight_fraction_leaf,
            "Failed with {0} "
            "min_weight_fraction_leaf={1}".format(
                name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
    """Generator: run check_min_weight_fraction_leaf on every estimator."""
    # Check on dense input
    for name in ALL_TREES:
        yield check_min_weight_fraction_leaf, name, "iris"

    # Check on sparse input (only estimators supporting it)
    for name in SPARSE_TREES:
        yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
    """Check that tree estimator are pickable """
    # Run the same round-trip check for classifiers (on iris) and
    # regressors (on boston).
    cases = [(CLF_TREES, iris.data, iris.target, "classification"),
             (REG_TREES, boston.data, boston.target, "regression")]
    for registry, data, target, task in cases:
        for name, TreeEstimator in registry.items():
            est = TreeEstimator(random_state=0)
            est.fit(data, target)
            score = est.score(data, target)

            est2 = pickle.loads(pickle.dumps(est))
            assert_equal(type(est2), est.__class__)

            # The unpickled estimator must score identically.
            score2 = est2.score(data, target)
            assert_equal(score, score2, "Failed to generate same score "
                                        "after pickling ({0}) "
                                        "with {1}".format(task, name))
def test_multioutput():
    """Check estimators on multi-output problems."""
    X = [[-2, -1],
         [-1, -1],
         [-1, -2],
         [1, 1],
         [1, 2],
         [2, 1],
         [-2, 1],
         [-1, 1],
         [-1, 2],
         [2, -1],
         [1, -1],
         [1, -2]]

    # Two outputs per sample: first has 2 classes, second has 4.
    y = [[-1, 0],
         [-1, 0],
         [-1, 0],
         [1, 1],
         [1, 1],
         [1, 1],
         [-1, 2],
         [-1, 2],
         [-1, 2],
         [1, 3],
         [1, 3],
         [1, 3]]

    T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]

    # toy classification problem
    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(random_state=0)
        y_hat = clf.fit(X, y).predict(T)
        assert_array_equal(y_hat, y_true)
        assert_equal(y_hat.shape, (4, 2))

        # predict_proba returns one probability matrix per output.
        proba = clf.predict_proba(T)
        assert_equal(len(proba), 2)
        assert_equal(proba[0].shape, (4, 2))
        assert_equal(proba[1].shape, (4, 4))

        log_proba = clf.predict_log_proba(T)
        assert_equal(len(log_proba), 2)
        assert_equal(log_proba[0].shape, (4, 2))
        assert_equal(log_proba[1].shape, (4, 4))

    # toy regression problem
    for name, TreeRegressor in REG_TREES.items():
        reg = TreeRegressor(random_state=0)
        y_hat = reg.fit(X, y).predict(T)
        assert_almost_equal(y_hat, y_true)
        assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
    """Test that n_classes_ and classes_ have proper shape."""
    for name, TreeClassifier in CLF_TREES.items():
        # Classification, single output: scalar n_classes_, flat classes_.
        clf = TreeClassifier(random_state=0)
        clf.fit(X, y)

        assert_equal(clf.n_classes_, 2)
        assert_array_equal(clf.classes_, [-1, 1])

        # Classification, multi-output: one entry per output.
        _y = np.vstack((y, np.array(y) * 2)).T
        clf = TreeClassifier(random_state=0)
        clf.fit(X, _y)
        assert_equal(len(clf.n_classes_), 2)
        assert_equal(len(clf.classes_), 2)
        assert_array_equal(clf.n_classes_, [2, 2])
        assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
    """Check class rebalancing."""
    # Truncating iris to 125 samples leaves the third class under-represented.
    unbalanced_X = iris.data[:125]
    unbalanced_y = iris.target[:125]
    sample_weight = _balance_weights(unbalanced_y)

    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(random_state=0)
        clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
        assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
    """Check that it works no matter the memory layout"""
    for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
                                                [np.float64, np.float32]):
        est = TreeEstimator(random_state=0)

        # Dense layouts: default, C-order, F-order, explicitly contiguous.
        layouts = [
            np.asarray(iris.data, dtype=dtype),
            np.asarray(iris.data, order="C", dtype=dtype),
            np.asarray(iris.data, order="F", dtype=dtype),
            np.ascontiguousarray(iris.data, dtype=dtype),
        ]
        # Sparse formats, when the splitter supports them.
        if est.splitter in SPARSE_SPLITTERS:
            layouts.append(csr_matrix(iris.data, dtype=dtype))
            layouts.append(csc_matrix(iris.data, dtype=dtype))

        for data in layouts:
            assert_array_equal(est.fit(data, iris.target).predict(data),
                               iris.target)

        # Strided view of the data.
        data = np.asarray(iris.data[::3], dtype=dtype)
        target = iris.target[::3]
        assert_array_equal(est.fit(data, target).predict(data), target)
def test_sample_weight():
    """Check sample weighting."""
    # Test that zero-weighted samples are not taken into account
    X = np.arange(100)[:, np.newaxis]
    y = np.ones(100)
    y[:50] = 0.0

    sample_weight = np.ones(100)
    sample_weight[y == 0] = 0.0

    clf = DecisionTreeClassifier(random_state=0)
    clf.fit(X, y, sample_weight=sample_weight)
    # All class-0 samples have zero weight, so everything is predicted 1.
    assert_array_equal(clf.predict(X), np.ones(100))

    # Test that low weighted samples are not taken into account at low depth
    X = np.arange(200)[:, np.newaxis]
    y = np.zeros(200)
    y[50:100] = 1
    y[100:200] = 2
    X[100:200, 0] = 200

    sample_weight = np.ones(200)

    sample_weight[y == 2] = .51  # Samples of class '2' are still weightier
    clf = DecisionTreeClassifier(max_depth=1, random_state=0)
    clf.fit(X, y, sample_weight=sample_weight)
    assert_equal(clf.tree_.threshold[0], 149.5)

    sample_weight[y == 2] = .5  # Samples of class '2' are no longer weightier
    clf = DecisionTreeClassifier(max_depth=1, random_state=0)
    clf.fit(X, y, sample_weight=sample_weight)
    assert_equal(clf.tree_.threshold[0], 49.5)  # Threshold should have moved

    # Test that sample weighting is the same as having duplicates
    X = iris.data
    y = iris.target

    duplicates = rng.randint(0, X.shape[0], 200)

    clf = DecisionTreeClassifier(random_state=1)
    clf.fit(X[duplicates], y[duplicates])

    # Fitting with bincount weights must give the same internal thresholds.
    sample_weight = np.bincount(duplicates, minlength=X.shape[0])
    clf2 = DecisionTreeClassifier(random_state=1)
    clf2.fit(X, y, sample_weight=sample_weight)

    internal = clf.tree_.children_left != tree._tree.TREE_LEAF
    assert_array_almost_equal(clf.tree_.threshold[internal],
                              clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
    """Check sample weighting raises errors."""
    X = np.arange(100)[:, np.newaxis]
    y = np.ones(100)
    y[:50] = 0.0

    clf = DecisionTreeClassifier(random_state=0)

    # 2-D weights, a scalar weight, and both length mismatches must raise.
    invalid_weights = (np.random.rand(100, 1),
                       np.array(0),
                       np.ones(101),
                       np.ones(99))
    for sample_weight in invalid_weights:
        assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
    """Check class_weights resemble sample_weights behavior."""
    TreeClassifier = CLF_TREES[name]

    # Iris is balanced, so no effect expected for using 'auto' weights
    clf1 = TreeClassifier(random_state=0)
    clf1.fit(iris.data, iris.target)
    clf2 = TreeClassifier(class_weight='auto', random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)

    # Make a multi-output problem with three copies of Iris
    iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
    # Create user-defined weights that should balance over the outputs
    clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
                                        {0: 2., 1: 1., 2: 2.},
                                        {0: 1., 1: 2., 2: 2.}],
                          random_state=0)
    clf3.fit(iris.data, iris_multi)
    assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output "auto" which should also have no effect
    clf4 = TreeClassifier(class_weight='auto', random_state=0)
    clf4.fit(iris.data, iris_multi)
    assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)

    # Inflate importance of class 1, check against user-defined weights
    sample_weight = np.ones(iris.target.shape)
    sample_weight[iris.target == 1] *= 100
    class_weight = {0: 1., 1: 100., 2: 1.}
    clf1 = TreeClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight)
    clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)

    # Check that sample_weight and class_weight are multiplicative
    clf1 = TreeClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight**2)
    clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target, sample_weight)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
    """Generator: run check_class_weights on every classifier."""
    for name in CLF_TREES:
        yield check_class_weights, name
def check_class_weight_errors(name):
    """Test if class_weight raises errors and warnings when expected."""
    TreeClassifier = CLF_TREES[name]
    _y = np.vstack((y, np.array(y) * 2)).T

    # Invalid preset string
    clf = TreeClassifier(class_weight='the larch', random_state=0)
    assert_raises(ValueError, clf.fit, X, y)
    assert_raises(ValueError, clf.fit, X, _y)

    # Not a list or preset for multi-output
    clf = TreeClassifier(class_weight=1, random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)

    # Incorrect length list for multi-output (one dict for two outputs)
    clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
    """Yield one class-weight error check per classifier tree type."""
    for clf_name in CLF_TREES:
        yield (check_class_weight_errors, clf_name)
def test_max_leaf_nodes():
    """Greedy trees grown with max_leaf_nodes=k+1 must have exactly k+1 leaves,
    and max_leaf_nodes values in (0, 1) must be rejected."""
    from sklearn.tree._tree import TREE_LEAF

    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    for TreeEstimator in ALL_TREES.values():
        fitted = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
        n_leaves = (fitted.tree_.children_left == TREE_LEAF).sum()
        assert_equal(n_leaves, k + 1)

        # max_leaf_nodes in (0, 1) should raise ValueError
        for bad_value in (0, 1, 0.1):
            est = TreeEstimator(max_depth=None, max_leaf_nodes=bad_value)
            assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    """max_leaf_nodes must take precedence over max_depth."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    for TreeEstimator in ALL_TREES.values():
        fitted = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
        # With k leaves allowed, the tree must grow beyond depth 1.
        assert_greater(fitted.tree_.max_depth, 1)
def test_arrays_persist():
    """Ensure property arrays' memory stays alive when tree disappears

    non-regression for #2726
    """
    attrs = ['n_classes', 'value', 'children_left', 'children_right',
             'threshold', 'impurity', 'feature', 'n_node_samples']
    for attr in attrs:
        # The fitted tree object is garbage collected immediately; the
        # returned array must keep the underlying memory alive.
        value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
        # if pointing to freed memory, contents may be arbitrary
        assert_true(-2 <= value.flat[0] < 2,
                    'Array points to arbitrary memory')
def test_only_constant_features():
    """A tree fit on all-constant features must stay a single leaf (depth 0)."""
    rng = check_random_state(0)
    X = np.zeros((10, 20))
    y = rng.randint(0, 2, (10, ))
    for TreeEstimator in ALL_TREES.values():
        fitted = TreeEstimator(random_state=0).fit(X, y)
        assert_equal(fitted.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
    """With a single informative feature the tree must split exactly once."""
    X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
                   np.zeros((4, 1000))])
    y = np.array([0., 1., 0., 1.0])

    for TreeEstimator in CLF_TREES.values():
        clf = TreeEstimator(random_state=0, max_features=1).fit(X, y)
        assert_equal(clf.tree_.max_depth, 1)
        # The informative feature carries no class information here, so
        # predicted probabilities stay uniform.
        assert_array_equal(clf.predict_proba(X), 0.5 * np.ones((4, 2)))

    for TreeEstimator in REG_TREES.values():
        reg = TreeEstimator(random_state=0, max_features=1).fit(X, y)
        assert_equal(reg.tree_.max_depth, 1)
        assert_array_equal(reg.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
    """Test if the warning for too large inputs is appropriate."""
    # 1e40 overflows float32; fitting is expected to fail with a message
    # that points the user at the internal float32 cast.
    X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
    clf = DecisionTreeClassifier()
    try:
        clf.fit(X, [0, 1, 0, 1])
    except ValueError as e:
        # NOTE(review): if fit ever stops raising, this test silently
        # passes without checking anything -- consider asserting that
        # ValueError is actually raised.
        assert_in("float32", str(e))
def test_realloc():
    # Exercise the Cython buffer-reallocation helper directly: it is
    # expected to raise MemoryError on an impossible request.
    from sklearn.tree._tree import _realloc_test
    assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
    """Absurdly large max_leaf_nodes requests must fail cleanly."""
    n_bits = int(platform.architecture()[0].rstrip('bit'))

    X = np.random.randn(10, 2)
    y = np.random.randint(0, 2, 10)

    # Sanity check: we cannot request more memory than the size of the
    # address space. Currently raises OverflowError.
    too_big = 2 ** (n_bits + 1)
    assert_raises(Exception,
                  DecisionTreeClassifier(splitter='best',
                                         max_leaf_nodes=too_big).fit,
                  X, y)

    # Non-regression test: MemoryError used to be dropped by Cython
    # because of missing "except *".
    too_big = 2 ** (n_bits - 1) - 1
    assert_raises(MemoryError,
                  DecisionTreeClassifier(splitter='best',
                                         max_leaf_nodes=too_big).fit,
                  X, y)
def check_sparse_input(tree, dataset, max_depth=None):
    """Fit one tree type on a dataset in dense and in every sparse format
    and check that the resulting trees and predictions are identical."""
    TreeEstimator = ALL_TREES[tree]
    X = DATASETS[dataset]["X"]
    X_sparse = DATASETS[dataset]["X_sparse"]
    y = DATASETS[dataset]["y"]

    # Gain testing time: subsample the two big datasets.
    if dataset in ["digits", "boston"]:
        n_samples = X.shape[0] // 5
        X = X[:n_samples]
        X_sparse = X_sparse[:n_samples]
        y = y[:n_samples]

    for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
        X_sparse = sparse_format(X_sparse)

        # Check the default (depth first search)
        d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
        s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)

        assert_tree_equal(d.tree_, s.tree_,
                          "{0} with dense and sparse format gave different "
                          "trees".format(tree))

        # Reference predictions from the dense-fitted tree.
        y_pred = d.predict(X)
        if tree in CLF_TREES:
            y_proba = d.predict_proba(X)
            y_log_proba = d.predict_log_proba(X)

        # The sparse-fitted tree must predict the same values for test
        # data given in any sparse format.
        for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
            X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)

            assert_array_almost_equal(s.predict(X_sparse_test), y_pred)

            if tree in CLF_TREES:
                assert_array_almost_equal(s.predict_proba(X_sparse_test),
                                          y_proba)
                assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
                                          y_log_proba)
def test_sparse_input():
    """Yield dense-vs-sparse equivalence checks for sparse-capable trees."""
    datasets_to_check = ("clf_small", "toy", "digits", "multilabel",
                         "sparse-pos", "sparse-neg", "sparse-mix",
                         "zeros")
    for tree, dataset in product(SPARSE_TREES, datasets_to_check):
        max_depth = 3 if dataset == "digits" else None
        yield (check_sparse_input, tree, dataset, max_depth)

    # Due to numerical instability of MSE and too strict test, we limit
    # the maximal depth
    for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
        if tree in SPARSE_TREES:
            yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
    """Check dense/sparse fitting equivalence under several hyper-parameters.

    For one tree type and one dataset, fit identically-parameterized
    estimators on the dense and sparse representation of X and verify that
    both give the same tree structure and the same predictions.

    The original body repeated the same fit-fit-compare sequence four
    times; it is factored into a single local helper.
    """
    TreeEstimator = ALL_TREES[tree]
    X = DATASETS[dataset]["X"]
    X_sparse = DATASETS[dataset]["X_sparse"]
    y = DATASETS[dataset]["y"]

    def fit_dense_and_sparse(**params):
        # Fit twice with identical settings (dense, then sparse) and
        # compare tree structure and predictions.
        d = TreeEstimator(random_state=0, **params).fit(X, y)
        s = TreeEstimator(random_state=0, **params).fit(X_sparse, y)
        assert_tree_equal(d.tree_, s.tree_,
                          "{0} with dense and sparse format gave different "
                          "trees".format(tree))
        assert_array_almost_equal(s.predict(X), d.predict(X))

    # Check max_features
    fit_dense_and_sparse(max_features=1, max_depth=2)
    # Check min_samples_split
    fit_dense_and_sparse(max_features=1, min_samples_split=10)
    # Check min_samples_leaf
    fit_dense_and_sparse(min_samples_leaf=X_sparse.shape[0] // 2)
    # Check best-first search
    fit_dense_and_sparse(max_leaf_nodes=3)
def test_sparse_parameters():
    """Yield hyper-parameter equivalence checks on the sparse datasets."""
    sparse_datasets = ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]
    for tree, dataset in product(SPARSE_TREES, sparse_datasets):
        yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
    """Every split criterion must give the same tree on dense and sparse X."""
    TreeEstimator = ALL_TREES[tree]
    X = DATASETS[dataset]["X"]
    X_sparse = DATASETS[dataset]["X_sparse"]
    y = DATASETS[dataset]["y"]

    # Check various criterion
    criteria = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
    for criterion in criteria:
        dense_est = TreeEstimator(random_state=0, max_depth=3,
                                  criterion=criterion).fit(X, y)
        sparse_est = TreeEstimator(random_state=0, max_depth=3,
                                   criterion=criterion).fit(X_sparse, y)

        assert_tree_equal(dense_est.tree_, sparse_est.tree_,
                          "{0} with dense and sparse format gave different "
                          "trees".format(tree))
        assert_array_almost_equal(sparse_est.predict(X),
                                  dense_est.predict(X))
def test_sparse_criterion():
    """Yield criterion equivalence checks on the sparse datasets."""
    sparse_datasets = ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]
    for tree, dataset in product(SPARSE_TREES, sparse_datasets):
        yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
                                n_features=10):
    """Trees must treat explicitly-stored zeros in sparse input the same as
    implicit (missing) zeros, for both fitting and prediction."""
    TreeEstimator = ALL_TREES[tree]

    # n_samples set n_feature to ease construction of a simultaneous
    # construction of a csr and csc matrix
    n_samples = n_features
    samples = np.arange(n_samples)

    # Generate X, y: build CSC arrays column by column.  Values are drawn
    # from binomial(3, 0.5) - 1, i.e. from {-1, 0, 1, 2}, so some stored
    # entries are *explicit* zeros.
    random_state = check_random_state(0)
    indices = []
    data = []
    offset = 0
    indptr = [offset]
    for i in range(n_features):
        n_nonzero_i = random_state.binomial(n_samples, 0.5)
        indices_i = random_state.permutation(samples)[:n_nonzero_i]
        indices.append(indices_i)
        data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
        data.append(data_i)
        offset += n_nonzero_i
        indptr.append(offset)

    indices = np.concatenate(indices)
    data = np.array(np.concatenate(data), dtype=np.float32)
    X_sparse = csc_matrix((data, indices, indptr),
                          shape=(n_samples, n_features))
    X = X_sparse.toarray()
    # Reusing the same (data, indices, indptr) as CSR transposes the
    # roles of rows and columns, giving an independent test matrix.
    X_sparse_test = csr_matrix((data, indices, indptr),
                               shape=(n_samples, n_features))
    X_test = X_sparse_test.toarray()
    y = random_state.randint(0, 3, size=(n_samples, ))

    # Ensure that X_sparse_test owns its data, indices and indptr array
    X_sparse_test = X_sparse_test.copy()

    # Ensure that we have explicit zeros
    assert_greater((X_sparse.data == 0.).sum(), 0)
    assert_greater((X_sparse_test.data == 0.).sum(), 0)

    # Perform the comparison
    d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
    s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)

    assert_tree_equal(d.tree_, s.tree_,
                      "{0} with dense and sparse format gave different "
                      "trees".format(tree))

    # Every dense/sparse combination of test input must agree.
    Xs = (X_test, X_sparse_test)
    for X1, X2 in product(Xs, Xs):
        assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
        assert_array_almost_equal(s.predict(X1), d.predict(X2))
        if tree in CLF_TREES:
            assert_array_almost_equal(s.predict_proba(X1),
                                      d.predict_proba(X2))
def test_explicit_sparse_zeros():
    """Yield the explicit-zeros check for every sparse-capable tree."""
    for tree_name in SPARSE_TREES:
        yield (check_explicit_sparse_zeros, tree_name)
def check_raise_error_on_1d_input(name):
    """Both fit and predict must reject 1d feature arrays."""
    TreeEstimator = ALL_TREES[name]

    X = iris.data[:, 0].ravel()
    X_2d = iris.data[:, 0].reshape((-1, 1))
    y = iris.target

    # Fitting directly on a 1d array must fail.
    assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)

    # A tree fit on 2d data must reject 1d data at predict time too.
    fitted = TreeEstimator(random_state=0).fit(X_2d, y)
    assert_raises(ValueError, fitted.predict, X)
def test_1d_input():
    """Yield the 1d-input rejection check for every tree type."""
    for tree_name in ALL_TREES:
        yield check_raise_error_on_1d_input, tree_name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
    # Private function to keep pretty printing in nose yielded tests
    unconstrained = TreeEstimator(random_state=0)
    unconstrained.fit(X, y, sample_weight=sample_weight)
    assert_equal(unconstrained.tree_.max_depth, 1)

    # With a 0.4 weight fraction required per leaf, the single split of
    # the unconstrained tree is no longer allowed.
    constrained = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
    constrained.fit(X, y, sample_weight=sample_weight)
    assert_equal(constrained.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
    """min_weight_fraction_leaf must be honoured on dense and sparse input."""
    TreeEstimator = ALL_TREES[name]

    X = np.array([[0], [0], [0], [0], [1]])
    y = [0, 0, 0, 0, 1]
    sample_weight = [0.2] * 5
    _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)

    if TreeEstimator().splitter in SPARSE_SPLITTERS:
        _check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
                                           sample_weight)
def test_min_weight_leaf_split_level():
    """Yield the min-weight-leaf split check for every tree type."""
    for tree_name in ALL_TREES:
        yield check_min_weight_leaf_split_level, tree_name
| bsd-3-clause |
Pybonacci/bezierbuilder | bezierbuilder.py | 1 | 6844 | # BézierBuilder
#
# Copyright (c) 2013, Juan Luis Cano Rodríguez <juanlu001@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""BézierBuilder, an interactive Bézier curve explorer.
Just run it with
$ python bezier_builder.py
"""
import matplotlib
# WebAgg canvas requires matplotlib >= 1.3 and tornado
#matplotlib.use('webagg')
import numpy as np
from scipy.special import binom
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
class BezierBuilder(object):
    """Bézier curve interactive builder.
    """

    def __init__(self, control_polygon, ax_bernstein):
        """Constructor.
        Receives the initial control polygon of the curve.

        ``control_polygon`` is a matplotlib Line2D whose data points are
        the control vertices; ``ax_bernstein`` is the axes where the
        Bernstein basis polynomials are drawn.
        """
        self.control_polygon = control_polygon
        # Working copies of the control-point coordinates as plain lists.
        self.xp = list(control_polygon.get_xdata())
        self.yp = list(control_polygon.get_ydata())
        self.canvas = control_polygon.figure.canvas
        self.ax_main = control_polygon.axes
        self.ax_bernstein = ax_bernstein

        # Event handler for mouse clicking
        self.canvas.mpl_connect('button_press_event', self.on_button_press)
        self.canvas.mpl_connect('button_release_event', self.on_button_release)
        self.canvas.mpl_connect('key_press_event', self.on_key_press)
        self.canvas.mpl_connect('key_release_event', self.on_key_release)
        self.canvas.mpl_connect('motion_notify_event', self.on_motion_notify)

        # Create Bézier curve
        line_bezier = Line2D([], [],
                             c=control_polygon.get_markeredgecolor())
        self.bezier_curve = self.ax_main.add_line(line_bezier)

        # Modifier-key state, updated by the key press/release handlers.
        self._shift_is_held = False
        self._ctrl_is_held = False
        self._index = None  # Active vertex

    def on_button_press(self, event):
        """Add a point on plain click; with Shift/Ctrl, select (and with
        Ctrl, remove) an existing control point under the cursor."""
        # Ignore clicks outside axes
        if event.inaxes != self.ax_main: return
        if self._shift_is_held or self._ctrl_is_held:
            # Hit-test the click against the control polygon's markers.
            res, ind = self.control_polygon.contains(event)
            if res:
                self._index = ind['ind'][0]
                if self._ctrl_is_held:
                    self._remove_point(event)
            else:
                return
        else:
            self._add_point(event)

    def on_button_release(self, event):
        """Deselect the active vertex when the left button is released."""
        if event.button != 1: return
        self._index = None

    def on_key_press(self, event):
        """Track Shift/Ctrl being pressed."""
        if event.key == 'shift':
            self._shift_is_held = True
        elif event.key == 'control':
            self._ctrl_is_held = True

    def on_key_release(self, event):
        """Track Shift/Ctrl being released."""
        if event.key == 'shift':
            self._shift_is_held = False
        elif event.key == 'control':
            self._ctrl_is_held = False

    def on_motion_notify(self, event):
        """Drag the currently selected control point, if any."""
        if event.inaxes != self.ax_main: return
        if self._index is None: return
        x, y = event.xdata, event.ydata

        self.xp[self._index] = x
        self.yp[self._index] = y
        self.control_polygon.set_data(self.xp, self.yp)

        # Dragging keeps the number of points fixed, so only the curve
        # (not the Bernstein basis) needs redrawing.
        self._update_bezier()

    def _add_point(self, event):
        """Append the clicked location as a new control point."""
        self.xp.append(event.xdata)
        self.yp.append(event.ydata)
        self.control_polygon.set_data(self.xp, self.yp)

        # Rebuild Bézier curve and update canvas
        self._update_bernstein()
        self._update_bezier()

    def _remove_point(self, event):
        """Delete the currently selected control point."""
        del self.xp[self._index]
        del self.yp[self._index]
        self.control_polygon.set_data(self.xp, self.yp)

        # Rebuild Bézier curve and update canvas
        self._update_bernstein()
        self._update_bezier()

    def _build_bezier(self):
        """Sample the Bézier curve for the current control points."""
        x, y = Bezier(list(zip(self.xp, self.yp))).T
        return x, y

    def _update_bezier(self):
        """Redraw the Bézier curve on the main axes."""
        self.bezier_curve.set_data(*self._build_bezier())
        self.canvas.draw()

    def _update_bernstein(self):
        """Redraw the Bernstein basis polynomials for the current degree N."""
        N = len(self.xp) - 1
        t = np.linspace(0, 1, num=200)
        ax = self.ax_bernstein
        ax.clear()
        for kk in range(N + 1):
            ax.plot(t, Bernstein(N, kk)(t))
        if N > 0:
            ax.set_title("Bernstein basis, N = {}".format(N))
        else:
            ax.set_title("Bernstein basis")
        ax.set_xlim(0, 1)
        ax.set_ylim(0, 1)
def Bernstein(n, k):
    """Return the k-th Bernstein basis polynomial of degree n as a callable.

    The returned function evaluates C(n, k) * x**k * (1 - x)**(n - k).
    """
    binomial_coefficient = binom(n, k)

    def basis_polynomial(x):
        return binomial_coefficient * x ** k * (1 - x) ** (n - k)

    return basis_polynomial
def Bezier(points, num=200):
    """Sample the Bézier curve defined by *points* at *num* parameter values.

    Returns a (num, 2) array of curve points for t in [0, 1].
    """
    n_points = len(points)
    t = np.linspace(0, 1, num=num)
    curve = np.zeros((num, 2))
    # Sum of control points weighted by the Bernstein basis of degree
    # n_points - 1.
    for k in range(n_points):
        curve += np.outer(Bernstein(n_points - 1, k)(t), points[k])
    return curve
if __name__ == '__main__':
    # Initial setup: one axes for the curve, one for the Bernstein basis.
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))

    # Empty line that will hold the control polygon.
    line = Line2D([], [], ls='--', c='#666666',
                  marker='x', mew=2, mec='#204a87')
    ax1.add_line(line)

    # Canvas limits
    ax1.set_xlim(0, 1)
    ax1.set_ylim(0, 1)
    ax1.set_title("Bézier curve")

    # Bernstein plot
    ax2.set_title("Bernstein basis")

    # Create BezierBuilder (the name keeps the builder, and therefore its
    # canvas callbacks, alive for the lifetime of the figure).
    bezier_builder = BezierBuilder(line, ax2)

    fig.suptitle("BézierBuilder", fontsize=24)
    fig.text(0.052, 0.07, "Click to add points, Shift + Click & Drag to move them, "
             "Ctrl + Click to remove them.", color="#333333")
    fig.text(0.052, 0.03, "(c) 2013, Juan Luis Cano Rodríguez. Code available at "
             "https://github.com/Pybonacci/bezierbuilder/", color="#666666")
    fig.subplots_adjust(left=0.08, top=0.8, right=0.92, bottom=0.2)

    plt.show()
| bsd-2-clause |
billy-inn/scikit-learn | sklearn/cluster/tests/test_spectral.py | 262 | 7954 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
    """Two obvious blocks in a precomputed affinity matrix must be recovered
    for every solver/label-assignment combination, and the fitted model must
    survive pickling."""
    # Block-structured affinity: samples 0-2 vs samples 3-6.
    S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
                  [0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
                  [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])

    for eigen_solver in ('arpack', 'lobpcg'):
        for assign_labels in ('kmeans', 'discretize'):
            for mat in (S, sparse.csr_matrix(S)):
                model = SpectralClustering(random_state=0, n_clusters=2,
                                           affinity='precomputed',
                                           eigen_solver=eigen_solver,
                                           assign_labels=assign_labels
                                           ).fit(mat)
                labels = model.labels_
                # Cluster ids are arbitrary; normalize so sample 0 is 1.
                if labels[0] == 0:
                    labels = 1 - labels

                assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])

                # A pickle round-trip must preserve parameters and labels.
                model_copy = loads(dumps(model))
                assert_equal(model_copy.n_clusters, model.n_clusters)
                assert_equal(model_copy.eigen_solver, model.eigen_solver)
                assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
    # Test the amg mode of SpectralClustering
    centers = np.array([
        [0., 0., 0.],
        [10., 10., 10.],
        [20., 20., 20.],
    ])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    D = pairwise_distances(X)  # Distance matrix
    S = np.max(D) - D  # Similarity matrix
    S = sparse.coo_matrix(S)
    # Probe for pyamg availability; the imported name itself is unused.
    try:
        from pyamg import smoothed_aggregation_solver
        amg_loaded = True
    except ImportError:
        amg_loaded = False
    if amg_loaded:
        labels = spectral_clustering(S, n_clusters=len(centers),
                                     random_state=0, eigen_solver="amg")
        # We don't care too much that it's good, just that it *worked*.
        # There does have to be some lower limit on the performance though.
        assert_greater(np.mean(labels == true_labels), .3)
    else:
        # Without pyamg, requesting the amg solver must raise.
        assert_raises(ValueError, spectral_embedding, S,
                      n_components=len(centers),
                      random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
    # Test that SpectralClustering fails with an unknown mode set.
    centers = np.array([[0., 0., 0.],
                        [10., 10., 10.],
                        [20., 20., 20.]])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    distances = pairwise_distances(X)  # Distance matrix
    affinity = sparse.coo_matrix(np.max(distances) - distances)

    assert_raises(ValueError, spectral_clustering, affinity, n_clusters=2,
                  random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
    # Test that SpectralClustering fails with an unknown assign_labels set.
    centers = np.array([[0., 0., 0.],
                        [10., 10., 10.],
                        [20., 20., 20.]])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    distances = pairwise_distances(X)  # Distance matrix
    affinity = sparse.coo_matrix(np.max(distances) - distances)

    assert_raises(ValueError, spectral_clustering, affinity, n_clusters=2,
                  random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
    """A sparse precomputed affinity must perfectly separate two tight blobs."""
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01)

    # Sparsify the RBF affinity by clipping tiny similarities to zero.
    affinity = rbf_kernel(X, gamma=1)
    affinity = sparse.coo_matrix(np.maximum(affinity - 1e-4, 0))

    model = SpectralClustering(random_state=0, n_clusters=2,
                               affinity='precomputed').fit(affinity)
    assert_equal(adjusted_rand_score(y, model.labels_), 1)
def test_affinities():
    """Exercise every supported affinity: preset strings, all kernel
    metrics, and user-supplied callables."""
    # Note: in the following, random_state has been selected to have
    # a dataset that yields a stable eigen decomposition both when built
    # on OSX and Linux
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01
                      )
    # nearest neighbors affinity
    sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                            random_state=0)
    # The kNN graph on this data is not fully connected, which warns.
    assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
    assert_equal(adjusted_rand_score(y, sp.labels_), 1)

    # Default (rbf) affinity with an explicit gamma.
    sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)

    # Unstructured data: only check that fitting produces one label per
    # sample for every available kernel.
    X = check_random_state(10).rand(10, 5) * 10

    kernels_available = kernel_metrics()
    for kern in kernels_available:
        # Additive chi^2 gives a negative similarity matrix which
        # doesn't make sense for spectral clustering
        if kern != 'additive_chi2':
            sp = SpectralClustering(n_clusters=2, affinity=kern,
                                    random_state=0)
            labels = sp.fit(X).labels_
            assert_equal((X.shape[0],), labels.shape)

    # A constant callable affinity is accepted too.
    sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
                            random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)

    def histogram(x, y, **kwargs):
        # Histogram kernel implemented as a callable.
        assert_equal(kwargs, {})  # no kernel_params that we didn't ask for
        return np.minimum(x, y).sum()

    sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)

    # raise error on unknown affinity
    sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
    assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
    """Check that ``discretize`` recovers labels from a noisy assignment.

    Builds a noisy one-hot assignment matrix for random labels and checks
    that ``discretize`` recovers a labeling close to the ground truth.

    Fixes two uses of APIs removed from recent numpy: ``np.float`` (a
    deprecated alias of the builtin ``float``) and
    ``RandomState.random_integers(0, n)`` (exactly equivalent to
    ``randint(0, n + 1)``, drawing the same values from the same stream).
    """
    # Test the discretize using a noise assignment matrix
    random_state = np.random.RandomState(seed)
    for n_samples in [50, 100, 150, 500]:
        for n_class in range(2, 10):
            # random class labels in [0, n_class], inclusive
            y_true = random_state.randint(0, n_class + 1, n_samples)
            y_true = np.array(y_true, float)
            # noise class assignment matrix
            y_indicator = sparse.coo_matrix((np.ones(n_samples),
                                             (np.arange(n_samples),
                                              y_true)),
                                            shape=(n_samples,
                                                   n_class + 1))
            y_true_noisy = (y_indicator.toarray()
                            + 0.1 * random_state.randn(n_samples,
                                                       n_class + 1))
            y_pred = discretize(y_true_noisy, random_state)
            assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
aolindahl/streaking | area_fill.py | 1 | 3843 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 29 08:55:06 2015
@author: antlin
"""
import numpy as np
import process_hdf5
import matplotlib.pyplot as plt
def zero_crossing_area(y):
    """Sum the samples of the main peak of *y* between its zero crossings.

    The peak is the global maximum; the window runs from just after the
    last negative sample before the peak up to (excluding) the first
    negative sample after it.  Returns ``(area, [start, end])``, or
    ``(nan, [nan, nan])`` when the peak is not bracketed by negative
    samples on both sides.
    """
    peak = np.argmax(y)
    # First index at or after the peak where the signal goes negative.
    end = peak + np.argmax(y[peak:] < 0)
    # One past the last negative sample before (or at) the peak.
    start = peak - np.argmax(y[peak::-1] < 0) + 1

    # argmax of an all-False mask is 0, which leaves the window
    # degenerate on that side; report failure in that case.
    if (end <= peak) or (start > peak):
        return np.nan, [np.nan, np.nan]

    return y[start:end].sum(), [start, end]
if __name__ == '__main__':
    plt.ion()
    # get a file
    h5 = process_hdf5.load_file(process_hdf5.h5_file_name_funk(101))
    # list the content
    process_hdf5.list_hdf5_content(h5)

    # get the energy scale
    e_axis_eV_full = h5['energy_scale_eV'].value
    # Alternative energy windows, kept for reference:
    # e_slice = slice(np.searchsorted(e_axis_eV_full, 65),
    #                 np.searchsorted(e_axis_eV_full, 150))
    # e_slice = slice(None)
    e_slice = slice(np.searchsorted(e_axis_eV_full, 50), None)
    e_axis_eV = e_axis_eV_full[e_slice]
    de = np.mean(np.diff(e_axis_eV_full))

    # pick a trace at random from the file
    trace = h5['energy_signal'][
        np.random.randint(h5['energy_signal'].shape[0]), e_slice]

    fig = plt.figure('trace')
    plt.clf()
    plt.plot(e_axis_eV, trace)
    plt.plot(e_axis_eV, np.zeros_like(e_axis_eV), '--k')

    # Find zero crossings around peak
    A_peak, [i_start, i_end] = zero_crossing_area(trace)
    # Target area: 10% more than the main peak's own area.
    A_peak *= 1.1
    plt.plot(e_axis_eV[i_start: i_end], trace[i_start: i_end], 'og')
    fig.canvas.draw()

    # Sample indices sorted by decreasing amplitude.
    i_order = np.argsort(trace).tolist()[::-1]

    # Start the window at the global maximum.
    # NOTE(review): temp_start/temp_end are never read afterwards.
    temp_start = i_start = i_order[0]
    temp_end = i_end = i_order[0]

    def area_check(y, start, end, A):
        # Positive-sample area inside [start, end] compared to target A.
        selection = y[start: end+1]
        a = (selection * (selection > 0)).sum()
        return a >= A

    # NOTE(review): A is never used below (area_check is called with
    # A_peak directly).
    A = 0
    i_last = len(trace) - 1
    exit_flag = False
    i_list = []
    # Grow the window [i_start, i_end] towards each successively smaller
    # maximum until the enclosed positive area reaches the target.
    for i_num, i, in enumerate(i_order):
        # Skip samples already inside the current window.
        if (i_start <= i) and (i <= i_end):
            continue
        # Remember samples that are not adjacent to the window (for the
        # diagnostic plot at the end).
        if (i_start - 1 > i) or (i_end + 1 < i):
            i_list.append(i)

        # Interactive debugging aids, kept for reference:
        # plt.plot(e_axis_eV[i_start: i_end], trace[i_start: i_end], '.r')
        # plt.plot(e_axis_eV[i], trace[i], 'c^')
        # fig.canvas.draw()
        # raw_input('enter...')

        # Extend the window to the left until it reaches i, shrinking or
        # growing the right edge to keep the edges balanced.
        while i < i_start:
            i_start -= 1
            if (trace[i_start] > trace[i_start + 1]):
                while (((i_end < i_last) and
                        (trace[i_end + 1] < 0) and (trace[i_end] < 0)) and
                       (trace[i_end - 1] < trace[i_start])):
                    i_end -= 1
            else:
                while (((trace[i_start] < 0) and (trace[i_end] > 0)) and
                       (i_end < i_last) and (trace[i_end] > trace[i_start])):
                    i_end += 1
            if area_check(trace, i_start, i_end, A_peak):
                exit_flag = True
                break
        # Mirror image: extend the window to the right until it reaches i.
        while i > i_end:
            i_end += 1
            if (trace[i_end] > trace[i_end - 1]):
                while (((i_start > 0) and
                        (trace[i_start - 1] < 0) and (trace[i_start] < 0)) and
                       (trace[i_start + 1] < trace[i_end])):
                    i_start += 1
            else:
                while (((trace[i_end] < 0) and (trace[i_start] > 0)) and
                       (i_start > 0) and (trace[i_start] > trace[i_end])):
                    i_start -= 1
            if area_check(trace, i_start, i_end, A_peak):
                exit_flag = True
                break
        if exit_flag:
            # Trim negative samples off both window edges before stopping.
            while trace[i_start] < 0:
                i_start += 1
            while trace[i_end] < 0:
                i_end -= 1
            break

    plt.plot(e_axis_eV[i_start: i_end+1], trace[i_start: i_end+1], '.r')
    plt.plot(e_axis_eV[i_list], trace[i_list], 'co', markersize=15,
             markerfacecolor='none', mec='c', mew=2)
| gpl-2.0 |
jrh154/ChibbarGroup | Phylogeny Scripts/sequence_combiner.py | 2 | 1372 | '''
Combines fasta files based on a list of accession numbers with formated headers
Usage:
python sequence_combiner.py file_info.csv fasta_directory out_directory
'''
from os import listdir, remove
from os.path import join, isfile
import pandas as pd
import sys
def File_Reader(file_info):
    """Read the accession-info CSV into a dict.

    Maps each "Accession Number" to ``[common name, family, protein name]``.
    """
    data_dict = {}
    df = pd.read_csv(file_info)
    for _, record in df.iterrows():
        accession = record["Accession Number"]
        data_dict[accession] = [record["Common Names"],
                                record["Family"],
                                record["Protein Name"]]
    return data_dict
def File_Combiner(data_dict, fasta_path, out_path):
    """Concatenate per-accession fasta files into one combined.fasta.

    Each ``<accession>.fasta`` in *fasta_path* is appended to
    ``combined.fasta`` in *out_path*, with every header line replaced by
    ``>common-name family protein`` from *data_dict*.
    """
    out_file = join(out_path, 'combined.fasta')
    # Start from a clean slate: remove any output from a previous run.
    if isfile(out_file):
        remove(out_file)
        print("I've removed the previous file found here, and saved the new file from this run")
    with open(out_file, 'a') as combined:
        for accession in data_dict:
            source_name = join(fasta_path, accession + '.fasta')
            with open(source_name, 'r') as source:
                for line in source:
                    if ">" in line:
                        # Rewrite the header from the accession metadata.
                        name, family, protein = data_dict[accession]
                        combined.write('>' + name + ' ' + family + ' '
                                       + protein + '\n')
                    else:
                        combined.write(line)
            # Blank line between records from different files.
            combined.write('\n')
# Only run the command-line interface when executed as a script; the
# original ran it unconditionally, which made importing this module for
# its functions trigger the argv check and print the usage message.
if __name__ == '__main__':
    if len(sys.argv) == 4:
        data_dict = File_Reader(sys.argv[1])
        File_Combiner(data_dict, sys.argv[2], sys.argv[3])
    else:
        print("Argument length doesn't match; should have 3 arguments:")
        print("1) File.csv, 2) Fasta directory, 3) Output directory")
| mit |
gef756/seaborn | seaborn/timeseries.py | 8 | 15217 | """Timeseries plotting functions."""
from __future__ import division
import numpy as np
import pandas as pd
from scipy import stats, interpolate
import matplotlib as mpl
import matplotlib.pyplot as plt
from .external.six import string_types
from . import utils
from . import algorithms as algo
from .palettes import color_palette
def tsplot(data, time=None, unit=None, condition=None, value=None,
           err_style="ci_band", ci=68, interpolate=True, color=None,
           estimator=np.mean, n_boot=5000, err_palette=None, err_kws=None,
           legend=True, ax=None, **kwargs):
    """Plot one or more timeseries with flexible representation of uncertainty.
    This function is intended to be used with data where observations are
    nested within sampling units that were measured at multiple timepoints.
    It can take data specified either as a long-form (tidy) DataFrame or as an
    ndarray with dimensions (unit, time) The interpretation of some of the
    other parameters changes depending on the type of object passed as data.
    Parameters
    ----------
    data : DataFrame or ndarray
        Data for the plot. Should either be a "long form" dataframe or an
        array with dimensions (unit, time, condition). In both cases, the
        condition field/dimension is optional. The type of this argument
        determines the interpretation of the next few parameters. When
        using a DataFrame, the index has to be sequential.
    time : string or series-like
        Either the name of the field corresponding to time in the data
        DataFrame or x values for a plot when data is an array. If a Series,
        the name will be used to label the x axis.
    unit : string
        Field in the data DataFrame identifying the sampling unit (e.g.
        subject, neuron, etc.). The error representation will collapse over
        units at each time/condition observation. This has no role when data
        is an array.
    value : string
        Either the name of the field corresponding to the data values in
        the data DataFrame (i.e. the y coordinate) or a string that forms
        the y axis label when data is an array.
    condition : string or Series-like
        Either the name of the field identifying the condition an observation
        falls under in the data DataFrame, or a sequence of names with a length
        equal to the size of the third dimension of data. There will be a
        separate trace plotted for each condition. If condition is a Series
        with a name attribute, the name will form the title for the plot
        legend (unless legend is set to False).
    err_style : string or list of strings or None
        Names of ways to plot uncertainty across units from set of
        {ci_band, ci_bars, boot_traces, boot_kde, unit_traces, unit_points}.
        Can use one or more than one method.
    ci : float or list of floats in [0, 100]
        Confidence interval size(s). If a list, it will stack the error
        plots for each confidence interval. Only relevant for error styles
        with "ci" in the name.
    interpolate : boolean
        Whether to do a linear interpolation between each timepoint when
        plotting. The value of this parameter also determines the marker
        used for the main plot traces, unless marker is specified as a keyword
        argument.
    color : seaborn palette or matplotlib color name or dictionary
        Palette or color for the main plots and error representation (unless
        plotting by unit, which can be separately controlled with err_palette).
        If a dictionary, should map condition name to color spec.
    estimator : callable
        Function to determine central tendency and to pass to bootstrap
        must take an ``axis`` argument.
    n_boot : int
        Number of bootstrap iterations.
    err_palette : seaborn palette
        Palette name or list of colors used when plotting data for each unit.
    err_kws : dict, optional
        Keyword argument dictionary passed through to matplotlib function
        generating the error plot,
    legend : bool, optional
        If ``True`` and there is a ``condition`` variable, add a legend to
        the plot.
    ax : axis object, optional
        Plot in given axis; if None creates a new figure
    kwargs :
        Other keyword arguments are passed to main plot() call
    Returns
    -------
    ax : matplotlib axis
        axis with plot data
    Examples
    --------
    Plot a trace with translucent confidence bands:
    .. plot::
        :context: close-figs
        >>> import numpy as np; np.random.seed(22)
        >>> import seaborn as sns; sns.set(color_codes=True)
        >>> x = np.linspace(0, 15, 31)
        >>> data = np.sin(x) + np.random.rand(10, 31) + np.random.randn(10, 1)
        >>> ax = sns.tsplot(data=data)
    Plot a long-form dataframe with several conditions:
    .. plot::
        :context: close-figs
        >>> gammas = sns.load_dataset("gammas")
        >>> ax = sns.tsplot(time="timepoint", value="BOLD signal",
        ...                 unit="subject", condition="ROI",
        ...                 data=gammas)
    Use error bars at the positions of the observations:
    .. plot::
        :context: close-figs
        >>> ax = sns.tsplot(data=data, err_style="ci_bars", color="g")
    Don't interpolate between the observations:
    .. plot::
        :context: close-figs
        >>> import matplotlib.pyplot as plt
        >>> ax = sns.tsplot(data=data, err_style="ci_bars", interpolate=False)
    Show multiple confidence bands:
    .. plot::
        :context: close-figs
        >>> ax = sns.tsplot(data=data, ci=[68, 95], color="m")
    Use a different estimator:
    .. plot::
        :context: close-figs
        >>> ax = sns.tsplot(data=data, estimator=np.median)
    Show each bootstrap resample:
    .. plot::
        :context: close-figs
        >>> ax = sns.tsplot(data=data, err_style="boot_traces", n_boot=500)
    Show the trace from each sampling unit:
    .. plot::
        :context: close-figs
        >>> ax = sns.tsplot(data=data, err_style="unit_traces")
    """
    # Sort out default values for the parameters
    if ax is None:
        ax = plt.gca()
    if err_kws is None:
        err_kws = {}
    # Handle different types of input data
    if isinstance(data, pd.DataFrame):
        xlabel = time
        ylabel = value
        # Condition is optional
        if condition is None:
            condition = pd.Series(np.ones(len(data)))
            legend = False
            legend_name = None
            n_cond = 1
        else:
            legend = True and legend
            legend_name = condition
            n_cond = len(data[condition].unique())
    else:
        data = np.asarray(data)
        # Data can be a timecourse from a single unit or
        # several observations in one condition
        if data.ndim == 1:
            data = data[np.newaxis, :, np.newaxis]
        elif data.ndim == 2:
            data = data[:, :, np.newaxis]
        n_unit, n_time, n_cond = data.shape
        # Units are experimental observations. Maybe subjects, or neurons
        if unit is None:
            units = np.arange(n_unit)
        unit = "unit"
        units = np.repeat(units, n_time * n_cond)
        ylabel = None
        # Time forms the xaxis of the plot
        if time is None:
            times = np.arange(n_time)
        else:
            times = np.asarray(time)
        xlabel = None
        if hasattr(time, "name"):
            xlabel = time.name
        time = "time"
        times = np.tile(np.repeat(times, n_cond), n_unit)
        # Conditions split the timeseries plots
        if condition is None:
            conds = range(n_cond)
            legend = False
            if isinstance(color, dict):
                err = "Must have condition names if using color dict."
                raise ValueError(err)
        else:
            conds = np.asarray(condition)
            legend = True and legend
            if hasattr(condition, "name"):
                legend_name = condition.name
            else:
                legend_name = None
        condition = "cond"
        conds = np.tile(conds, n_unit * n_time)
        # Value forms the y value in the plot
        if value is None:
            ylabel = None
        else:
            ylabel = value
        value = "value"
        # Convert to long-form DataFrame
        data = pd.DataFrame(dict(value=data.ravel(),
                                 time=times,
                                 unit=units,
                                 cond=conds))
    # Set up the err_style and ci arguments for the loop below
    if isinstance(err_style, string_types):
        err_style = [err_style]
    elif err_style is None:
        err_style = []
    if not hasattr(ci, "__iter__"):
        ci = [ci]
    # Set up the color palette
    if color is None:
        # FIX: "axes.color_cycle" was deprecated in matplotlib 1.5 and removed
        # in 2.0; read the default colors from "axes.prop_cycle" instead and
        # fall back to the old rcParam for very old matplotlib versions.
        try:
            current_palette = mpl.rcParams["axes.prop_cycle"].by_key()["color"]
        except KeyError:
            current_palette = mpl.rcParams["axes.color_cycle"]
        if len(current_palette) < n_cond:
            colors = color_palette("husl", n_cond)
        else:
            colors = color_palette(n_colors=n_cond)
    elif isinstance(color, dict):
        colors = [color[c] for c in data[condition].unique()]
    else:
        try:
            colors = color_palette(color, n_cond)
        except ValueError:
            color = mpl.colors.colorConverter.to_rgb(color)
            colors = [color] * n_cond
    # Do a groupby with condition and plot each trace
    for c, (cond, df_c) in enumerate(data.groupby(condition, sort=False)):
        # FIX: DataFrame.pivot arguments are keyword-only since pandas 2.0
        df_c = df_c.pivot(index=unit, columns=time, values=value)
        # FIX: np.float was removed in NumPy 1.24; the builtin is equivalent
        x = df_c.columns.values.astype(float)
        # Bootstrap the data for confidence intervals
        boot_data = algo.bootstrap(df_c.values, n_boot=n_boot,
                                   axis=0, func=estimator)
        cis = [utils.ci(boot_data, v, axis=0) for v in ci]
        central_data = estimator(df_c.values, axis=0)
        # Get the color for this condition
        color = colors[c]
        # Use subroutines to plot the uncertainty
        for style in err_style:
            # Allow for null style (only plot central tendency)
            if style is None:
                continue
            # Grab the function from the global environment
            try:
                plot_func = globals()["_plot_%s" % style]
            except KeyError:
                raise ValueError("%s is not a valid err_style" % style)
            # Possibly set up to plot each observation in a different color
            if err_palette is not None and "unit" in style:
                orig_color = color
                color = color_palette(err_palette, len(df_c.values))
            # Pass all parameters to the error plotter as keyword args
            plot_kwargs = dict(ax=ax, x=x, data=df_c.values,
                               boot_data=boot_data,
                               central_data=central_data,
                               color=color, err_kws=err_kws)
            # Plot the error representation, possibly for multiple cis
            for ci_i in cis:
                plot_kwargs["ci"] = ci_i
                plot_func(**plot_kwargs)
            if err_palette is not None and "unit" in style:
                color = orig_color
        # Plot the central trace
        kwargs.setdefault("marker", "" if interpolate else "o")
        ls = kwargs.pop("ls", "-" if interpolate else "")
        kwargs.setdefault("linestyle", ls)
        label = cond if legend else "_nolegend_"
        ax.plot(x, central_data, color=color, label=label, **kwargs)
    # Pad the sides of the plot only when not interpolating.
    # FIX: only compute the padding inside the guard so that a series with a
    # single timepoint no longer raises IndexError when interpolate=True.
    ax.set_xlim(x.min(), x.max())
    if not interpolate:
        x_diff = x[1] - x[0]
        ax.set_xlim(x.min() - x_diff, x.max() + x_diff)
    # Add the plot labels
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    if legend:
        ax.legend(loc=0, title=legend_name)
    return ax
# Subroutines for tsplot errorbar plotting
# ----------------------------------------
def _plot_ci_band(ax, x, ci, color, err_kws, **kwargs):
    """Shade the confidence interval as a translucent band around the trace."""
    err_kws.setdefault("alpha", 0.2)
    band_low, band_high = ci
    ax.fill_between(x, band_low, band_high, color=color, **err_kws)
def _plot_ci_bars(ax, x, central_data, ci, color, err_kws, **kwargs):
    """Draw a vertical error bar through each timepoint of the trace."""
    for x_pos, _, (lo, hi) in zip(x, central_data, ci.T):
        ax.plot([x_pos, x_pos], [lo, hi], color=color,
                solid_capstyle="round", **err_kws)
def _plot_boot_traces(ax, x, boot_data, color, err_kws, **kwargs):
    """Overlay one faint line per bootstrap resample."""
    for key, default in (("alpha", 0.25), ("linewidth", 0.25)):
        err_kws.setdefault(key, default)
    # "lw" is a matplotlib alias for "linewidth"; normalize so only the
    # long-form key reaches ax.plot (user-supplied "lw" wins over the default)
    if "lw" in err_kws:
        err_kws["linewidth"] = err_kws.pop("lw")
    ax.plot(x, boot_data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_traces(ax, x, data, ci, color, err_kws, **kwargs):
    """Draw one line per sampling unit in the original data.

    ``color`` may be a single color (all units share it) or a list with one
    entry per unit.
    """
    if isinstance(color, list):
        err_kws.setdefault("alpha", .5)
        for i, unit_data in enumerate(data):
            ax.plot(x, unit_data, color=color[i],
                    label="_nolegend_", **err_kws)
    else:
        err_kws.setdefault("alpha", .2)
        ax.plot(x, data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_points(ax, x, data, color, err_kws, **kwargs):
    """Mark every raw observation with a small discrete point."""
    if not isinstance(color, list):
        # Single shared color: plot all units in one call
        ax.plot(x, data.T, "o", color=color, alpha=0.5, markersize=4,
                label="_nolegend_", **err_kws)
        return
    # One color per unit: plot each unit's observations separately
    for i, unit_data in enumerate(data):
        ax.plot(x, unit_data, "o", color=color[i], alpha=0.8, markersize=4,
                label="_nolegend_", **err_kws)
def _plot_boot_kde(ax, x, boot_data, color, **kwargs):
    """Render the bootstrap distribution as a kernel density image."""
    # The raw per-unit data is not needed for the bootstrap KDE
    del kwargs["data"]
    _ts_kde(ax, x, boot_data, color, **kwargs)
def _plot_unit_kde(ax, x, data, color, **kwargs):
    """Render a kernel density estimate over the per-unit sample."""
    _ts_kde(ax, x, data, color, **kwargs)
def _ts_kde(ax, x, data, color, **kwargs):
    """Upsample over time and plot a KDE of the bootstrap distribution."""
    # Extra keyword args (e.g. ci, central_data, err_kws from the generic
    # plot-kwargs dict) are accepted here but intentionally unused.
    kde_data = []
    y_min, y_max = data.min(), data.max()
    y_vals = np.linspace(y_min, y_max, 100)
    # Linearly upsample the traces to a 100-point time grid
    upsampler = interpolate.interp1d(x, data)
    data_upsample = upsampler(np.linspace(x.min(), x.max(), 100))
    # Fit one Gaussian KDE per upsampled timepoint, evaluated on a common
    # y grid, giving a (y, time) density array after the transpose below
    for pt_data in data_upsample.T:
        pt_kde = stats.kde.gaussian_kde(pt_data)
        kde_data.append(pt_kde(y_vals))
    kde_data = np.transpose(kde_data)
    # Build an RGBA image: constant hue from `color`, alpha proportional to
    # the density (each time slice normalized so its peak alpha is 1)
    rgb = mpl.colors.ColorConverter().to_rgb(color)
    img = np.zeros((kde_data.shape[0], kde_data.shape[1], 4))
    img[:, :, :3] = rgb
    kde_data /= kde_data.max(axis=0)
    kde_data[kde_data > 1] = 1
    img[:, :, 3] = kde_data
    ax.imshow(img, interpolation="spline16", zorder=2,
              extent=(x.min(), x.max(), y_min, y_max),
              aspect="auto", origin="lower")
| bsd-3-clause |
shuggiefisher/brain4k | brain4k/transforms/b4k/__init__.py | 2 | 3461 | import logging
import itertools
from copy import deepcopy
import pandas as pd
from brain4k.transforms import PipelineStage
class DataJoin(PipelineStage):
    """
    Perform a left join across two datasources.
    Useful in case one stage of the pipeline depends upon results generated
    at a prior stage, and the inputs have to be matched via an index
    """
    name = "org.brain4k.transforms.DataJoin"

    def join(self):
        """Left-join the two input datasources and persist the result.

        Reads the join-key column from the first (left) input and the
        key/retained columns from the second (right) input, left-joins them,
        drops rows with missing values, and writes the retained left columns
        (chunked, copied from the left HDF5 file) plus the retained right
        columns (from the merged frame) to the single output.

        Raises
        ------
        ValueError
            If there are not exactly two inputs and one output.
        """
        if len(self.inputs) != 2:
            raise ValueError("Expecting two inputs to perform a join")
        if len(self.outputs) != 1:
            raise ValueError("Expecting one output for saving the join results")
        logging.info(
            "Starting join between {0} and {1} for {2}..."\
            .format(
                self.inputs[0].filename,
                self.inputs[1].filename,
                self.outputs[0].filename
            )
        )
        left_index = self.inputs[0].io.read_all([self.parameters['left_on']])
        left_index_flattened = left_index[self.parameters['left_on']].flatten()
        # create a minimal dataframe for the left part of the join
        left = pd.DataFrame({self.parameters['left_on']: left_index_flattened})
        right_keys = set([self.parameters['right_on']]) | set(self.parameters['retain_keys']['right'])
        right = self.inputs[1].io.read_all(usecols=list(right_keys))
        df = pd.merge(
            left,
            right,
            how='left',
            left_on=self.parameters['left_on'],
            right_on=self.parameters['right_on']
        ).dropna()
        h5py_file = self.outputs[0].io.open(mode='w')
        h5py_left = self.inputs[0].io.open()
        # FIX: .items() instead of the Python-2-only .iteritems()
        left_output_keys = {k: v for k, v in self.parameters['output_keys'].items() if k in self.parameters['retain_keys']['left']}
        right_output_keys = {k: v for k, v in self.parameters['output_keys'].items() if k in self.parameters['retain_keys']['right']}
        # resize every retained dataset to the number of joined rows
        for keyset in (left_output_keys, right_output_keys):
            for key in keyset:
                keyset[key]['shape'][0] = df.shape[0]
        # BUG FIX: dict.update() returns None, so the previous expression
        # ``deepcopy(left_output_keys).update(right_output_keys)`` passed None
        # to create_dataset. Build the merged mapping explicitly instead.
        merged_output_keys = deepcopy(left_output_keys)
        merged_output_keys.update(right_output_keys)
        self.outputs[0].io.create_dataset(
            h5py_file,
            self.parameters['output_keys'],
            merged_output_keys
        )
        # first copy the left side in chunks
        write_chunk_size = 1000
        for index, chunk in enumerate(grouper(write_chunk_size, df.index)):
            self.outputs[0].io.write_chunk(
                h5py_file,
                {k: h5py_left[k][list(chunk)] for k in left_output_keys.keys()},
                left_output_keys,
                index*write_chunk_size
            )
        self.inputs[0].io.close(h5py_left)
        # now copy the right side in from the merged dataframe
        self.outputs[0].io.write_chunk(
            h5py_file,
            # may be better to do this the conversion within the method
            {k: df[k].values.astype(right_output_keys[k]['dtype']) for k in right_output_keys.keys()},
            right_output_keys
        )
        self.outputs[0].io.save(h5py_file)
        logging.info(
            "Completed join saved as {0}".format(self.outputs[0].filename)
        )
def grouper(n, iterable):
    """
    Yield successive tuples of up to ``n`` items from ``iterable``;
    the final tuple may be shorter.
    """
    it = iter(iterable)
    # two-arg iter(): keep slicing n items at a time until the empty
    # tuple sentinel signals exhaustion
    return iter(lambda: tuple(itertools.islice(it, n)), ())
appapantula/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
    """Check category subsetting of the 20 newsgroups dataset.

    Skipped unless the data has already been downloaded
    (``download_if_missing=False``).
    """
    try:
        data = datasets.fetch_20newsgroups(
            subset='all', download_if_missing=False, shuffle=False)
    except IOError:
        raise SkipTest("Download 20 newsgroups to run this test")
    # Extract a reduced dataset: the last two categories, in reverse order
    data2cats = datasets.fetch_20newsgroups(
        subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
    # Check that the ordering of the target_names is the same
    # as the ordering in the full dataset
    assert_equal(data2cats.target_names,
                 data.target_names[-2:])
    # Assert that we have only 0 and 1 as labels
    assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
    # Check that the number of filenames is consistent with data/target
    assert_equal(len(data2cats.filenames), len(data2cats.target))
    assert_equal(len(data2cats.filenames), len(data2cats.data))
    # Check that the first entry of the reduced dataset corresponds to
    # the first entry of the corresponding category in the full dataset
    entry1 = data2cats.data[0]
    category = data2cats.target_names[data2cats.target[0]]
    label = data.target_names.index(category)
    entry2 = data.data[np.where(data.target == label)[0][0]]
    assert_equal(entry1, entry2)
def test_20news_length_consistency():
    """Checks the length consistencies within the bunch
    This is a non-regression test for a bug present in 0.16.1.
    """
    # Probe for data availability without downloading; skip when absent
    try:
        data = datasets.fetch_20newsgroups(
            subset='all', download_if_missing=False, shuffle=False)
    except IOError:
        raise SkipTest("Download 20 newsgroups to run this test")
    # Extract the full dataset
    # NOTE(review): this second fetch omits download_if_missing=False and
    # discards the `data` fetched above -- confirm whether it could trigger
    # a download despite the skip guard.
    data = datasets.fetch_20newsgroups(subset='all')
    # Bunch supports both dict-style and attribute access; both views must
    # report the same lengths
    assert_equal(len(data['data']), len(data.data))
    assert_equal(len(data['target']), len(data.target))
    assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
    """Check shapes/dtypes of the vectorized 20 newsgroups splits."""
    # This test is slow.
    # NOTE(review): the unconditional skip below makes everything after it
    # unreachable; the assertions are kept for when the skip is lifted.
    raise SkipTest("Test too slow.")
    # train split: 11314 documents over a 107428-term vocabulary
    bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
    assert_true(sp.isspmatrix_csr(bunch.data))
    assert_equal(bunch.data.shape, (11314, 107428))
    assert_equal(bunch.target.shape[0], 11314)
    assert_equal(bunch.data.dtype, np.float64)
    # test split: 7532 documents, same vocabulary
    bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
    assert_true(sp.isspmatrix_csr(bunch.data))
    assert_equal(bunch.data.shape, (7532, 107428))
    assert_equal(bunch.target.shape[0], 7532)
    assert_equal(bunch.data.dtype, np.float64)
    # "all" must be the concatenation of train and test
    bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
    assert_true(sp.isspmatrix_csr(bunch.data))
    assert_equal(bunch.data.shape, (11314 + 7532, 107428))
    assert_equal(bunch.target.shape[0], 11314 + 7532)
    assert_equal(bunch.data.dtype, np.float64)
SciTools/cartopy | lib/cartopy/tests/mpl/test_web_services.py | 2 | 1691 | # Copyright Cartopy Contributors
#
# This file is part of Cartopy and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import cleanup
import pytest
from cartopy.tests.mpl import ImageTesting
import cartopy.crs as ccrs
from cartopy.io.ogc_clients import _OWSLIB_AVAILABLE
@pytest.mark.filterwarnings("ignore:TileMatrixLimits")
@pytest.mark.network
@pytest.mark.skipif(not _OWSLIB_AVAILABLE, reason='OWSLib is unavailable.')
@pytest.mark.xfail(raises=KeyError, reason='OWSLib WMTS support is broken.')
@ImageTesting(['wmts'], tolerance=0)
def test_wmts():
    """Image test: add a NASA Earthdata WMTS layer to a PlateCarree map."""
    ax = plt.axes(projection=ccrs.PlateCarree())
    url = 'https://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi'
    # Use a layer which doesn't change over time.
    ax.add_wmts(url, 'MODIS_Water_Mask')
@pytest.mark.network
@pytest.mark.skipif(not _OWSLIB_AVAILABLE, reason='OWSLib is unavailable.')
@cleanup
def test_wms_tight_layout():
    """Smoke test: tight_layout must not raise on an axes with a WMS layer."""
    ax = plt.axes(projection=ccrs.PlateCarree())
    url = 'http://vmap0.tiles.osgeo.org/wms/vmap0'
    layer = 'basic'
    ax.add_wms(url, layer)
    ax.figure.tight_layout()
@pytest.mark.network
@pytest.mark.xfail((5, 0, 0) <= ccrs.PROJ4_VERSION < (5, 1, 0),
                   reason='Proj Orthographic projection is buggy.',
                   strict=True)
@pytest.mark.skipif(not _OWSLIB_AVAILABLE, reason='OWSLib is unavailable.')
@ImageTesting(['wms'], tolerance=0.02)
def test_wms():
    """Image test: reproject a WMS layer onto an Orthographic map."""
    ax = plt.axes(projection=ccrs.Orthographic())
    url = 'http://vmap0.tiles.osgeo.org/wms/vmap0'
    layer = 'basic'
    ax.add_wms(url, layer)
| lgpl-3.0 |
jkarnows/scikit-learn | examples/ensemble/plot_voting_decision_regions.py | 230 | 2386 | """
==================================================
Plot the decision boundaries of a VotingClassifier
==================================================
Plot the decision boundaries of a `VotingClassifier` for
two features of the Iris dataset.
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three examplary classifiers are initialized (`DecisionTreeClassifier`,
`KNeighborsClassifier`, and `SVC`) and used to initialize a
soft-voting `VotingClassifier` with weights `[2, 1, 2]`, which means that
the predicted probabilities of the `DecisionTreeClassifier` and `SVC`
count 5 times as much as the weights of the `KNeighborsClassifier` classifier
when the averaged probability is calculated.
"""
print(__doc__)
from itertools import product
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import VotingClassifier
# Loading some example data; keep two feature columns so the decision
# regions can be drawn in 2-D
iris = datasets.load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
# Training classifiers
clf1 = DecisionTreeClassifier(max_depth=4)
clf2 = KNeighborsClassifier(n_neighbors=7)
clf3 = SVC(kernel='rbf', probability=True)  # probability=True needed for soft voting
eclf = VotingClassifier(estimators=[('dt', clf1), ('knn', clf2),
                                    ('svc', clf3)],
                        voting='soft', weights=[2, 1, 2])
clf1.fit(X, y)
clf2.fit(X, y)
clf3.fit(X, y)
eclf.fit(X, y)
# Plotting decision regions: evaluate each classifier on a 0.1-spaced grid
# covering the data range plus a 1-unit margin
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
                     np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(2, 2, sharex='col', sharey='row', figsize=(10, 8))
# One subplot per classifier; product([0, 1], [0, 1]) walks the 2x2 grid
for idx, clf, tt in zip(product([0, 1], [0, 1]),
                        [clf1, clf2, clf3, eclf],
                        ['Decision Tree (depth=4)', 'KNN (k=7)',
                         'Kernel SVM', 'Soft Voting']):
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.4)
    axarr[idx[0], idx[1]].scatter(X[:, 0], X[:, 1], c=y, alpha=0.8)
    axarr[idx[0], idx[1]].set_title(tt)
plt.show()
kambysese/mne-python | tutorials/source-modeling/plot_beamformer_lcmv.py | 10 | 12809 | """
Source reconstruction using an LCMV beamformer
==============================================
This tutorial gives an overview of the beamformer method
and shows how to reconstruct source activity using an LCMV beamformer.
"""
# Authors: Britta Westner <britta.wstnr@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample, fetch_fsaverage
from mne.beamformer import make_lcmv, apply_lcmv
###############################################################################
# Introduction to beamformers
# ---------------------------
# A beamformer is a spatial filter that reconstructs source activity by
# scanning through a grid of pre-defined source points and estimating activity
# at each of those source points independently. A set of weights is
# constructed for each defined source location which defines the contribution
# of each sensor to this source.
# Beamformers are often used for their focal reconstructions and their ability
# to reconstruct deeper sources. They can also suppress external noise sources.
# The beamforming method applied in this tutorial is the linearly constrained
# minimum variance (LCMV) beamformer :footcite:`VanVeenEtAl1997` operates on
# time series.
# Frequency-resolved data can be reconstructed with the dynamic imaging of
# coherent sources (DICS) beamforming method :footcite:`GrossEtAl2001`.
# As we will see in the following, the spatial filter is computed from two
# ingredients: the forward model solution and the covariance matrix of the
# data.
###############################################################################
# Data processing
# ---------------
# We will use the sample data set for this tutorial and reconstruct source
# activity on the trials with left auditory stimulation.
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Read the raw data
raw = mne.io.read_raw_fif(raw_fname)
raw.info['bads'] = ['MEG 2443']  # bad MEG channel
# Set up the epoching
event_id = 1  # those are the trials with left-ear auditory stimuli
tmin, tmax = -0.2, 0.5
events = mne.find_events(raw)
# pick relevant channels
raw.pick(['meg', 'eog'])  # pick channels of interest
# Create epochs; reject thresholds drop artifact-contaminated trials
proj = False  # already applied
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
                    baseline=(None, 0), preload=True, proj=proj,
                    reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
# for speed purposes, cut to a window of interest around the expected
# auditory response
evoked = epochs.average().crop(0.05, 0.15)
# Visualize averaged sensor space data
evoked.plot_joint()
del raw  # save memory
###############################################################################
# Computing the covariance matrices
# ---------------------------------
# Spatial filters use the data covariance to estimate the filter
# weights. The data covariance matrix will be `inverted`_ during the spatial
# filter computation, so it is valuable to plot the covariance matrix and its
# eigenvalues to gauge whether matrix inversion will be possible.
# Also, because we want to combine different channel types (magnetometers and
# gradiometers), we need to account for the different amplitude scales of these
# channel types. To do this we will supply a noise covariance matrix to the
# beamformer, which will be used for whitening.
# The data covariance matrix should be estimated from a time window that
# includes the brain signal of interest,
# and incorporate enough samples for a stable estimate. A rule of thumb is to
# use more samples than there are channels in the data set; see
# :footcite:`BrookesEtAl2008` for more detailed advice on covariance estimation
# for beamformers. Here, we use a time
# window incorporating the expected auditory response at around 100 ms post
# stimulus and extend the period to account for a low number of trials (72) and
# low sampling rate of 150 Hz.
# Data covariance over the post-stimulus window (signal of interest);
# noise covariance over the pre-stimulus baseline (tmin..0)
data_cov = mne.compute_covariance(epochs, tmin=0.01, tmax=0.25,
                                  method='empirical')
noise_cov = mne.compute_covariance(epochs, tmin=tmin, tmax=0,
                                   method='empirical')
data_cov.plot(epochs.info)
del epochs  # no longer needed; free memory
###############################################################################
# When looking at the covariance matrix plots, we can see that our data is
# slightly rank-deficient as the rank is not equal to the number of channels.
# Thus, we will have to regularize the covariance matrix before inverting it
# in the beamformer calculation. This can be achieved by setting the parameter
# ``reg=0.05`` when calculating the spatial filter with
# :func:`~mne.beamformer.make_lcmv`. This corresponds to loading the diagonal
# of the covariance matrix with 5% of the sensor power.
###############################################################################
# The forward model
# -----------------
# The forward model is the other important ingredient for the computation of a
# spatial filter. Here, we will load the forward model from disk; more
# information on how to create a forward model can be found in this tutorial:
# :ref:`tut-forward`.
# Note that beamformers are usually computed in a :class:`volume source space
# <mne.VolSourceEstimate>`, because estimating only cortical surface
# activation can misrepresent the data.
# Read forward model
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-vol-7-fwd.fif'
forward = mne.read_forward_solution(fwd_fname)
###############################################################################
# Handling depth bias
# -------------------
#
# The forward model solution is inherently biased toward superficial sources.
# When analyzing single conditions it is best to mitigate the depth bias
# somehow. There are several ways to do this:
#
# - :func:`mne.beamformer.make_lcmv` has a ``depth`` parameter that normalizes
# the forward model prior to computing the spatial filters. See the docstring
# for details.
# - Unit-noise gain beamformers handle depth bias by normalizing the
# weights of the spatial filter. Choose this by setting
# ``weight_norm='unit-noise-gain'``.
# - When computing the Neural activity index, the depth bias is handled by
# normalizing both the weights and the estimated noise (see
# :footcite:`VanVeenEtAl1997`). Choose this by setting ``weight_norm='nai'``.
#
# Note that when comparing conditions, the depth bias will cancel out and it is
# possible to set both parameters to ``None``.
#
#
# Compute the spatial filter
# --------------------------
# Now we can compute the spatial filter. We'll use a unit-noise gain beamformer
# to deal with depth bias, and will also optimize the orientation of the
# sources such that output power is maximized.
# This is achieved by setting ``pick_ori='max-power'``.
# This gives us one source estimate per source (i.e., voxel), which is known
# as a scalar beamformer.
# Scalar beamformer: unit-noise-gain weight normalization handles depth
# bias; pick_ori='max-power' optimizes source orientation, giving one
# time course per voxel. reg=0.05 regularizes the rank-deficient covariance.
filters = make_lcmv(evoked.info, forward, data_cov, reg=0.05,
                    noise_cov=noise_cov, pick_ori='max-power',
                    weight_norm='unit-noise-gain', rank=None)
# You can save the filter for later use with:
# filters.save('filters-lcmv.h5')
###############################################################################
# It is also possible to compute a vector beamformer, which gives back three
# estimates per voxel, corresponding to the three direction components of the
# source. This can be achieved by setting
# ``pick_ori='vector'`` and will yield a :class:`volume vector source estimate
# <mne.VolVectorSourceEstimate>`. So we will compute another set of filters
# using the vector beamformer approach:
# Vector beamformer: pick_ori='vector' keeps all three dipole components
# per voxel instead of collapsing to the max-power orientation
filters_vec = make_lcmv(evoked.info, forward, data_cov, reg=0.05,
                        noise_cov=noise_cov, pick_ori='vector',
                        weight_norm='unit-noise-gain', rank=None)
# save a bit of memory
src = forward['src']
del forward
###############################################################################
# Apply the spatial filter
# ------------------------
# The spatial filter can be applied to different data types: raw, epochs,
# evoked data or the data covariance matrix to gain a static image of power.
# The function to apply the spatial filter to :class:`~mne.Evoked` data is
# :func:`~mne.beamformer.apply_lcmv` which is
# what we will use here. The other functions are
# :func:`~mne.beamformer.apply_lcmv_raw`,
# :func:`~mne.beamformer.apply_lcmv_epochs`, and
# :func:`~mne.beamformer.apply_lcmv_cov`.
# Apply both spatial filters to the evoked response: scalar -> one time
# course per voxel, vector -> three (x, y, z dipole components)
stc = apply_lcmv(evoked, filters, max_ori_out='signed')
stc_vec = apply_lcmv(evoked, filters_vec, max_ori_out='signed')
del filters, filters_vec  # filters no longer needed; free memory
###############################################################################
# Visualize the reconstructed source activity
# -------------------------------------------
# We can visualize the source estimate in different ways, e.g. as a volume
# rendering, an overlay onto the MRI, or as an overlay onto a glass brain.
#
# The plots for the scalar beamformer show brain activity in the right temporal
# lobe around 100 ms post stimulus. This is expected given the left-ear
# auditory stimulation of the experiment.
lims = [0.3, 0.45, 0.6]
kwargs = dict(src=src, subject='sample', subjects_dir=subjects_dir,
initial_time=0.087, verbose=True)
###############################################################################
# On MRI slices (orthoview; 2D)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
stc.plot(mode='stat_map', clim=dict(kind='value', pos_lims=lims), **kwargs)
###############################################################################
# On MNI glass brain (orthoview; 2D)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
stc.plot(mode='glass_brain', clim=dict(kind='value', lims=lims), **kwargs)
###############################################################################
# Volumetric rendering (3D) with vectors
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# These plots can also be shown using a volumetric rendering via
# :meth:`~mne.VolVectorSourceEstimate.plot_3d`. Let's try visualizing the
# vector beamformer case. Here we get three source time courses out per voxel
# (one for each component of the dipole moment: x, y, and z), which appear
# as small vectors in the visualization (in the 2D plotters, only the
# magnitude can be shown):
# sphinx_gallery_thumbnail_number = 7
brain = stc_vec.plot_3d(
clim=dict(kind='value', lims=lims), hemi='both',
views=['coronal', 'sagittal', 'axial'], size=(800, 300),
view_layout='horizontal', show_traces=0.3,
brain_kwargs=dict(silhouette=True), **kwargs)
###############################################################################
# Visualize the activity of the maximum voxel with all three components
# ---------------------------------------------------------------------
# We can also visualize all three components in the peak voxel. For this, we
# will first find the peak voxel and then plot the time courses of this voxel.
# Find the voxel with peak activity near the expected response latency
# (vert_as_index=True returns a positional index into stc_vec.data)
peak_vox, _ = stc_vec.get_peak(tmin=0.08, tmax=0.1, vert_as_index=True)
ori_labels = ['x', 'y', 'z']
fig, ax = plt.subplots(1)
# stc_vec.data[peak_vox] has one row per dipole-moment component
for ori, label in zip(stc_vec.data[peak_vox, :, :], ori_labels):
    ax.plot(stc_vec.times, ori, label='%s component' % label)
ax.legend(loc='lower right')
ax.set(title='Activity per orientation in the peak voxel', xlabel='Time (s)',
       ylabel='Amplitude (a. u.)')
mne.viz.utils.plt_show()
del stc_vec  # free memory
###############################################################################
# Morph the output to fsaverage
# -----------------------------
#
# We can also use volumetric morphing to get the data to fsaverage space. This
# is for example necessary when comparing activity across subjects. Here, we
# will use the scalar beamformer example.
# We pass a :class:`mne.SourceMorph` as the ``src`` argument to
# `mne.VolSourceEstimate.plot`. To save some computational load when applying
# the morph, we will crop the ``stc``:
fetch_fsaverage(subjects_dir)  # ensure fsaverage src exists
fname_fs_src = subjects_dir + '/fsaverage/bem/fsaverage-vol-5-src.fif'
src_fs = mne.read_source_spaces(fname_fs_src)
# Volumetric morph from the subject's source space to fsaverage
morph = mne.compute_source_morph(
    src, subject_from='sample', src_to=src_fs, subjects_dir=subjects_dir,
    niter_sdr=[10, 10, 5], niter_affine=[10, 10, 5],  # just for speed
    verbose=True)
stc_fs = morph.apply(stc)
del stc  # keep only the morphed estimate
stc_fs.plot(
    src=src_fs, mode='stat_map', initial_time=0.085, subjects_dir=subjects_dir,
    clim=dict(kind='value', pos_lims=lims), verbose=True)
###############################################################################
# References
# ----------
#
# .. footbibliography::
#
#
# .. LINKS
#
# .. _`inverted`: https://en.wikipedia.org/wiki/Invertible_matrix
| bsd-3-clause |
uvchik/pvlib-python | pvlib/test/test_forecast.py | 1 | 4065 | from datetime import datetime, timedelta
import inspect
from math import isnan
from pytz import timezone
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_allclose
from conftest import requires_siphon, has_siphon, skip_windows
pytestmark = pytest.mark.skipif(not has_siphon, reason='requires siphon')
from pvlib.location import Location
if has_siphon:
    import requests
    from requests.exceptions import HTTPError
    from xml.etree.ElementTree import ParseError
    from pvlib.forecast import GFS, HRRR_ESRL, HRRR, NAM, NDFD, RAP
    # setup times and location to be tested. Tucson, AZ
    _latitude = 32.2
    _longitude = -110.9
    _tz = 'US/Arizona'
    _start = pd.Timestamp.now(tz=_tz)
    _end = _start + pd.Timedelta(days=1)
    # NOTE(review): wrapping a parametrize value in nested pytest.mark calls
    # is a long-deprecated pytest idiom; modern pytest expects
    # pytest.param(HRRR_ESRL, marks=[...]).  Confirm against the pinned
    # pytest version before changing.
    _modelclasses = [
        GFS, NAM, HRRR, NDFD, RAP,
        skip_windows(
            pytest.mark.xfail(
                pytest.mark.timeout(HRRR_ESRL, timeout=60),
                reason="HRRR_ESRL is unreliable"))]
    _working_models = []
    _variables = ['temp_air', 'wind_speed', 'total_clouds', 'low_clouds',
                  'mid_clouds', 'high_clouds', 'dni', 'dhi', 'ghi',]
    _nonnan_variables = ['temp_air', 'wind_speed', 'total_clouds', 'dni',
                         'dhi', 'ghi',]
else:
    # without siphon there is nothing to parametrize over
    _modelclasses = []
# make a model object for each model class
# get the data for that model and store it in an
# attribute for further testing
@requires_siphon
@pytest.fixture(scope='module', params=_modelclasses)
def model(request):
    # Module scope: each model's data is downloaded once and shared by all
    # tests in this module.
    amodel = request.param()
    # cache the raw download on the instance for dependent tests
    amodel.raw_data = \
        amodel.get_data(_latitude, _longitude, _start, _end)
    return amodel
@requires_siphon
def test_process_data(model):
    """Processed data must be NaN-free for the core output variables."""
    for how in ['liujordan', 'clearsky_scaling']:
        data = model.process_data(model.raw_data, how=how)
        for variable in _nonnan_variables:
            assert not data[variable].isnull().values.any()
@requires_siphon
def test_vert_level():
    """Smoke test: request processed data at an explicit vertical level."""
    amodel = NAM()
    vert_level = 5000
    data = amodel.get_processed_data(_latitude, _longitude, _start, _end,
                                     vert_level=vert_level)
@requires_siphon
def test_datetime():
    """Smoke test: naive datetime objects are accepted as start/end."""
    amodel = NAM()
    start = datetime.now()
    end = start + timedelta(days=1)
    data = amodel.get_processed_data(_latitude, _longitude , start, end)
@requires_siphon
def test_queryvariables():
    """Raw queries can request arbitrary dataset variables by name."""
    amodel = GFS()
    old_variables = amodel.variables
    new_variables = ['u-component_of_wind_height_above_ground']
    data = amodel.get_data(_latitude, _longitude, _start, _end,
                           query_variables=new_variables)
    data['u-component_of_wind_height_above_ground']
@requires_siphon
def test_latest():
    """Constructing a model against the 'latest' dataset must not raise."""
    GFS(set_type='latest')
@requires_siphon
def test_full():
    """Constructing a model against the 'full' dataset must not raise."""
    GFS(set_type='full')
@requires_siphon
def test_temp_convert():
    """kelvin_to_celsius: 273.15 K is exactly 0 degrees Celsius."""
    amodel = GFS()
    data = pd.DataFrame({'temp_air': [273.15]})
    data['temp_air'] = amodel.kelvin_to_celsius(data['temp_air'])
    assert_allclose(data['temp_air'].values, 0.0)
# @requires_siphon
# def test_bounding_box():
#     amodel = GFS()
#     latitude = [31.2,32.2]
#     longitude = [-111.9,-110.9]
#     new_variables = {'temperature':'Temperature_surface'}
#     data = amodel.get_query_data(latitude, longitude, _start, _end,
#                                  variables=new_variables)
@requires_siphon
def test_set_location():
    """Smoke test: set_location with a tz-aware time and scalar coords."""
    amodel = GFS()
    latitude, longitude = 32.2, -110.9
    time = datetime.now(timezone('UTC'))
    amodel.set_location(time, latitude, longitude)
def test_cloud_cover_to_transmittance_linear():
    """Linear map: 0 % clouds -> 0.75 transmittance, 100 % -> 0.0."""
    amodel = GFS()
    assert_allclose(amodel.cloud_cover_to_transmittance_linear(0), 0.75)
    assert_allclose(amodel.cloud_cover_to_transmittance_linear(100), 0.0)
def test_cloud_cover_to_ghi_linear():
    """Clear sky (0 %) keeps the full GHI; full overcast (100 %) leaves
    only the offset fraction (25 % of 1000 -> 250)."""
    model = GFS()
    clear_sky_ghi = 1000
    offset = 25
    for cloud_cover, expected in ((0, 1000), (100, 250)):
        result = model.cloud_cover_to_ghi_linear(cloud_cover, clear_sky_ghi,
                                                 offset=offset)
        assert_allclose(result, expected)
| bsd-3-clause |
pystockhub/book | ch14/03.py | 1 | 1207 | import pandas_datareader.data as web
import datetime
import matplotlib.pyplot as plt
from zipline.api import order_target, record, symbol
from zipline.algorithm import TradingAlgorithm
start = datetime.datetime(2010, 1, 1)
end = datetime.datetime(2016, 3, 29)
# NOTE(review): the Yahoo! Finance endpoint behind pandas-datareader's
# "yahoo" source has been discontinued; this download fails on current
# pandas-datareader versions.
data = web.DataReader("AAPL", "yahoo", start, end)
#plt.plot(data.index, data['Adj Close'])
#plt.show()
# keep only the adjusted close and rename the column to the traded symbol
data = data[['Adj Close']]
data.columns = ['AAPL']
# zipline requires a timezone-aware (UTC) index
data = data.tz_localize('UTC')
#print(data.head())
def initialize(context):
    """Zipline setup hook: runs once before the first bar."""
    context.i = 0  # bar counter; used to wait for enough MA history
    context.sym = symbol('AAPL')
def handle_data(context, data):
    """Per-bar hook: 5/20-day moving-average crossover strategy."""
    context.i += 1
    if context.i < 20:
        return  # not enough history for the 20-day average yet
    ma5 = data.history(context.sym, 'price', 5, '1d').mean()
    ma20 = data.history(context.sym, 'price', 20, '1d').mean()
    # short MA above long MA -> target +1 share, otherwise target -1 share
    if ma5 > ma20:
        order_target(context.sym, 1)
    else:
        order_target(context.sym, -1)
    record(AAPL=data.current(context.sym, "price"), ma5=ma5, ma20=ma20)
# Run the backtest; `result` is a per-day DataFrame of portfolio metrics
# plus the values recorded via record() in handle_data.
algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
result = algo.run(data)
#plt.plot(result.index, result.ma5)
#plt.plot(result.index, result.ma20)
#plt.legend(loc='best')
#plt.show()
#plt.plot(result.index, result.portfolio_value)
#plt.show()
from __future__ import unicode_literals
import warnings

from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
                                   assert_warns_message, assert_raise_message,
                                   clean_warning_registry)
from collections import defaultdict
# BUG FIX: ``Mapping`` moved to ``collections.abc`` in Python 3.3 and the
# old ``collections.Mapping`` alias was removed in Python 3.10, so the
# original ``from collections import Mapping`` breaks on modern Pythons.
try:
    from collections.abc import Mapping
except ImportError:  # Python 2 / Python < 3.3
    from collections import Mapping
from functools import partial
import pickle
from io import StringIO
# Toy corpora used throughout the vectorizer tests: "junk food" documents
# vs. "not junk food" documents, sharing a few common terms ("the",
# "copyright") so stop-word and document-frequency logic can be exercised.
JUNK_FOOD_DOCS = (
    "the pizza pizza beer copyright",
    "the pizza burger beer copyright",
    "the the pizza beer beer copyright",
    "the burger beer beer copyright",
    "the coke burger coke copyright",
    "the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
    "the salad celeri copyright",
    "the salad salad sparkling water copyright",
    "the the celeri celeri copyright",
    "the tomato tomato salad water",
    "the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
    """Accent-fold *s* with strip_accents_unicode, then upper-case it."""
    folded = strip_accents_unicode(s)
    return folded.upper()
def strip_eacute(s):
    """Replace every e-acute (U+00E9) with a plain ASCII 'e'."""
    return s.replace('\xe9', 'e')
def split_tokenize(s):
    """Whitespace tokenizer used as a custom ``tokenizer`` callable."""
    return list(s.split())
def lazy_analyze(s):
    """Degenerate analyzer: every document maps to one constant feature."""
    return ['the_ultimate_feature']
def test_strip_accents():
    """strip_accents_unicode folds accented characters to their base form."""
    # check some classical latin accentuated symbols
    a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
    expected = 'aaaaaaceeee'
    assert_equal(strip_accents_unicode(a), expected)
    a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
    expected = 'iiiinooooouuuuy'
    assert_equal(strip_accents_unicode(a), expected)
    # check some arabic
    a = '\u0625'  # halef with a hamza below
    expected = '\u0627'  # simple halef
    assert_equal(strip_accents_unicode(a), expected)
    # mix letters accentuated and not
    a = "this is \xe0 test"
    expected = 'this is a test'
    assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
    """strip_accents_ascii transliterates to ASCII, dropping characters
    that have no direct ASCII equivalent."""
    # check some classical latin accentuated symbols
    a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
    expected = 'aaaaaaceeee'
    assert_equal(strip_accents_ascii(a), expected)
    a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
    expected = 'iiiinooooouuuuy'
    assert_equal(strip_accents_ascii(a), expected)
    # check some arabic
    a = '\u0625'  # halef with a hamza below
    expected = ''  # halef has no direct ascii match
    assert_equal(strip_accents_ascii(a), expected)
    # mix letters accentuated and not
    a = "this is \xe0 test"
    expected = 'this is a test'
    assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
    """The word analyzer handles accent stripping, file-like input, and
    custom preprocessor/tokenizer callables identically for
    CountVectorizer and HashingVectorizer."""
    for Vectorizer in (CountVectorizer, HashingVectorizer):
        wa = Vectorizer(strip_accents='ascii').build_analyzer()
        text = ("J'ai mang\xe9 du kangourou ce midi, "
                "c'\xe9tait pas tr\xeas bon.")
        expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
                    'etait', 'pas', 'tres', 'bon']
        assert_equal(wa(text), expected)
        text = "This is a test, really.\n\n I met Harry yesterday."
        expected = ['this', 'is', 'test', 'really', 'met', 'harry',
                    'yesterday']
        assert_equal(wa(text), expected)
        # input='file' reads the document from a file-like object
        wa = Vectorizer(input='file').build_analyzer()
        text = StringIO("This is a test with a file-like object!")
        expected = ['this', 'is', 'test', 'with', 'file', 'like',
                    'object']
        assert_equal(wa(text), expected)
        # with custom preprocessor
        wa = Vectorizer(preprocessor=uppercase).build_analyzer()
        text = ("J'ai mang\xe9 du kangourou ce midi, "
                " c'\xe9tait pas tr\xeas bon.")
        expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
                    'ETAIT', 'PAS', 'TRES', 'BON']
        assert_equal(wa(text), expected)
        # with custom tokenizer
        wa = Vectorizer(tokenizer=split_tokenize,
                        strip_accents='ascii').build_analyzer()
        text = ("J'ai mang\xe9 du kangourou ce midi, "
                "c'\xe9tait pas tr\xeas bon.")
        expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
                    "c'etait", 'pas', 'tres', 'bon.']
        assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
    """ngram_range=(1, 2) yields all unigrams followed by all bigrams."""
    wa = CountVectorizer(analyzer="word", strip_accents='unicode',
                         ngram_range=(1, 2)).build_analyzer()
    text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
    expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
                'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
                'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
                'etait pas', 'pas tres', 'tres bon']
    assert_equal(wa(text), expected)
def test_unicode_decode_error():
    """Decoding bytes with a wrong encoding must raise UnicodeDecodeError
    (decode_error defaults to 'strict')."""
    # decode_error default to strict, so this should fail
    # First, encode (as bytes) a unicode string.
    text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
    text_bytes = text.encode('utf-8')
    # Then let the Analyzer try to decode it as ascii. It should fail,
    # because we have given it an incorrect encoding.
    wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, wa, text_bytes)
    ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
                         encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
    """Character n-gram analyzer: spot-check the first and last n-grams
    for string and file-like inputs."""
    cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
                           ngram_range=(3, 6)).build_analyzer()
    text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
    expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
    assert_equal(cnga(text)[:5], expected)
    expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
    assert_equal(cnga(text)[-5:], expected)
    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = ['thi', 'his', 'is ', 's i', ' is']
    assert_equal(cnga(text)[:5], expected)
    expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
    assert_equal(cnga(text)[-5:], expected)
    cnga = CountVectorizer(input='file', analyzer='char',
                           ngram_range=(3, 6)).build_analyzer()
    text = StringIO("This is a test with a file-like object!")
    expected = ['thi', 'his', 'is ', 's i', ' is']
    assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
    """char_wb n-grams are padded with spaces at word boundaries."""
    cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
                           ngram_range=(3, 6)).build_analyzer()
    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = [' th', 'thi', 'his', 'is ', ' thi']
    assert_equal(cnga(text)[:5], expected)
    expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
    assert_equal(cnga(text)[-5:], expected)
    cnga = CountVectorizer(input='file', analyzer='char_wb',
                           ngram_range=(3, 6)).build_analyzer()
    text = StringIO("A test with a file-like object!")
    expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
    assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
    """A user-supplied vocabulary is honored for every supported container
    type (mapping, list, iterator, defaultdict)."""
    vocab = {"pizza": 0, "beer": 1}
    terms = set(vocab.keys())
    # Try a few of the supported types.
    for typ in [dict, list, iter, partial(defaultdict, int)]:
        v = typ(vocab)
        vect = CountVectorizer(vocabulary=v)
        vect.fit(JUNK_FOOD_DOCS)
        if isinstance(v, Mapping):
            # mappings must be preserved exactly, indices included
            assert_equal(vect.vocabulary_, vocab)
        else:
            # other containers only fix the set of terms, not the indices
            assert_equal(set(vect.vocabulary_), terms)
        X = vect.transform(JUNK_FOOD_DOCS)
        assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
    """A custom vocabulary keeps working when the vectorizer is wrapped in
    a Pipeline."""
    what_we_like = ["pizza", "beer"]
    pipe = Pipeline([
        ('count', CountVectorizer(vocabulary=what_we_like)),
        ('tfidf', TfidfTransformer())])
    X = pipe.fit_transform(ALL_FOOD_DOCS)
    assert_equal(set(pipe.named_steps['count'].vocabulary_),
                 set(what_we_like))
    assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
    """A vocabulary mapping two terms to the same index must be rejected."""
    vocab = {"pizza": 0, "beer": 0}
    try:
        CountVectorizer(vocabulary=vocab)
    except ValueError as e:
        assert_in("vocabulary contains repeated indices", str(e).lower())
    else:
        # BUG FIX: the original try/except passed silently when no error
        # was raised (compare test_countvectorizer_empty_vocabulary).
        assert False, "expected ValueError for repeated vocabulary indices"
def test_countvectorizer_custom_vocabulary_gap_index():
    """A vocabulary with non-contiguous indices must be rejected."""
    vocab = {"pizza": 1, "beer": 2}
    try:
        CountVectorizer(vocabulary=vocab)
    except ValueError as e:
        assert_in("doesn't contain index", str(e).lower())
    else:
        # BUG FIX: same silent-pass problem as above.
        assert False, "expected ValueError for a vocabulary with a gap"
def test_countvectorizer_stop_words():
    """get_stop_words resolves 'english', rejects unknown names, and
    passes custom lists through unchanged."""
    cv = CountVectorizer()
    cv.set_params(stop_words='english')
    assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
    cv.set_params(stop_words='_bad_str_stop_')
    assert_raises(ValueError, cv.get_stop_words)
    cv.set_params(stop_words='_bad_unicode_stop_')
    assert_raises(ValueError, cv.get_stop_words)
    stoplist = ['some', 'other', 'words']
    cv.set_params(stop_words=stoplist)
    assert_equal(cv.get_stop_words(), stoplist)
def test_countvectorizer_empty_vocabulary():
    """Fitting must fail loudly when the resulting vocabulary is empty."""
    try:
        vect = CountVectorizer(vocabulary=[])
        vect.fit(["foo"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("empty vocabulary", str(e).lower())
    try:
        v = CountVectorizer(max_df=1.0, stop_words="english")
        # fit on stopwords only
        v.fit(["to be or not to be", "and me too", "and so do you"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
    """Refitting on a different corpus must rebuild the vocabulary."""
    cv = CountVectorizer()
    X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
    X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
    assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
    """With smooth_idf=True, tf-idf stays finite and l2-normalized even
    when a feature is zero in every document."""
    X = [[1, 1, 1],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=True, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())
    # check normalization
    assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # this is robust to features with only zeros
    X = [[1, 1, 0],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=True, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
    """Without smoothing, an all-zero feature triggers a divide-by-zero
    RuntimeWarning inside the idf computation."""
    X = [[1, 1, 1],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())
    # check normalization
    assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing make IDF fragile in the presence of feature with
    # only zeros
    X = [[1, 1, 0],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm='l2')
    clean_warning_registry()
    # probe whether this numpy build emits a warning for float division by
    # zero; if it does not, the assertion below cannot be meaningful
    with warnings.catch_warnings(record=True) as w:
        1. / np.array([0.])
        numpy_provides_div0_warning = len(w) == 1
    in_warning_message = 'divide by zero'
    tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
                                 tr.fit_transform, X).toarray()
    if not numpy_provides_div0_warning:
        raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
    """sublinear_tf applies 1 + log(tf): concave, so growth slows down."""
    X = [[1], [2], [3]]
    tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
    tfidf = tr.fit_transform(X).toarray()
    assert_equal(tfidf[0], 1)
    assert_greater(tfidf[1], tfidf[0])
    assert_greater(tfidf[2], tfidf[1])
    assert_less(tfidf[1], 2)
    assert_less(tfidf[2], 3)
def test_vectorizer():
    """End-to-end checks for CountVectorizer, TfidfTransformer and
    TfidfVectorizer on the toy food corpus, including error paths."""
    # raw documents as an iterator
    train_data = iter(ALL_FOOD_DOCS[:-1])
    test_data = [ALL_FOOD_DOCS[-1]]
    n_train = len(ALL_FOOD_DOCS) - 1
    # test without vocabulary
    v1 = CountVectorizer(max_df=0.5)
    counts_train = v1.fit_transform(train_data)
    if hasattr(counts_train, 'tocsr'):
        counts_train = counts_train.tocsr()
    assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v1 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # compare that the two vectorizer give the same output on the test sample
    for v in (v1, v2):
        counts_test = v.transform(test_data)
        if hasattr(counts_test, 'tocsr'):
            counts_test = counts_test.tocsr()
        vocabulary = v.vocabulary_
        assert_equal(counts_test[0, vocabulary["salad"]], 1)
        assert_equal(counts_test[0, vocabulary["tomato"]], 1)
        assert_equal(counts_test[0, vocabulary["water"]], 1)
        # stop word from the fixed list
        assert_false("the" in vocabulary)
        # stop word found automatically by the vectorizer DF thresholding
        # words that are high frequent across the complete corpus are likely
        # to be not informative (either real stop words of extraction
        # artifacts)
        assert_false("copyright" in vocabulary)
        # not present in the sample
        assert_equal(counts_test[0, vocabulary["coke"]], 0)
        assert_equal(counts_test[0, vocabulary["burger"]], 0)
        assert_equal(counts_test[0, vocabulary["beer"]], 0)
        assert_equal(counts_test[0, vocabulary["pizza"]], 0)
    # test tf-idf
    t1 = TfidfTransformer(norm='l1')
    tfidf = t1.fit(counts_train).transform(counts_train).toarray()
    assert_equal(len(t1.idf_), len(v1.vocabulary_))
    assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
    # test tf-idf with new data
    tfidf_test = t1.transform(counts_test).toarray()
    assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
    # test tf alone
    t2 = TfidfTransformer(norm='l1', use_idf=False)
    tf = t2.fit(counts_train).transform(counts_train).toarray()
    assert_equal(t2.idf_, None)
    # test idf transform with unlearned idf vector
    t3 = TfidfTransformer(use_idf=True)
    assert_raises(ValueError, t3.transform, counts_train)
    # test idf transform with incompatible n_features
    X = [[1, 1, 5],
         [1, 1, 0]]
    t3.fit(X)
    X_incompt = [[1, 3],
                 [1, 3]]
    assert_raises(ValueError, t3.transform, X_incompt)
    # L1-normalized term frequencies sum to one
    assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
    # test the direct tfidf vectorizer
    # (equivalent to term count vectorizer + tfidf transformer)
    train_data = iter(ALL_FOOD_DOCS[:-1])
    tv = TfidfVectorizer(norm='l1')
    tv.max_df = v1.max_df
    tfidf2 = tv.fit_transform(train_data).toarray()
    assert_false(tv.fixed_vocabulary_)
    assert_array_almost_equal(tfidf, tfidf2)
    # test the direct tfidf vectorizer with new data
    tfidf_test2 = tv.transform(test_data).toarray()
    assert_array_almost_equal(tfidf_test, tfidf_test2)
    # test transform on unfitted vectorizer with empty vocabulary
    v3 = CountVectorizer(vocabulary=None)
    assert_raises(ValueError, v3.transform, train_data)
    # ascii preprocessor?
    v3.set_params(strip_accents='ascii', lowercase=False)
    assert_equal(v3.build_preprocessor(), strip_accents_ascii)
    # error on bad strip_accents param
    v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
    assert_raises(ValueError, v3.build_preprocessor)
    # error with bad analyzer type
    # BUG FIX: the original line was ``v3.set_params = '_invalid...'`` which
    # overwrote the set_params *method* with a string instead of calling it,
    # so the invalid-analyzer branch of build_analyzer was never exercised.
    v3.set_params(analyzer='_invalid_analyzer_type_')
    assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
    """Attribute assignment on TfidfVectorizer must propagate to the inner
    TfidfTransformer (``_tfidf``)."""
    tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
                         sublinear_tf=False)
    tv.norm = 'l1'
    assert_equal(tv._tfidf.norm, 'l1')
    tv.use_idf = True
    assert_true(tv._tfidf.use_idf)
    tv.smooth_idf = True
    assert_true(tv._tfidf.smooth_idf)
    tv.sublinear_tf = True
    assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
    """HashingVectorizer: output shape/dtype, value bounds, and per-row
    normalization for the default (l2) and l1-normalized configurations."""
    v = HashingVectorizer()
    X = v.transform(ALL_FOOD_DOCS)
    token_nnz = X.nnz
    assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
    assert_equal(X.dtype, v.dtype)
    # By default the hashed values receive a random sign and l2 normalization
    # makes the feature values bounded
    assert_true(np.min(X.data) > -1)
    assert_true(np.min(X.data) < 0)
    assert_true(np.max(X.data) > 0)
    assert_true(np.max(X.data) < 1)
    # Check that the rows are normalized
    # BUG FIX: the original loop checked ``X[0]`` on every iteration; it
    # must check row ``i``.
    for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
    # Check vectorization with some non-default parameters
    v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
    X = v.transform(ALL_FOOD_DOCS)
    assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
    assert_equal(X.dtype, v.dtype)
    # ngrams generate more non zeros
    ngrams_nnz = X.nnz
    assert_true(ngrams_nnz > token_nnz)
    assert_true(ngrams_nnz < 2 * token_nnz)
    # makes the feature values bounded
    assert_true(np.min(X.data) > 0)
    assert_true(np.max(X.data) < 1)
    # Check that the rows are normalized (l1 this time)
    # BUG FIX: same ``X[0]`` -> ``X[i]`` fix as above.
    for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
    """get_feature_names returns the sorted vocabulary, aligned with the
    column indices stored in ``vocabulary_``."""
    cv = CountVectorizer(max_df=0.5)
    # test for Value error on unfitted/empty vocabulary
    assert_raises(ValueError, cv.get_feature_names)
    X = cv.fit_transform(ALL_FOOD_DOCS)
    n_samples, n_features = X.shape
    assert_equal(len(cv.vocabulary_), n_features)
    feature_names = cv.get_feature_names()
    assert_equal(len(feature_names), n_features)
    assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
                        'salad', 'sparkling', 'tomato', 'water'],
                       feature_names)
    # feature name order must match the column indices
    for idx, name in enumerate(feature_names):
        assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
    """max_features keeps only the most frequent terms; everything else is
    reported in ``stop_words_``."""
    vec_factories = (
        CountVectorizer,
        TfidfVectorizer,
    )
    expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
    expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
                               u'sparkling', u'water', u'the'])
    for vec_factory in vec_factories:
        # test bounded number of extracted features
        vectorizer = vec_factory(max_df=0.6, max_features=4)
        vectorizer.fit(ALL_FOOD_DOCS)
        assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
        assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
    """max_features must select by frequency, not arbitrarily."""
    # Regression test: max_features didn't work correctly in 0.14.
    cv_1 = CountVectorizer(max_features=1)
    cv_3 = CountVectorizer(max_features=3)
    cv_None = CountVectorizer(max_features=None)
    counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
    counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
    counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
    features_1 = cv_1.get_feature_names()
    features_3 = cv_3.get_feature_names()
    features_None = cv_None.get_feature_names()
    # The most common feature is "the", with frequency 7.
    assert_equal(7, counts_1.max())
    assert_equal(7, counts_3.max())
    assert_equal(7, counts_None.max())
    # The most common feature should be the same
    assert_equal("the", features_1[np.argmax(counts_1)])
    assert_equal("the", features_3[np.argmax(counts_3)])
    assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
    """Terms above max_df (float fraction or absolute int count) move from
    the vocabulary to ``stop_words_``."""
    test_data = ['abc', 'dea', 'eat']
    vect = CountVectorizer(analyzer='char', max_df=1.0)
    vect.fit(test_data)
    assert_true('a' in vect.vocabulary_.keys())
    assert_equal(len(vect.vocabulary_.keys()), 6)
    assert_equal(len(vect.stop_words_), 0)
    vect.max_df = 0.5  # 0.5 * 3 documents -> max_doc_count == 1.5
    vect.fit(test_data)
    assert_true('a' not in vect.vocabulary_.keys())  # {ae} ignored
    assert_equal(len(vect.vocabulary_.keys()), 4)  # {bcdt} remain
    assert_true('a' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 2)
    vect.max_df = 1  # an int is an absolute document count
    vect.fit(test_data)
    assert_true('a' not in vect.vocabulary_.keys())  # {ae} ignored
    assert_equal(len(vect.vocabulary_.keys()), 4)  # {bcdt} remain
    assert_true('a' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
    """Terms below min_df (float fraction or absolute int count) are
    dropped into ``stop_words_``."""
    test_data = ['abc', 'dea', 'eat']
    vect = CountVectorizer(analyzer='char', min_df=1)
    vect.fit(test_data)
    assert_true('a' in vect.vocabulary_.keys())
    assert_equal(len(vect.vocabulary_.keys()), 6)
    assert_equal(len(vect.stop_words_), 0)
    vect.min_df = 2
    vect.fit(test_data)
    assert_true('c' not in vect.vocabulary_.keys())  # {bcdt} ignored
    assert_equal(len(vect.vocabulary_.keys()), 2)  # {ae} remain
    assert_true('c' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 4)
    vect.min_df = 0.8  # 0.8 * 3 documents -> min_doc_count == 2.4
    vect.fit(test_data)
    assert_true('c' not in vect.vocabulary_.keys())  # {bcdet} ignored
    assert_equal(len(vect.vocabulary_.keys()), 1)  # {a} remains
    assert_true('c' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
    """binary=True records presence/absence instead of raw counts, and the
    output dtype is configurable."""
    # by default multiple occurrences are counted as longs
    test_data = ['aaabc', 'abbde']
    vect = CountVectorizer(analyzer='char', max_df=1.0)
    X = vect.fit_transform(test_data).toarray()
    assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
    assert_array_equal([[3, 1, 1, 0, 0],
                        [1, 2, 0, 1, 1]], X)
    # using boolean features, we can fetch the binary occurrence info
    # instead.
    vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
    X = vect.fit_transform(test_data).toarray()
    assert_array_equal([[1, 1, 1, 0, 0],
                        [1, 1, 0, 1, 1]], X)
    # check the ability to change the dtype
    vect = CountVectorizer(analyzer='char', max_df=1.0,
                           binary=True, dtype=np.float32)
    X_sparse = vect.fit_transform(test_data)
    assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
    """Same count-vs-binary behaviour for HashingVectorizer with
    non_negative=True and no normalization."""
    # by default multiple occurrences are counted as longs
    test_data = ['aaabc', 'abbde']
    vect = HashingVectorizer(analyzer='char', non_negative=True,
                             norm=None)
    X = vect.transform(test_data)
    assert_equal(np.max(X[0:1].data), 3)
    assert_equal(np.max(X[1:2].data), 2)
    assert_equal(X.dtype, np.float64)
    # using boolean features, we can fetch the binary occurrence info
    # instead.
    vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
                             norm=None)
    X = vect.transform(test_data)
    assert_equal(np.max(X.data), 1)
    assert_equal(X.dtype, np.float64)
    # check the ability to change the dtype
    # NOTE(review): float64 appears to be the default dtype already, so this
    # block does not actually exercise a dtype *change*; consider float32.
    vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
                             norm=None, dtype=np.float64)
    X = vect.transform(test_data)
    assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
    """inverse_transform recovers each document's set of analyzed terms,
    for both sparse and dense transformed input."""
    # raw documents
    data = ALL_FOOD_DOCS
    for vectorizer in (TfidfVectorizer(), CountVectorizer()):
        transformed_data = vectorizer.fit_transform(data)
        inversed_data = vectorizer.inverse_transform(transformed_data)
        analyze = vectorizer.build_analyzer()
        for doc, inversed_terms in zip(data, inversed_data):
            # compare as sorted unique term sets: order and multiplicity
            # are not preserved by inverse_transform
            terms = np.sort(np.unique(analyze(doc)))
            inversed_terms = np.sort(np.unique(inversed_terms))
            assert_array_equal(terms, inversed_terms)
        # Test that inverse_transform also works with numpy arrays
        transformed_data = transformed_data.toarray()
        inversed_data2 = vectorizer.inverse_transform(transformed_data)
        for terms, terms2 in zip(inversed_data, inversed_data2):
            assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
    """Grid search over a CountVectorizer+LinearSVC pipeline reaches 100 %
    accuracy on the toy corpus and selects the unigram model."""
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
    # split the dataset for model development and final evaluation
    train_data, test_data, target_train, target_test = train_test_split(
        data, target, test_size=.2, random_state=0)
    pipeline = Pipeline([('vect', CountVectorizer()),
                         ('svc', LinearSVC())])
    parameters = {
        'vect__ngram_range': [(1, 1), (1, 2)],
        'svc__loss': ('hinge', 'squared_hinge')
    }
    # find the best parameters for both the feature extraction and the
    # classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
    # Check that the best model found by grid search is 100% correct on the
    # held out evaluation set.
    pred = grid_search.fit(train_data, target_train).predict(test_data)
    assert_array_equal(pred, target_test)
    # on this toy dataset bigram representation which is used in the last of
    # the grid_search is considered the best estimator since they all converge
    # to 100% accuracy models
    assert_equal(grid_search.best_score_, 1.0)
    best_vectorizer = grid_search.best_estimator_.named_steps['vect']
    assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
    """Same grid-selection check with TfidfVectorizer, also searching over
    the normalization parameter."""
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
    # split the dataset for model development and final evaluation
    train_data, test_data, target_train, target_test = train_test_split(
        data, target, test_size=.1, random_state=0)
    pipeline = Pipeline([('vect', TfidfVectorizer()),
                         ('svc', LinearSVC())])
    parameters = {
        'vect__ngram_range': [(1, 1), (1, 2)],
        'vect__norm': ('l1', 'l2'),
        'svc__loss': ('hinge', 'squared_hinge'),
    }
    # find the best parameters for both the feature extraction and the
    # classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
    # Check that the best model found by grid search is 100% correct on the
    # held out evaluation set.
    pred = grid_search.fit(train_data, target_train).predict(test_data)
    assert_array_equal(pred, target_test)
    # on this toy dataset bigram representation which is used in the last of
    # the grid_search is considered the best estimator since they all converge
    # to 100% accuracy models
    assert_equal(grid_search.best_score_, 1.0)
    best_vectorizer = grid_search.best_estimator_.named_steps['vect']
    assert_equal(best_vectorizer.ngram_range, (1, 1))
    assert_equal(best_vectorizer.norm, 'l2')
    assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
    """Cross-validated pipeline scores 100 % on every fold of the toy
    corpus."""
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
    pipeline = Pipeline([('vect', TfidfVectorizer()),
                         ('svc', LinearSVC())])
    cv_scores = cross_val_score(pipeline, data, target, cv=3)
    assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
    # tests that the count vectorizer works with cyrillic.
    # NOTE(review): with ``unicode_literals`` in effect, these \xNN escapes
    # are the UTF-8 *bytes* of the cyrillic text expressed as latin-1 code
    # points (mojibake), not real cyrillic characters.  The token-count
    # assertions below still hold, but the literal is not what this comment
    # suggests — confirm before relying on it as a cyrillic test.
    document = (
        "\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
        "\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
        "\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
        "\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
        "\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
        "\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
        "\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
        "\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
        "\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
        "\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
        "\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
        "\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
        "\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
        "\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
        "\x8f.")
    vect = CountVectorizer()
    X_counted = vect.fit_transform([document])
    assert_equal(X_counted.shape, (1, 15))
    vect = HashingVectorizer(norm=None, non_negative=True)
    X_hashed = vect.transform([document])
    assert_equal(X_hashed.shape, (1, 2 ** 20))
    # No collisions on such a small dataset
    assert_equal(X_counted.nnz, X_hashed.nnz)
    # When norm is None and non_negative, the tokens are counted up to
    # collisions
    assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
    # non regression smoke test for inheritance issues
    # fit_transform and transform must agree when a vocabulary is fixed
    vocabulary = ['pizza', 'celeri']
    vect = TfidfVectorizer(vocabulary=vocabulary)
    X_1 = vect.fit_transform(ALL_FOOD_DOCS)
    X_2 = vect.transform(ALL_FOOD_DOCS)
    assert_array_almost_equal(X_1.toarray(), X_2.toarray())
    assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
    # Every vectorizer flavour/configuration (fitted and unfitted) must
    # survive a pickle round-trip with identical params and output.
    instances = [
        HashingVectorizer(),
        HashingVectorizer(norm='l1'),
        HashingVectorizer(binary=True),
        HashingVectorizer(ngram_range=(1, 2)),
        CountVectorizer(),
        CountVectorizer(preprocessor=strip_tags),
        CountVectorizer(analyzer=lazy_analyze),
        CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
        CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
        TfidfVectorizer(),
        TfidfVectorizer(analyzer=lazy_analyze),
        TfidfVectorizer().fit(JUNK_FOOD_DOCS),
    ]

    for orig in instances:
        s = pickle.dumps(orig)
        copy = pickle.loads(s)
        # class, hyper-parameters and transform output must all round-trip
        assert_equal(type(copy), orig.__class__)
        assert_equal(copy.get_params(), orig.get_params())
        assert_array_equal(
            copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
            orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
    # Ensure that deleting the stop_words_ attribute doesn't affect transform
    fitted_vectorizers = (
        TfidfVectorizer().fit(JUNK_FOOD_DOCS),
        CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
        CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
    )

    for vect in fitted_vectorizers:
        # reference output before touching stop_words_
        vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()

        # setting the attribute to None must not change transform
        vect.stop_words_ = None
        stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()

        # nor must removing the attribute entirely
        delattr(vect, 'stop_words_')
        stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()

        assert_array_equal(stop_None_transform, vect_transform)
        assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
    # A fitted TfidfTransformer must survive a pickle round-trip with
    # the same class and identical transform output.
    term_counts = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
    original = TfidfTransformer().fit(term_counts)
    restored = pickle.loads(pickle.dumps(original))
    assert_equal(type(restored), original.__class__)
    assert_array_equal(
        restored.fit_transform(term_counts).toarray(),
        original.fit_transform(term_counts).toarray())
def test_non_unique_vocab():
    # A user-supplied vocabulary containing duplicates must be rejected.
    duplicated_vocab = ['a', 'b', 'c', 'a', 'a']
    vectorizer = CountVectorizer(vocabulary=duplicated_vocab)
    assert_raises(ValueError, vectorizer.fit, [])
def test_hashingvectorizer_nan_in_docs():
    # np.nan can appear when using pandas to load text fields from a csv file
    # with missing values.
    message = "np.nan is an invalid document, expected byte or unicode string."
    exception = ValueError

    def func():
        # transforming a corpus with a missing value must raise ValueError
        hv = HashingVectorizer()
        hv.fit_transform(['hello world', np.nan, 'hello hello'])

    assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
    # Non-regression test: TfidfVectorizer used to ignore its "binary" param.
    # With idf and norm disabled, binary output is just 0/1 occurrence.
    v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
    assert_true(v.binary)

    X = v.fit_transform(['hello world', 'hello hello']).toarray()
    assert_array_equal(X.ravel(), [1, 1, 1, 0])
    # transform after fit must behave the same as fit_transform
    X2 = v.transform(['hello world', 'hello hello']).toarray()
    assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
    # The public idf_ attribute must mirror the inner transformer's idf_.
    vectorizer = TfidfVectorizer(use_idf=True)
    vectorizer.fit(JUNK_FOOD_DOCS)
    assert_array_almost_equal(vectorizer.idf_, vectorizer._tfidf.idf_)
def test_vectorizer_vocab_clone():
    # A clone of a vocabulary-constrained vectorizer must learn the exact
    # same vocabulary_ as the original after fitting.
    vect_vocab = TfidfVectorizer(vocabulary=["the"])
    vect_vocab_clone = clone(vect_vocab)
    vect_vocab.fit(ALL_FOOD_DOCS)
    vect_vocab_clone.fit(ALL_FOOD_DOCS)
    assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
minglong-cse2016/stupidlang | docs/conf.py | 1 | 8724 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- Hack for ReadTheDocs ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
import os
# True when building on the ReadTheDocs service (env var set by RTD).
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

if on_rtd:
    import inspect
    from sphinx import apidoc

    # Directory containing this conf.py, resolved at runtime.
    __location__ = os.path.join(os.getcwd(), os.path.dirname(
        inspect.getfile(inspect.currentframe())))

    output_dir = os.path.join(__location__, "../docs/api")
    module_dir = os.path.join(__location__, "../stupidlang")
    cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
    cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
    # Run sphinx-apidoc programmatically since RTD does not run it itself.
    # NOTE(review): apidoc.main's expected argv changed in newer Sphinx
    # releases — confirm against the pinned Sphinx version.
    apidoc.main(cmd_line.split(" "))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Sphinx extensions enabled for this project.
# NOTE(review): 'sphinx.ext.pngmath' was removed in Sphinx 1.8 in favour of
# 'sphinx.ext.imgmath' — confirm against the pinned Sphinx version.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
              'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
              'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.pngmath',
              'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document (root of the toctree).
master_doc = 'index'

# General information about the project.
project = u'stupidlang'
copyright = u'2016, minglong-cse2016'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''  # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = ''  # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']  # never scan the build output itself
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'  # Sphinx builtin theme
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# Prefer the installed package's version string when importable; silently
# keep the placeholder otherwise (e.g. docs built without installation).
# NOTE(review): this assigns the FULL version to `release` only; `version`
# above keeps its placeholder — confirm that is intended.
try:
    from stupidlang import __version__ as version
except ImportError:
    pass
else:
    release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'stupidlang-doc'
# -- Options for LaTeX output --------------------------------------------------
# LaTeX builder customization; all options left at their defaults.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'user_guide.tex', u'stupidlang Documentation',
     u'minglong-cse2016', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
# intersphinx cross-references into other projects' documentation.
# Python inventory is pinned to the interpreter running the build.
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
    # Fixed dead/redirecting inventory URLs: sphinx.pocoo.org and
    # matplotlib.sourceforge.net no longer exist; all targets use https now.
    'sphinx': ('https://www.sphinx-doc.org/en/master', None),
    'python': ('https://docs.python.org/' + python_version, None),
    'matplotlib': ('https://matplotlib.org/stable', None),
    'numpy': ('https://numpy.org/doc/stable', None),
    'sklearn': ('https://scikit-learn.org/stable', None),
    'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
    'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
}
| mit |
jimsrc/seatos | etc/n_CR/individual/check_pos.py | 1 | 2610 | #!/usr/bin/env ipython
from pylab import *
#from load_data import sh, mc, cr
import func_data as fd
import share.funcs as ff
#import CythonSrc.funcs as ff
import matplotlib.patches as patches
import matplotlib.transforms as transforms
from os import environ as env
from os.path import isfile, isdir
from h5py import File as h5
#++++++++++++++++++++++++++++++++++++++++++++++++++++
class Lim:
    """A numeric range [min, max] divided into ``n`` equal bins."""

    def __init__(self, min_, max_, n):
        # Store the bounds and the number of subdivisions as given.
        self.min = min_
        self.max = max_
        self.n = n

    def delta(self):
        """Return the width of one bin (total span over n, as a float)."""
        span = self.max - self.min
        return span / float(self.n)
"""
dir_inp_sh = '{dir}/sheaths.icmes/ascii/MCflag0.1.2.2H/woShiftCorr/_auger_' .format(dir=env['MEAN_PROFILES_ACE'])
dir_inp_mc = '{dir}/icmes/ascii/MCflag0.1.2.2H/woShiftCorr/_auger_' .format(dir=env['MEAN_PROFILES_ACE'])
#dir_inp_sh = '{dir}/sheaths/ascii/MCflag2/wShiftCorr/_test_Vmc_' .format(dir=env['MEAN_PROFILES_ACE'])
#dir_inp_mc = '{dir}/mcs/ascii/MCflag2/wShiftCorr/_test_Vmc_' .format(dir=env['MEAN_PROFILES_ACE'])
fname_inp_part = 'MCflag0.1.2.2H_2before.4after_fgap0.2_WangNaN' # '_vlo.100.0.vhi.375.0_CRs.Auger_BandScals.txt'
#fname_inp_part = 'MCflag2_2before.4after_fgap0.2_Wang90.0'
"""
#CRstr = 'CRs.Auger_BandScals'
#CRstr = 'CRs.Auger_BandMuons'
CRstr = 'CRs.Auger_scals'
vlo, vhi = 100., 375.
#vlo, vhi = 375., 450.
#vlo, vhi = 450., 3000.
dir_inp = '../out/individual'
fname_inp = '{dir}/_nCR_vlo.{lo:5.1f}.vhi.{hi:4.1f}_{name}.h5' .format(dir=dir_inp, lo=vlo, hi=vhi, name=CRstr)
#++++++++++++++++++++++++++++++++++++++++++++++++ ajuste
#--- parameter boundaries && number of evaluations
fi = h5(fname_inp, 'r') # input
fpar = {} # fit parameters
for pname in fi.keys():
if pname=='grids':
#fpar[pname] = {}
for pname_ in fi['grids'].keys():
# grilla de exploracion con
# formato: [min, max, delta, nbin]
fpar['grids/'+pname_] = fi['grids/'+pname_][...]
continue
fpar[pname] = fi[pname].value
#fi[pname] = fit.par[pname]
#print fpar
print " ---> vlo, vhi: ", vlo, vhi
for nm in fpar.keys():
if nm.startswith('grids'):
continue
min_, max_ = fpar['grids/'+nm][0], fpar['grids/'+nm][1]
delta = fpar['grids/'+nm][2]
v = fpar[nm]
pos = (v - min_)/(max_-min_)
d = delta/(max_-min_)
print nm+': ', pos, d, '; \t', v
"""
#--- slice object
rranges = (
slice(tau.min, tau.max, tau.delta()),
slice(q.min, q.max, q.delta()),
slice(off.min, off.max, off.delta()),
slice(bp.min, bp.max, bp.delta()),
slice(bo.min, bo.max, bo.delta()),
)
"""
#EOF
| mit |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/tseries/index.py | 2 | 71968 | # pylint: disable=E1101
import operator
from datetime import time, datetime
from datetime import timedelta
import numpy as np
import warnings
from pandas.core.common import (_NS_DTYPE, _INT64_DTYPE,
_values_from_object, _maybe_box,
ABCSeries, is_integer, is_float)
from pandas.core.index import Index, Int64Index, Float64Index
import pandas.compat as compat
from pandas.compat import u
from pandas.tseries.frequencies import (
to_offset, get_period_alias,
Resolution)
from pandas.tseries.base import DatetimeIndexOpsMixin
from pandas.tseries.offsets import DateOffset, generate_range, Tick, CDay
from pandas.tseries.tools import parse_time_string, normalize_date
from pandas.util.decorators import cache_readonly, deprecate_kwarg
import pandas.core.common as com
import pandas.tseries.offsets as offsets
import pandas.tseries.tools as tools
from pandas.lib import Timestamp
import pandas.lib as lib
import pandas.tslib as tslib
import pandas._period as period
import pandas.algos as _algos
import pandas.index as _index
def _utc():
    """Return the pytz UTC singleton (imported lazily to avoid a hard
    import-time dependency on pytz)."""
    from pytz import utc
    return utc
# -------- some conversion wrapper functions
def _field_accessor(name, field, docstring=None):
    """Build a read-only property that extracts a datetime field
    (e.g. 'year', 'is_month_start') from the index's int64 values."""
    def f(self):
        values = self.asi8
        if self.tz is not None:
            utc = _utc()
            if self.tz is not utc:
                # field extraction must happen in local wall time
                values = self._local_timestamps()

        if field in ['is_month_start', 'is_month_end',
                     'is_quarter_start', 'is_quarter_end',
                     'is_year_start', 'is_year_end']:
            # boundary fields depend on the frequency's anchor month
            month_kw = self.freq.kwds.get('startingMonth', self.freq.kwds.get('month', 12)) if self.freq else 12
            result = tslib.get_start_end_field(values, field, self.freqstr, month_kw)
        else:
            result = tslib.get_date_field(values, field)
        # NaT positions are masked (as float64 NaN) in the output
        return self._maybe_mask_results(result, convert='float64')

    f.__name__ = name
    f.__doc__ = docstring
    return property(f)
def _dt_index_cmp(opname, nat_result=False):
    """
    Wrap comparison operations to convert datetime-like to datetime64.

    `nat_result` is the boolean produced for NaT positions (True only
    for __ne__, since NaT compares unequal to everything).
    """
    def wrapper(self, other):
        func = getattr(super(DatetimeIndex, self), opname)
        if isinstance(other, datetime) or isinstance(other, compat.string_types):
            # scalar path: coerce to a tz-compatible datetime64 scalar
            other = _to_m8(other, tz=self.tz)
            result = func(other)
            if com.isnull(other):
                result.fill(nat_result)
        else:
            # array-like path
            if isinstance(other, list):
                other = DatetimeIndex(other)
            elif not isinstance(other, (np.ndarray, Index, ABCSeries)):
                other = _ensure_datetime64(other)
            result = func(np.asarray(other))
            result = _values_from_object(result)

            # mask positions where the OTHER side is NaT
            if isinstance(other, Index):
                o_mask = other.values.view('i8') == tslib.iNaT
            else:
                o_mask = other.view('i8') == tslib.iNaT
            if o_mask.any():
                result[o_mask] = nat_result

        # mask positions where SELF is NaT
        mask = self.asi8 == tslib.iNaT
        if mask.any():
            result[mask] = nat_result

        # support of bool dtype indexers
        if com.is_bool_dtype(result):
            return result
        return Index(result)

    return wrapper
def _ensure_datetime64(other):
if isinstance(other, np.datetime64):
return other
raise TypeError('%s type object %s' % (type(other), str(other)))
_midnight = time(0, 0)
def _new_DatetimeIndex(cls, d):
    """ This is called upon unpickling, rather than the default which doesn't have arguments
    and breaks __new__ """

    # simply set the tz
    # data are already in UTC, so bypassing __new__ avoids re-localizing
    tz = d.pop('tz', None)
    result = cls.__new__(cls, **d)
    result.tz = tz
    return result
class DatetimeIndex(DatetimeIndexOpsMixin, Int64Index):
"""
Immutable ndarray of datetime64 data, represented internally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency information.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or pandas offset object, optional
One of pandas date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
tz : pytz.timezone or dateutil.tz.tzfile
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
- 'infer' will attempt to infer fall dst-transition hours based on order
- bool-ndarray where True signifies a DST time, False signifies
a non-DST time (note that this flag is only applicable for ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous times
infer_dst : boolean, default False (DEPRECATED)
Attempt to infer fall dst-transition hours based on order
name : object
Name to be stored in the index
"""
    _typ = 'datetimeindex'
    _join_precedence = 10  # datetime joins win over other index types

    def _join_i8_wrapper(joinf, **kwargs):
        # wrap an int64 join routine so results are viewed as M8[ns]
        return DatetimeIndexOpsMixin._join_i8_wrapper(joinf, dtype='M8[ns]', **kwargs)

    _inner_indexer = _join_i8_wrapper(_algos.inner_join_indexer_int64)
    _outer_indexer = _join_i8_wrapper(_algos.outer_join_indexer_int64)
    _left_indexer = _join_i8_wrapper(_algos.left_join_indexer_int64)
    _left_indexer_unique = _join_i8_wrapper(
        _algos.left_join_indexer_unique_int64, with_indexers=False)
    _arrmap = None

    # comparison operators with NaT-aware semantics (NaT != everything)
    __eq__ = _dt_index_cmp('__eq__')
    __ne__ = _dt_index_cmp('__ne__', nat_result=True)
    __lt__ = _dt_index_cmp('__lt__')
    __gt__ = _dt_index_cmp('__gt__')
    __le__ = _dt_index_cmp('__le__')
    __ge__ = _dt_index_cmp('__ge__')

    _engine_type = _index.DatetimeEngine

    # class-level defaults, overwritten per instance in _simple_new
    tz = None
    offset = None
    _comparables = ['name','freqstr','tz']
    _attributes = ['name','freq','tz']
    # names exposed as datetime-like accessors
    _datetimelike_ops = ['year','month','day','hour','minute','second',
                         'weekofyear','week','dayofweek','weekday','dayofyear','quarter', 'days_in_month', 'daysinmonth',
                         'date','time','microsecond','nanosecond','is_month_start','is_month_end',
                         'is_quarter_start','is_quarter_end','is_year_start','is_year_end','tz','freq']
    _is_numeric_dtype = False
    @deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
                     mapping={True: 'infer', False: 'raise'})
    def __new__(cls, data=None,
                freq=None, start=None, end=None, periods=None,
                copy=False, name=None, tz=None,
                verify_integrity=True, normalize=False,
                closed=None, ambiguous='raise', **kwargs):
        # Construct either from explicit data, or generate a regular range
        # from start/end/periods/freq when data is None.
        dayfirst = kwargs.pop('dayfirst', None)
        yearfirst = kwargs.pop('yearfirst', None)

        # resolve freq: explicit offset, parseable string, or 'infer'
        freq_infer = False
        if not isinstance(freq, DateOffset):
            # if a passed freq is None, don't infer automatically
            if freq != 'infer':
                freq = to_offset(freq)
            else:
                freq_infer = True
                freq = None

        if periods is not None:
            if is_float(periods):
                periods = int(periods)
            elif not is_integer(periods):
                raise ValueError('Periods must be a number, got %s' %
                                 str(periods))

        if data is None and freq is None:
            raise ValueError("Must provide freq argument if no data is "
                             "supplied")

        # no data: generate a regular range from start/end/periods
        if data is None:
            return cls._generate(start, end, periods, name, freq,
                                 tz=tz, normalize=normalize, closed=closed,
                                 ambiguous=ambiguous)

        if not isinstance(data, (np.ndarray, Index, ABCSeries)):
            if np.isscalar(data):
                raise ValueError('DatetimeIndex() must be called with a '
                                 'collection of some kind, %s was passed'
                                 % repr(data))

            # other iterable of some kind
            if not isinstance(data, (list, tuple)):
                data = list(data)

            data = np.asarray(data, dtype='O')

            # try a few ways to make it datetime64
            if lib.is_string_array(data):
                data = _str_to_dt_array(data, freq, dayfirst=dayfirst,
                                        yearfirst=yearfirst)
            else:
                data = tools.to_datetime(data, errors='raise')
                data.offset = freq
                if isinstance(data, DatetimeIndex):
                    if name is not None:
                        data.name = name

                    if tz is not None:
                        return data.tz_localize(tz, ambiguous=ambiguous)

                    return data

        if issubclass(data.dtype.type, compat.string_types):
            data = _str_to_dt_array(data, freq, dayfirst=dayfirst,
                                    yearfirst=yearfirst)

        # coerce the array to int64-backed M8[ns] values in `subarr`
        if issubclass(data.dtype.type, np.datetime64):
            if isinstance(data, ABCSeries):
                data = data.values
            if isinstance(data, DatetimeIndex):
                if tz is None:
                    tz = data.tz

                subarr = data.values

                if freq is None:
                    freq = data.offset
                    verify_integrity = False
            else:
                if data.dtype != _NS_DTYPE:
                    subarr = tslib.cast_to_nanoseconds(data)
                else:
                    subarr = data
        elif data.dtype == _INT64_DTYPE:
            if isinstance(data, Int64Index):
                raise TypeError('cannot convert Int64Index->DatetimeIndex')
            if copy:
                subarr = np.asarray(data, dtype=_NS_DTYPE)
            else:
                subarr = data.view(_NS_DTYPE)
        else:
            if isinstance(data, (ABCSeries, Index)):
                values = data.values
            else:
                values = data

            if lib.is_string_array(values):
                subarr = _str_to_dt_array(values, freq, dayfirst=dayfirst,
                                          yearfirst=yearfirst)
            else:
                try:
                    subarr = tools.to_datetime(data, box=False)

                    # make sure that we have a index/ndarray like (and not a Series)
                    if isinstance(subarr, ABCSeries):
                        subarr = subarr.values
                        if subarr.dtype == np.object_:
                            subarr = tools.to_datetime(subarr, box=False)

                except ValueError:
                    # tz aware
                    subarr = tools.to_datetime(data, box=False, utc=True)

                if not np.issubdtype(subarr.dtype, np.datetime64):
                    raise ValueError('Unable to convert %s to datetime dtype'
                                     % str(data))

        # resolve/apply the timezone: internal data is always stored in UTC
        if isinstance(subarr, DatetimeIndex):
            if tz is None:
                tz = subarr.tz
        else:
            if tz is not None:
                tz = tslib.maybe_get_tz(tz)

                if (not isinstance(data, DatetimeIndex) or
                        getattr(data, 'tz', None) is None):
                    # Convert tz-naive to UTC
                    ints = subarr.view('i8')
                    subarr = tslib.tz_localize_to_utc(ints, tz,
                                                      ambiguous=ambiguous)

                subarr = subarr.view(_NS_DTYPE)

        subarr = cls._simple_new(subarr, name=name, freq=freq, tz=tz)

        # check that a requested freq is consistent with the actual data
        if verify_integrity and len(subarr) > 0:
            if freq is not None and not freq_infer:
                inferred = subarr.inferred_freq
                if inferred != freq.freqstr:
                    on_freq = cls._generate(subarr[0], None, len(subarr), None, freq, tz=tz)
                    if not np.array_equal(subarr.asi8, on_freq.asi8):
                        raise ValueError('Inferred frequency {0} from passed dates does not'
                                         'conform to passed frequency {1}'.format(inferred, freq.freqstr))

        if freq_infer:
            inferred = subarr.inferred_freq
            if inferred:
                subarr.offset = to_offset(inferred)

        return subarr
    @classmethod
    def _generate(cls, start, end, periods, name, offset,
                  tz=None, normalize=False, ambiguous='raise', closed=None):
        """Generate a regular DatetimeIndex from exactly two of
        start/end/periods plus a frequency offset."""
        if com._count_not_none(start, end, periods) != 2:
            raise ValueError('Must specify two of start, end, or periods')

        _normalized = True

        if start is not None:
            start = Timestamp(start)

        if end is not None:
            end = Timestamp(end)

        # which endpoints are included in the result
        left_closed = False
        right_closed = False

        if start is None and end is None:
            if closed is not None:
                raise ValueError("Closed has to be None if not both of start"
                                 "and end are defined")

        if closed is None:
            left_closed = True
            right_closed = True
        elif closed == "left":
            left_closed = True
        elif closed == "right":
            right_closed = True
        else:
            raise ValueError("Closed has to be either 'left', 'right' or None")

        try:
            inferred_tz = tools._infer_tzinfo(start, end)
        except:
            raise TypeError('Start and end cannot both be tz-aware with '
                            'different timezones')

        inferred_tz = tslib.maybe_get_tz(inferred_tz)

        # these may need to be localized
        tz = tslib.maybe_get_tz(tz)
        if tz is not None:
            date = start or end
            if date.tzinfo is not None and hasattr(tz, 'localize'):
                # pytz zones need localize() to resolve the correct offset
                tz = tz.localize(date.replace(tzinfo=None)).tzinfo

        if tz is not None and inferred_tz is not None:
            if not inferred_tz == tz:
                raise AssertionError("Inferred time zone not equal to passed "
                                     "time zone")

        elif inferred_tz is not None:
            tz = inferred_tz

        # track whether both endpoints sit exactly at midnight
        if start is not None:
            if normalize:
                start = normalize_date(start)
                _normalized = True
            else:
                _normalized = _normalized and start.time() == _midnight

        if end is not None:
            if normalize:
                end = normalize_date(end)
                _normalized = True
            else:
                _normalized = _normalized and end.time() == _midnight

        if hasattr(offset, 'delta') and offset != offsets.Day():
            # fixed-frequency (Tick) offsets other than daily: localize
            # endpoints first, then generate
            if inferred_tz is None and tz is not None:
                # naive dates
                if start is not None and start.tz is None:
                    start = start.tz_localize(tz)

                if end is not None and end.tz is None:
                    end = end.tz_localize(tz)

            if start and end:
                if start.tz is None and end.tz is not None:
                    start = start.tz_localize(end.tz)

                if end.tz is None and start.tz is not None:
                    end = end.tz_localize(start.tz)

            if _use_cached_range(offset, _normalized, start, end):
                index = cls._cached_range(start, end, periods=periods,
                                          offset=offset, name=name)
            else:
                index = _generate_regular_range(start, end, periods, offset)

        else:
            # calendar offsets: generate in naive wall time, localize after
            if tz is not None:
                # naive dates
                if start is not None and start.tz is not None:
                    start = start.replace(tzinfo=None)

                if end is not None and end.tz is not None:
                    end = end.replace(tzinfo=None)

            if start and end:
                if start.tz is None and end.tz is not None:
                    end = end.replace(tzinfo=None)

                if end.tz is None and start.tz is not None:
                    start = start.replace(tzinfo=None)

            if _use_cached_range(offset, _normalized, start, end):
                index = cls._cached_range(start, end, periods=periods,
                                          offset=offset, name=name)
            else:
                index = _generate_regular_range(start, end, periods, offset)

            if tz is not None and getattr(index, 'tz', None) is None:
                index = tslib.tz_localize_to_utc(com._ensure_int64(index), tz,
                                                 ambiguous=ambiguous)
                index = index.view(_NS_DTYPE)

                index = cls._simple_new(index, name=name, freq=offset, tz=tz)

        # honor the `closed` endpoint semantics
        if not left_closed:
            index = index[1:]
        if not right_closed:
            index = index[:-1]

        return index
    @property
    def _box_func(self):
        # boxes a raw i8/datetime64 value into a Timestamp carrying this
        # index's frequency and timezone
        return lambda x: Timestamp(x, offset=self.offset, tz=self.tz)
    def _local_timestamps(self):
        """Return self.asi8 converted from UTC to local wall time,
        preserving the original element order."""
        utc = _utc()

        if self.is_monotonic:
            return tslib.tz_convert(self.asi8, utc, self.tz)
        else:
            # tz_convert path used here requires sorted input: sort,
            # convert, then undo the permutation
            values = self.asi8
            indexer = values.argsort()
            result = tslib.tz_convert(values.take(indexer), utc, self.tz)

            n = len(indexer)
            reverse = np.empty(n, dtype=np.int_)
            reverse.put(indexer, np.arange(n))
            return result.take(reverse)
    @classmethod
    def _simple_new(cls, values, name=None, freq=None, tz=None, **kwargs):
        """Fast-path constructor: wrap already-validated values without
        any of the inference/validation done by __new__."""
        if not getattr(values, 'dtype', None):
            values = np.array(values, copy=False)
        if values.dtype != _NS_DTYPE:
            # reinterpret int64 nanosecond values as datetime64[ns]
            values = com._ensure_int64(values).view(_NS_DTYPE)

        result = object.__new__(cls)
        result._data = values
        result.name = name
        result.offset = freq
        result.tz = tslib.maybe_get_tz(tz)
        result._reset_identity()
        return result
@property
def tzinfo(self):
"""
Alias for tz attribute
"""
return self.tz
    @classmethod
    def _cached_range(cls, start=None, end=None, periods=None, offset=None,
                      name=None):
        """Slice a lazily-built, module-cached full range for `offset`
        instead of regenerating dates on every call."""
        if start is None and end is None:
            # I somewhat believe this should never be raised externally and therefore
            # should be a `PandasError` but whatever...
            raise TypeError('Must specify either start or end.')
        if start is not None:
            start = Timestamp(start)
        if end is not None:
            end = Timestamp(end)
        if (start is None or end is None) and periods is None:
            raise TypeError('Must either specify period or provide both start and end.')
        if offset is None:
            # This can't happen with external-facing code, therefore PandasError
            raise TypeError('Must provide offset.')

        # build (or fetch) the cached full range for this offset
        drc = _daterange_cache
        if offset not in _daterange_cache:
            xdr = generate_range(offset=offset, start=_CACHE_START,
                                 end=_CACHE_END)

            arr = tools.to_datetime(list(xdr), box=False)

            cachedRange = DatetimeIndex._simple_new(arr)
            cachedRange.offset = offset
            cachedRange.tz = None
            cachedRange.name = None
            drc[offset] = cachedRange
        else:
            cachedRange = drc[offset]

        # locate the requested slice inside the cached range
        if start is None:
            if not isinstance(end, Timestamp):
                raise AssertionError('end must be an instance of Timestamp')

            end = offset.rollback(end)

            endLoc = cachedRange.get_loc(end) + 1
            startLoc = endLoc - periods
        elif end is None:
            if not isinstance(start, Timestamp):
                raise AssertionError('start must be an instance of Timestamp')

            start = offset.rollforward(start)

            startLoc = cachedRange.get_loc(start)
            endLoc = startLoc + periods
        else:
            if not offset.onOffset(start):
                start = offset.rollforward(start)

            if not offset.onOffset(end):
                end = offset.rollback(end)

            startLoc = cachedRange.get_loc(start)
            endLoc = cachedRange.get_loc(end) + 1

        indexSlice = cachedRange[startLoc:endLoc]
        indexSlice.name = name
        indexSlice.offset = offset

        return indexSlice
    def _mpl_repr(self):
        # how to represent ourselves to matplotlib: array of datetime.datetime
        return tslib.ints_to_pydatetime(self.asi8, self.tz)

    _na_value = tslib.NaT
    """The expected NA value to use with this index."""
    @cache_readonly
    def _is_dates_only(self):
        # True when every value is midnight-aligned, so reprs can drop the
        # time component; cached since values are immutable
        from pandas.core.format import _is_dates_only
        return _is_dates_only(self.values)
    @property
    def _formatter_func(self):
        # scalar formatter used when rendering the index; omits the time
        # part when the index is dates-only
        from pandas.core.format import _get_format_datetime64
        formatter = _get_format_datetime64(is_dates_only=self._is_dates_only)
        return lambda x: formatter(x, tz=self.tz)
    def __reduce__(self):
        # we use a special reduce here because we need
        # to simply set the .tz (and not reinterpret it)
        # upon unpickling — see _new_DatetimeIndex at module level
        d = dict(data=self._data)
        d.update(self._get_attributes_dict())
        return _new_DatetimeIndex, (self.__class__, d), None
    def __setstate__(self, state):
        """Necessary for making this object picklable"""
        if isinstance(state, dict):
            # current (>= 0.15) pickle format
            super(DatetimeIndex, self).__setstate__(state)

        elif isinstance(state, tuple):

            # < 0.15 compat
            if len(state) == 2:
                nd_state, own_state = state
                data = np.empty(nd_state[1], dtype=nd_state[2])
                np.ndarray.__setstate__(data, nd_state)
                self.name = own_state[0]
                self.offset = own_state[1]
                self.tz = own_state[2]

                # provide numpy < 1.7 compat
                if nd_state[2] == 'M8[us]':
                    new_state = np.ndarray.__reduce__(data.astype('M8[ns]'))
                    np.ndarray.__setstate__(data, new_state[2])

            else:  # pragma: no cover
                data = np.empty(state)
                np.ndarray.__setstate__(data, state)

            self._data = data
            self._reset_identity()

        else:
            raise Exception("invalid pickle state")
    _unpickle_compat = __setstate__
    def _sub_datelike(self, other):
        # subtract a datetime from myself, yielding a TimedeltaIndex
        from pandas import TimedeltaIndex
        other = Timestamp(other)

        # require tz compat: mixing aware and naive (or different zones)
        # is rejected rather than silently coerced
        if tslib.get_timezone(self.tz) != tslib.get_timezone(other.tzinfo):
            raise TypeError("Timestamp subtraction must have the same timezones or no timezones")
        i8 = self.asi8
        result = i8 - other.value
        # NaT positions propagate as iNaT into the result
        result = self._maybe_mask_results(result, fill_value=tslib.iNaT)
        return TimedeltaIndex(result, name=self.name, copy=False)
    def _add_delta(self, delta):
        """Add a timedelta-like (scalar or TimedeltaIndex) to the index,
        returning a new DatetimeIndex in the same timezone."""
        from pandas import TimedeltaIndex
        if isinstance(delta, (Tick, timedelta, np.timedelta64)):
            new_values = self._add_delta_td(delta)
        elif isinstance(delta, TimedeltaIndex):
            new_values = self._add_delta_tdi(delta)
        else:
            # fall back to element-wise object addition (e.g. DateOffset)
            new_values = self.astype('O') + delta

        # arithmetic is done in UTC, then converted back to the original tz
        tz = 'UTC' if self.tz is not None else None
        result = DatetimeIndex(new_values, tz=tz, freq='infer')
        utc = _utc()
        if self.tz is not None and self.tz is not utc:
            result = result.tz_convert(self.tz)
        return result
    def _format_native_types(self, na_rep=u('NaT'),
                             date_format=None, **kwargs):
        # Render values as strings for output (e.g. to_csv), with ``na_rep``
        # substituted for missing entries.
        data = self.asobject
        from pandas.core.format import Datetime64Formatter
        return Datetime64Formatter(values=data,
                                   nat_rep=na_rep,
                                   date_format=date_format,
                                   justify='all').get_result()
    def to_datetime(self, dayfirst=False):
        # Already datetime-typed: return a copy. ``dayfirst`` is accepted
        # for API symmetry with other index types and is ignored here.
        return self.copy()
def _format_footer(self):
tagline = 'Length: %d, Freq: %s, Timezone: %s'
return tagline % (len(self), self.freqstr, self.tz)
    def astype(self, dtype):
        # Cast to object (boxed Timestamps) or int64 (raw epoch nanos);
        # any other target dtype is rejected.
        dtype = np.dtype(dtype)
        if dtype == np.object_:
            return self.asobject
        elif dtype == _INT64_DTYPE:
            # copy so the caller cannot mutate our underlying data
            return self.asi8.copy()
        else:  # pragma: no cover
            raise ValueError('Cannot cast DatetimeIndex to dtype %s' % dtype)
    def _get_time_micros(self):
        # Microseconds-since-midnight for each element, computed on
        # wall-clock (localized) values when tz-aware.
        utc = _utc()
        values = self.asi8
        if self.tz is not None and self.tz is not utc:
            values = self._local_timestamps()
        return tslib.get_time_micros(values)
    def to_series(self, keep_tz=False):
        """
        Create a Series with both index and values equal to the index keys
        useful with map for returning an indexer based on an index

        Parameters
        ----------
        keep_tz : optional, defaults False.
            return the data keeping the timezone.
            If keep_tz is True:
                If the timezone is not set, the resulting
                Series will have a datetime64[ns] dtype.
                Otherwise the Series will have an object dtype; the
                tz will be preserved.
            If keep_tz is False:
                Series will have a datetime64[ns] dtype. TZ aware
                objects will have the tz removed.

        Returns
        -------
        Series
        """
        from pandas import Series
        # _to_embed decides whether the values keep their tz (object dtype)
        return Series(self._to_embed(keep_tz), index=self, name=self.name)
    def _to_embed(self, keep_tz=False):
        """
        return an array repr of this object, potentially casting to object

        This is for internal compat
        """
        if keep_tz and self.tz is not None:
            # object array of tz-aware Timestamps
            return self.asobject.values
        # naive datetime64[ns] values otherwise
        return self.values
    def to_pydatetime(self):
        """
        Return DatetimeIndex as object ndarray of datetime.datetime objects

        Returns
        -------
        datetimes : ndarray
        """
        return tslib.ints_to_pydatetime(self.asi8, tz=self.tz)
    def to_period(self, freq=None):
        """
        Cast to PeriodIndex at a particular frequency.

        When ``freq`` is None, the index's own (or inferred) frequency is
        used; if none can be determined a ValueError is raised.
        """
        from pandas.tseries.period import PeriodIndex
        if freq is None:
            freq = self.freqstr or self.inferred_freq
            if freq is None:
                msg = "You must pass a freq argument as current index has none."
                raise ValueError(msg)
            # normalize e.g. offset aliases to period aliases
            freq = get_period_alias(freq)
        return PeriodIndex(self.values, name=self.name, freq=freq, tz=self.tz)
    def snap(self, freq='S'):
        """
        Snap time stamps to nearest occurring frequency
        """
        # Superdumb, punting on any optimizing
        freq = to_offset(freq)
        snapped = np.empty(len(self), dtype=_NS_DTYPE)
        for i, v in enumerate(self):
            s = v
            if not freq.onOffset(s):
                # choose whichever on-offset neighbor is closer;
                # ties go to the earlier (rolled-back) stamp
                t0 = freq.rollback(s)
                t1 = freq.rollforward(s)
                if abs(s - t0) < abs(t1 - s):
                    s = t0
                else:
                    s = t1
            snapped[i] = s
        # we know it conforms; skip check
        return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
    def union(self, other):
        """
        Specialized union for DatetimeIndex objects. If combining
        overlapping ranges with the same DateOffset, will be much
        faster than Index.union

        Parameters
        ----------
        other : DatetimeIndex or array-like

        Returns
        -------
        y : Index or DatetimeIndex
        """
        if not isinstance(other, DatetimeIndex):
            try:
                other = DatetimeIndex(other)
            except TypeError:
                # fall through; Index.union handles the heterogeneous case
                pass
        this, other = self._maybe_utc_convert(other)
        if this._can_fast_union(other):
            # adjoining/overlapping regular ranges: concatenate cheaply
            return this._fast_union(other)
        else:
            result = Index.union(this, other)
            if isinstance(result, DatetimeIndex):
                result.tz = this.tz
                if result.freq is None:
                    # try to recover a regular frequency for the result
                    result.offset = to_offset(result.inferred_freq)
            return result
    def union_many(self, others):
        """
        A bit of a hack to accelerate unioning a collection of indexes
        """
        this = self
        for other in others:
            if not isinstance(this, DatetimeIndex):
                # a previous step degraded to a plain Index; keep generic
                this = Index.union(this, other)
                continue
            if not isinstance(other, DatetimeIndex):
                try:
                    other = DatetimeIndex(other)
                except TypeError:
                    pass
            this, other = this._maybe_utc_convert(other)
            if this._can_fast_union(other):
                this = this._fast_union(other)
            else:
                # generic union loses tz/freq metadata; restore it
                tz = this.tz
                this = Index.union(this, other)
                if isinstance(this, DatetimeIndex):
                    this.tz = tz
        if this.freq is None:
            this.offset = to_offset(this.inferred_freq)
        return this
    def append(self, other):
        """
        Append a collection of Index options together

        Parameters
        ----------
        other : Index or list/tuple of indices

        Returns
        -------
        appended : Index
        """
        name = self.name
        to_concat = [self]
        if isinstance(other, (list, tuple)):
            to_concat = to_concat + list(other)
        else:
            to_concat.append(other)
        # the result keeps a name only if every piece agrees on it
        for obj in to_concat:
            if isinstance(obj, Index) and obj.name != name:
                name = None
                break
        to_concat = self._ensure_compat_concat(to_concat)
        # factory rebuilds the appropriate Index subclass for the result
        to_concat, factory = _process_concat_data(to_concat, name)
        return factory(to_concat)
    def join(self, other, how='left', level=None, return_indexers=False):
        """
        See Index.join
        """
        # try to coerce other to DatetimeIndex unless it is clearly numeric
        # or mixed (those would produce a meaningless datetime coercion)
        if (not isinstance(other, DatetimeIndex) and len(other) > 0 and
                other.inferred_type not in ('floating', 'mixed-integer',
                                            'mixed-integer-float', 'mixed')):
            try:
                other = DatetimeIndex(other)
            except (TypeError, ValueError):
                pass
        # align timezones (both to UTC) before joining
        this, other = self._maybe_utc_convert(other)
        return Index.join(this, other, how=how, level=level,
                          return_indexers=return_indexers)
    def _maybe_utc_convert(self, other):
        # Prepare (self, other) for set ops/joins: reject naive/aware mixes
        # and convert both to UTC when their timezones differ.
        this = self
        if isinstance(other, DatetimeIndex):
            if self.tz is not None:
                if other.tz is None:
                    raise TypeError('Cannot join tz-naive with tz-aware '
                                    'DatetimeIndex')
            elif other.tz is not None:
                raise TypeError('Cannot join tz-naive with tz-aware '
                                'DatetimeIndex')
            if self.tz != other.tz:
                this = self.tz_convert('UTC')
                other = other.tz_convert('UTC')
        return this, other
    def _wrap_joined_index(self, joined, other):
        # Re-wrap raw joined values as a DatetimeIndex, keeping the name
        # only when both inputs agree on it.
        name = self.name if self.name == other.name else None
        if (isinstance(other, DatetimeIndex)
            and self.offset == other.offset
            and self._can_fast_union(other)):
            # fast-union-compatible inputs: metadata (offset/tz) carries over
            joined = self._shallow_copy(joined)
            joined.name = name
            return joined
        else:
            tz = getattr(other, 'tz', None)
            return self._simple_new(joined, name, tz=tz)
    def _can_fast_union(self, other):
        # True when self and other are monotonic, share the same regular
        # offset, and overlap or adjoin — so union is a simple concatenation.
        if not isinstance(other, DatetimeIndex):
            return False
        offset = self.offset
        if offset is None or offset != other.offset:
            return False
        if not self.is_monotonic or not other.is_monotonic:
            return False
        if len(self) == 0 or len(other) == 0:
            return True
        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        else:
            left, right = other, self
        right_start = right[0]
        left_end = left[-1]
        # Only need to "adjoin", not overlap
        try:
            return (right_start == left_end + offset) or right_start in left
        except (ValueError):
            # if we are comparing an offset that does not propagate timezones
            # this will raise
            return False
    def _fast_union(self, other):
        # Union of two fast-union-compatible indexes (see _can_fast_union):
        # concatenate the non-overlapping tail instead of a generic set op.
        if len(other) == 0:
            return self.view(type(self))
        if len(self) == 0:
            return other.view(type(self))
        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        else:
            left, right = other, self
        left_start, left_end = left[0], left[-1]
        right_end = right[-1]
        if not self.offset._should_cache():
            # concatenate dates
            if left_end < right_end:
                loc = right.searchsorted(left_end, side='right')
                right_chunk = right.values[loc:]
                dates = com._concat_compat((left.values, right_chunk))
                return self._shallow_copy(dates)
            else:
                # right is entirely contained in left
                return left
        else:
            # cacheable offsets: cheaper to regenerate the full range
            return type(self)(start=left_start,
                              end=max(left_end, right_end),
                              freq=left.offset)
    def __array_finalize__(self, obj):
        # numpy hook: propagate index metadata when views/copies are created
        if self.ndim == 0:  # pragma: no cover
            return self.item()
        self.offset = getattr(obj, 'offset', None)
        self.tz = getattr(obj, 'tz', None)
        self.name = getattr(obj, 'name', None)
        self._reset_identity()
    def __iter__(self):
        """
        Return an iterator over the boxed values

        Yields
        ------
        Timestamp objects (tz-aware when the index is)
        """
        # convert in chunks of 10k for efficiency: boxing is done in bulk
        # by tslib rather than one value at a time
        data = self.asi8
        l = len(self)
        chunksize = 10000
        chunks = int(l / chunksize) + 1
        for i in range(chunks):
            start_i = i*chunksize
            end_i = min((i+1)*chunksize,l)
            converted = tslib.ints_to_pydatetime(data[start_i:end_i], tz=self.tz, offset=self.offset, box=True)
            for v in converted:
                yield v
    def _wrap_union_result(self, other, result):
        # Wrap raw union values; name survives only if both inputs agree,
        # and mismatched timezones are an error at this point.
        name = self.name if self.name == other.name else None
        if self.tz != other.tz:
            raise ValueError('Passed item and index have different timezone')
        return self._simple_new(result, name=name, freq=None, tz=self.tz)
    def intersection(self, other):
        """
        Specialized intersection for DatetimeIndex objects. May be much faster
        than Index.intersection

        Parameters
        ----------
        other : DatetimeIndex or array-like

        Returns
        -------
        y : Index or DatetimeIndex
        """
        if not isinstance(other, DatetimeIndex):
            try:
                other = DatetimeIndex(other)
            except (TypeError, ValueError):
                pass
            result = Index.intersection(self, other)
            if isinstance(result, DatetimeIndex):
                if result.freq is None:
                    result.offset = to_offset(result.inferred_freq)
            return result
        elif (other.offset is None or self.offset is None or
              other.offset != self.offset or
              not other.offset.isAnchored() or
              (not self.is_monotonic or not other.is_monotonic)):
            # irregular / mismatched frequencies: fall back to generic set op
            result = Index.intersection(self, other)
            if isinstance(result, DatetimeIndex):
                if result.freq is None:
                    result.offset = to_offset(result.inferred_freq)
            return result
        # fast path: both are monotonic with the same anchored offset, so
        # the intersection is a contiguous slice of one of them
        if len(self) == 0:
            return self
        if len(other) == 0:
            return other
        # to make our life easier, "sort" the two ranges
        if self[0] <= other[0]:
            left, right = self, other
        else:
            left, right = other, self
        end = min(left[-1], right[-1])
        start = right[0]
        if end < start:
            # disjoint ranges
            return type(self)(data=[])
        else:
            lslice = slice(*left.slice_locs(start, end))
            left_chunk = left.values[lslice]
            return self._shallow_copy(left_chunk)
def _parsed_string_to_bounds(self, reso, parsed):
"""
Calculate datetime bounds for parsed time string and its resolution.
Parameters
----------
reso : Resolution
Resolution provided by parsed string.
parsed : datetime
Datetime from parsed string.
Returns
-------
lower, upper: pd.Timestamp
"""
is_monotonic = self.is_monotonic
if reso == 'year':
return (Timestamp(datetime(parsed.year, 1, 1), tz=self.tz),
Timestamp(datetime(parsed.year, 12, 31, 23, 59, 59, 999999), tz=self.tz))
elif reso == 'month':
d = tslib.monthrange(parsed.year, parsed.month)[1]
return (Timestamp(datetime(parsed.year, parsed.month, 1), tz=self.tz),
Timestamp(datetime(parsed.year, parsed.month, d, 23, 59, 59, 999999), tz=self.tz))
elif reso == 'quarter':
qe = (((parsed.month - 1) + 2) % 12) + 1 # two months ahead
d = tslib.monthrange(parsed.year, qe)[1] # at end of month
return (Timestamp(datetime(parsed.year, parsed.month, 1), tz=self.tz),
Timestamp(datetime(parsed.year, qe, d, 23, 59, 59, 999999), tz=self.tz))
elif reso == 'day':
st = datetime(parsed.year, parsed.month, parsed.day)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Day(), tz=self.tz).value - 1))
elif reso == 'hour':
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Hour(),
tz=self.tz).value - 1))
elif reso == 'minute':
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour, minute=parsed.minute)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Minute(),
tz=self.tz).value - 1))
elif reso == 'second':
st = datetime(parsed.year, parsed.month, parsed.day,
hour=parsed.hour, minute=parsed.minute, second=parsed.second)
return (Timestamp(st, tz=self.tz),
Timestamp(Timestamp(st + offsets.Second(),
tz=self.tz).value - 1))
elif reso == 'microsecond':
st = datetime(parsed.year, parsed.month, parsed.day,
parsed.hour, parsed.minute, parsed.second,
parsed.microsecond)
return (Timestamp(st, tz=self.tz), Timestamp(st, tz=self.tz))
else:
raise KeyError
    def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True):
        # Locate the positions matched by a partial date string; returns a
        # slice for monotonic indexes, otherwise an integer position array.
        # Raises KeyError when partial matching is not allowed for ``reso``.
        is_monotonic = self.is_monotonic
        if ((reso in ['day', 'hour', 'minute'] and
             not (self._resolution < Resolution.get_reso(reso) or
                  not is_monotonic)) or
            (reso == 'second' and
             not (self._resolution <= Resolution.RESO_SEC or
                  not is_monotonic))):
            # These resolution/monotonicity validations came from GH3931,
            # GH3452 and GH2369.
            raise KeyError
        if reso == 'microsecond':
            # _partial_date_slice doesn't allow microsecond resolution, but
            # _parsed_string_to_bounds allows it.
            raise KeyError
        t1, t2 = self._parsed_string_to_bounds(reso, parsed)
        stamps = self.asi8
        if is_monotonic:
            # we are out of range
            if len(stamps) and (
                (use_lhs and t1.value < stamps[0] and t2.value < stamps[0]) or (
                    (use_rhs and t1.value > stamps[-1] and t2.value > stamps[-1]))):
                raise KeyError
            # a monotonic (sorted) series can be sliced
            left = stamps.searchsorted(t1.value, side='left') if use_lhs else None
            right = stamps.searchsorted(t2.value, side='right') if use_rhs else None
            return slice(left, right)
        lhs_mask = (stamps >= t1.value) if use_lhs else True
        rhs_mask = (stamps <= t2.value) if use_rhs else True
        # try to find a the dates
        return (lhs_mask & rhs_mask).nonzero()[0]
    def _possibly_promote(self, other):
        # Promote a date-typed index to DatetimeIndex so comparisons/ops
        # between the two operate on a common type.
        if other.inferred_type == 'date':
            other = DatetimeIndex(other)
        return self, other
    def get_value(self, series, key):
        """
        Fast lookup of value from 1-dimensional ndarray. Only use this if you
        know what you're doing
        """
        if isinstance(key, datetime):
            # needed to localize naive datetimes
            if self.tz is not None:
                key = Timestamp(key, tz=self.tz)
            return self.get_value_maybe_box(series, key)
        if isinstance(key, time):
            # datetime.time keys select every row at that time of day
            locs = self.indexer_at_time(key)
            return series.take(locs)
        try:
            return _maybe_box(self, Index.get_value(self, series, key), series, key)
        except KeyError:
            # fall back to partial string indexing, then to boxed lookup
            try:
                loc = self._get_string_slice(key)
                return series[loc]
            except (TypeError, ValueError, KeyError):
                pass
            try:
                return self.get_value_maybe_box(series, key)
            except (TypeError, ValueError, KeyError):
                raise KeyError(key)
    def get_value_maybe_box(self, series, key):
        # Coerce the key to a (possibly localized) Timestamp, then do an
        # engine-level lookup and box the result.
        # needed to localize naive datetimes
        if self.tz is not None:
            key = Timestamp(key, tz=self.tz)
        elif not isinstance(key, Timestamp):
            key = Timestamp(key)
        values = self._engine.get_value(_values_from_object(series), key)
        return _maybe_box(self, values, series, key)
    def get_loc(self, key, method=None):
        """
        Get integer location for requested label

        Returns
        -------
        loc : int
        """
        if isinstance(key, datetime):
            # needed to localize naive datetimes
            key = Timestamp(key, tz=self.tz)
            return Index.get_loc(self, key, method=method)
        if isinstance(key, time):
            if method is not None:
                raise NotImplementedError('cannot yet lookup inexact labels '
                                          'when key is a time object')
            # time-of-day lookup returns all matching positions
            return self.indexer_at_time(key)
        try:
            return Index.get_loc(self, key, method=method)
        except (KeyError, ValueError, TypeError):
            # fall back to partial string indexing ...
            try:
                return self._get_string_slice(key)
            except (TypeError, KeyError, ValueError):
                pass
            # ... then to parsing the key as a Timestamp
            try:
                stamp = Timestamp(key, tz=self.tz)
                return Index.get_loc(self, stamp, method=method)
            except (KeyError, ValueError):
                raise KeyError(key)
    def _maybe_cast_slice_bound(self, label, side, kind):
        """
        If label is a string, cast it to datetime according to resolution.

        Parameters
        ----------
        label : object
        side : {'left', 'right'}
        kind : string / None

        Returns
        -------
        label : object

        Notes
        -----
        Value of `side` parameter should be validated in caller.
        """
        # numeric and time-of-day labels are not valid slice bounds here
        if is_float(label) or isinstance(label, time) or is_integer(label):
            self._invalid_indexer('slice',label)
        if isinstance(label, compat.string_types):
            freq = getattr(self, 'freqstr',
                           getattr(self, 'inferred_freq', None))
            _, parsed, reso = parse_time_string(label, freq)
            # lower bound for the left edge, upper bound for the right edge
            bounds = self._parsed_string_to_bounds(reso, parsed)
            return bounds[0 if side == 'left' else 1]
        else:
            return label
    def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
        # Resolve a partial date string (e.g. '2011-01') into positions via
        # parse + _partial_date_slice.
        freq = getattr(self, 'freqstr',
                       getattr(self, 'inferred_freq', None))
        _, parsed, reso = parse_time_string(key, freq)
        loc = self._partial_date_slice(reso, parsed, use_lhs=use_lhs,
                                       use_rhs=use_rhs)
        return loc
    def slice_indexer(self, start=None, end=None, step=None, kind=None):
        """
        Return indexer for specified label slice.
        Index.slice_indexer, customized to handle time slicing.

        In addition to functionality provided by Index.slice_indexer, does the
        following:

        - if both `start` and `end` are instances of `datetime.time`, it
          invokes `indexer_between_time`
        - if `start` and `end` are both either string or None perform
          value-based selection in non-monotonic cases.
        """
        # For historical reasons DatetimeIndex supports slices between two
        # instances of datetime.time as if it were applying a slice mask to
        # an array of (self.hour, self.minute, self.seconds, self.microsecond).
        if isinstance(start, time) and isinstance(end, time):
            if step is not None and step != 1:
                raise ValueError('Must have step size of 1 with time slices')
            return self.indexer_between_time(start, end)
        if isinstance(start, time) or isinstance(end, time):
            raise KeyError('Cannot mix time and non-time slice keys')
        try:
            return Index.slice_indexer(self, start, end, step)
        except KeyError:
            # For historical reasons DatetimeIndex by default supports
            # value-based partial (aka string) slices on non-monotonic arrays,
            # let's try that.
            if ((start is None or isinstance(start, compat.string_types)) and
                (end is None or isinstance(end, compat.string_types))):
                # build a boolean mask from the casted bounds
                mask = True
                if start is not None:
                    start_casted = self._maybe_cast_slice_bound(start, 'left', kind)
                    mask = start_casted <= self
                if end is not None:
                    end_casted = self._maybe_cast_slice_bound(end, 'right', kind)
                    mask = (self <= end_casted) & mask
                indexer = mask.nonzero()[0][::step]
                if len(indexer) == len(self):
                    # mask selected everything: return the cheap full slice
                    return slice(None)
                else:
                    return indexer
            else:
                raise
    def __getitem__(self, key):
        # Scalar access boxes to Timestamp; slice/array access returns a new
        # DatetimeIndex with the frequency adjusted for the step taken.
        getitem = self._data.__getitem__
        if np.isscalar(key):
            val = getitem(key)
            return Timestamp(val, offset=self.offset, tz=self.tz)
        else:
            if com.is_bool_indexer(key):
                # normalize boolean masks to slices where possible so the
                # frequency can be preserved
                key = np.asarray(key)
                if key.all():
                    key = slice(0,None,None)
                else:
                    key = lib.maybe_booleans_to_slice(key.view(np.uint8))
            new_offset = None
            if isinstance(key, slice):
                if self.offset is not None and key.step is not None:
                    # stepping by k multiplies the frequency by k
                    new_offset = key.step * self.offset
                else:
                    new_offset = self.offset
            result = getitem(key)
            if result.ndim > 1:
                # fancy indexing produced >1D; cannot wrap as an Index
                return result
            return self._simple_new(result, self.name, new_offset, self.tz)
# alias to offset
def _get_freq(self):
return self.offset
def _set_freq(self, value):
self.offset = value
freq = property(fget=_get_freq, fset=_set_freq, doc="get/set the frequncy of the Index")
year = _field_accessor('year', 'Y', "The year of the datetime")
month = _field_accessor('month', 'M', "The month as January=1, December=12")
day = _field_accessor('day', 'D', "The days of the datetime")
hour = _field_accessor('hour', 'h', "The hours of the datetime")
minute = _field_accessor('minute', 'm', "The minutes of the datetime")
second = _field_accessor('second', 's', "The seconds of the datetime")
millisecond = _field_accessor('millisecond', 'ms', "The milliseconds of the datetime")
microsecond = _field_accessor('microsecond', 'us', "The microseconds of the datetime")
nanosecond = _field_accessor('nanosecond', 'ns', "The nanoseconds of the datetime")
weekofyear = _field_accessor('weekofyear', 'woy', "The week ordinal of the year")
week = weekofyear
dayofweek = _field_accessor('dayofweek', 'dow',
"The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
dayofyear = _field_accessor('dayofyear', 'doy', "The ordinal day of the year")
quarter = _field_accessor('quarter', 'q', "The quarter of the date")
days_in_month = _field_accessor('days_in_month', 'dim', "The number of days in the month")
daysinmonth = days_in_month
is_month_start = _field_accessor('is_month_start', 'is_month_start', "Logical indicating if first day of month (defined by frequency)")
is_month_end = _field_accessor('is_month_end', 'is_month_end', "Logical indicating if last day of month (defined by frequency)")
is_quarter_start = _field_accessor('is_quarter_start', 'is_quarter_start', "Logical indicating if first day of quarter (defined by frequency)")
is_quarter_end = _field_accessor('is_quarter_end', 'is_quarter_end', "Logical indicating if last day of quarter (defined by frequency)")
is_year_start = _field_accessor('is_year_start', 'is_year_start', "Logical indicating if first day of year (defined by frequency)")
is_year_end = _field_accessor('is_year_end', 'is_year_end', "Logical indicating if last day of year (defined by frequency)")
    @property
    def time(self):
        """
        Returns numpy array of datetime.time. The time part of the Timestamps.
        """
        # can't call self.map() which tries to treat func as ufunc
        # and causes recursion warnings on python 2.6
        return self._maybe_mask_results(_algos.arrmap_object(self.asobject.values, lambda x: x.time()))
    @property
    def date(self):
        """
        Returns numpy array of datetime.date. The date part of the Timestamps.
        """
        # elementwise .date() over the boxed values, masking NaT slots
        return self._maybe_mask_results(_algos.arrmap_object(self.asobject.values, lambda x: x.date()))
    def normalize(self):
        """
        Return DatetimeIndex with times to midnight. Length is unaltered

        Returns
        -------
        normalized : DatetimeIndex
        """
        # truncation is done on the i8 values, tz-aware via date_normalize
        new_values = tslib.date_normalize(self.asi8, self.tz)
        return DatetimeIndex(new_values, freq='infer', name=self.name,
                             tz=self.tz)
    def searchsorted(self, key, side='left'):
        # Coerce the key to datetime64[ns] (array or scalar) before
        # delegating to ndarray.searchsorted.
        if isinstance(key, (np.ndarray, Index)):
            key = np.array(key, dtype=_NS_DTYPE, copy=False)
        else:
            key = _to_m8(key, tz=self.tz)
        return self.values.searchsorted(key, side=side)
def is_type_compatible(self, typ):
return typ == self.inferred_type or typ == 'datetime'
    @property
    def inferred_type(self):
        # b/c datetime is represented as microseconds since the epoch, make
        # sure we can't have ambiguous indexing
        return 'datetime64'
    @property
    def dtype(self):
        # always nanosecond-resolution datetime64
        return _NS_DTYPE
    @property
    def is_all_dates(self):
        # every element of a DatetimeIndex is a date by construction
        return True
    @cache_readonly
    def is_normalized(self):
        """
        Returns True if all of the dates are at midnight ("no time")
        """
        return tslib.dates_normalized(self.asi8, self.tz)
    @cache_readonly
    def _resolution(self):
        # Finest resolution actually present in the data (used by partial
        # string indexing validation); cached as it scans all values.
        return period.resolution(self.asi8, self.tz)
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if (not hasattr(other, 'inferred_type') or
other.inferred_type != 'datetime64'):
if self.offset is not None:
return False
try:
other = DatetimeIndex(other)
except:
return False
if self.tz is not None:
if other.tz is None:
return False
same_zone = tslib.get_timezone(
self.tz) == tslib.get_timezone(other.tz)
else:
if other.tz is not None:
return False
same_zone = True
return same_zone and np.array_equal(self.asi8, other.asi8)
    def insert(self, loc, item):
        """
        Make new Index inserting new item at location

        Parameters
        ----------
        loc : int
        item : object
            if not either a Python datetime or a numpy integer-like, returned
            Index dtype will be object rather than datetime.

        Returns
        -------
        new_index : Index
        """
        freq = None
        if isinstance(item, (datetime, np.datetime64)):
            zone = tslib.get_timezone(self.tz)
            izone = tslib.get_timezone(getattr(item, 'tzinfo', None))
            if zone != izone:
                raise ValueError('Passed item and index have different timezone')
            # check freq can be preserved on edge cases: inserting at either
            # end keeps freq only if the item extends the regular sequence
            if self.freq is not None:
                if (loc == 0 or loc == -len(self)) and item + self.freq == self[0]:
                    freq = self.freq
                elif (loc == len(self)) and item - self.freq == self[-1]:
                    freq = self.freq
            item = _to_m8(item, tz=self.tz)
        try:
            # concatenation is done on i8 (UTC-based) values
            new_dates = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
                                        self[loc:].asi8))
            if self.tz is not None:
                new_dates = tslib.tz_convert(new_dates, 'UTC', self.tz)
            return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz)
        except (AttributeError, TypeError):
            # fall back to object index
            if isinstance(item,compat.string_types):
                return self.asobject.insert(loc, item)
            raise TypeError("cannot insert DatetimeIndex with incompatible label")
    def delete(self, loc):
        """
        Make a new DatetimeIndex with passed location(s) deleted.

        Parameters
        ----------
        loc: int, slice or array of ints
            Indicate which sub-arrays to remove.

        Returns
        -------
        new_index : DatetimeIndex
        """
        new_dates = np.delete(self.asi8, loc)
        freq = None
        if is_integer(loc):
            # deleting a single endpoint keeps the regular frequency
            if loc in (0, -len(self), -1, len(self) - 1):
                freq = self.freq
        else:
            if com.is_list_like(loc):
                loc = lib.maybe_indices_to_slice(com._ensure_int64(np.array(loc)))
            # a step-1 slice touching either end also keeps the frequency
            if isinstance(loc, slice) and loc.step in (1, None):
                if (loc.start in (0, None) or loc.stop in (len(self), None)):
                    freq = self.freq
        if self.tz is not None:
            # i8 values are UTC-based; re-localize before rebuilding
            new_dates = tslib.tz_convert(new_dates, 'UTC', self.tz)
        return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz)
    def tz_convert(self, tz):
        """
        Convert tz-aware DatetimeIndex from one time zone to another (using pytz/dateutil)

        Parameters
        ----------
        tz : string, pytz.timezone, dateutil.tz.tzfile or None
            Time zone for time. Corresponding timestamps would be converted to
            time zone of the TimeSeries.
            None will remove timezone holding UTC time.

        Returns
        -------
        normalized : DatetimeIndex

        Raises
        ------
        TypeError
            If the index is tz-naive.
        """
        tz = tslib.maybe_get_tz(tz)
        if self.tz is None:
            # tz naive, use tz_localize
            raise TypeError('Cannot convert tz-naive timestamps, use '
                            'tz_localize to localize')
        # No conversion since timestamps are all UTC to begin with
        return self._shallow_copy(tz=tz)
    @deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
                     mapping={True: 'infer', False: 'raise'})
    def tz_localize(self, tz, ambiguous='raise'):
        """
        Localize tz-naive DatetimeIndex to given time zone (using pytz/dateutil),
        or remove timezone from tz-aware DatetimeIndex

        Parameters
        ----------
        tz : string, pytz.timezone, dateutil.tz.tzfile or None
            Time zone for time. Corresponding timestamps would be converted to
            time zone of the TimeSeries.
            None will remove timezone holding local time.
        ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
            - 'infer' will attempt to infer fall dst-transition hours based on order
            - bool-ndarray where True signifies a DST time, False signifies
              a non-DST time (note that this flag is only applicable for ambiguous times)
            - 'NaT' will return NaT where there are ambiguous times
            - 'raise' will raise an AmbiguousTimeError if there are ambiguous times
        infer_dst : boolean, default False (DEPRECATED)
            Attempt to infer fall dst-transition hours based on order

        Returns
        -------
        localized : DatetimeIndex

        Raises
        ------
        TypeError
            If localizing an already tz-aware index to a new timezone.
        """
        if self.tz is not None:
            if tz is None:
                # drop the timezone: convert UTC-based values to local wall time
                new_dates = tslib.tz_convert(self.asi8, 'UTC', self.tz)
            else:
                raise TypeError("Already tz-aware, use tz_convert to convert.")
        else:
            tz = tslib.maybe_get_tz(tz)
            # Convert to UTC
            new_dates = tslib.tz_localize_to_utc(self.asi8, tz,
                                                 ambiguous=ambiguous)
        new_dates = new_dates.view(_NS_DTYPE)
        return self._shallow_copy(new_dates, tz=tz)
    def indexer_at_time(self, time, asof=False):
        """
        Select values at particular time of day (e.g. 9:30AM)

        Parameters
        ----------
        time : datetime.time or string
        asof : bool, default False
            Not implemented.

        Returns
        -------
        values_at_time : TimeSeries
        """
        from dateutil.parser import parse
        if asof:
            raise NotImplementedError
        if isinstance(time, compat.string_types):
            time = parse(time).time()
        if time.tzinfo:
            # TODO: support tz-aware time-of-day keys
            raise NotImplementedError
        # compare microseconds-since-midnight for an exact match
        time_micros = self._get_time_micros()
        micros = _time_to_micros(time)
        return (micros == time_micros).nonzero()[0]
    def indexer_between_time(self, start_time, end_time, include_start=True,
                             include_end=True):
        """
        Select values between particular times of day (e.g., 9:00-9:30AM)

        Parameters
        ----------
        start_time : datetime.time or string
        end_time : datetime.time or string
        include_start : boolean, default True
        include_end : boolean, default True

        Returns
        -------
        values_between_time : TimeSeries
        """
        from dateutil.parser import parse
        if isinstance(start_time, compat.string_types):
            start_time = parse(start_time).time()
        if isinstance(end_time, compat.string_types):
            end_time = parse(end_time).time()
        if start_time.tzinfo or end_time.tzinfo:
            raise NotImplementedError
        time_micros = self._get_time_micros()
        start_micros = _time_to_micros(start_time)
        end_micros = _time_to_micros(end_time)
        # pick strict/inclusive comparators per the include_* flags
        if include_start and include_end:
            lop = rop = operator.le
        elif include_start:
            lop = operator.le
            rop = operator.lt
        elif include_end:
            lop = operator.lt
            rop = operator.le
        else:
            lop = rop = operator.lt
        # a start later than the end means the window wraps past midnight,
        # so the two half-conditions are OR-ed instead of AND-ed
        if start_time <= end_time:
            join_op = operator.and_
        else:
            join_op = operator.or_
        mask = join_op(lop(start_micros, time_micros),
                       rop(time_micros, end_micros))
        return mask.nonzero()[0]
    def to_julian_date(self):
        """
        Convert DatetimeIndex to Float64Index of Julian Dates.
        0 Julian date is noon January 1, 4713 BC.
        http://en.wikipedia.org/wiki/Julian_day
        """
        # http://mysite.verizon.net/aesir_research/date/jdalg2.htm
        year = self.year
        month = self.month
        day = self.day
        # January/February count as months 13/14 of the previous year.
        # NOTE(review): the in-place edits below assume the field accessors
        # return fresh arrays (not views of index data) — confirm.
        testarr = month < 3
        year[testarr] -= 1
        month[testarr] += 12
        return Float64Index(day +
                            np.fix((153*month - 457)/5) +
                            365*year +
                            np.floor(year / 4) -
                            np.floor(year / 100) +
                            np.floor(year / 400) +
                            1721118.5 +
                            (self.hour +
                             self.minute/60.0 +
                             self.second/3600.0 +
                             self.microsecond/3600.0/1e+6 +
                             self.nanosecond/3600.0/1e+9
                             )/24.0)
# Wire up shared datetimelike behavior: numeric/logical ops are disabled
# (they are meaningless on datetimes), datetimelike arithmetic is enabled.
DatetimeIndex._add_numeric_methods_disabled()
DatetimeIndex._add_logical_methods_disabled()
DatetimeIndex._add_datetimelike_methods()
def _generate_regular_range(start, end, periods, offset):
    # Build the values for a regular-frequency DatetimeIndex from any two of
    # (start, end, periods). Tick offsets use fast integer arithmetic on
    # nanoseconds; other offsets fall back to generate_range.
    if isinstance(offset, Tick):
        stride = offset.nanos
        if periods is None:
            b = Timestamp(start).value
            e = Timestamp(end).value
            # round the endpoint up so the last stamp is included
            e += stride - e % stride
            # end.tz == start.tz by this point due to _generate implementation
            tz = start.tz
        elif start is not None:
            b = Timestamp(start).value
            e = b + np.int64(periods) * stride
            tz = start.tz
        elif end is not None:
            # +stride so ``end`` itself is included, then count backwards
            e = Timestamp(end).value + stride
            b = e - np.int64(periods) * stride
            tz = end.tz
        else:
            raise NotImplementedError
        data = np.arange(b, e, stride, dtype=np.int64)
        data = DatetimeIndex._simple_new(data, None, tz=tz)
    else:
        if isinstance(start, Timestamp):
            start = start.to_pydatetime()
        if isinstance(end, Timestamp):
            end = end.to_pydatetime()
        xdr = generate_range(start=start, end=end,
                             periods=periods, offset=offset)
        dates = list(xdr)
        # utc = len(dates) > 0 and dates[0].tzinfo is not None
        data = tools.to_datetime(dates)
    return data
def date_range(start=None, end=None, periods=None, freq='D', tz=None,
               normalize=False, name=None, closed=None):
    """
    Return a fixed frequency datetime index, with day (calendar) as the default
    frequency

    Parameters
    ----------
    start : string or datetime-like, default None
        Left bound for generating dates
    end : string or datetime-like, default None
        Right bound for generating dates
    periods : integer or None, default None
        If None, must specify start and end
    freq : string or DateOffset, default 'D' (calendar daily)
        Frequency strings can have multiples, e.g. '5H'
    tz : string or None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Hong_Kong
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range
    name : str, default None
        Name of the resulting index
    closed : string or None, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None)

    Notes
    -----
    2 of start, end, or periods must be specified

    Returns
    -------
    rng : DatetimeIndex
    """
    # Thin wrapper: DatetimeIndex's range constructor does all the work.
    return DatetimeIndex(start=start, end=end, periods=periods, freq=freq,
                         tz=tz, normalize=normalize, name=name, closed=closed)
def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
                normalize=True, name=None, closed=None):
    """
    Return a fixed frequency datetime index, with business day as the default
    frequency

    Parameters
    ----------
    start : string or datetime-like, default None
        Left bound for generating dates
    end : string or datetime-like, default None
        Right bound for generating dates
    periods : integer or None, default None
        If None, must specify start and end
    freq : string or DateOffset, default 'B' (business daily)
        Frequency strings can have multiples, e.g. '5H'
    tz : string or None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Beijing
    normalize : bool, default True
        Normalize start/end dates to midnight before generating date range
    name : str, default None
        Name for the resulting index
    closed : string or None, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None)

    Notes
    -----
    2 of start, end, or periods must be specified

    Returns
    -------
    rng : DatetimeIndex
    """
    # Thin wrapper around the DatetimeIndex range constructor.
    return DatetimeIndex(start=start, end=end, periods=periods,
                         freq=freq, tz=tz, normalize=normalize, name=name,
                         closed=closed)
def cdate_range(start=None, end=None, periods=None, freq='C', tz=None,
                normalize=True, name=None, closed=None, **kwargs):
    """
    **EXPERIMENTAL** Return a fixed frequency datetime index, with
    CustomBusinessDay as the default frequency

    .. warning:: EXPERIMENTAL
        The CustomBusinessDay class is not officially supported and the API is
        likely to change in future versions. Use this at your own risk.

    Parameters
    ----------
    start : string or datetime-like, default None
        Left bound for generating dates
    end : string or datetime-like, default None
        Right bound for generating dates
    periods : integer or None, default None
        If None, must specify start and end
    freq : string or DateOffset, default 'C' (CustomBusinessDay)
        Frequency strings can have multiples, e.g. '5H'
    tz : string or None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Hong_Kong
    normalize : bool, default True
        Normalize start/end dates to midnight before generating date range
    name : str, default None
        Name for the resulting index
    weekmask : str, Default 'Mon Tue Wed Thu Fri'
        weekmask of valid business days, passed to ``numpy.busdaycalendar``
    holidays : list
        list/array of dates to exclude from the set of valid business days,
        passed to ``numpy.busdaycalendar``
    closed : string or None, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None)

    Notes
    -----
    2 of start, end, or periods must be specified

    Returns
    -------
    rng : DatetimeIndex
    """
    # Only the default 'C' frequency consumes weekmask/holidays to build a
    # CustomBusinessDay offset; any other freq passes kwargs straight through.
    if freq=='C':
        holidays = kwargs.pop('holidays', [])
        weekmask = kwargs.pop('weekmask', 'Mon Tue Wed Thu Fri')
        freq = CDay(holidays=holidays, weekmask=weekmask)
    return DatetimeIndex(start=start, end=end, periods=periods, freq=freq,
                         tz=tz, normalize=normalize, name=name,
                         closed=closed, **kwargs)
def _to_m8(key, tz=None):
    """Coerce a Timestamp-like value (including strings) to a datetime64 scalar."""
    if isinstance(key, Timestamp):
        ts = key
    else:
        # Timestamp() also parses string representations
        ts = Timestamp(key, tz=tz)
    i8 = np.int64(tslib.pydt_to_i8(ts))
    return i8.view(_NS_DTYPE)
def _str_to_dt_array(arr, offset=None, dayfirst=None, yearfirst=None):
    """Parse an object array of date strings into a datetime index."""
    def _parse_one(value):
        # keep only the parsed datetime from parse_time_string's result tuple
        return parse_time_string(value, offset, dayfirst=dayfirst,
                                 yearfirst=yearfirst)[0]
    obj_arr = np.asarray(arr, dtype=object)
    parsed = _algos.arrmap_object(obj_arr, _parse_one)
    return tools.to_datetime(parsed)
# Window of dates eligible for the generated date-range cache below.
_CACHE_START = Timestamp(datetime(1950, 1, 1))
_CACHE_END = Timestamp(datetime(2030, 1, 1))
# Cache of previously generated date ranges; populated/consumed by the
# range-generation machinery elsewhere in this module.
_daterange_cache = {}
def _naive_in_cache_range(start, end):
    """Return True when both endpoints are tz-naive and strictly inside the cache window."""
    if start is None or end is None:
        return False
    if start.tzinfo is not None or end.tzinfo is not None:
        # timezone-aware endpoints never qualify for the naive cache
        return False
    return _in_range(start, end, _CACHE_START, _CACHE_END)
def _in_range(start, end, rng_start, rng_end):
    """Return True when (start, end) lies strictly inside (rng_start, rng_end)."""
    return rng_start < start and end < rng_end
def _use_cached_range(offset, _normalized, start, end):
    """Decide whether a cached date range may be reused for this request."""
    if not offset._should_cache():
        return False
    if offset._normalize_cache and not _normalized:
        # the cache only holds normalized dates; a non-normalized request can't use it
        return False
    return _naive_in_cache_range(start, end)
def _time_to_micros(time):
    """Convert a datetime.time to microseconds since midnight."""
    total_seconds = (time.hour * 3600) + (time.minute * 60) + time.second
    return total_seconds * 1000000 + time.microsecond
def _process_concat_data(to_concat, name):
    """
    Prepare a list of Index/DatetimeIndex objects for concatenation.

    Returns the (possibly converted) list of underlying value arrays plus a
    factory function that rebuilds an appropriate Index from the concatenated
    values.
    """
    klass = Index
    kwargs = {}
    concat = np.concatenate
    # First pass: record tz bookkeeping. `tz` is taken from the first
    # DatetimeIndex encountered; any later mismatch triggers UTC conversion.
    all_dti = True
    need_utc_convert = False
    has_naive = False
    tz = None
    for x in to_concat:
        if not isinstance(x, DatetimeIndex):
            all_dti = False
        else:
            if tz is None:
                tz = x.tz
            if x.tz is None:
                has_naive = True
            if x.tz != tz:
                need_utc_convert = True
                tz = 'UTC'
    if all_dti:
        need_obj_convert = False
        if has_naive and tz is not None:
            # mixing naive and aware indexes: fall back to object dtype
            need_obj_convert = True
        if need_obj_convert:
            to_concat = [x.asobject.values for x in to_concat]
        else:
            if need_utc_convert:
                to_concat = [x.tz_convert('UTC').values for x in to_concat]
            else:
                to_concat = [x.values for x in to_concat]
            # well, technically not a "class" anymore...oh well
            klass = DatetimeIndex._simple_new
            kwargs = {'tz': tz}
            concat = com._concat_compat
    else:
        # mixed index types: coerce datetime-like members to object arrays
        for i, x in enumerate(to_concat):
            if isinstance(x, DatetimeIndex):
                to_concat[i] = x.asobject.values
            elif isinstance(x, Index):
                to_concat[i] = x.values
    factory_func = lambda x: klass(concat(x), name=name, **kwargs)
    return to_concat, factory_func
| gpl-2.0 |
pprett/scikit-learn | sklearn/gaussian_process/tests/test_gaussian_process.py | 46 | 7057 | """
Testing for Gaussian Process module (sklearn.gaussian_process)
"""
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# License: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from sklearn.gaussian_process import regression_models as regression
from sklearn.gaussian_process import correlation_models as correlation
from sklearn.datasets import make_regression
from sklearn.utils.testing import assert_greater, assert_true, raises
# 1D toy problem shared by the tests below: f(x) = x*sin(x) sampled at X,
# with X2 serving as held-out query points between the training abscissae.
f = lambda x: x * np.sin(x)
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = f(X).ravel()
def test_1d(regr=regression.constant, corr=correlation.squared_exponential,
            random_start=10, beta0=None):
    # MLE estimation of a one-dimensional Gaussian Process model.
    # Check random start optimization.
    # Test the interpolating property.
    model = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                            theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
                            random_start=random_start, verbose=False)
    model.fit(X, y)
    pred_train, mse_train = model.predict(X, eval_MSE=True)
    pred_test, mse_test = model.predict(X2, eval_MSE=True)
    # interpolation at the training points, bounded uncertainty in between
    interpolates = np.allclose(pred_train, y) and np.allclose(mse_train, 0.)
    assert_true(interpolates and np.allclose(mse_test, 0., atol=10))
def test_2d(regr=regression.constant, corr=correlation.squared_exponential,
            random_start=10, beta0=None):
    # MLE estimation of a two-dimensional Gaussian Process model accounting for
    # anisotropy. Check random start optimization.
    # Test the interpolating property.
    b, kappa, e = 5., .5, .1
    # anisotropic quadratic test surface
    g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
    X = np.array([[-4.61611719, -6.00099547],
                  [4.10469096, 5.32782448],
                  [0.00000000, -0.50000000],
                  [-6.17289014, -4.6984743],
                  [1.3109306, -6.93271427],
                  [-5.03823144, 3.10584743],
                  [-2.87600388, 6.74310541],
                  [5.21301203, 4.26386883]])
    y = g(X).ravel()
    thetaL = [1e-4] * 2
    thetaU = [1e-1] * 2
    gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                         theta0=[1e-2] * 2, thetaL=thetaL,
                         thetaU=thetaU,
                         random_start=random_start, verbose=False)
    gp.fit(X, y)
    y_pred, MSE = gp.predict(X, eval_MSE=True)
    assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
    # optimized hyperparameters must respect the supplied bounds (to within eps)
    eps = np.finfo(gp.theta_.dtype).eps
    assert_true(np.all(gp.theta_ >= thetaL - eps))  # Lower bounds of hyperparameters
    assert_true(np.all(gp.theta_ <= thetaU + eps))  # Upper bounds of hyperparameters
def test_2d_2d(regr=regression.constant, corr=correlation.squared_exponential,
               random_start=10, beta0=None):
    # MLE estimation of a two-dimensional Gaussian Process model accounting for
    # anisotropy. Check random start optimization.
    # Test the GP interpolation for 2D output
    b, kappa, e = 5., .5, .1
    g = lambda x: b - x[:, 1] - kappa * (x[:, 0] - e) ** 2.
    # duplicate g into two columns to exercise multi-output regression
    f = lambda x: np.vstack((g(x), g(x))).T
    X = np.array([[-4.61611719, -6.00099547],
                  [4.10469096, 5.32782448],
                  [0.00000000, -0.50000000],
                  [-6.17289014, -4.6984743],
                  [1.3109306, -6.93271427],
                  [-5.03823144, 3.10584743],
                  [-2.87600388, 6.74310541],
                  [5.21301203, 4.26386883]])
    y = f(X)
    gp = GaussianProcess(regr=regr, corr=corr, beta0=beta0,
                         theta0=[1e-2] * 2, thetaL=[1e-4] * 2,
                         thetaU=[1e-1] * 2,
                         random_start=random_start, verbose=False)
    gp.fit(X, y)
    y_pred, MSE = gp.predict(X, eval_MSE=True)
    assert_true(np.allclose(y_pred, y) and np.allclose(MSE, 0.))
@raises(ValueError)
def test_wrong_number_of_outputs():
    # Two samples but three targets: fit() must reject the shape mismatch.
    model = GaussianProcess()
    model.fit([[1, 2, 3], [4, 5, 6]], [1, 2, 3])
def test_more_builtin_correlation_models(random_start=1):
    # Repeat test_1d and test_2d for several built-in correlation
    # models specified as strings.
    for corr_model in ('absolute_exponential', 'squared_exponential',
                       'cubic', 'linear'):
        test_1d(regr='constant', corr=corr_model, random_start=random_start)
        test_2d(regr='constant', corr=corr_model, random_start=random_start)
        test_2d_2d(regr='constant', corr=corr_model, random_start=random_start)
def test_ordinary_kriging():
    # Repeat test_1d and test_2d with given regression weights (beta0) for
    # different regression models (Ordinary Kriging).
    linear_beta_2d = [0., 0.5, 0.5]
    quadratic_beta_2d = [0., 0.5, 0.5, 0.5, 0.5, 0.5]
    test_1d(regr='linear', beta0=[0., 0.5])
    test_1d(regr='quadratic', beta0=[0., 0.5, 0.5])
    test_2d(regr='linear', beta0=linear_beta_2d)
    test_2d(regr='quadratic', beta0=quadratic_beta_2d)
    test_2d_2d(regr='linear', beta0=linear_beta_2d)
    test_2d_2d(regr='quadratic', beta0=quadratic_beta_2d)
def test_no_normalize():
    # Fitting without normalization must still interpolate the training data.
    model = GaussianProcess(normalize=False)
    model.fit(X, y)
    assert_true(np.allclose(model.predict(X), y))
def test_batch_size():
    # Regression test: predict(batch_size=...) raised a TypeError on Python 3, see
    # https://github.com/scikit-learn/scikit-learn/issues/7329 for more
    # details
    model = GaussianProcess()
    model.fit(X, y)
    model.predict(X, batch_size=1)
    model.predict(X, batch_size=1, eval_MSE=True)
def test_random_starts():
    # Test that an increasing number of random-starts of GP fitting only
    # increases the reduced likelihood function of the optimal theta.
    n_samples, n_features = 50, 3
    np.random.seed(0)  # NOTE: redundant with the RandomState below; kept as-is
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features) * 2 - 1
    y = np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)
    best_likelihood = -np.inf
    for random_start in range(1, 5):
        gp = GaussianProcess(regr="constant", corr="squared_exponential",
                             theta0=[1e-0] * n_features,
                             thetaL=[1e-4] * n_features,
                             thetaU=[1e+1] * n_features,
                             random_start=random_start, random_state=0,
                             verbose=False).fit(X, y)
        rlf = gp.reduced_likelihood_function()[0]
        # allow float32-level noise when comparing successive optima
        assert_greater(rlf, best_likelihood - np.finfo(np.float32).eps)
        best_likelihood = rlf
def test_mse_solving():
    # test the MSE estimate to be sane.
    # non-regression test for ignoring off-diagonals of feature covariance,
    # testing with nugget that renders covariance useless, only
    # using the mean function, with low effective rank of data
    gp = GaussianProcess(corr='absolute_exponential', theta0=1e-4,
                         thetaL=1e-12, thetaU=1e-2, nugget=1e-2,
                         optimizer='Welch', regr="linear", random_state=0)
    X, y = make_regression(n_informative=3, n_features=60, noise=50,
                           random_state=0, effective_rank=1)
    gp.fit(X, y)
    # mean MSE over the training set must stay well-bounded
    assert_greater(1000, gp.predict(X, eval_MSE=True)[1].mean())
| bsd-3-clause |
neale/CS-program | 534-MachineLearning2/decision_tree/decision_tree.py | 1 | 5371 | import os
import sys
import operator
import itertools
import matplotlib.pyplot as plt
import numpy as np
import collections
verbose = False
def import_data():
    """Load train and test splits from semicolon-delimited CSV files of floats.

    Returns
    -------
    (X_train, Y_train, X_test, Y_test) : tuple of np.ndarray
        Feature matrices plus label vectors taken from the last column.

    NOTE(review): both splits currently read 'iris_test-1.csv'; the first read
    presumably should point at a training CSV -- confirm the intended file.
    """
    def _load(path):
        # Parse a semicolon-delimited CSV of floats into an ndarray.
        with open(path, 'rb') as f:
            rows = f.readlines()
        for i, line in enumerate(rows):
            rows[i] = [float(n) for n in line.strip('\r\n').split(';')]
        return np.array(rows)

    X_train = _load('iris_test-1.csv')
    Y_train = X_train[:, -1]
    X_test = _load('iris_test-1.csv')
    # Bug fix: test labels must come from X_test, not X_train.
    Y_test = X_test[:, -1]
    return X_train, Y_train, X_test, Y_test
def entropy(X, feature, theta):
    """Information gain from splitting rows of X on `feature` at threshold `theta`.

    Classes are assumed to be labelled 0.0, 1.0, 2.0 in the last column of each
    row. The gain is H(X) - (|pos|/|X|)*H(pos) - (|neg|/|X|)*H(neg), where H is
    Shannon entropy (base 2) and pos/neg are the rows above/at-or-below theta.

    Bug fixes relative to the original:
    - entropy is a SUM of -p*log2(p) terms; the original subtracted the second
      and third class terms, giving wrong (even negative) entropies.
    - empty classes contribute 0 instead of producing nan via log2(0).
    - class partitions use list comprehensions, so len() works on Python 3.
    """
    def _shannon(counts):
        # Shannon entropy (bits) of a list of class counts; empty classes add 0.
        subtotal = float(sum(counts))
        if subtotal == 0:
            return 0.0
        h = 0.0
        for c in counts:
            if c > 0:
                p = c / subtotal
                h += -p * np.log2(p)
        return h

    labels = (0.0, 1.0, 2.0)
    total = float(len(X))
    pos = [0, 0, 0]  # class counts among rows with row[feature] > theta
    neg = [0, 0, 0]  # class counts among the remaining rows
    for row in X:
        side = pos if row[feature] > theta else neg
        for k, label in enumerate(labels):
            if row[-1] == label:
                side[k] += 1
    ptotal = float(sum(pos))
    ntotal = float(sum(neg))
    Hp = _shannon(pos)
    Hn = _shannon(neg)
    # top-level entropy over the unsplit data
    top = [sum(1 for row in X if row[-1] == label) for label in labels]
    Hx = _shannon(top)
    # total information gain: H(top level) minus the weighted split entropies
    return Hx - (ptotal / total) * Hp - (ntotal / total) * Hn
# generalized printing structure for debugging
def pprint(s, priority):
    # Print `s` unconditionally when the module-level `verbose` flag is on;
    # otherwise only print priority-1 (user-facing) messages.
    # NOTE: Python 2 print statements -- this script is Python 2 only.
    if verbose:
        print s
    else:
        if priority == 1:
            print s
def split_data(X, feature, split):
    """Partition rows of X on whether row[feature] exceeds `split`.

    Returns (above, at_or_below) as two lists of rows, preserving order.
    """
    above = [row for row in X if row[feature] > split]
    # `not >` (rather than `<=`) keeps NaN handling identical to the original
    at_or_below = [row for row in X if not row[feature] > split]
    return above, at_or_below
class RandomForest(object):
    """
    Greedy decision-tree builder (named RandomForest, but `n_trees` is unused
    and only a single tree of split tuples is accumulated in self.tests).
    Splits on integer thresholds 1..9 for each feature, choosing the split
    with maximal information gain, and stops when a node holds <= K rows.
    NOTE: contains Python 2 print statements -- Python 2 only.
    """
    def __init__(self, Xtr, Ytr, Xte, Yte, n, k):
        # Training/test splits, requested tree count (unused), and the minimum
        # node size K at which recursion stops.
        self.X_train = Xtr
        self.Y_train = Ytr
        self.X_test = Xte
        self.Y_test = Yte
        self.n_trees = n
        self.tests = []
        self.K = k
    def apply(self):
        # Not implemented.
        pass
    def __build_tree(self, X, dir=None, memo=None):
        # Recursively pick the (feature, threshold) split with maximal gain.
        # NOTE(review): recursive calls return `memo` at leaves and None
        # otherwise, so self.tests collects None entries as well; fit()'s
        # report loop indexes test[1]/test[2] and would fail on those --
        # confirm the intended bookkeeping.
        theta = 0
        splits = []
        gain = []
        n_features = len(X[0]) -1
        print "length: {}".format( len(X) )
        if len(X) <= self.K:
            return memo
        for col in range(n_features):
            # for each feature we need to compute information gain by splitting on some threshold
            gain = []
            for threshold in range(1, 10):
                # save the information gain for each split into a list
                gain.append( ( np.exp ( entropy(X, col, threshold) ), threshold, col ) )
            # save the best split for that feature into another list
            splits.append( max(gain, key=operator.itemgetter(0)) )
            pprint ("gain for feature {} is a split of theta = {} : {}".format(col, splits[-1][1], splits[-1][0]), 0)
        # The chosen split is then the maximum gain from all the features tested
        test = max (splits, key=operator.itemgetter(0))
        feat = test[-1]
        split = test[1]
        # save split into global list of splits that we will use for predict
        pprint ("\nsplit on feature {} with theta of {}".format(feat, split), 1)
        splitL, splitR = split_data(X, feat, split)
        self.tests.append( self.__build_tree(splitL, 'L', test) )
        self.tests.append( self.__build_tree(splitR, 'R', test) )
    def fit(self):
        # Build the tree from the training data and print the chosen splits.
        self.__build_tree(self.X_train)
        pprint("printing decision tree\n", 1)
        for i, test in enumerate(self.tests):
            pprint("{}: Split feature {} on {}".format(i+1, test[1], test[2]), 1)
        pprint ("\nDONE", 1)
    def predict(self):
        # Not implemented.
        pass
    def decision_path(self):
        # Not implemented.
        pass
if __name__ == '__main__':
    # Load the data, build the tree on the training split, then call the
    # (currently stubbed) predict().
    Xtr, Ytr, Xte, Yte = import_data()
    clf = RandomForest(Xtr, Ytr, Xte, Yte, 1, 5)
    clf.fit()
    clf.predict()
| unlicense |
MMTObservatory/mmtwfs | mmtwfs/wfs.py | 1 | 72281 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# coding=utf-8
"""
Classes and utilities for operating the wavefront sensors of the MMTO and analyzing the data they produce
"""
import warnings
import pathlib
import numpy as np
import photutils
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from skimage import feature
from scipy import ndimage, optimize
from scipy.ndimage import rotate
import lmfit
import astropy.units as u
from astropy.io import fits
from astropy.io import ascii
from astropy import stats, visualization, timeseries
from astropy.modeling.models import Gaussian2D, Polynomial2D
from astropy.modeling.fitting import LevMarLSQFitter
from astropy.table import conf as table_conf
from astroscrappy import detect_cosmics
from ccdproc.utils.slices import slice_from_string
from .config import recursive_subclasses, merge_config, mmtwfs_config
from .telescope import TelescopeFactory
from .f9topbox import CompMirror
from .zernike import ZernikeVector, zernike_slopes, cart2pol, pol2cart
from .custom_exceptions import WFSConfigException, WFSAnalysisFailed, WFSCommandException
import logging
import logging.handlers
# Module-level logger for WFS analysis messages.
log = logging.getLogger("WFS")
log.setLevel(logging.INFO)
# Suppress FutureWarnings module-wide and configure astropy table warnings.
warnings.simplefilter(action="ignore", category=FutureWarning)
table_conf.replace_warnings = ['attributes']
__all__ = ['SH_Reference', 'WFS', 'F9', 'NewF9', 'F5', 'Binospec', 'MMIRS', 'WFSFactory', 'wfs_norm', 'check_wfsdata',
'wfsfind', 'grid_spacing', 'center_pupil', 'get_apertures', 'match_apertures', 'aperture_distance', 'fit_apertures',
'get_slopes', 'make_init_pars', 'slope_diff', 'mk_wfs_mask']
def wfs_norm(data, interval=visualization.ZScaleInterval(contrast=0.05), stretch=visualization.LinearStretch()):
    """
    Build the default matplotlib image normalization used for WFS images.
    """
    # NOTE: the default interval/stretch instances are created once at import
    # time and shared across calls (they are only read here).
    return visualization.mpl_normalize.ImageNormalize(data, interval=interval, stretch=stretch)
def check_wfsdata(data, header=False):
    """
    Utility to validate WFS data

    Parameters
    ----------
    data : FITS filename (str or pathlib.Path) or 2D ndarray
        WFS image
    header : bool (default: False)
        If True, also return the FITS header when one was read from file.

    Returns
    -------
    data : 2D np.ndarray
        Validated 2D WFS image
    data, hdr : (2D np.ndarray, FITS header)
        Returned when ``header`` is True and a header was read from a file.

    Raises
    ------
    WFSConfigException
        If the FITS file can't be read or the data is not a 2D image.
    """
    hdr = None
    # Bug fix: pathlib.PosixPath only matches on POSIX systems; pathlib.Path
    # resolves to the correct concrete class on every platform.
    if isinstance(data, (str, pathlib.Path)):
        # we're a fits file (hopefully)
        try:
            with fits.open(data) as h:
                data = h[-1].data  # binospec images put the image data into separate extension so always grab last available.
                if header:
                    hdr = h[-1].header
        except Exception as e:
            msg = "Error reading FITS file, %s (%s)" % (data, repr(e))
            raise WFSConfigException(value=msg)
    if not isinstance(data, np.ndarray):
        msg = "WFS image data in improper format, %s" % type(data)
        raise WFSConfigException(value=msg)
    if len(data.shape) != 2:
        msg = "WFS image data has improper shape, %dD. Must be 2D image." % len(data.shape)
        raise WFSConfigException(value=msg)
    if header and hdr is not None:
        return data, hdr
    else:
        return data
def mk_wfs_mask(data, thresh_factor=50., outfile="wfs_mask.fits"):
    """
    Take a WFS image and mask/scale it so that it can be used as a reference for pupil centering

    Parameters
    ----------
    data : FITS filename or 2D ndarray
        WFS image
    thresh_factor : float (default: 50.)
        Fraction of maximum value below which will be masked to 0.
    outfile : string or None (default: wfs_mask.fits)
        Output FITS file to write the resulting image to; None skips writing.

    Returns
    -------
    scaled : 2D ndarray
        Scaled and masked WFS image
    """
    # Bug fix: check_wfsdata() returns the caller's array when given an
    # ndarray; work on a float copy so the input is never mutated in place.
    data = check_wfsdata(data).astype(float)
    mx = data.max()
    thresh = mx / thresh_factor
    data[data < thresh] = 0.
    scaled = data / mx
    if outfile is not None:
        # NOTE: raises if outfile already exists (fits.writeto default).
        fits.writeto(outfile, scaled)
    return scaled
def wfsfind(data, fwhm=7.0, threshold=5.0, plot=True, ap_radius=5.0, std=None):
    """
    Use photutils.DAOStarFinder() to find and centroid spots in a Shack-Hartmann WFS image.

    Parameters
    ----------
    data : FITS filename or 2D ndarray
        WFS image
    fwhm : float (default: 7.)
        FWHM in pixels of DAOfind convolution kernel
    threshold : float
        DAOfind threshold in units of the standard deviation of the image
    plot: bool
        Toggle plotting of the reference image and overlayed apertures
    ap_radius : float
        Radius of plotted apertures
    std : float or None (default: None)
        Pre-computed image standard deviation; computed via sigma-clipping
        when None.

    Returns
    -------
    sources : `~astropy.table.Table`
        Detected spot positions/photometry from DAOStarFinder
    fig : matplotlib Figure or None
        Overlay plot (None when plot=False)

    Raises
    ------
    WFSAnalysisFailed
        If no spots are detected.
    """
    # data should be background subtracted first...
    data = check_wfsdata(data)
    if std is None:
        mean, median, std = stats.sigma_clipped_stats(data, sigma=3.0, maxiters=5)
    daofind = photutils.DAOStarFinder(fwhm=fwhm, threshold=threshold*std, sharphi=0.95)
    sources = daofind(data)
    if sources is None:
        msg = "WFS spot detection failed or no spots detected."
        raise WFSAnalysisFailed(value=msg)
    # this may be redundant given the above check...
    nsrcs = len(sources)
    if nsrcs == 0:
        msg = "No WFS spots detected."
        raise WFSAnalysisFailed(value=msg)
    # only keep spots more than 1/4 as bright as the max. need this for f/9 especially.
    sources = sources[sources['flux'] > sources['flux'].max()/4.]
    fig = None
    if plot:
        fig, ax = plt.subplots()
        fig.set_label("WFSfind")
        positions = list(zip(sources['xcentroid'], sources['ycentroid']))
        apertures = photutils.CircularAperture(positions, r=ap_radius)
        norm = wfs_norm(data)
        ax.imshow(data, cmap='Greys', origin='lower', norm=norm, interpolation='None')
        apertures.plot(color='red', lw=1.5, alpha=0.5, axes=ax)
    return sources, fig
def grid_spacing(data, apertures):
    """
    Measure the WFS grid spacing which changes with telescope focus.

    Parameters
    ----------
    data : WFS image (FITS or np.ndarray)
    apertures : `~astropy.table.Table`
        WFS aperture data to analyze

    Returns
    -------
    xspacing, yspacing : float, float
        Average grid spacing in X and Y axes
    """
    data = check_wfsdata(data)
    x = np.arange(data.shape[1])
    y = np.arange(data.shape[0])
    # histogram bin edges need one extra entry beyond the pixel grid
    bx = np.arange(data.shape[1]+1)
    by = np.arange(data.shape[0]+1)
    # bin the spot positions along the axes and use Lomb-Scargle to measure the grid spacing in each direction
    xsum = np.histogram(apertures['xcentroid'], bins=bx)
    ysum = np.histogram(apertures['ycentroid'], bins=by)
    k = np.linspace(10.0, 50., 1000)  # look for spacings from 10 to 50 pixels (plenty of range, but not too small to alias)
    f = 1.0 / k  # convert spacing to frequency
    xp = timeseries.LombScargle(x, xsum[0]).power(f)
    yp = timeseries.LombScargle(y, ysum[0]).power(f)
    # the peak of the power spectrum will coincide with the average spacing
    xspacing = k[xp.argmax()]
    yspacing = k[yp.argmax()]
    return xspacing, yspacing
def center_pupil(input_data, pup_mask, threshold=0.8, sigma=10., plot=True):
    """
    Find the center of the pupil in a WFS image using skimage.feature.match_template(). This generates
    a correlation image and we centroid the peak of the correlation to determine the center.

    Parameters
    ----------
    input_data : str or 2D ndarray
        WFS image to analyze, either FITS file or ndarray image data
    pup_mask : str or 2D ndarray
        Pupil model to use in the template matching
    threshold : float (default: 0.8)
        Sets image to 0 where it's below threshold * image.max()
    sigma : float (default: 10.)
        Sigma of gaussian smoothing kernel
    plot : bool
        Toggle plotting of the correlation image

    Returns
    -------
    xp, yp, fig : float, float, matplotlib Figure or None
        X and Y pixel coordinates of the pupil center, plus the optional plot
    """
    data = np.copy(check_wfsdata(input_data))
    pup_mask = check_wfsdata(pup_mask).astype(np.float64)  # need to force float64 here to make scipy >= 1.4 happy...
    # smooth the image to increase the S/N.
    smo = ndimage.gaussian_filter(data, sigma)
    # use skimage.feature.match_template() to do a fast cross-correlation between the WFS image and the pupil model.
    # the location of the peak of the correlation will be the center of the WFS pattern.
    match = feature.match_template(smo, pup_mask, pad_input=True)
    find_thresh = threshold * match.max()
    t = photutils.detection.find_peaks(match, find_thresh, box_size=5, centroid_func=photutils.centroids.centroid_com)
    if t is None:
        msg = "No valid pupil or spot pattern detected."
        raise WFSAnalysisFailed(value=msg)
    peak = t['peak_value'].max()
    xps = []
    yps = []
    # if there are peaks that are very nearly correlated, average their positions
    for p in t:
        if p['peak_value'] >= 0.95*peak:
            xps.append(p['x_centroid'])
            yps.append(p['y_centroid'])
    xp = np.mean(xps)
    yp = np.mean(yps)
    fig = None
    if plot:
        fig, ax = plt.subplots()
        fig.set_label("Pupil Correlation Image (masked)")
        ax.imshow(match, interpolation=None, cmap=cm.magma, origin='lower')
        ax.scatter(xp, yp, marker="+", color="green")
    return xp, yp, fig
def get_apertures(data, apsize, fwhm=5.0, thresh=7.0, plot=True, cen=None):
    """
    Use wfsfind to locate and centroid spots. Measure their S/N ratios and the sigma of a 2D gaussian fit to
    the co-added spot.

    Parameters
    ----------
    data : str or 2D ndarray
        WFS image to analyze, either FITS file or ndarray image data
    apsize : float
        Diameter/width of the SH apertures
    fwhm : float (default: 5.0)
        FWHM in pixels of the spot-finding convolution kernel
    thresh : float (default: 7.0)
        Detection threshold in units of the background standard deviation
    plot : bool (default: True)
        Toggle plotting of the detected spots
    cen : list-like or None (default: None)
        If given, measure background statistics in a 100x100 px box around
        this position instead of over the whole image.

    Returns
    -------
    srcs : astropy.table.Table
        Detected WFS spot positions and properties
    masks : list of photutils.ApertureMask objects
        Masks used for aperture centroiding
    snrs : 1D np.ndarray
        S/N for each located spot
    sigma : float
        Gaussian sigma of the co-added spot profile (seeing proxy)
    wfsfind_fig : matplotlib Figure or None
        Figure produced by wfsfind() when plot=True
    """
    data = check_wfsdata(data)
    # set maxiters to None to let this clip all the way to convergence
    if cen is None:
        mean, median, stddev = stats.sigma_clipped_stats(data, sigma=3.0, maxiters=None)
    else:
        # NOTE(review): xcen is taken from cen[1] and ycen from cen[0]; this
        # looks transposed relative to callers passing cen=(x, y) -- confirm.
        xcen, ycen = int(cen[1]), int(cen[0])
        # Bug fix: the column slice upper bound used ycen instead of xcen,
        # giving a lopsided (possibly empty) statistics box.
        mean, median, stddev = stats.sigma_clipped_stats(data[ycen-50:ycen+50, xcen-50:xcen+50], sigma=3.0, maxiters=None)
    # use wfsfind() and pass it the clipped stddev from here
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        srcs, wfsfind_fig = wfsfind(data, fwhm=fwhm, threshold=thresh, std=stddev, plot=plot)
    # we use circular apertures here because they generate square masks of the appropriate size.
    # rectangular apertures produced masks that were sqrt(2) too large.
    # see https://github.com/astropy/photutils/issues/499 for details.
    apers = photutils.CircularAperture(
        list(zip(srcs['xcentroid'], srcs['ycentroid'])),
        r=apsize/2.
    )
    masks = apers.to_mask(method='subpixel')
    sigma = 0.0
    snrs = []
    if len(masks) >= 1:
        spot = np.zeros(masks[0].shape)
        for m in masks:
            subim = m.cutout(data)
            # make co-added spot image for use in calculating the seeing
            if subim.shape == spot.shape:
                spot += subim
            signal = subim.sum()
            noise = np.sqrt(stddev**2 * subim.shape[0] * subim.shape[1])
            snr = signal / noise
            snrs.append(snr)
        snrs = np.array(snrs)
        # set up 2D gaussian model plus constant background to fit to the coadded spot
        with warnings.catch_warnings():
            # ignore astropy warnings about issues with the fit...
            warnings.simplefilter("ignore")
            g2d = Gaussian2D(amplitude=spot.max(), x_mean=spot.shape[1]/2, y_mean=spot.shape[0]/2)
            p2d = Polynomial2D(degree=0)
            model = g2d + p2d
            fitter = LevMarLSQFitter()
            y, x = np.mgrid[:spot.shape[0], :spot.shape[1]]
            fit = fitter(model, x, y, spot)
            sigma = 0.5 * (fit.x_stddev_0.value + fit.y_stddev_0.value)
    return srcs, masks, snrs, sigma, wfsfind_fig
def match_apertures(refx, refy, spotx, spoty, max_dist=25.):
    """
    Associate each reference aperture with its nearest measured spot.

    A spot is accepted only when it lies within max_dist of the reference
    position and has not already been claimed by an earlier reference.
    Returns a boolean mask over the references and the integer indices of
    the matched spots.
    """
    spot_x = np.asarray(spotx)
    spot_y = np.asarray(spoty)
    n_ref = len(refx)
    match = np.full(n_ref, np.nan)
    claimed = set()
    for i in range(n_ref):
        sep = np.sqrt((spot_x - refx[i])**2 + (spot_y - refy[i])**2)
        nearest = np.argmin(sep)
        if sep[nearest] < max_dist and nearest not in claimed:
            match[i] = nearest
            claimed.add(nearest)
    ref_mask = ~np.isnan(match)
    src_mask = match[ref_mask].astype(int)
    return ref_mask, src_mask
def aperture_distance(refx, refy, spotx, spoty):
    """
    Log of the summed distances between each reference aperture and its
    closest measured spot. This is the statistic minimized when registering
    the reference aperture grid to the data.
    """
    sx = np.asarray(spotx, dtype=float)
    sy = np.asarray(spoty, dtype=float)
    total = 0.0
    for rx, ry in zip(refx, refy):
        sep = np.sqrt((sx - rx)**2 + (sy - ry)**2)
        total += sep.min()
    return np.log(total)
def fit_apertures(pars, ref, spots):
    """
    Objective function for registering the reference aperture grid to the
    measured spots.

    The reference positions are transformed by the fit parameters and the
    cumulative nearest-neighbor distance to the spots is returned.

    Parameters
    ----------
    pars : list-like of 5 elements
        (xc, yc, scale, xcoma, ycoma): center offsets, grid magnification
        (focus), and linear magnification gradients in x/y (coma).
    ref : dict-like
        Reference aperture positions under 'xcentroid'/'ycentroid' keys.
    spots : dict-like
        Measured spot positions under 'xcentroid'/'ycentroid' keys.

    Returns
    -------
    dist : float
        Cumulative matched distance (log scale, via aperture_distance).
    """
    xc, yc, scale, xcoma, ycoma = pars[0], pars[1], pars[2], pars[3], pars[4]
    ref_x = ref['xcentroid']
    ref_y = ref['ycentroid']
    # magnification varies linearly with position to model coma
    model_x = ref_x * (scale + ref_x * xcoma) + xc
    model_y = ref_y * (scale + ref_y * ycoma) + yc
    return aperture_distance(model_x, model_y, spots['xcentroid'], spots['ycentroid'])
def get_slopes(data, ref, pup_mask, fwhm=7., thresh=5., cen=[255, 255],
               cen_thresh=0.8, cen_sigma=10., cen_tol=50., spot_snr_thresh=3.0, plot=True):
    """
    Analyze a WFS image and produce pixel offsets between reference and observed spot positions.

    Parameters
    ----------
    data : str or 2D np.ndarray
        FITS file or np.ndarray containing WFS observation
    ref : `~astropy.table.Table`
        Table of reference apertures
    pup_mask : str or 2D np.ndarray
        FITS file or np.ndarray containing mask used to register WFS spot pattern via cross-correlation
    fwhm : float (default: 7.0)
        FWHM of convolution kernel applied to image by the spot finding algorithm
    thresh : float (default: 5.0)
        Number of sigma above background for a spot to be considered detected
    cen : list-like with 2 elements (default: [255, 255])
        Expected position of the center of the WFS spot pattern in form [X_cen, Y_cen]
    cen_thresh : float (default: 0.8)
        Masking threshold as fraction of peak value used in `~photutils.detection.find_peaks`
    cen_sigma : float (default: 10.0)
        Width of gaussian filter applied to image by `~mmtwfs.wfs.center_pupil`
    cen_tol : float (default: 50.0)
        Tolerance for difference between expected and measureed pupil center
    spot_snr_thresh : float (default: 3.0)
        S/N tolerance for a WFS spot to be considered valid for analysis
    plot : bool
        Toggle plotting of image with aperture overlays

    Returns
    -------
    results : dict
        Results of the wavefront slopes measurement packaged into a dict with the following keys:
            slopes - mask np.ndarry containing the slope values in pixel units
            pup_coords - pupil coordinates for the position for each slope value
            spots - `~astropy.table.Table` as returned by photutils star finder routines
            src_aps - `~photutils.aperture.CircularAperture` for each detected spot
            spacing - list-like of form (xspacing, yspacing) containing the mean spacing between rows and columns of spots
            center - list-like of form (xcen, ycen) containing the center of the spot pattern
            ref_mask - np.ndarray of matched spots in reference image
            src_mask - np.ndarray of matched spots in the data image
            spot_sigma - sigma of a gaussian fit to a co-addition of detected spots
            figures - dict of figures that are optionally produced
            grid_fit - dict of best-fit parameters of grid fit used to do fine registration between source and reference spots
    """
    # NOTE: the mutable default for `cen` is only read, never mutated, so it is safe here.
    data = check_wfsdata(data)
    pup_mask = check_wfsdata(pup_mask)
    if ref.pup_outer is None:
        raise WFSConfigException("No pupil information applied to SH reference.")
    pup_outer = ref.pup_outer
    pup_inner = ref.pup_inner
    # input data should be background subtracted for best results. this initial guess of the center positions
    # will be good enough to get the central obscuration, but will need to be fine-tuned for aperture association.
    xcen, ycen, pupcen_fig = center_pupil(data, pup_mask, threshold=cen_thresh, sigma=cen_sigma, plot=plot)
    if np.hypot(xcen-cen[0], ycen-cen[1]) > cen_tol:
        msg = f"Measured pupil center [{round(xcen)}, {round(ycen)}] more than {cen_tol} pixels from {cen}."
        raise WFSAnalysisFailed(value=msg)
    # using the mean spacing is straightforward for square apertures and a reasonable underestimate for hexagonal ones
    ref_spacing = np.mean([ref.xspacing, ref.yspacing])
    apsize = ref_spacing
    # stage 1: detect and centroid the spots
    srcs, masks, snrs, sigma, wfsfind_fig = get_apertures(data, apsize, fwhm=fwhm, thresh=thresh, cen=(xcen, ycen))
    # ignore low S/N spots
    srcs = srcs[snrs > spot_snr_thresh]
    # get grid spacing of the data
    xspacing, yspacing = grid_spacing(data, srcs)
    # find the scale difference between data and ref and use as init
    init_scale = (xspacing/ref.xspacing + yspacing/ref.yspacing) / 2.
    # apply masking to detected sources to avoid partially illuminated apertures at the edges
    srcs['dist'] = np.sqrt((srcs['xcentroid'] - xcen)**2 + (srcs['ycentroid'] - ycen)**2)
    srcs = srcs[(srcs['dist'] > pup_inner*init_scale) & (srcs['dist'] < pup_outer*init_scale)]
    # if we don't detect spots in at least half of the reference apertures, we can't usually get a good wavefront measurement
    if len(srcs) < 0.5 * len(ref.masked_apertures['xcentroid']):
        msg = "Only %d spots detected out of %d apertures." % (len(srcs), len(ref.masked_apertures['xcentroid']))
        raise WFSAnalysisFailed(value=msg)
    src_aps = photutils.CircularAperture(
        list(zip(srcs['xcentroid'], srcs['ycentroid'])),
        r=apsize/2.
    )
    # stage 2: fine registration of the reference grid onto the spots.
    # set up to do a fit of the reference apertures to the spot positions with the center, scaling, and position-dependent
    # scaling (coma) as free parameters
    args = (ref.masked_apertures, srcs)
    par_keys = ('xcen', 'ycen', 'scale', 'xcoma', 'ycoma')
    pars = (xcen, ycen, init_scale, 0.0, 0.0)
    coma_bound = 1e-4  # keep coma constrained by now since it can cause trouble
    # scipy.optimize.minimize can do bounded minimization so leverage that to keep the solution within a reasonable range.
    bounds = (
        (xcen-15, xcen+15),  # hopefully we're not too far off from true center...
        (ycen-15, ycen+15),
        (init_scale-0.05, init_scale+0.05),  # reasonable range of expected focus difference...
        (-coma_bound, coma_bound),
        (-coma_bound, coma_bound)
    )
    try:
        min_results = optimize.minimize(fit_apertures, pars, args=args, bounds=bounds, options={'ftol': 1e-13, 'gtol': 1e-7})
    except Exception as e:
        msg = f"Aperture grid matching failed: {e}"
        raise WFSAnalysisFailed(value=msg)
    fit_results = {}
    for i, k in enumerate(par_keys):
        fit_results[k] = min_results['x'][i]
    # this is more reliably the center of the actual pupil image whereas fit_results shifts a bit depending on detected spots.
    # the lenslet pattern can move around a bit on the pupil, but we need the center of the pupil to calculate their pupil
    # coordinates.
    pup_center = [xcen, ycen]
    scale = fit_results['scale']
    xcoma, ycoma = fit_results['xcoma'], fit_results['ycoma']
    refx = ref.masked_apertures['xcentroid'] * (scale + ref.masked_apertures['xcentroid'] * xcoma) + fit_results['xcen']
    refy = ref.masked_apertures['ycentroid'] * (scale + ref.masked_apertures['ycentroid'] * ycoma) + fit_results['ycen']
    xspacing = scale * ref.xspacing
    yspacing = scale * ref.yspacing
    # stage 3: associate spots with apertures and compute the slopes.
    # coarse match reference apertures to spots
    spacing = np.max([xspacing, yspacing])
    ref_mask, src_mask = match_apertures(refx, refy, srcs['xcentroid'], srcs['ycentroid'], max_dist=spacing/2.)
    # these are unscaled so that the slope includes defocus
    trim_refx = ref.masked_apertures['xcentroid'][ref_mask] + fit_results['xcen']
    trim_refy = ref.masked_apertures['ycentroid'][ref_mask] + fit_results['ycen']
    ref_aps = photutils.CircularAperture(
        list(zip(trim_refx, trim_refy)),
        r=ref_spacing/2.
    )
    slope_x = srcs['xcentroid'][src_mask] - trim_refx
    slope_y = srcs['ycentroid'][src_mask] - trim_refy
    pup_coords = (ref_aps.positions - pup_center) / [pup_outer, pup_outer]
    aps_fig = None
    if plot:
        norm = wfs_norm(data)
        aps_fig, ax = plt.subplots()
        aps_fig.set_label("Aperture Positions")
        ax.imshow(data, cmap='Greys', origin='lower', norm=norm, interpolation='None')
        ax.scatter(pup_center[0], pup_center[1])
        src_aps.plot(color='blue', axes=ax)
    # need full slopes array the size of the complete set of reference apertures and pre-filled with np.nan for masking
    slopes = np.nan * np.ones((2, len(ref.masked_apertures['xcentroid'])))
    slopes[0][ref_mask] = slope_x
    slopes[1][ref_mask] = slope_y
    figures = {}
    figures['pupil_center'] = pupcen_fig
    figures['slopes'] = aps_fig
    results = {
        "slopes": np.ma.masked_invalid(slopes),
        "pup_coords": pup_coords.transpose(),
        "spots": srcs,
        "src_aps": src_aps,
        "spacing": (xspacing, yspacing),
        "center": pup_center,
        "ref_mask": ref_mask,
        "src_mask": src_mask,
        "spot_sigma": sigma,
        "figures": figures,
        "grid_fit": fit_results
    }
    return results
def make_init_pars(nmodes=21, modestart=2, init_zv=None):
    """
    Make a set of initial parameters that can be used with `~lmfit.minimize` to make a wavefront fit with
    parameter names that are compatible with ZernikeVectors.

    Parameters
    ----------
    nmodes: int (default: 21)
        Number of Zernike modes to fit.
    modestart: int (default: 2)
        First Zernike mode to be used.
    init_zv: ZernikeVector (default: None)
        ZernikeVector containing initial values for the fit.

    Returns
    -------
    params: `~lmfit.Parameters` instance
        Initial parameters in form that can be passed to `~lmfit.minimize`.
    """
    pars = []
    for i in range(modestart, modestart+nmodes, 1):
        key = "Z{:02d}".format(i)
        if init_zv is not None:
            val = init_zv[key].value
            # Snap only amplitudes indistinguishable from zero to exactly 0.0.
            # Use abs() so that legitimate negative starting coefficients are
            # preserved (a bare 'val < 2*eps' would zero out every negative value).
            if abs(val) < 2. * np.finfo(float).eps:
                val = 0.0
        else:
            val = 0.0
        zpar = (key, val)
        pars.append(zpar)
    params = lmfit.Parameters()
    params.add_many(*pars)
    return params
def slope_diff(pars, coords, slopes, norm=False):
    """
    Residual function for `~lmfit.minimize`: the per-aperture "distance" between
    measured wavefront slopes and the slopes predicted by the current Zernike
    parameters. lmfit expects the sqrt'ed residual rather than a chi-squared.
    """
    zdict = pars.valuesdict()
    rho, phi = cart2pol(coords)
    measured_x, measured_y = slopes[0], slopes[1]
    predicted_x, predicted_y = zernike_slopes(zdict, rho, phi, norm=norm)
    residual = np.sqrt((measured_x - predicted_x)**2 + (measured_y - predicted_y)**2)
    return residual
class SH_Reference(object):
    """
    Class to handle Shack-Hartmann reference data
    """
    def __init__(self, data, fwhm=4.5, threshold=20.0, plot=True):
        """
        Read WFS reference image and generate reference magnifications (i.e. grid spacing) and
        aperture positions.

        Parameters
        ----------
        data : FITS filename or 2D ndarray
            WFS reference image
        fwhm : float
            FWHM in pixels of DAOfind convolution kernel
        threshold : float
            DAOfind threshold in units of the standard deviation of the image
        plot : bool
            Toggle plotting of the reference image and overlayed apertures
        """
        self.data = check_wfsdata(data)
        # Work on the validated array rather than the raw argument so that a FITS
        # filename passed as 'data' (loaded by check_wfsdata) works as documented.
        # Median subtraction removes the background pedestal before spot finding.
        data = self.data - np.median(self.data)
        self.apertures, self.figure = wfsfind(data, fwhm=fwhm, threshold=threshold, plot=plot)
        if plot:
            self.figure.set_label("Reference Image")
        # centroid of the full spot pattern; used as the pattern origin below
        self.xcen = self.apertures['xcentroid'].mean()
        self.ycen = self.apertures['ycentroid'].mean()
        self.xspacing, self.yspacing = grid_spacing(data, self.apertures)
        # make masks for each reference spot and fit a 2D gaussian to get its FWHM. the reference FWHM is subtracted in
        # quadrature from the observed FWHM when calculating the seeing.
        apsize = np.mean([self.xspacing, self.yspacing])
        apers = photutils.CircularAperture(
            list(zip(self.apertures['xcentroid'], self.apertures['ycentroid'])),
            r=apsize/2.
        )
        masks = apers.to_mask(method='subpixel')
        self.photapers = apers
        self.spot = np.zeros(masks[0].shape)
        for m in masks:
            subim = m.cutout(data)
            # make co-added spot image for use in calculating the seeing.
            # edge apertures can yield truncated cutouts; skip those.
            if subim.shape == self.spot.shape:
                self.spot += subim
        # store aperture positions relative to the pattern centroid
        self.apertures['xcentroid'] -= self.xcen
        self.apertures['ycentroid'] -= self.ycen
        self.apertures['dist'] = np.sqrt(self.apertures['xcentroid']**2 + self.apertures['ycentroid']**2)
        # until apply_pupil() is called, all apertures are considered unmasked
        self.masked_apertures = self.apertures
        self.pup_inner = None
        self.pup_outer = None

    def adjust_center(self, x, y):
        """
        Adjust reference center to new x, y position.
        """
        # shift back to absolute coordinates, then re-center on the new origin
        self.apertures['xcentroid'] += self.xcen
        self.apertures['ycentroid'] += self.ycen
        self.apertures['xcentroid'] -= x
        self.apertures['ycentroid'] -= y
        self.apertures['dist'] = np.sqrt(self.apertures['xcentroid']**2 + self.apertures['ycentroid']**2)
        self.xcen = x
        self.ycen = y
        # re-apply the pupil mask since aperture distances have changed
        self.apply_pupil(self.pup_inner, self.pup_outer)

    def apply_pupil(self, pup_inner, pup_outer):
        """
        Apply a pupil mask to the reference apertures
        """
        # no-op when the pupil annulus is not yet defined (both radii None)
        if pup_inner is not None and pup_outer is not None:
            self.masked_apertures = self.apertures[(self.apertures['dist'] > pup_inner) & (self.apertures['dist'] < pup_outer)]
            self.pup_inner = pup_inner
            self.pup_outer = pup_outer

    def pup_coords(self, pup_outer):
        """
        Take outer radius of pupil and calculate pupil coordinates for the masked apertures
        """
        coords = (self.masked_apertures['xcentroid']/pup_outer, self.masked_apertures['ycentroid']/pup_outer)
        return coords
def WFSFactory(wfs="f5", config=None, **kwargs):
    """
    Build and return proper WFS sub-class instance based on the value of 'wfs'.

    Parameters
    ----------
    wfs : str (default: "f5")
        Name of the WFS subclass to instantiate (matched case-insensitively).
    config : dict or None (default: None)
        Configuration to merge with any extra keyword arguments. None is
        treated as an empty configuration.
    **kwargs
        Additional configuration items merged on top of 'config'.

    Returns
    -------
    Instance of the matching `WFS` subclass.

    Raises
    ------
    WFSConfigException
        If 'wfs' does not name a known WFS subclass.
    """
    # avoid a shared mutable default argument; treat None as an empty config
    if config is None:
        config = {}
    config = merge_config(config, dict(**kwargs))
    wfs = wfs.lower()
    # map lower-cased subclass names to the classes themselves
    types = recursive_subclasses(WFS)
    wfs_map = {t.__name__.lower(): t for t in types}
    if wfs not in wfs_map:
        raise WFSConfigException(value="Specified WFS, %s, not valid or not implemented." % wfs)
    plot = config.get('plot', True)
    wfs_cls = wfs_map[wfs](config=config, plot=plot)
    return wfs_cls
class WFS(object):
    """
    Defines configuration pattern and methods common to all WFS systems
    """
    def __init__(self, config={}, plot=True, **kwargs):
        # NOTE(review): mutable default 'config={}' is shared between calls; this is
        # safe only if merge_config() never mutates its arguments -- confirm.
        key = self.__class__.__name__.lower()
        # merge per-system defaults from mmtwfs_config with the user config and hang
        # the merged keys directly off the instance as attributes
        self.__dict__.update(merge_config(mmtwfs_config['wfs'][key], config))
        self.telescope = TelescopeFactory(telescope=self.telescope, secondary=self.secondary)
        self.secondary = self.telescope.secondary
        self.plot = plot
        self.connected = False
        self.ref_fwhm = self.ref_spot_fwhm()
        # this factor calibrates spot motion in pixels to nm of wavefront error
        self.tiltfactor = self.telescope.nmperasec * (self.pix_size.to(u.arcsec).value)
        # if this is the same for all modes, load it once here
        if hasattr(self, "reference_file"):
            refdata, hdr = check_wfsdata(self.reference_file, header=True)
            refdata = self.trim_overscan(refdata, hdr)
            reference = SH_Reference(refdata, plot=self.plot)
        # now assign 'reference' for each mode so that it can be accessed consistently in all cases
        for mode in self.modes:
            if 'reference_file' in self.modes[mode]:
                refdata, hdr = check_wfsdata(self.modes[mode]['reference_file'], header=True)
                refdata = self.trim_overscan(refdata, hdr)
                self.modes[mode]['reference'] = SH_Reference(
                    refdata,
                    plot=self.plot
                )
            else:
                self.modes[mode]['reference'] = reference
    def ref_spot_fwhm(self):
        """
        Calculate the Airy FWHM in pixels of a perfect WFS spot from the optical prescription and detector pixel size
        """
        # diffraction-limited angular FWHM of a single lenslet aperture
        theta_fwhm = 1.028 * self.eff_wave / self.lenslet_pitch
        # project through the lenslet focal length onto the detector
        det_fwhm = np.arctan(theta_fwhm).value * self.lenslet_fl
        det_fwhm_pix = det_fwhm.to(u.um).value / self.pix_um.to(u.um).value
        return det_fwhm_pix
    def get_flipud(self, mode=None):
        """
        Determine if the WFS image needs to be flipped up/down.
        Base implementation never flips; subclasses override as needed.
        """
        return False
    def get_fliplr(self, mode=None):
        """
        Determine if the WFS image needs to be flipped left/right.
        Base implementation never flips; subclasses override as needed.
        """
        return False
    def ref_pupil_location(self, mode, hdr=None):
        """
        Get the center of the pupil on the reference image
        """
        ref = self.modes[mode]['reference']
        x = ref.xcen
        y = ref.ycen
        return x, y
    def seeing(self, mode, sigma, airmass=None):
        """
        Given a sigma derived from a gaussian fit to a WFS spot, deconvolve the systematic width from the reference image
        and relate the remainder to r_0 and thus a seeing FWHM.

        Returns (seeing, raw_seeing): zenith-corrected and uncorrected FWHM as
        `~astropy.units.Quantity` in arcsec. Both are 0 arcsec if the measured
        width is narrower than the reference width.
        """
        # the effective wavelength of the WFS imagers is about 600-700 nm. mmirs and the oldf9 system use blue-blocking filters
        wave = self.eff_wave
        wave = wave.to(u.m).value  # r_0 equation expects meters so convert
        refwave = 500 * u.nm  # standard wavelength that seeing values are referenced to
        refwave = refwave.to(u.m).value
        # calculate the physical size of each aperture.
        ref = self.modes[mode]['reference']
        apsize_pix = np.max((ref.xspacing, ref.yspacing))
        d = self.telescope.diameter * apsize_pix / self.pup_size
        d = d.to(u.m).value  # r_0 equation expects meters so convert
        # we need to deconvolve the instrumental spot width from the measured one to get the portion of the width that
        # is due to spot motion
        ref_sigma = stats.funcs.gaussian_fwhm_to_sigma * self.ref_fwhm
        if sigma > ref_sigma:
            corr_sigma = np.sqrt(sigma**2 - ref_sigma**2)
        else:
            # measured width narrower than the instrumental width; no seeing info
            return 0.0 * u.arcsec, 0.0 * u.arcsec
        corr_sigma *= self.pix_size.to(u.rad).value  # r_0 equation expects radians so convert
        # this equation relates the motion within a single aperture to the characteristic scale size of the
        # turbulence, r_0.
        r_0 = (0.179 * (wave**2) * (d**(-1/3))/corr_sigma**2)**0.6
        # this equation relates the turbulence scale size to an expected image FWHM at the given wavelength.
        raw_seeing = u.Quantity(u.rad * 0.98 * wave / r_0, u.arcsec)
        # seeing scales as lambda^-1/5 so calculate factor to scale to reference lambda
        wave_corr = refwave**-0.2 / wave**-0.2
        raw_seeing *= wave_corr
        # correct seeing to zenith
        if airmass is not None:
            seeing = raw_seeing / airmass**0.6
        else:
            seeing = raw_seeing
        return seeing, raw_seeing
    def pupil_mask(self, hdr=None):
        """
        Load and return the WFS spot mask used to locate and register the pupil
        """
        pup_mask = check_wfsdata(self.wfs_mask)
        return pup_mask
    def reference_aberrations(self, mode, **kwargs):
        """
        Create reference ZernikeVector for 'mode'.
        """
        z = ZernikeVector(**self.modes[mode]['ref_zern'])
        return z
    def get_mode(self, hdr):
        """
        If mode is not specified, either set it to the default mode or figure out the mode from the header.
        Base implementation always returns the configured default; subclasses may
        inspect 'hdr' to pick a mode.
        """
        mode = self.default_mode
        return mode
    def process_image(self, fitsfile):
        """
        Process the image to make it suitable for accurate wavefront analysis. Steps include nuking cosmic rays,
        subtracting background, handling overscan regions, etc.
        """
        rawdata, hdr = check_wfsdata(fitsfile, header=True)
        trimdata = self.trim_overscan(rawdata, hdr=hdr)
        # MMIRS gets a lot of hot pixels/CRs so make a quick pass to nuke them
        cr_mask, data = detect_cosmics(trimdata, sigclip=5., niter=5, cleantype='medmask', psffwhm=5.)
        # calculate the background and subtract it
        bkg_estimator = photutils.ModeEstimatorBackground()
        # mask detected sources so they don't bias the background estimate
        mask = photutils.make_source_mask(data, nsigma=2, npixels=5, dilate_size=11)
        bkg = photutils.Background2D(data, (10, 10), filter_size=(5, 5), bkg_estimator=bkg_estimator, mask=mask)
        data -= bkg.background
        return data, hdr
    def trim_overscan(self, data, hdr=None):
        """
        Use the DATASEC in the header to determine the region to trim out. If no header provided or if the header
        doesn't contain DATASEC, return data unchanged.
        """
        if hdr is None:
            return data
        if 'DATASEC' not in hdr:
            # if no DATASEC in header, punt and return unchanged
            return data
        datasec = slice_from_string(hdr['DATASEC'], fits_convention=True)
        return data[datasec]
    def measure_slopes(self, fitsfile, mode=None, plot=True, flipud=False, fliplr=False):
        """
        Take a WFS image in FITS format, perform background subtration, pupil centration, and then use get_slopes()
        to perform the aperture placement and spot centroiding.

        Returns a dict of measurement results; on a failed slope measurement the
        dict contains 'slopes': None plus the mode and any diagnostic figure.
        """
        data, hdr = self.process_image(fitsfile)
        plot = plot and self.plot
        # flip data up/down if we need to. only binospec needs to currently.
        if flipud or self.get_flipud(mode=mode):
            data = np.flipud(data)
        # flip left/right if we need to. no mode currently does, but who knows what the future holds.
        if fliplr or self.get_fliplr(mode=mode):
            data = np.fliplr(data)
        if mode is None:
            mode = self.get_mode(hdr)
        if mode not in self.modes:
            msg = "Invalid mode, %s, for WFS system, %s." % (mode, self.__class__.__name__)
            raise WFSConfigException(value=msg)
        # if available, get the rotator angle out of the header
        if 'ROT' in hdr:
            rotator = hdr['ROT'] * u.deg
        else:
            rotator = 0.0 * u.deg
        # if there's a ROTOFF in the image header, grab it and adjust the rotator angle accordingly
        if 'ROTOFF' in hdr:
            rotator -= hdr['ROTOFF'] * u.deg
        # make mask for finding wfs spot pattern
        pup_mask = self.pupil_mask(hdr=hdr)
        # get adjusted reference center position and update the reference
        xcen, ycen = self.ref_pupil_location(mode, hdr=hdr)
        self.modes[mode]['reference'].adjust_center(xcen, ycen)
        # apply pupil to the reference
        self.modes[mode]['reference'].apply_pupil(self.pup_inner, self.pup_size/2.)
        ref_zv = self.reference_aberrations(mode, hdr=hdr)
        zref = ref_zv.array
        # pad the reference Zernike array with zeros out to the number of fitted modes
        if len(zref) < self.nzern:
            pad = np.zeros(self.nzern - len(zref))
            zref = np.hstack((zref, pad))
        try:
            slope_results = get_slopes(
                data,
                self.modes[mode]['reference'],
                pup_mask,
                fwhm=self.find_fwhm,
                thresh=self.find_thresh,
                cen=self.cor_coords,
                cen_thresh=self.cen_thresh,
                cen_sigma=self.cen_sigma,
                cen_tol=self.cen_tol,
                plot=plot
            )
            slopes = slope_results['slopes']
            coords = slope_results['pup_coords']
            ref_pup_coords = self.modes[mode]['reference'].pup_coords(self.pup_size/2.)
            rho, phi = cart2pol(ref_pup_coords)
            # reference slopes in pixels; negative sign converts wavefront error back to spot motion
            ref_slopes = -(1. / self.tiltfactor) * np.array(zernike_slopes(ref_zv, rho, phi))
            aps = slope_results['src_aps']
            ref_mask = slope_results['ref_mask']
            src_mask = slope_results['src_mask']
            figures = slope_results['figures']
        except WFSAnalysisFailed as e:
            # expected failure mode (e.g. no spots found): return a minimal result dict
            log.warning(f"Wavefront slope measurement failed: {e}")
            slope_fig = None
            if plot:
                slope_fig, ax = plt.subplots()
                slope_fig.set_label("WFS Image")
                norm = wfs_norm(data)
                ax.imshow(data, cmap='Greys', origin='lower', norm=norm, interpolation='None')
            results = {}
            results['slopes'] = None
            results['figures'] = {}
            results['mode'] = mode
            results['figures']['slopes'] = slope_fig
            return results
        except Exception as e:
            # wrap any unexpected error so callers only need to catch WFSAnalysisFailed
            raise WFSAnalysisFailed(value=str(e))
        # use the average width of the spots to estimate the seeing and use the airmass to extrapolate to zenith seeing
        if 'AIRMASS' in hdr:
            airmass = hdr['AIRMASS']
        else:
            airmass = None
        seeing, raw_seeing = self.seeing(mode=mode, sigma=slope_results['spot_sigma'], airmass=airmass)
        if plot:
            # overlay measured-minus-reference spot motions as a quiver plot
            sub_slopes = slopes - ref_slopes
            x = aps.positions.transpose()[0][src_mask]
            y = aps.positions.transpose()[1][src_mask]
            uu = sub_slopes[0][ref_mask]
            vv = sub_slopes[1][ref_mask]
            norm = wfs_norm(data)
            figures['slopes'].set_label("Aperture Positions and Spot Movement")
            ax = figures['slopes'].axes[0]
            ax.imshow(data, cmap='Greys', origin='lower', norm=norm, interpolation='None')
            aps.plot(color='blue', axes=ax)
            ax.quiver(x, y, uu, vv, scale_units='xy', scale=0.2, pivot='tip', color='red')
            # 1 arcsec scale bar for the quiver plot
            xl = [0.1*data.shape[1]]
            yl = [0.95*data.shape[0]]
            ul = [1.0/self.pix_size.value]
            vl = [0.0]
            ax.quiver(xl, yl, ul, vl, scale_units='xy', scale=0.2, pivot='tip', color='red')
            ax.scatter([slope_results['center'][0]], [slope_results['center'][1]])
            ax.text(0.12*data.shape[1], 0.95*data.shape[0], "1{0:unicode}".format(u.arcsec), verticalalignment='center')
            ax.set_title("Seeing: %.2f\" (%.2f\" @ zenith)" % (raw_seeing.value, seeing.value))
        results = {}
        results['seeing'] = seeing
        results['raw_seeing'] = raw_seeing
        results['slopes'] = slopes
        results['ref_slopes'] = ref_slopes
        results['ref_zv'] = ref_zv
        results['spots'] = slope_results['spots']
        results['pup_coords'] = coords
        results['ref_pup_coords'] = ref_pup_coords
        results['apertures'] = aps
        results['xspacing'] = slope_results['spacing'][0]
        results['yspacing'] = slope_results['spacing'][1]
        results['xcen'] = slope_results['center'][0]
        results['ycen'] = slope_results['center'][1]
        results['pup_mask'] = pup_mask
        results['data'] = data
        results['header'] = hdr
        results['rotator'] = rotator
        results['mode'] = mode
        results['ref_mask'] = ref_mask
        results['src_mask'] = src_mask
        results['fwhm'] = stats.funcs.gaussian_sigma_to_fwhm * slope_results['spot_sigma']
        results['figures'] = figures
        results['grid_fit'] = slope_results['grid_fit']
        return results
    def fit_wavefront(self, slope_results, plot=True):
        """
        Use results from self.measure_slopes() to fit a set of zernike polynomials to the wavefront shape.

        Returns a dict of fit results, or None if slope_results contains no slopes.
        """
        plot = plot and self.plot
        if slope_results['slopes'] is not None:
            results = {}
            # convert slopes from pixels to nm of wavefront error
            slopes = -self.tiltfactor * slope_results['slopes']
            coords = slope_results['ref_pup_coords']
            rho, phi = cart2pol(coords)
            zref = slope_results['ref_zv']
            params = make_init_pars(nmodes=self.nzern, init_zv=zref)
            results['fit_report'] = lmfit.minimize(slope_diff, params, args=(coords, slopes))
            zfit = ZernikeVector(coeffs=results['fit_report'])
            results['raw_zernike'] = zfit
            # derotate the zernike solution to match the primary mirror coordinate system
            total_rotation = self.rotation - slope_results['rotator']
            zv_rot = ZernikeVector(coeffs=results['fit_report'])
            zv_rot.rotate(angle=-total_rotation)
            results['rot_zernike'] = zv_rot
            # subtract the reference aberrations
            zsub = zv_rot - zref
            results['ref_zernike'] = zref
            results['zernike'] = zsub
            # residuals between the fitted model and the measured slopes
            pred_slopes = np.array(zernike_slopes(zfit, rho, phi))
            diff = slopes - pred_slopes
            diff_pix = diff / self.tiltfactor
            rms = np.sqrt((diff[0]**2 + diff[1]**2).mean())
            results['residual_rms_asec'] = rms / self.telescope.nmperasec * u.arcsec
            results['residual_rms'] = rms * zsub.units
            results['zernike_rms'] = zsub.rms
            results['zernike_p2v'] = zsub.peak2valley
            fig = None
            if plot:
                ref_mask = slope_results['ref_mask']
                src_mask = slope_results['src_mask']
                im = slope_results['data']
                gnorm = wfs_norm(im)
                fig, ax = plt.subplots()
                fig.set_label("Zernike Fit Residuals")
                ax.imshow(im, cmap='Greys', origin='lower', norm=gnorm, interpolation='None')
                x = slope_results['apertures'].positions.transpose()[0][src_mask]
                y = slope_results['apertures'].positions.transpose()[1][src_mask]
                ax.quiver(x, y, diff_pix[0][ref_mask], diff_pix[1][ref_mask], scale_units='xy',
                          scale=0.05, pivot='tip', color='red')
                # 0.2 arcsec scale bar for the residual quiver plot
                xl = [0.1*im.shape[1]]
                yl = [0.95*im.shape[0]]
                ul = [0.2/self.pix_size.value]
                vl = [0.0]
                ax.quiver(xl, yl, ul, vl, scale_units='xy', scale=0.05, pivot='tip', color='red')
                ax.text(0.12*im.shape[1], 0.95*im.shape[0], "0.2{0:unicode}".format(u.arcsec), verticalalignment='center')
                ax.text(
                    0.95*im.shape[1],
                    0.95*im.shape[0],
                    "Residual RMS: {0.value:0.2f}{0.unit:unicode}".format(results['residual_rms_asec']),
                    verticalalignment='center',
                    horizontalalignment='right'
                )
                # image quality combines fit residuals and remaining wavefront error in quadrature
                iq = np.sqrt(results['residual_rms_asec']**2 +
                             (results['zernike_rms'].value / self.telescope.nmperasec * u.arcsec)**2)
                ax.set_title("Image Quality: {0.value:0.2f}{0.unit:unicode}".format(iq))
            results['resid_plot'] = fig
        else:
            results = None
        return results
    def calculate_primary(self, zv, threshold=0.0 * u.nm, mask=[]):
        """
        Calculate force corrections to primary mirror and any required focus offsets. Use threshold to determine which
        terms in 'zv' to use in the force calculations. Any terms with normalized amplitude less than threshold will
        not be used in the force calculation. In addition, individual terms can be forced to be masked.
        """
        # NOTE(review): mutable default 'mask=[]' is shared between calls; safe only
        # if calculate_primary_corrections() never mutates it -- confirm.
        zv.denormalize()
        zv_masked = ZernikeVector()
        zv_norm = zv.copy()
        zv_norm.normalize()
        log.debug(f"thresh: {threshold} mask {mask}")
        # keep only terms whose normalized amplitude clears the threshold
        for z in zv:
            if abs(zv_norm[z]) >= threshold:
                zv_masked[z] = zv[z]
                log.debug(f"{z}: Good")
            else:
                log.debug(f"{z}: Bad")
        zv_masked.denormalize()  # need to assure we're using fringe coeffs
        log.debug(f"\nInput masked: {zv_masked}")
        # use any available error bars to mask down to 1 sigma below amplitude or 0 if error bars are larger than amplitude.
        for z in zv_masked:
            frac_err = 1. - min(zv_masked.frac_error(key=z), 1.)
            zv_masked[z] *= frac_err
        log.debug(f"\nErrorbar masked: {zv_masked}")
        forces, m1focus, zv_allmasked = self.telescope.calculate_primary_corrections(
            zv=zv_masked,
            mask=mask,
            gain=self.m1_gain
        )
        log.debug(f"\nAll masked: {zv_allmasked}")
        return forces, m1focus, zv_allmasked
    def calculate_focus(self, zv):
        """
        Convert Zernike defocus to um of secondary offset.
        """
        z_denorm = zv.copy()
        z_denorm.denormalize()  # need to assure we're using fringe coeffs
        # de-weight the correction by the fractional error on the defocus term
        frac_err = 1. - min(z_denorm.frac_error(key='Z04'), 1.)
        foc_corr = -self.m2_gain * frac_err * z_denorm['Z04'] / self.secondary.focus_trans
        return foc_corr.round(2)
    def calculate_cc(self, zv):
        """
        Convert Zernike coma (Z07 and Z08) into arcsec of secondary center-of-curvature tilts.
        """
        z_denorm = zv.copy()
        z_denorm.denormalize()  # need to assure we're using fringe coeffs
        # fix coma using tilts around the M2 center of curvature.
        y_frac_err = 1. - min(z_denorm.frac_error(key='Z07'), 1.)
        x_frac_err = 1. - min(z_denorm.frac_error(key='Z08'), 1.)
        cc_y_corr = -self.m2_gain * y_frac_err * z_denorm['Z07'] / self.secondary.theta_cc
        cc_x_corr = -self.m2_gain * x_frac_err * z_denorm['Z08'] / self.secondary.theta_cc
        return cc_x_corr.round(3), cc_y_corr.round(3)
    def calculate_recenter(self, fit_results, defoc=1.0):
        """
        Perform zero-coma hexapod tilts to align the pupil center to the center-of-rotation.
        The location of the CoR is configured to be at self.cor_coords.
        """
        xc = fit_results['xcen']
        yc = fit_results['ycen']
        xref = self.cor_coords[0]
        yref = self.cor_coords[1]
        dx = xc - xref
        dy = yc - yref
        total_rotation = u.Quantity(self.rotation - fit_results['rotator'], u.rad).value
        # derotate the pixel offset into the azimuth/elevation frame
        dr, phi = cart2pol([dx, dy])
        derot_phi = phi + total_rotation
        az, el = pol2cart([dr, derot_phi])
        az *= self.az_parity * self.pix_size * defoc  # pix size scales with the pupil size as focus changes.
        el *= self.el_parity * self.pix_size * defoc
        return az.round(3), el.round(3)
    def clear_m1_corrections(self):
        """
        Clear corrections applied to the primary mirror. This includes the 'm1spherical' offsets sent to the secondary.
        """
        log.info("Clearing WFS corrections from M1 and m1spherical offsets from M2.")
        clear_forces, clear_m1focus = self.telescope.clear_forces()
        return clear_forces, clear_m1focus
    def clear_m2_corrections(self):
        """
        Clear corrections sent to the secondary mirror, specifically the 'wfs' offsets.
        """
        log.info("Clearing WFS offsets from M2's hexapod.")
        cmds = self.secondary.clear_wfs()
        return cmds
    def clear_corrections(self):
        """
        Clear all applied WFS corrections
        """
        forces, m1focus = self.clear_m1_corrections()
        cmds = self.clear_m2_corrections()
        return forces, m1focus, cmds
    def connect(self):
        """
        Set state to connected.
        Connected only if both the telescope and secondary connect successfully.
        """
        self.telescope.connect()
        self.secondary.connect()
        if self.telescope.connected and self.secondary.connected:
            self.connected = True
        else:
            self.connected = False
    def disconnect(self):
        """
        Set state to disconnected
        """
        self.telescope.disconnect()
        self.secondary.disconnect()
        self.connected = False
class F9(WFS):
    """
    Configuration and behavior specific to the F/9 WFS system.
    """
    def __init__(self, config={}, plot=True):
        super(F9, self).__init__(config=config, plot=plot)
        self.connected = False
        # controller for the calibration (comparison) mirror in the F/9 topbox
        self.compmirror = CompMirror()

    def connect(self):
        """
        Connect the common WFS systems; only if that succeeds, connect the topbox too.
        """
        super(F9, self).connect()
        if not self.connected:
            return
        self.compmirror.connect()

    def disconnect(self):
        """
        Disconnect the common WFS systems along with the topbox.
        """
        super(F9, self).disconnect()
        self.compmirror.disconnect()
class NewF9(F9):
    """
    Defines configuration and methods specific to the F/9 WFS system with the new SBIG CCD
    """
    def process_image(self, fitsfile):
        """
        Prepare a raw WFS frame for wavefront analysis: clean cosmic rays/hot
        pixels, then estimate and subtract the 2D background.
        """
        raw, hdr = check_wfsdata(fitsfile, header=True)
        # zap cosmic rays and hot pixels before background estimation
        _crmask, cleaned = detect_cosmics(raw, sigclip=15., niter=5, cleantype='medmask', psffwhm=10.)
        # mask detected sources so they don't bias the background estimate
        estimator = photutils.ModeEstimatorBackground()
        srcmask = photutils.make_source_mask(cleaned, nsigma=2, npixels=7, dilate_size=13)
        bkg2d = photutils.Background2D(cleaned, (50, 50), filter_size=(15, 15), bkg_estimator=estimator, mask=srcmask)
        cleaned -= bkg2d.background
        return cleaned, hdr
class F5(WFS):
    """
    Defines configuration and methods specific to the F/5 WFS systems
    """
    def __init__(self, config={}, plot=True):
        super(F5, self).__init__(config=config, plot=plot)
        self.connected = False
        self.sock = None
        # load lookup table for off-axis aberrations
        self.aberr_table = ascii.read(self.aberr_table_file)
    def process_image(self, fitsfile):
        """
        Process the image to make it suitable for accurate wavefront analysis. Steps include nuking cosmic rays,
        subtracting background, handling overscan regions, etc.
        """
        rawdata, hdr = check_wfsdata(fitsfile, header=True)
        trimdata = self.trim_overscan(rawdata, hdr=hdr)
        # clean cosmic rays and hot pixels before background estimation
        cr_mask, data = detect_cosmics(trimdata, sigclip=15., niter=5, cleantype='medmask', psffwhm=10.)
        # calculate the background and subtract it
        bkg_estimator = photutils.ModeEstimatorBackground()
        # mask detected sources so they don't bias the background estimate
        mask = photutils.make_source_mask(data, nsigma=2, npixels=5, dilate_size=11)
        bkg = photutils.Background2D(data, (20, 20), filter_size=(10, 10), bkg_estimator=bkg_estimator, mask=mask)
        data -= bkg.background
        return data, hdr
    def ref_pupil_location(self, mode, hdr=None):
        """
        For now we set the F/5 wfs center by hand based on engineering data. Should determine this more carefully.
        """
        # hard-coded pupil center in pixels from engineering data
        x = 262.0
        y = 259.0
        return x, y
    def focal_plane_position(self, hdr):
        """
        Need to fill this in for the hecto f/5 WFS system. For now will assume it's always on-axis.
        """
        return 0.0 * u.deg, 0.0 * u.deg
    def calculate_recenter(self, fit_results, defoc=1.0):
        """
        Perform zero-coma hexapod tilts to align the pupil center to the center-of-rotation.
        The location of the CoR is configured to be at self.cor_coords.
        """
        xc = fit_results['xcen']
        yc = fit_results['ycen']
        xref = self.cor_coords[0]
        yref = self.cor_coords[1]
        dx = xc - xref
        dy = yc - yref
        cam_rotation = self.rotation - 90 * u.deg  # pickoff plus fold mirror makes a 90 deg rotation
        total_rotation = u.Quantity(cam_rotation - fit_results['rotator'], u.rad).value
        dr, phi = cart2pol([dx, -dy])  # F/5 camera needs an up/down flip
        derot_phi = phi + total_rotation
        az, el = pol2cart([dr, derot_phi])
        az *= self.az_parity * self.pix_size * defoc  # pix size scales with the pupil size as focus changes.
        el *= self.el_parity * self.pix_size * defoc
        return az.round(3), el.round(3)
    def reference_aberrations(self, mode, hdr=None):
        """
        Create reference ZernikeVector for 'mode'. Pass 'hdr' to self.focal_plane_position() to get position of
        the WFS when the data was acquired.
        """
        # for most cases, this gets the reference focus
        z_default = ZernikeVector(**self.modes[mode]['ref_zern'])
        # now get the off-axis aberrations
        z_offaxis = ZernikeVector()
        if hdr is None:
            log.warning("Missing WFS header. Assuming data is acquired on-axis.")
            field_r = 0.0 * u.deg
            field_phi = 0.0 * u.deg
        else:
            field_r, field_phi = self.focal_plane_position(hdr)
        # ignore piston and x/y tilts; interpolate Z04-Z11 from the off-axis lookup table
        for i in range(4, 12):
            k = "Z%02d" % i
            z_offaxis[k] = np.interp(field_r.to(u.deg).value, self.aberr_table['field_r'], self.aberr_table[k]) * u.um
        # remove the 90 degree offset between the MMT and zernike conventions and then rotate the offaxis aberrations
        z_offaxis.rotate(angle=field_phi - 90. * u.deg)
        z = z_default + z_offaxis
        return z
class Binospec(F5):
    """
    Defines configuration and methods specific to the Binospec WFS system. Binospec uses the same aberration table
    as the F5 system so we inherit from that.
    """
    def get_flipud(self, mode):
        """
        Method to determine if the WFS image needs to be flipped up/down
        During the first binospec commissioning run the images were flipped u/d as they came in. Since then, they are
        left as-is and get flipped internally based on this flag. The reference file is already flipped.
        """
        return True
    def ref_pupil_location(self, mode, hdr=None):
        """
        If a header is passed in, use Jan Kansky's linear relations to get the pupil center on the reference image.
        Otherwise, use the default method.
        """
        if hdr is None:
            ref = self.modes[mode]['reference']
            x = ref.xcen
            y = ref.ycen
        else:
            for k in ['STARXMM', 'STARYMM']:
                if k not in hdr:
                    # we'll be lenient for now with missing header info. if not provided, assume we're on-axis.
                    msg = f"Missing value, {k}, that is required to transform Binospec guider coordinates. Defaulting to 0.0."
                    log.warning(msg)
                    hdr[k] = 0.0
            # empirically-determined linear relations (J. Kansky); note the guider
            # X axis maps to the image Y axis and vice versa
            y = 232.771 + 0.17544 * hdr['STARXMM']
            x = 265.438 + -0.20406 * hdr['STARYMM'] + 12.0
        return x, y
    def focal_plane_position(self, hdr):
        """
        Transform from the Binospec guider coordinate system to MMTO focal plane coordinates.
        """
        for k in ['ROT', 'STARXMM', 'STARYMM']:
            if k not in hdr:
                # we'll be lenient for now with missing header info. if not provided, assume we're on-axis.
                msg = f"Missing value, {k}, that is required to transform Binospec guider coordinates. Defaulting to 0.0."
                log.warning(msg)
                hdr[k] = 0.0
        guide_x = hdr['STARXMM']
        guide_y = hdr['STARYMM']
        rot = hdr['ROT']
        guide_r = np.sqrt(guide_x**2 + guide_y**2) * u.mm
        rot = u.Quantity(rot, u.deg)  # make sure rotation is cast to degrees
        # the MMTO focal plane coordinate convention has phi=0 aligned with +Y instead of +X
        if guide_y != 0.0:
            guide_phi = np.arctan2(guide_x, guide_y) * u.rad
        else:
            guide_phi = 90. * u.deg
        # transform radius in guider coords to degrees in focal plane
        focal_r = (guide_r / self.secondary.plate_scale).to(u.deg)
        focal_phi = guide_phi + rot + self.rotation
        log.debug(f"guide_phi: {guide_phi.to(u.rad)} rot: {rot}")
        return focal_r, focal_phi
    def in_wfs_region(self, xw, yw, x, y):
        """
        Determine if a position is within the region available to Binospec's WFS
        """
        return True  # placekeeper until the optical prescription is implemented
    def pupil_mask(self, hdr, npts=14):
        """
        Generate a synthetic pupil mask
        """
        if hdr is not None:
            x_wfs = hdr.get('STARXMM', 150.0)
            y_wfs = hdr.get('STARYMM', 0.0)
        else:
            x_wfs = 150.0
            y_wfs = 0.0
            log.warning("Header information not available for Binospec pupil mask. Assuming default position.")
        good = []
        center = self.pup_size / 2.
        obsc = self.telescope.obscuration.value
        spacing = 2.0 / npts
        # walk an npts x npts grid over the unit pupil; keep points inside the annulus
        for x in np.arange(-1, 1, spacing):
            for y in np.arange(-1, 1, spacing):
                r = np.hypot(x, y)
                # (np.hypot(x, y) here equals the r computed just above)
                if (r < 1 and np.hypot(x, y) >= obsc):
                    if self.in_wfs_region(x_wfs, y_wfs, x, y):
                        x_impos = center * (x + 1.)
                        y_impos = center * (y + 1.)
                        amp = 1.
                        # this is kind of a hacky way to dim spots near the edge, but easier than doing full calc
                        # of the aperture intersection with pupil. it also doesn't need to be that accurate for the
                        # purposes of the cross-correlation used to register the pupil.
                        if r > 1. - spacing:
                            amp = 1. - (r - (1. - spacing)) / spacing
                        if r - obsc < spacing:
                            amp = (r - obsc) / spacing
                        good.append((amp, x_impos, y_impos))
        # render each kept grid point as a gaussian spot in the mask image
        yi, xi = np.mgrid[0:self.pup_size, 0:self.pup_size]
        im = np.zeros((self.pup_size, self.pup_size))
        sigma = 3.
        for g in good:
            im += Gaussian2D(g[0], g[1], g[2], sigma, sigma)(xi, yi)
        # Measured by hand from reference LED image
        cam_rot = 0.595
        im_rot = rotate(im, cam_rot, reshape=False)
        # zero out interpolation noise left by the rotation
        im_rot[im_rot < 1e-2] = 0.0
        return im_rot
class MMIRS(F5):
"""
Defines configuration and methods specific to the MMIRS WFS system
"""
    def __init__(self, config={}, plot=True):
        super(MMIRS, self).__init__(config=config, plot=plot)
        # Parameters describing MMIRS pickoff mirror geometry
        # Location and diameter of exit pupil
        # Determined by tracing chief ray at 7.2' field angle with mmirs_asbuiltoptics_20110107_corronly.zmx
        self.zp = 71.749 / 0.02714
        self.dp = self.zp / 5.18661  # Working f/# from Zemax file
        # Location of fold mirror
        self.zm = 114.8
        # Angle of fold mirror
        self.am = 42 * u.deg
        # Following dimensions from drawing MMIRS-1233_Rev1.pdf
        # Diameter of pickoff mirror
        self.pickoff_diam = (6.3 * u.imperial.inch).to(u.mm).value
        # X size of opening in pickoff mirror
        self.pickoff_xsize = (3.29 * u.imperial.inch).to(u.mm).value
        # Y size of opening in pickoff mirror
        self.pickoff_ysize = (3.53 * u.imperial.inch).to(u.mm).value
        # radius of corner in pickoff mirror
        self.pickoff_rcirc = (0.4 * u.imperial.inch).to(u.mm).value
def mirrorpoint(self, x0, y0, x, y):
"""
Compute intersection of ray with pickoff mirror.
The ray leaves the exit pupil at position x,y and hits the focal surface at x0,y0.
Math comes from http://geomalgorithms.com/a05-_intersect-1.html
"""
# Point in focal plane
P0 = np.array([x0, y0, 0])
# Point in exit pupil
P1 = np.array([x * self.dp / 2, y * self.dp / 2, self.zp])
# Pickoff mirror intesection with optical axis
V0 = np.array([0, 0, self.zm])
# normal to mirror
if (x0 < 0):
n = np.array([-np.sin(self.am), 0, np.cos(self.am)])
else:
n = np.array([np.sin(self.am), 0, np.cos(self.am)])
w = P0 - V0
# Vector connecting P0 to P1
u = P1 - P0
# Distance from P0 to intersection as a fraction of abs(u)
s = -n.dot(w) / n.dot(u)
# Intersection point on mirror
P = P0 + s * u
return (P[0], P[1])
def onmirror(self, x, y, side):
"""
Determine if a point is on the pickoff mirror surface:
x,y = coordinates of ray
side=1 means right face of the pickoff mirror, -1=left face
"""
if np.hypot(x, y) > self.pickoff_diam / 2.:
return False
if x * side < 0:
return False
x = abs(x)
y = abs(y)
if ((x > self.pickoff_xsize/2) or (y > self.pickoff_ysize/2)
or (x > self.pickoff_xsize/2 - self.pickoff_rcirc and y > self.pickoff_ysize/2 - self.pickoff_rcirc
and np.hypot(x - (self.pickoff_xsize/2 - self.pickoff_rcirc),
y - (self.pickoff_ysize/2 - self.pickoff_rcirc)) > self.pickoff_rcirc)):
return True
else:
return False
def drawoutline(self, ax):
    """
    Draw outline of MMIRS pickoff mirror onto matplotlib axis, ax

    Draws the circular mirror blank, the rounded-corner rectangular
    opening, and the vertical split line between the two mirror halves.
    """
    # one-degree steps around a full circle (astropy angles)
    circ = np.arange(360) * u.deg
    # circular edge of the mirror blank
    ax.plot(np.cos(circ) * self.pickoff_diam/2, np.sin(circ) * self.pickoff_diam/2, "b")
    ax.set_aspect('equal', 'datalim')
    # top straight edge of the opening (between the corner arcs)
    ax.plot(
        [-(self.pickoff_xsize/2 - self.pickoff_rcirc), (self.pickoff_xsize/2 - self.pickoff_rcirc)],
        [self.pickoff_ysize/2, self.pickoff_ysize/2],
        "b"
    )
    # bottom straight edge of the opening
    ax.plot(
        [-(self.pickoff_xsize/2 - self.pickoff_rcirc), (self.pickoff_xsize/2 - self.pickoff_rcirc)],
        [-self.pickoff_ysize/2, -self.pickoff_ysize/2],
        "b"
    )
    # left straight edge of the opening
    ax.plot(
        [-(self.pickoff_xsize/2), -(self.pickoff_xsize/2)],
        [self.pickoff_ysize/2 - self.pickoff_rcirc, -(self.pickoff_ysize/2 - self.pickoff_rcirc)],
        "b"
    )
    # right straight edge of the opening
    ax.plot(
        [(self.pickoff_xsize/2), (self.pickoff_xsize/2)],
        [self.pickoff_ysize/2 - self.pickoff_rcirc, -(self.pickoff_ysize/2 - self.pickoff_rcirc)],
        "b"
    )
    # four quarter-circle corner arcs, one per quadrant (0-90, 90-180, ...)
    ax.plot(
        np.cos(circ[0:90]) * self.pickoff_rcirc + self.pickoff_xsize/2 - self.pickoff_rcirc,
        np.sin(circ[0:90]) * self.pickoff_rcirc + self.pickoff_ysize/2 - self.pickoff_rcirc,
        "b"
    )
    ax.plot(
        np.cos(circ[90:180]) * self.pickoff_rcirc - self.pickoff_xsize/2 + self.pickoff_rcirc,
        np.sin(circ[90:180]) * self.pickoff_rcirc + self.pickoff_ysize/2 - self.pickoff_rcirc,
        "b"
    )
    ax.plot(
        np.cos(circ[180:270]) * self.pickoff_rcirc - self.pickoff_xsize/2 + self.pickoff_rcirc,
        np.sin(circ[180:270]) * self.pickoff_rcirc - self.pickoff_ysize/2 + self.pickoff_rcirc,
        "b"
    )
    ax.plot(
        np.cos(circ[270:360]) * self.pickoff_rcirc + self.pickoff_xsize/2 - self.pickoff_rcirc,
        np.sin(circ[270:360]) * self.pickoff_rcirc - self.pickoff_ysize/2 + self.pickoff_rcirc,
        "b"
    )
    # vertical split line between the two mirror halves
    ax.plot([0, 0], [self.pickoff_ysize/2, self.pickoff_diam/2], "b")
    ax.plot([0, 0], [-self.pickoff_ysize/2, -self.pickoff_diam/2], "b")
def plotgrid(self, x0, y0, ax, npts=15):
    """
    Plot a grid of points representing Shack-Hartmann apertures corresponding to wavefront sensor positioned at
    a focal plane position of x0, y0 mm. This position is written in the FITS header keywords GUIDERX and GUIDERY.

    Points landing on the pickoff mirror are plotted green, misses red.
    Returns the number of apertures that land on the mirror.
    """
    ngood = 0
    # sample the pupil on an npts x npts grid of normalized coordinates
    for x in np.arange(-1, 1, 2.0 / npts):
        for y in np.arange(-1, 1, 2.0 / npts):
            if (np.hypot(x, y) < 1 and np.hypot(x, y) >= self.telescope.obscuration):  # Only plot points w/in the pupil
                xm, ym = self.mirrorpoint(x0, y0, x, y)  # Get intersection with pickoff
                if self.onmirror(xm, ym, x0/abs(x0)):  # Find out if point is on the mirror surface
                    ax.scatter(xm, ym, 1, "g")
                    ngood += 1
                else:
                    ax.scatter(xm, ym, 1, "r")
    return ngood
def plotgrid_hdr(self, hdr, ax, npts=15):
    """
    Wrap self.plotgrid() and get x0, y0 values from hdr.
    """
    # both guider-position keywords are required to locate the WFS
    if 'GUIDERX' not in hdr or 'GUIDERY' not in hdr:
        raise WFSCommandException(
            value="No MMIRS WFS position available in header.")
    return self.plotgrid(hdr['GUIDERX'], hdr['GUIDERY'], ax=ax, npts=npts)
def pupil_mask(self, hdr, npts=15):
    """
    Use MMIRS pickoff mirror geometry to calculate the pupil mask

    Builds a synthetic pupil image: each sampled sub-aperture that lands
    on the pickoff mirror becomes a Gaussian spot, and the image is then
    rotated into the camera frame.
    """
    # required header keywords: WFS position and camera rotation angle
    if 'GUIDERX' not in hdr or 'GUIDERY' not in hdr:
        msg = "No MMIRS WFS position available in header."
        raise WFSCommandException(value=msg)
    if 'CA' not in hdr:
        msg = "No camera rotation angle available in header."
        raise WFSCommandException(value=msg)
    cam_rot = hdr['CA']
    x0 = hdr['GUIDERX']
    y0 = hdr['GUIDERY']
    good = []  # (amplitude, x_pixel, y_pixel) of spots on the mirror
    center = self.pup_size / 2.
    obsc = self.telescope.obscuration.value
    spacing = 2.0 / npts
    for x in np.arange(-1, 1, spacing):
        for y in np.arange(-1, 1, spacing):
            r = np.hypot(x, y)
            # keep only points inside the annular (obscured) pupil
            if (r < 1 and np.hypot(x, y) >= obsc):
                xm, ym = self.mirrorpoint(x0, y0, x, y)
                if self.onmirror(xm, ym, x0/abs(x0)):
                    # map normalized pupil coords [-1, 1] to image pixels
                    x_impos = center * (x + 1.)
                    y_impos = center * (y + 1.)
                    amp = 1.
                    # this is kind of a hacky way to dim spots near the edge, but easier than doing full calc
                    # of the aperture intersection with pupil. it also doesn't need to be that accurate for the
                    # purposes of the cross-correlation used to register the pupil.
                    if r > 1. - spacing:
                        amp = 1. - (r - (1. - spacing)) / spacing
                    if r - obsc < spacing:
                        amp = (r - obsc) / spacing
                    good.append((amp, x_impos, y_impos))
    yi, xi = np.mgrid[0:self.pup_size, 0:self.pup_size]
    im = np.zeros((self.pup_size, self.pup_size))
    sigma = 3.  # Gaussian spot width (pixels)
    for g in good:
        im += Gaussian2D(g[0], g[1], g[2], sigma, sigma)(xi, yi)
    # camera 2's lenslet array is rotated -1.12 deg w.r.t. the camera.
    # NOTE(review): the comment above says camera 2 but the condition
    # below applies the offset when CAMERA == 1 -- verify which is intended.
    if hdr['CAMERA'] == 1:
        cam_rot -= 1.12
    im_rot = rotate(im, cam_rot, reshape=False)
    # zero out interpolation noise introduced by the rotation
    im_rot[im_rot < 1e-2] = 0.0
    return im_rot
def get_mode(self, hdr):
    """
    For MMIRS we figure out the mode from which camera the image is taken with.
    """
    # mode string is simply 'mmirs' plus the camera number, e.g. 'mmirs1'
    return "mmirs{}".format(hdr['CAMERA'])
def trim_overscan(self, data, hdr=None):
    """
    MMIRS leaves the overscan in, but doesn't give any header information. So gotta trim by hand...
    """
    # hard-coded overscan region: first 5 rows and first 12 columns
    first_row, first_col = 5, 12
    return data[first_row:, first_col:]
def process_image(self, fitsfile):
    """
    Process the image to make it suitable for accurate wavefront analysis. Steps include nuking cosmic rays,
    subtracting background, handling overscan regions, etc.

    Returns a (data, header) tuple: the trimmed, CR-cleaned,
    background-subtracted image and the original FITS header.
    """
    rawdata, hdr = check_wfsdata(fitsfile, header=True)
    trimdata = self.trim_overscan(rawdata, hdr=hdr)
    # MMIRS gets a lot of hot pixels/CRs so make a quick pass to nuke them
    cr_mask, data = detect_cosmics(trimdata, sigclip=5., niter=5, cleantype='medmask', psffwhm=5.)
    # calculate the background and subtract it
    # (sources are masked first so they don't bias the background estimate)
    bkg_estimator = photutils.ModeEstimatorBackground()
    mask = photutils.make_source_mask(data, nsigma=2, npixels=5, dilate_size=11)
    bkg = photutils.Background2D(data, (20, 20), filter_size=(7, 7), bkg_estimator=bkg_estimator, mask=mask)
    data -= bkg.background
    return data, hdr
def focal_plane_position(self, hdr):
    """
    Transform from the MMIRS guider coordinate system to MMTO focal plane coordinates.

    Returns (focal_r, focal_phi): radius (as an angle) and position angle
    of the WFS in the MMTO focal plane.
    """
    # all three keywords are required for the transformation
    for k in ['ROT', 'GUIDERX', 'GUIDERY']:
        if k not in hdr:
            msg = f"Missing value, {k}, that is required to transform MMIRS guider coordinates."
            raise WFSConfigException(value=msg)
    guide_x = hdr['GUIDERX']
    guide_y = hdr['GUIDERY']
    rot = hdr['ROT']
    guide_r = np.sqrt(guide_x**2 + guide_y**2)
    rot = u.Quantity(rot, u.deg)  # make sure rotation is cast to degrees
    # the MMTO focal plane coordinate convention has phi=0 aligned with +Y instead of +X
    if guide_y != 0.0:
        guide_phi = np.arctan2(guide_x, guide_y) * u.rad
    else:
        # NOTE(review): this forces +90 deg even when guide_x < 0, where
        # arctan2 would return -90 deg -- confirm this is intentional
        guide_phi = 90. * u.deg
    # transform radius in guider coords to degrees in focal plane
    # (empirical odd-polynomial distortion model)
    focal_r = (0.0016922 * guide_r - 4.60789e-9 * guide_r**3 - 8.111307e-14 * guide_r**5) * u.deg
    focal_phi = guide_phi + rot + self.rotation
    return focal_r, focal_phi
class FLWO12(WFS):
    """
    Defines configuration and methods for the WFS on the FLWO 1.2-meter
    """
    def trim_overscan(self, data, hdr=None):
        # remove last column that is always set to 0
        # (keep only the first 510 columns of the readout)
        return data[:, :510]
class FLWO15(FLWO12):
    """
    Defines configuration and methods for the WFS on the FLWO 1.5-meter

    Behaves identically to the 1.2-meter configuration: everything,
    including overscan trimming, is inherited from FLWO12.
    """
    pass
| bsd-3-clause |
drammock/mne-python | tutorials/preprocessing/70_fnirs_processing.py | 5 | 14145 | """
.. _tut-fnirs-processing:
Preprocessing functional near-infrared spectroscopy (fNIRS) data
================================================================
This tutorial covers how to convert functional near-infrared spectroscopy
(fNIRS) data from raw measurements to relative oxyhaemoglobin (HbO) and
deoxyhaemoglobin (HbR) concentration, view the average waveform, and
topographic representation of the response.
Here we will work with the :ref:`fNIRS motor data <fnirs-motor-dataset>`.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from itertools import compress
import mne
fnirs_data_folder = mne.datasets.fnirs_motor.data_path()
fnirs_cw_amplitude_dir = os.path.join(fnirs_data_folder, 'Participant-1')
raw_intensity = mne.io.read_raw_nirx(fnirs_cw_amplitude_dir, verbose=True)
raw_intensity.load_data()
###############################################################################
# View location of sensors over brain surface
# -------------------------------------------
#
# Here we validate that the location of sources-detector pairs and channels
# are in the expected locations. Source-detector pairs are shown as lines
# between the optodes, channels (the mid point of source-detector pairs) are
# optionally shown as orange dots. Source are optionally shown as red dots and
# detectors as black.
subjects_dir = mne.datasets.sample.data_path() + '/subjects'
fig = mne.viz.create_3d_figure(size=(800, 600), bgcolor='white')
fig = mne.viz.plot_alignment(raw_intensity.info, show_axes=True,
subject='fsaverage', coord_frame='mri',
trans='fsaverage', surfaces=['brain'],
fnirs=['channels', 'pairs',
'sources', 'detectors'],
subjects_dir=subjects_dir, fig=fig)
mne.viz.set_3d_view(figure=fig, azimuth=20, elevation=60, distance=0.4,
focalpoint=(0., -0.01, 0.02))
###############################################################################
# Selecting channels appropriate for detecting neural responses
# -------------------------------------------------------------
#
# First we remove channels that are too close together (short channels) to
# detect a neural response (less than 1 cm distance between optodes).
# These short channels can be seen in the figure above.
# To achieve this we pick all the channels that are not considered to be short.
picks = mne.pick_types(raw_intensity.info, meg=False, fnirs=True)
dists = mne.preprocessing.nirs.source_detector_distances(
raw_intensity.info, picks=picks)
raw_intensity.pick(picks[dists > 0.01])
raw_intensity.plot(n_channels=len(raw_intensity.ch_names),
duration=500, show_scrollbars=False)
###############################################################################
# Converting from raw intensity to optical density
# ------------------------------------------------
#
# The raw intensity values are then converted to optical density.
raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)
raw_od.plot(n_channels=len(raw_od.ch_names),
duration=500, show_scrollbars=False)
###############################################################################
# Evaluating the quality of the data
# ----------------------------------
#
# At this stage we can quantify the quality of the coupling
# between the scalp and the optodes using the scalp coupling index. This
# method looks for the presence of a prominent synchronous signal in the
# frequency range of cardiac signals across both photodetected signals.
#
# In this example the data is clean and the coupling is good for all
# channels, so we will not mark any channels as bad based on the scalp
# coupling index.
sci = mne.preprocessing.nirs.scalp_coupling_index(raw_od)
fig, ax = plt.subplots()
ax.hist(sci)
ax.set(xlabel='Scalp Coupling Index', ylabel='Count', xlim=[0, 1])
###############################################################################
# In this example we will mark all channels with a SCI less than 0.5 as bad
# (this dataset is quite clean, so no channels are marked as bad).
raw_od.info['bads'] = list(compress(raw_od.ch_names, sci < 0.5))
###############################################################################
# At this stage it is appropriate to inspect your data
# (for instructions on how to use the interactive data visualisation tool
# see :ref:`tut-visualize-raw`)
# to ensure that channels with poor scalp coupling have been removed.
# If your data contains lots of artifacts you may decide to apply
# artifact reduction techniques as described in :ref:`ex-fnirs-artifacts`.
###############################################################################
# Converting from optical density to haemoglobin
# ----------------------------------------------
#
# Next we convert the optical density data to haemoglobin concentration using
# the modified Beer-Lambert law.
raw_haemo = mne.preprocessing.nirs.beer_lambert_law(raw_od)
raw_haemo.plot(n_channels=len(raw_haemo.ch_names),
duration=500, show_scrollbars=False)
###############################################################################
# Removing heart rate from signal
# -------------------------------
#
# The haemodynamic response has frequency content predominantly below 0.5 Hz.
# An increase in activity around 1 Hz can be seen in the data that is due to
# the person's heart beat and is unwanted. So we use a low pass filter to
# remove this. A high pass filter is also included to remove slow drifts
# in the data.
fig = raw_haemo.plot_psd(average=True)
fig.suptitle('Before filtering', weight='bold', size='x-large')
fig.subplots_adjust(top=0.88)
raw_haemo = raw_haemo.filter(0.05, 0.7, h_trans_bandwidth=0.2,
l_trans_bandwidth=0.02)
fig = raw_haemo.plot_psd(average=True)
fig.suptitle('After filtering', weight='bold', size='x-large')
fig.subplots_adjust(top=0.88)
###############################################################################
# Extract epochs
# --------------
#
# Now that the signal has been converted to relative haemoglobin concentration,
# and the unwanted heart rate component has been removed, we can extract epochs
# related to each of the experimental conditions.
#
# First we extract the events of interest and visualise them to ensure they are
# correct.
events, _ = mne.events_from_annotations(raw_haemo, event_id={'1.0': 1,
'2.0': 2,
'3.0': 3})
event_dict = {'Control': 1, 'Tapping/Left': 2, 'Tapping/Right': 3}
fig = mne.viz.plot_events(events, event_id=event_dict,
sfreq=raw_haemo.info['sfreq'])
fig.subplots_adjust(right=0.7) # make room for the legend
###############################################################################
# Next we define the range of our epochs, the rejection criteria,
# baseline correction, and extract the epochs. We visualise the log of which
# epochs were dropped.
reject_criteria = dict(hbo=80e-6)
tmin, tmax = -5, 15
epochs = mne.Epochs(raw_haemo, events, event_id=event_dict,
tmin=tmin, tmax=tmax,
reject=reject_criteria, reject_by_annotation=True,
proj=True, baseline=(None, 0), preload=True,
detrend=None, verbose=True)
epochs.plot_drop_log()
###############################################################################
# View consistency of responses across trials
# -------------------------------------------
#
# Now we can view the haemodynamic response for our tapping condition.
# We visualise the response for both the oxy- and deoxyhaemoglobin, and
# observe the expected peak in HbO at around 6 seconds consistently across
# trials, and the consistent dip in HbR that is slightly delayed relative to
# the HbO peak.
epochs['Tapping'].plot_image(combine='mean', vmin=-30, vmax=30,
ts_args=dict(ylim=dict(hbo=[-15, 15],
hbr=[-15, 15])))
###############################################################################
# We can also view the epoched data for the control condition and observe
# that it does not show the expected morphology.
epochs['Control'].plot_image(combine='mean', vmin=-30, vmax=30,
ts_args=dict(ylim=dict(hbo=[-15, 15],
hbr=[-15, 15])))
###############################################################################
# View consistency of responses across channels
# ---------------------------------------------
#
# Similarly we can view how consistent the response is across the optode
# pairs that we selected. All the channels in this data are located over the
# motor cortex, and all channels show a similar pattern in the data.
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 6))
clims = dict(hbo=[-20, 20], hbr=[-20, 20])
epochs['Control'].average().plot_image(axes=axes[:, 0], clim=clims)
epochs['Tapping'].average().plot_image(axes=axes[:, 1], clim=clims)
for column, condition in enumerate(['Control', 'Tapping']):
for ax in axes[:, column]:
ax.set_title('{}: {}'.format(condition, ax.get_title()))
###############################################################################
# Plot standard fNIRS response image
# ----------------------------------
#
# Next we generate the most common visualisation of fNIRS data: plotting
# both the HbO and HbR on the same figure to illustrate the relation between
# the two signals.
evoked_dict = {'Tapping/HbO': epochs['Tapping'].average(picks='hbo'),
'Tapping/HbR': epochs['Tapping'].average(picks='hbr'),
'Control/HbO': epochs['Control'].average(picks='hbo'),
'Control/HbR': epochs['Control'].average(picks='hbr')}
# Rename channels until the encoding of frequency in ch_name is fixed
for condition in evoked_dict:
evoked_dict[condition].rename_channels(lambda x: x[:-4])
color_dict = dict(HbO='#AA3377', HbR='b')
styles_dict = dict(Control=dict(linestyle='dashed'))
mne.viz.plot_compare_evokeds(evoked_dict, combine="mean", ci=0.95,
colors=color_dict, styles=styles_dict)
###############################################################################
# View topographic representation of activity
# -------------------------------------------
#
# Next we view how the topographic activity changes throughout the response.
times = np.arange(-3.5, 13.2, 3.0)
topomap_args = dict(extrapolate='local')
epochs['Tapping'].average(picks='hbo').plot_joint(
times=times, topomap_args=topomap_args)
###############################################################################
# Compare tapping of left and right hands
# ---------------------------------------
#
# Finally we generate topo maps for the left and right conditions to view
# the location of activity. First we visualise the HbO activity.
times = np.arange(4.0, 11.0, 1.0)
epochs['Tapping/Left'].average(picks='hbo').plot_topomap(
times=times, **topomap_args)
epochs['Tapping/Right'].average(picks='hbo').plot_topomap(
times=times, **topomap_args)
###############################################################################
# And we also view the HbR activity for the two conditions.
epochs['Tapping/Left'].average(picks='hbr').plot_topomap(
times=times, **topomap_args)
epochs['Tapping/Right'].average(picks='hbr').plot_topomap(
times=times, **topomap_args)
###############################################################################
# And we can plot the comparison at a single time point for two conditions.
fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(9, 5),
gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1]))
vmin, vmax, ts = -8, 8, 9.0
evoked_left = epochs['Tapping/Left'].average()
evoked_right = epochs['Tapping/Right'].average()
evoked_left.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 0],
vmin=vmin, vmax=vmax, colorbar=False,
**topomap_args)
evoked_left.plot_topomap(ch_type='hbr', times=ts, axes=axes[1, 0],
vmin=vmin, vmax=vmax, colorbar=False,
**topomap_args)
evoked_right.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 1],
vmin=vmin, vmax=vmax, colorbar=False,
**topomap_args)
evoked_right.plot_topomap(ch_type='hbr', times=ts, axes=axes[1, 1],
vmin=vmin, vmax=vmax, colorbar=False,
**topomap_args)
evoked_diff = mne.combine_evoked([evoked_left, evoked_right], weights=[1, -1])
evoked_diff.plot_topomap(ch_type='hbo', times=ts, axes=axes[0, 2:],
vmin=vmin, vmax=vmax, colorbar=True,
**topomap_args)
evoked_diff.plot_topomap(ch_type='hbr', times=ts, axes=axes[1, 2:],
vmin=vmin, vmax=vmax, colorbar=True,
**topomap_args)
for column, condition in enumerate(
['Tapping Left', 'Tapping Right', 'Left-Right']):
for row, chroma in enumerate(['HbO', 'HbR']):
axes[row, column].set_title('{}: {}'.format(chroma, condition))
fig.tight_layout()
###############################################################################
# Lastly, we can also look at the individual waveforms to see what is
# driving the topographic plot above.
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4))
mne.viz.plot_evoked_topo(epochs['Left'].average(picks='hbo'), color='b',
                         axes=axes, legend=False)
mne.viz.plot_evoked_topo(epochs['Right'].average(picks='hbo'), color='r',
                         axes=axes, legend=False)
# Tidy the legend.
# plot_evoked_topo draws one line per channel, so build the legend by hand:
# pick a single blue line (Left) and a single red line (Right) as proxies.
leg_lines = [line for line in axes.lines if line.get_c() == 'b'][:1]
leg_lines.append([line for line in axes.lines if line.get_c() == 'r'][0])
fig.legend(leg_lines, ['Left', 'Right'], loc='lower right')
| bsd-3-clause |
apache/incubator-airflow | tests/providers/amazon/aws/transfers/test_hive_to_dynamodb.py | 7 | 4590 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import json
import unittest
from unittest import mock
import pandas as pd
import airflow.providers.amazon.aws.transfers.hive_to_dynamodb
from airflow.models.dag import DAG
from airflow.providers.amazon.aws.hooks.dynamodb import AwsDynamoDBHook
DEFAULT_DATE = datetime.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
try:
from moto import mock_dynamodb2
except ImportError:
mock_dynamodb2 = None
class TestHiveToDynamoDBOperator(unittest.TestCase):
    """Tests for HiveToDynamoDBOperator against a moto-mocked DynamoDB."""

    def setUp(self):
        args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
        dag = DAG('test_dag_id', default_args=args)
        self.dag = dag
        self.sql = 'SELECT 1'
        self.hook = AwsDynamoDBHook(aws_conn_id='aws_default', region_name='us-east-1')

    @staticmethod
    def process_data(data, *args, **kwargs):
        """Convert a pandas DataFrame into a list of record dicts."""
        return json.loads(data.to_json(orient='records'))

    def _create_test_table(self):
        """Create the 'test_airflow' DynamoDB table used by the transfer tests."""
        # this table needs to be created in production
        self.hook.get_conn().create_table(
            TableName='test_airflow',
            KeySchema=[
                {'AttributeName': 'id', 'KeyType': 'HASH'},
            ],
            AttributeDefinitions=[{'AttributeName': 'id', 'AttributeType': 'S'}],
            ProvisionedThroughput={'ReadCapacityUnits': 10, 'WriteCapacityUnits': 10},
        )

    def _wait_and_count_items(self):
        """Block until the test table exists, then return its item count."""
        table = self.hook.get_conn().Table('test_airflow')
        table.meta.client.get_waiter('table_exists').wait(TableName='test_airflow')
        return table.item_count

    @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
    @mock_dynamodb2
    def test_get_conn_returns_a_boto3_connection(self):
        hook = AwsDynamoDBHook(aws_conn_id='aws_default')
        self.assertIsNotNone(hook.get_conn())

    @mock.patch(
        'airflow.providers.apache.hive.hooks.hive.HiveServer2Hook.get_pandas_df',
        return_value=pd.DataFrame(data=[('1', 'sid')], columns=['id', 'name']),
    )
    @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
    @mock_dynamodb2
    def test_get_records_with_schema(self, mock_get_pandas_df):
        self._create_test_table()
        operator = airflow.providers.amazon.aws.transfers.hive_to_dynamodb.HiveToDynamoDBOperator(
            sql=self.sql,
            table_name="test_airflow",
            task_id='hive_to_dynamodb_check',
            table_keys=['id'],
            dag=self.dag,
        )
        operator.execute(None)
        # one row in the source DataFrame -> one item in the table
        self.assertEqual(self._wait_and_count_items(), 1)

    @mock.patch(
        'airflow.providers.apache.hive.hooks.hive.HiveServer2Hook.get_pandas_df',
        return_value=pd.DataFrame(data=[('1', 'sid'), ('1', 'gupta')], columns=['id', 'name']),
    )
    @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
    @mock_dynamodb2
    def test_pre_process_records_with_schema(self, mock_get_pandas_df):
        self._create_test_table()
        operator = airflow.providers.amazon.aws.transfers.hive_to_dynamodb.HiveToDynamoDBOperator(
            sql=self.sql,
            table_name='test_airflow',
            task_id='hive_to_dynamodb_check',
            table_keys=['id'],
            pre_process=self.process_data,
            dag=self.dag,
        )
        operator.execute(None)
        # both rows share key 'id' == '1', so they collapse to a single item
        self.assertEqual(self._wait_and_count_items(), 1)
| apache-2.0 |
boland1992/seissuite_iran | build/lib/seissuite/ant/pscrosscorr.py | 1 | 151866 | #!/usr/bin/env python
"""
Module that contains classes holding cross-correlations and related
processing, such as frequency-time analysis (FTAN) to measure
dispersion curves.
"""
from seissuite.ant import pserrors, psutils, pstomo
import obspy.signal
try:
import obspy.io.xseed
except:
import obspy.xseed
import obspy.signal.cross_correlation
import obspy.signal.filter
from obspy.core import AttribDict#, read, UTCDateTime, Stream
from obspy.signal.invsim import cosTaper
import numpy as np
from numpy.fft import rfft, irfft, fft, ifft, fftfreq
from scipy import integrate
from scipy.interpolate import RectBivariateSpline, interp1d
from scipy.optimize import minimize
import itertools as it
import os
import shutil
import glob
import pickle
import copy
from collections import OrderedDict
import datetime as dt
from calendar import monthrange
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import gridspec
from obspy.core import Trace, Stream
import scipy.stats as stats
import pylab as pl
#from pyseis.modules.rdreftekc import rdreftek, reftek2stream
#
#def read_ref(path):
# ref_head, ref_data = rdreftek(path)
# st = reftek2stream(ref_head, ref_data)
# return st
plt.ioff() # turning off interactive mode
# ====================================================
# parsing configuration file to import some parameters
# ====================================================
# import CONFIG class initialised in ./configs/tmp_config.pickle
config_pickle = 'configs/tmp_config.pickle'
# context manager guarantees the handle is closed even if unpickling fails;
# also, open() takes no 'name' keyword in Python 3
with open(config_pickle, 'rb') as f:
    CONFIG = pickle.load(f)
# import variables from initialised CONFIG class.
MSEED_DIR = CONFIG.MSEED_DIR
DATABASE_DIR = CONFIG.DATABASE_DIR
DATALESS_DIR = CONFIG.DATALESS_DIR
STATIONXML_DIR = CONFIG.STATIONXML_DIR
CROSSCORR_DIR = CONFIG.CROSSCORR_DIR
USE_DATALESSPAZ = CONFIG.USE_DATALESSPAZ
USE_STATIONXML = CONFIG.USE_STATIONXML
CROSSCORR_STATIONS_SUBSET = CONFIG.CROSSCORR_STATIONS_SUBSET
CROSSCORR_SKIPLOCS = CONFIG.CROSSCORR_SKIPLOCS
FIRSTDAY = CONFIG.FIRSTDAY
LASTDAY = CONFIG.LASTDAY
MINFILL = CONFIG.MINFILL
FREQMIN = CONFIG.FREQMIN
FREQMAX = CONFIG.FREQMAX
CORNERS = CONFIG.CORNERS
ZEROPHASE = CONFIG.ZEROPHASE
PERIOD_RESAMPLE = CONFIG.PERIOD_RESAMPLE
ONEBIT_NORM = CONFIG.ONEBIT_NORM
FREQMIN_EARTHQUAKE = CONFIG.FREQMIN_EARTHQUAKE
FREQMAX_EARTHQUAKE = CONFIG.FREQMAX_EARTHQUAKE
WINDOW_TIME = CONFIG.WINDOW_TIME
WINDOW_FREQ = CONFIG.WINDOW_FREQ
XCORR_INTERVAL = CONFIG.XCORR_INTERVAL
CROSSCORR_TMAX = CONFIG.CROSSCORR_TMAX
CROSSCORR_DIR = CONFIG.CROSSCORR_DIR
FTAN_DIR = CONFIG.FTAN_DIR
PERIOD_BANDS = CONFIG.PERIOD_BANDS
CROSSCORR_TMAX = CONFIG.CROSSCORR_TMAX
PERIOD_RESAMPLE = CONFIG.PERIOD_RESAMPLE
SIGNAL_WINDOW_VMIN = CONFIG.SIGNAL_WINDOW_VMIN
SIGNAL_WINDOW_VMAX = CONFIG.SIGNAL_WINDOW_VMAX
SIGNAL2NOISE_TRAIL = CONFIG.SIGNAL2NOISE_TRAIL
NOISE_WINDOW_SIZE = CONFIG.NOISE_WINDOW_SIZE
RAWFTAN_PERIODS = CONFIG.RAWFTAN_PERIODS
CLEANFTAN_PERIODS = CONFIG.CLEANFTAN_PERIODS
FTAN_VELOCITIES = CONFIG.FTAN_VELOCITIES
FTAN_ALPHA = CONFIG.FTAN_ALPHA
STRENGTH_SMOOTHING = CONFIG.STRENGTH_SMOOTHING
USE_INSTANTANEOUS_FREQ = CONFIG.USE_INSTANTANEOUS_FREQ
MAX_RELDIFF_INST_NOMINAL_PERIOD = CONFIG.MAX_RELDIFF_INST_NOMINAL_PERIOD
MIN_INST_PERIOD = CONFIG.MIN_INST_PERIOD
HALFWINDOW_MEDIAN_PERIOD = CONFIG.HALFWINDOW_MEDIAN_PERIOD
MAX_RELDIFF_INST_MEDIAN_PERIOD = CONFIG.MAX_RELDIFF_INST_MEDIAN_PERIOD
BBOX_LARGE = CONFIG.BBOX_LARGE
BBOX_SMALL = CONFIG.BBOX_SMALL
FIRSTDAY = CONFIG.FIRSTDAY
LASTDAY = CONFIG.LASTDAY
#FULL_COMB = CONFIG.FULL_COMB
max_snr_pickle = os.path.join(DATALESS_DIR, 'snr.pickle')
#if os.path.exists(max_snr_pickle):
# global max_snr
# f = open(name=max_snr_pickle, mode='rb')
# max_snr = pickle.load(f)
# f.close()
#else:
# print "No maximum SNR file, must calculate to run SNR weighted stacking."
# ========================
# Constants and parameters
# ========================
EPS = 1.0e-5
ONESEC = dt.timedelta(seconds=1)
class MonthYear:
    """
    Hashable (month, year) pair.

    Instances compare equal to other MonthYear objects and to
    (month, year) sequences, and hash like the equivalent tuple so that
    both forms are interchangeable as dict/set keys.
    """
    def __init__(self, *args, **kwargs):
        """
        Usage: MonthYear(3, 2012) or MonthYear(month=3, year=2012) or
        MonthYear(date[time](2012, 3, 12))
        """
        if len(args) == 2 and not kwargs:
            month, year = args
        elif not args and set(kwargs.keys()) == {'month', 'year'}:
            month, year = kwargs['month'], kwargs['year']
        elif len(args) == 1 and not kwargs:
            # a date/datetime-like object exposing .month and .year
            month, year = args[0].month, args[0].year
        else:
            s = ("Usage: MonthYear(3, 2012) or MonthYear(month=3, year=2012) or "
                 "MonthYear(date[time](2012, 3, 12))")
            # TypeError (still an Exception subclass, so existing handlers
            # keep working) better describes a bad call signature
            raise TypeError(s)
        self.m = month
        self.y = year

    def __str__(self):
        """
        E.g., 03-2012
        """
        return '{:02d}-{}'.format(self.m, self.y)

    def __repr__(self):
        """
        E.g., <03-2012>
        """
        return '<{}>'.format(str(self))

    def __eq__(self, other):
        """
        Comparison with other, which can be a MonthYear object,
        or a sequence of int (month, year)
        @type other: L{MonthYear} or (int, int)
        """
        try:
            return self.m == other.m and self.y == other.y
        except AttributeError:
            # *other* has no m/y attributes: try comparing as a sequence
            try:
                return (self.m, self.y) == tuple(other)
            except TypeError:
                # not a sequence either: definitely not equal
                return False

    def __hash__(self):
        # hash like the equivalent (month, year) tuple: instances that
        # compare equal to a tuple must also hash equal to it for
        # consistent dict/set behavior
        return hash((self.m, self.y))
class MonthCrossCorrelation:
    """
    Class holding cross-correlation over a single month
    """
    def __init__(self, month, ndata):
        """
        @type month: L{MonthYear}
        @type ndata: int
        """
        # month/year this stack belongs to
        self.month = month
        # number of days stacked so far
        self.nday = 0
        # accumulated cross-correlation data for the month
        self.dataarray = np.zeros(ndata)

    def monthfill(self):
        """
        Returns the relative month fill (between 0-1)
        """
        days_in_month = monthrange(year=self.month.y, month=self.month.m)[1]
        return float(self.nday) / days_in_month

    def __repr__(self):
        return '<cross-correlation over single month {}: {} days>'.format(
            self.month, self.nday)
class CrossCorrelation:
"""
Cross-correlation class, which contains:
- a pair of stations
- a pair of sets of locations (from trace.location)
- a pair of sets of ids (from trace.id)
- start day, end day and nb of days of cross-correlation
- distance between stations
- a time array and a (cross-correlation) data array
"""
def __init__(self, station1, station2, xcorr_dt=PERIOD_RESAMPLE,
             xcorr_tmax=CROSSCORR_TMAX):
    """
    @type station1: L{pysismo.psstation.Station}
    @type station2: L{pysismo.psstation.Station}
    @type xcorr_dt: float
    @type xcorr_tmax: float

    xcorr_dt is the sample interval (s) and xcorr_tmax the maximum lag (s);
    the time/data arrays span lags [-xcorr_tmax, +xcorr_tmax].
    """
    # pair of stations
    self.station1 = station1
    self.station2 = station2
    # locations and trace ids of stations
    self.locs1 = set()
    self.locs2 = set()
    self.ids1 = set()
    self.ids2 = set()
    # initializing stats
    self.startday = None
    self.endday = None
    self.nday = 0
    self.restart_time = None
    # initializing time and data arrays of cross-correlation
    # nmax = number of samples on each side of zero lag
    nmax = int(xcorr_tmax / xcorr_dt)
    self.timearray = np.arange(-nmax * xcorr_dt, (nmax + 1)*xcorr_dt, xcorr_dt)
    self.dataarray = np.zeros(2 * nmax + 1)
    # running sum of unit phasors (phase stack accumulator)
    self.phasearray = np.zeros(2 * nmax + 1)
    # phase-weighted stack accumulator
    self.pws = np.zeros(2 * nmax + 1)
    self.SNR_lin = []  # SNR with each time-step for linear stack
    self.SNR_pws = []  # SNR with each time-step for phase-weighted stack
    self.SNR_stack = None
    self.SNR_max = 0
    self.comb_stack = None
    # has cross-corr been symmetrized? whitened?
    self.symmetrized = False
    self.whitened = False
    # initializing list of cross-correlations over a single month
    self.monthxcs = []
def __repr__(self):
    """Short summary, e.g. '<cross-correlation between stations A-B: avg 5 stacks>'"""
    return '<cross-correlation between stations {0}-{1}: avg {2} stacks>'.format(
        self.station1.name, self.station2.name, self.nday)
def __str__(self):
    """
    E.g., 'Cross-correlation between stations SPB['10'] - ITAB['00','10']:
    365 days from 2002-01-01 to 2002-12-01'
    """
    def quoted(locs):
        # e.g. {'00', '10'} -> "'00','10'"
        return ','.join(sorted("'{}'".format(loc) for loc in locs))

    template = ('Cross-correlation between stations '
                '{sta1}[{locs1}]-{sta2}[{locs2}]: '
                '{nday} stacks from {start} to {end}')
    return template.format(sta1=self.station1.name, locs1=quoted(self.locs1),
                           sta2=self.station2.name, locs2=quoted(self.locs2),
                           nday=self.nday, start=self.startday,
                           end=self.endday)
def dist(self):
    """
    Geodesic distance (in km) between stations, using the
    WGS-84 ellipsoidal model of the Earth

    Delegates to the Station object's own dist() method.
    """
    return self.station1.dist(self.station2)
def copy(self):
    """
    Makes a copy of self

    The copy is shallow except for *monthxcs*, whose elements are copied
    individually so the monthly stacks can be modified independently.
    """
    # shallow copy
    result = copy.copy(self)
    # copy of month cross-correlations
    result.monthxcs = [copy.copy(mxc) for mxc in self.monthxcs]
    return result
    def phase_stack(self, tr1, tr2, xcorr=None):
        """
        This function is used when the input parameter stack_type=='phase'
        and applies the technique of Schimmel et al. (1997) and stacks
        cross-correlation waveforms based on their instaneous phases. The
        technique is known as phase stacking (PS).

        @type tr1: L{obspy.core.trace.Trace}
        @type tr2: L{obspy.core.trace.Trace}
        @param xcorr: pre-computed cross-correlation array; if None it is
                      computed here with obspy
        @raise pserrors.NaNError: if the cross-correlation contains NaN
        """
        # cross-correlation
        if xcorr is None:
            # calculating cross-corr using obspy, if not already provided
            xcorr = obspy.signal.cross_correlation.xcorr(
                tr1, tr2, shift_len=self._get_xcorr_nmax(), full_xcorr=True)[2]
        # verifying that we don't have NaN
        if np.any(np.isnan(xcorr)):
            s = u"Got NaN in cross-correlation between traces:\n{tr1}\n{tr2}"
            raise pserrors.NaNError(s.format(tr1=tr1, tr2=tr2))
        # NOTE(review): this "instantaneous phase" is arctan2(sample value,
        # sample index), not the phase of the analytic signal (Hilbert
        # transform) that Schimmel et al. (1997) define -- confirm intent
        inst_phase = np.arctan2(xcorr, range(0,len(xcorr)))
        # phase-stacking cross-corr: np.real(exp(1j*phi)) == cos(phi),
        # i.e. each sample contributes a unit-amplitude phasor
        self.phasearray += np.real(np.exp(1j*inst_phase))
        # reduce stack about zero
        #self.phasearray = self.phasearray - np.mean(self.phasearray)
        # normalise about 0, max amplitude 1
def phase_weighted_stack(self, power_v=2):
"""
This function is applies the technique of Schimmel et al. (1997)
and stacks cross-correlation waveforms based on their instaneous
phases. The technique is known as phase-weighted stacking (PWS).
This uses a combination of the phase and linear stack and the number
of stacks performed. power_v variable is described in Schimmel et al.
and is almost always set at 2 for successful stacking.
"""
# this function only produces the most recent PWS, NOT stacking
# each iteration one atop the other! note also that nday is the
# number of iterations performed, NOT the number of days passed.
linear_comp = self.dataarray #- np.mean(self.dataarray))
phase_comp = np.abs(self.phasearray)# - \
#np.mean(np.abs(self.phasearray)))
self.pws += linear_comp * (phase_comp ** power_v)
#plt.figure()
#plt.plot(np.linspace(np.min(self.pws), np.max(self.pws),
# len(self.pws)), self.pws, alpha=0.5, c='r')
#plt.plot(np.linspace(np.min(self.pws), np.max(self.pws),
# len(self.pws)), self.pws, c='b', alpha=0.5)
#plt.show()
#self.pws = self.pws / np.max(self.pws)
#self.pws = self.pws - np.mean(self.pws)
def snr_weighted_stack(self):
"""
This function is applies the technique of Schimmel et al. (1997)
and stacks cross-correlation waveforms based on their instaneous
phases. The technique is known as phase-weighted stacking (PWS).
This uses a combination of the phase and linear stack and the number
of stacks performed. power_v variable is described in Schimmel et al.
and is almost always set at 2 for successful stacking.
"""
# this function only produces the most recent PWS, NOT stacking
# each iteration one atop the other! note also that nday is the
# number of iterations performed, NOT the number of days passed.
linear_comp = self.dataarray #- np.mean(self.dataarray))
rms = np.sqrt(np.mean(np.square(linear_comp)))
snr = np.max(linear_comp) / rms
if snr > self.SNR_max:
self.SNR_max = snr
snr_weight = snr / self.SNR_max
if self.SNR_stack is None:
self.SNR_stack = linear_comp
self.SNR_stack += self.SNR_stack * snr_weight
else:
self.SNR_stack += self.SNR_stack * snr_weight
def combined_stack(self, power_v=2):
"""
This function is applies the technique of Schimmel et al. (1997)
and stacks cross-correlation waveforms based on their instaneous
phases. The technique is known as phase-weighted stacking (PWS).
This uses a combination of the phase and linear stack and the number
of stacks performed. power_v variable is described in Schimmel et al.
and is almost always set at 2 for successful stacking.
"""
# this function only produces the most recent PWS, NOT stacking
# each iteration one atop the other! note also that nday is the
# number of iterations performed, NOT the number of days passed.
try:
phase_comp = np.abs(self.phasearray)
self.comb_stack = self.SNR_stack * (phase_comp ** power_v)
except Exception as error:
e = error
    def add(self, tr1, tr2, xcorr=None):
        """
        Stacks cross-correlation between 2 traces: adds the cross-corr
        to the running linear stack (self.dataarray) and updates the
        start/end days, the restart time, the number of stacks, the
        per-month stacks and the sets of locations/trace ids.

        @type tr1: L{obspy.core.trace.Trace}
        @type tr2: L{obspy.core.trace.Trace}
        @param xcorr: pre-computed cross-correlation array; if None it is
                      computed here with obspy
        @raise pserrors.NaNError: if the cross-correlation contains NaN
        """
        # verifying sampling rates
        #try:
        #    assert 1.0 / tr1.stats.sampling_rate == self._get_xcorr_dt()
        #    assert 1.0 / tr2.stats.sampling_rate == self._get_xcorr_dt()
        #except AssertionError:
        #    s = 'Sampling rates of traces are not equal:\n{tr1}\n{tr2}'
        #    raise Exception(s.format(tr1=tr1, tr2=tr2))
        # cross-correlation
        if xcorr is None:
            # calculating cross-corr using obspy, if not already provided
            xcorr = obspy.signal.cross_correlation.xcorr(
                tr1, tr2, shift_len=self._get_xcorr_nmax(), full_xcorr=True)[2]
        # verifying that we don't have NaN
        if np.any(np.isnan(xcorr)):
            s = u"Got NaN in cross-correlation between traces:\n{tr1}\n{tr2}"
            raise pserrors.NaNError(s.format(tr1=tr1, tr2=tr2))
        #print "FULL_COMB: ", FULL_COMB
        #if FULL_COMB:
        #    xcorr1 = obspy.signal.cross_correlation.xcorr(
        #       tr1, tr2, shift_len=self._get_xcorr_nmax(), full_xcorr=True)[2]
        #    xcorr2 = obspy.signal.cross_correlation.xcorr(
        #        tr2, tr1, shift_len=self._get_xcorr_nmax(), full_xcorr=True)[2]
        # generate obspy Stream object to save to miniseed
        #    tr_start, tr_end = tr1.stats.starttime, tr1.stats.endtime
        #    stat1, stat2 = tr1.stats.station, tr2.stats.station
        #    timelist_dir = sorted(os.listdir(CROSSCORR_DIR))
        #    xcorr_mseed = os.path.join(CROSSCORR_DIR, timelist_dir[-1],
        #                               'mseed')
        #    if not os.path.exists(xcorr_mseed): os.mkdir(xcorr_mseed)
        #    xcorr_str = 'xcorr_{}-{}_{}-{}.mseed'.format(stat1, stat2,
        #                                                 tr_start, tr_end)
        #    trace1, trace2 = Trace(data=xcorr1), Trace(data=xcorr2)
        # assign header metadata for xcorr mseed
        #    trace1.stats.network, trace2.stats.network = \
        #        tr1.stats.network, tr2.stats.network
        #    trace1.stats.station, trace2.stats.station = stat1, stat2
        #    trace1.stats.channel, trace2.stats.channel = \
        #        tr1.stats.channel, tr2.stats.channel
        #    xcorr_st = Stream(traces=[trace1, trace2])
        #    xcorr_output = os.path.join(xcorr_mseed, xcorr_str)
        #    print "xcorr_output: ", xcorr_output
        #    xcorr_st.write(xcorr_output, format='MSEED')
        #
        # accumulating the (unnormalized) cross-corr into the linear stack
        self.dataarray += xcorr#/ np.max(xcorr) #/ (self.nday + 1)
        # reduce stack about zero
        #self.dataarray = self.dataarray - np.mean(self.dataarray)
        # normalise about 0, max amplitude 1
        #self.dataarray = self.dataarray / np.max(self.dataarray)
        #self.phase_stack(tr1, tr2, xcorr=xcorr)
        #self.phase_weighted_stack()
        #self.snr_weighted_stack()
        #self.combined_stack()
        #plt.figure()
        #plt.title('pws')
        #plt.plot(self.timearray, self.pws)
        #plt.show()
        #plt.clf()
        # updating stats: 1st day, last day, nb of days of cross-corr
        # (+/- ONESEC pushes the timestamps safely inside the day)
        startday = (tr1.stats.starttime + ONESEC)
        self.startday = min(self.startday, startday) if self.startday else startday
        endday = (tr1.stats.endtime - ONESEC)
        self.restart_time = endday
        self.endday = max(self.endday, endday) if self.endday else endday
        self.nday += 1
        # stacking cross-corr over single month
        month = MonthYear((tr1.stats.starttime + ONESEC).date)
        try:
            monthxc = next(monthxc for monthxc in self.monthxcs
                           if monthxc.month == month)
        except StopIteration:
            # appending new month xc
            monthxc = MonthCrossCorrelation(month=month, ndata=len(self.timearray))
            self.monthxcs.append(monthxc)
        # NOTE(review): when shapes differ this assumes monthxc.dataarray is
        # exactly one sample longer than xcorr -- TODO confirm with
        # MonthCrossCorrelation's ndata convention
        if monthxc.dataarray.shape != xcorr.shape:
            monthxc.dataarray[:-1] += xcorr
        else:
            monthxc.dataarray += xcorr
        monthxc.nday += 1
        # updating (adding) locs and ids
        self.locs1.add(tr1.stats.location)
        self.locs2.add(tr2.stats.location)
        self.ids1.add(tr1.id)
        self.ids2.add(tr2.id)
def symmetrize(self, inplace=False):
"""
Symmetric component of cross-correlation (including
the list of cross-corr over a single month).
Returns self if already symmetrized or inPlace=True
@rtype: CrossCorrelation
"""
if self.symmetrized:
# already symmetrized
return self
# symmetrizing on self or copy of self
xcout = self if inplace else self.copy()
n = len(xcout.timearray)
mid = (n - 1) / 2
if n % 2 != 1:
xcout.timearray = xcout.timearray[:-1]
n = len(xcout.timearray)
# verifying that time array is symmetric wrt 0
if n % 2 != 1:
raise Exception('Cross-correlation cannot be symmetrized')
if not np.alltrue(xcout.timearray[mid:] + xcout.timearray[mid::-1] < EPS):
raise Exception('Cross-correlation cannot be symmetrized')
# calculating symmetric component of cross-correlation
xcout.timearray = xcout.timearray[mid:]
for obj in [xcout] + (xcout.monthxcs if hasattr(xcout, 'monthxcs') else []):
a = obj.dataarray
a_mid = a[mid:]
a_mid_rev = a_mid[::-1]
obj.dataarray = (a_mid + a_mid_rev) / 2.0
xcout.symmetrized = True
return xcout
    def whiten(self, inplace=False, window_freq=0.004,
               bandpass_tmin=7.0, bandpass_tmax=150):
        """
        Spectral whitening of cross-correlation (including
        the list of cross-corr over a single month): the amplitude
        spectrum is divided by a smoothed version of itself, then the
        result is bandpassed between *bandpass_tmin* and *bandpass_tmax*
        (periods in s).

        @param inplace: whiten self (True) or a copy of self (False)
        @param window_freq: width (in Hz) of the smoothing window applied
                            to the amplitude spectrum
        @rtype: CrossCorrelation
        """
        if hasattr(self, 'whitened') and self.whitened:
            # already whitened
            return self
        # whitening on self or copy of self
        xcout = self if inplace else self.copy()
        # frequency step
        npts = len(xcout.timearray)
        sampling_rate = 1.0 / xcout._get_xcorr_dt()
        deltaf = sampling_rate / npts
        # loop over cross-corr and one-month stacks
        for obj in [xcout] + (xcout.monthxcs if hasattr(xcout, 'monthxcs') else []):
            a = obj.dataarray
            # Fourier transform
            ffta = rfft(a)
            # smoothing amplitude spectrum: half-window in frequency bins
            halfwindow = int(round(window_freq / deltaf / 2.0))
            weight = psutils.moving_avg(abs(ffta), halfwindow=halfwindow)
            # whitened signal written back in place into the data array
            a[:] = irfft(ffta / weight, n=npts)
            # bandpass to avoid low/high freq noise
            obj.dataarray = psutils.bandpass_butterworth(data=a,
                                                         dt=xcout._get_xcorr_dt(),
                                                         periodmin=bandpass_tmin,
                                                         periodmax=bandpass_tmax)
        xcout.whitened = True
        return xcout
def signal_noise_windows2(self, vmin, vmax, signal2noise_trail,
noise_window_size):
"""
Returns the signal window and the noise window.
The signal window is defined by *vmin* and *vmax*:
dist/*vmax* < t < dist/*vmin*
The noise window starts *signal2noise_trail* after the
signal window and has a size of *noise_window_size*:
t > dist/*vmin* + *signal2noise_trail*
t < dist/*vmin* + *signal2noise_trail* + *noise_window_size*
If the noise window hits the time limit of the cross-correlation,
we try to extend it to the left until it hits the signal
window.
@rtype: (float, float), (float, float)
"""
#print "distance is {} km.".format(self.dist())
# signal window
tmin_signal = self.dist() / vmax
tmax_signal = self.dist() / vmin
# noise window
tmin_noise = tmax_signal + signal2noise_trail
tmax_noise = tmin_noise + noise_window_size
if tmax_noise > self.timearray.max() or tmin_noise > self.timearray.max():
# the noise window hits the rightmost limit:
# let's shift it to the left without crossing
# the signal window
delta = min(tmax_noise-self.timearray.max(),tmin_noise-tmax_signal)
tmin_noise -= delta
tmax_noise -= delta
tmin_noise = tmax_signal
tmax_noise = self.timearray.max()
return (tmin_signal, tmax_signal), (tmin_noise, tmax_noise)
def signal_noise_windows(self, vmin, vmax, signal2noise_trail,
noise_window_size):
"""
The following function takes the data and time arrays and
sets about to quickly and crudely find the signal to noise ratios
of the given data-arrays. It takes a signal width of 10 indices.
"""
dataarray = list(self.dataarray)
timearray = list(self.timearray)
index_max = dataarray.index(max(dataarray))
tmin_signal = timearray[index_max] - 10
tmax_signal = timearray[index_max] + 10
tmin_noise = 0
tmax_noise = tmin_noise + noise_window_size
if tmax_noise > tmin_signal:
tmax_noise = tmin_signal - 1
if tmin_signal <= 0.0 or tmax_signal:
tmin_signal = timearray[0]
tmax_signal = timearray[20]
tmin_noise = timearray[int(len(timearray) - 1 - noise_window_size)]
tmax_noise = timearray[int(len(timearray) - 1)]
return (tmin_signal, tmax_signal), (tmin_noise, tmax_noise)
    def SNR_table(self, vmin=SIGNAL_WINDOW_VMIN, vmax=SIGNAL_WINDOW_VMAX,
                  signal2noise_trail=SIGNAL2NOISE_TRAIL,
                  noise_window_size=NOISE_WINDOW_SIZE, date=None,
                  verbose=False):
        """
        This function provides a means to calculate the SNR of a given signal
        (either the linear or phase-weighted stack of an input station pair)
        and returns is as a signal float value which is then appended to the
        lists: SNR_lin and SNR_pws.

        The SNR is peak absolute amplitude in the (two-sided) signal window
        divided by the standard deviation of everything outside it. Any
        exception in either section is swallowed (printed only if
        *verbose*), so a failed computation simply appends nothing.

        @param date: timestamp stored next to each SNR value
        """
        #======================================================================
        # RUN FOR LINEAR STACK PROCESSES
        #======================================================================
        # linear stack at current time step of the xcorrs for station pair
        dataarray = self.dataarray
        timearray = self.timearray
        try:
            # signal and noise windows
            tsignal, tnoise = self.signal_noise_windows(
                vmin, vmax, signal2noise_trail, noise_window_size)
            # signal windows on both the positive and negative lags
            signal_window_plus = (timearray >= tsignal[0]) & \
                                 (timearray <= tsignal[1])
            signal_window_minus = (timearray <= -tsignal[0]) & \
                                  (timearray >= -tsignal[1])
            peak1 = np.abs(dataarray[signal_window_plus]).max()
            peak2 = np.abs(dataarray[signal_window_minus]).max()
            peak = max([peak1, peak2])
            # noise = everything outside the two signal windows
            noise_window1 = (timearray > tsignal[1]) & \
                            (timearray <= self.timearray[-1])
            noise_window2 = (timearray > -tsignal[0]) & \
                            (timearray < tsignal[0])
            noise_window3 = (timearray >= self.timearray[0]) & \
                            (timearray < -tsignal[1])
            noise1 = dataarray[noise_window1]
            noise2 = dataarray[noise_window2]
            noise3 = dataarray[noise_window3]
            noise_list = list(it.chain(*[noise1, noise2, noise3]))
            noise = np.nanstd(noise_list)
            #SNR with each time-step for linear stack
            self.SNR_lin.append([peak / noise, date])
        except Exception as err:
            if verbose:
                print 'There was an unexpected error: ', err
            else:
                # no-op placeholder: errors are silently ignored
                a=5
        #======================================================================
        # RUN FOR PHASE-WEIGHTED STACK PROCESSES
        #======================================================================
        # pws at current time step of the xcorrs for station pair
        pws = self.pws
        try:
            # signal and noise windows
            tsignal, tnoise = self.signal_noise_windows(
                vmin, vmax, signal2noise_trail, noise_window_size)
            signal_window_plus = (timearray >= tsignal[0]) & \
                                 (timearray <= tsignal[1])
            signal_window_minus = (timearray <= -tsignal[0]) & \
                                  (timearray >= -tsignal[1])
            peak1 = np.abs(pws[signal_window_plus]).max()
            peak2 = np.abs(pws[signal_window_minus]).max()
            peak = max([peak1, peak2])
            noise_window1 = (timearray > tsignal[1]) & \
                            (timearray <= self.timearray[-1])
            noise_window2 = (timearray > -tsignal[0]) & \
                            (timearray < tsignal[0])
            noise_window3 = (timearray >= self.timearray[0]) & \
                            (timearray < -tsignal[1])
            noise1 = pws[noise_window1]
            noise2 = pws[noise_window2]
            noise3 = pws[noise_window3]
            noise_list = list(it.chain(*[noise1, noise2, noise3]))
            #plt.figure()
            #plt.plot(self.timearray[noise_window1],
            #         pws[noise_window1], color='green')
            #plt.plot(self.timearray[noise_window2],
            #         pws[noise_window2], color='green')
            #plt.plot(self.timearray[noise_window3],
            #         pws[noise_window3], color='green')
            #plt.plot(self.timearray[signal_window_plus],
            #         pws[signal_window_plus], color='blue')
            #plt.plot(self.timearray[signal_window_minus],
            #         pws[signal_window_minus], color='blue')
            #plt.show()
            noise = np.nanstd(noise_list)
            # guard against division by zero/None/NaN before appending
            if peak or noise:
                SNR_rat = peak/noise
                #SNR with each time-step for phase-weighted stack
                if SNR_rat:
                    self.SNR_pws.append([SNR_rat, date])
            else:
                raise Exception("\nSNR division by zero, None or Nan error, \
                skipping ...")
        except Exception as err:
            if verbose:
                print 'There was an unexpected error: ', err
def SNR(self, periodbands=None,
centerperiods_and_alpha=None,
whiten=False, months=None,
vmin=SIGNAL_WINDOW_VMIN,
vmax=SIGNAL_WINDOW_VMAX,
signal2noise_trail=SIGNAL2NOISE_TRAIL,
noise_window_size=NOISE_WINDOW_SIZE):
"""
[spectral] signal-to-noise ratio, calculated as the peak
of the absolute amplitude in the signal window divided by
the standard deviation in the noise window.
If period bands are given (in *periodbands*, as a list of
(periodmin, periodmax)), then for each band the SNR is
calculated after band-passing the cross-correlation using
a butterworth filter.
If center periods and alpha are given (in *centerperiods_and_alpha*,
as a list of (center period, alpha)), then for each center
period and alpha the SNR is calculated after band-passing
the cross-correlation using a Gaussian filter
The signal window is defined by *vmin* and *vmax*:
dist/*vmax* < t < dist/*vmin*
The noise window starts *signal2noise_trail* after the
signal window and has a size of *noise_window_size*:
t > dist/*vmin* + *signal2noise_trail*
t < dist/*vmin* + *signal2noise_trail* + *noise_window_size*
If the noise window hits the time limit of the cross-correlation,
we try to extend it to the left until it hits the signal
window.
@type periodbands: (list of (float, float))
@type whiten: bool
@type vmin: float
@type vmax: float
@type signal2noise_trail: float
@type noise_window_size: float
@type months: list of (L{MonthYear} or (int, int))
@rtype: L{numpy.ndarray}
"""
# symmetric part of cross-corr
xcout = self.symmetrize(inplace=False)
#print "vmin: ", vmin
#print "vmax: ", vmax
#print "xcout: ", xcout
#print "xcout time array: ", xcout.timearray
# spectral whitening
if whiten:
xcout = xcout.whiten(inplace=False)
# cross-corr of desired months
xcdata = xcout._get_monthyears_xcdataarray(months=None)
#print "xcdata: ", xcdata
# filter type and associated arguments
if periodbands:
filtertype = 'Butterworth'
kwargslist = [{'periodmin': band[0], 'periodmax': band[1]}
for band in periodbands]
elif centerperiods_and_alpha:
filtertype = 'Gaussian'
kwargslist = [{'period': period, 'alpha': alpha}
for period, alpha in centerperiods_and_alpha]
else:
filtertype = None
kwargslist = [{}]
#print "kwargslist: ", kwargslist
SNR = []
try:
for filterkwargs in kwargslist:
if not filtertype:
dataarray = xcdata
else:
# bandpass filtering data before calculating SNR
dataarray = psutils.bandpass(data=xcdata,
dt=xcout._get_xcorr_dt(),
filtertype=filtertype,
**filterkwargs)
# signal and noise windows
tsignal, tnoise = xcout.signal_noise_windows(
vmin, vmax, signal2noise_trail, noise_window_size)
signal_window = (xcout.timearray >= tsignal[0]) & \
(xcout.timearray <= tsignal[1])
noise_window = (xcout.timearray >= tnoise[0]) & \
(xcout.timearray <= tnoise[1])
#print "t signal, t noise: ", tsignal, tnoise
#print "signal_window: ", signal_window
#print "noise_window: ", noise_window
peak = np.abs(dataarray[signal_window]).max()
noise = np.nanstd(dataarray[noise_window])
#print "peak: ", peak
#print "noise: ", noise
# appending SNR
# check for division by zero, None or Nan
if peak or noise:
SNR.append(peak / noise)
else:
raise Exception("\nDivision by zero, None or Nan error, \
skipping ...")
#print "SNR: ", SNR
self._SNRs = np.array(SNR) if len(SNR) > 1 else np.array(SNR)
self.filterkwargs = filterkwargs
return np.array(SNR) if len(SNR) > 1 else np.array(SNR)
#else:
# return None
except Exception as err:
print 'There was an unexpected error: ', err
    def plot(self, whiten=False, sym=False, vmin=SIGNAL_WINDOW_VMIN,
             vmax=SIGNAL_WINDOW_VMAX, months=None):
        """
        Plots cross-correlation and its spectrum: top panel shows the
        cross-correlation vs time (with vertical markers at dist/vmin and
        dist/vmax when given), bottom panel its amplitude spectrum.

        @param sym: plot the symmetrized cross-correlation
        @param whiten: spectrally whiten before plotting
        @type months: list of (L{MonthYear} or (int, int))
        """
        xcout = self.symmetrize(inplace=False) if sym else self
        if whiten:
            xcout = xcout.whiten(inplace=False)
        # cross-corr of desired months
        xcdata = xcout._get_monthyears_xcdataarray(months=months)
        # cross-correlation plot ===
        plt.figure()
        plt.subplot(2, 1, 1)
        plt.plot(xcout.timearray, xcdata)
        plt.xlabel('Time (s)')
        plt.ylabel('Cross-correlation')
        plt.grid()
        # vmin, vmax velocity markers (t = dist / v)
        vkwargs = {
            'fontsize': 8,
            'horizontalalignment': 'center',
            'bbox': dict(color='k', facecolor='white')}
        if vmin:
            ylim = plt.ylim()
            plt.plot(2 * [xcout.dist() / vmin], ylim, color='grey')
            xy = (xcout.dist() / vmin, plt.ylim()[0])
            plt.annotate('{0} km/s'.format(vmin), xy=xy, xytext=xy, **vkwargs)
            plt.ylim(ylim)
        if vmax:
            ylim = plt.ylim()
            plt.plot(2 * [xcout.dist() / vmax], ylim, color='grey')
            xy = (xcout.dist() / vmax, plt.ylim()[0])
            plt.annotate('{0} km/s'.format(vmax), xy=xy, xytext=xy, **vkwargs)
            plt.ylim(ylim)
        # title
        plt.title(xcout._plottitle(months=months))
        # spectrum plot ===
        plt.subplot(2, 1, 2)
        plt.xlabel('Frequency (Hz)')
        plt.ylabel('Amplitude')
        plt.grid()
        # frequency and amplitude arrays
        npts = len(xcdata)
        # number of rfft bins (Python 2 integer division intended here)
        nfreq = npts / 2 + 1 if npts % 2 == 0 else (npts + 1) / 2
        sampling_rate = 1.0 / xcout._get_xcorr_dt()
        freqarray = np.arange(nfreq) * sampling_rate / npts
        amplarray = np.abs(rfft(xcdata))
        plt.plot(freqarray, amplarray)
        plt.xlim((0.0, 0.2))
        plt.show()
def plot_by_period_band(self, axlist=None, bands=PERIOD_BANDS,
plot_title=True, whiten=False, tmax=None,
vmin=SIGNAL_WINDOW_VMIN,
vmax=SIGNAL_WINDOW_VMAX,
signal2noise_trail=SIGNAL2NOISE_TRAIL,
noise_window_size=NOISE_WINDOW_SIZE,
months=None, outfile=None):
"""
Plots cross-correlation for various bands of periods
The signal window:
vmax / dist < t < vmin / dist,
and the noise window:
t > vmin / dist + signal2noise_trail
t < vmin / dist + signal2noise_trail + noise_window_size,
serve to estimate the SNRs and are highlighted on the plot.
If *tmax* is not given, default is to show times up to the noise
window (plus 5 %). The y-scale is adapted to fit the min and max
cross-correlation AFTER the beginning of the signal window.
@type axlist: list of L{matplotlib.axes.AxesSubplot}
"""
# one plot per band + plot of original xcorr
nplot = len(bands) + 1
# limits of time axis
if not tmax:
# default is to show time up to the noise window (plus 5 %)
tmax = self.dist() / vmin + signal2noise_trail + noise_window_size
tmax = min(1.05 * tmax, self.timearray.max())
xlim = (0, tmax)
# creating figure if not given as input
fig = None
if not axlist:
fig = plt.figure()
axlist = [fig.add_subplot(nplot, 1, i) for i in range(1, nplot + 1)]
for ax in axlist:
# smaller y tick label
ax.tick_params(axis='y', labelsize=9)
axlist[0].get_figure().subplots_adjust(hspace=0)
# symmetrization
xcout = self.symmetrize(inplace=False)
# spectral whitening
if whiten:
xcout = xcout.whiten(inplace=False)
# cross-corr of desired months
xcdata = xcout._get_monthyears_xcdataarray(months=months)
# limits of y-axis = min/max of the cross-correlation
# AFTER the beginning of the signal window
mask = (xcout.timearray >= min(self.dist() / vmax, xlim[1])) & \
(xcout.timearray <= xlim[1])
ylim = (xcdata[mask].min(), xcdata[mask].max())
# signal and noise windows
tsignal, tnoise = xcout.signal_noise_windows(
vmin, vmax, signal2noise_trail, noise_window_size)
# plotting original cross-correlation
axlist[0].plot(xcout.timearray, xcdata)
# title
if plot_title:
title = xcout._plottitle(prefix='Cross-corr. ', months=months)
axlist[0].set_title(title)
# signal window
for t, v, align in zip(tsignal, [vmax, vmin], ['right', 'left']):
axlist[0].plot(2 * [t], ylim, color='k', lw=1.5)
xy = (t, ylim[0] + 0.1 * (ylim[1] - ylim[0]))
axlist[0].annotate(s='{} km/s'.format(v), xy=xy, xytext=xy,
horizontalalignment=align, fontsize=8,
bbox={'color': 'k', 'facecolor': 'white'})
# noise window
axlist[0].fill_between(x=tnoise, y1=[ylim[1], ylim[1]],
y2=[ylim[0], ylim[0]], color='k', alpha=0.2)
# inserting text, e.g., "Original data, SNR = 10.1"
SNR = xcout.SNR(vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size)
axlist[0].text(x=xlim[1],
y=ylim[0] + 0.85 * (ylim[1] - ylim[0]),
s="Original data, SNR ",
fontsize=9,
horizontalalignment='right',
bbox={'color': 'k', 'facecolor': 'white'})
# formatting axes
axlist[0].set_xlim(xlim)
axlist[0].set_ylim(ylim)
axlist[0].grid(True)
# formatting labels
axlist[0].set_xticklabels([])
axlist[0].get_figure().canvas.draw()
labels = [l.get_text() for l in axlist[0].get_yticklabels()]
labels[0] = labels[-1] = ''
labels[2:-2] = [''] * (len(labels) - 4)
axlist[0].set_yticklabels(labels)
# plotting band-filtered cross-correlation
for ax, (tmin, tmax) in zip(axlist[1:], bands):
lastplot = ax is axlist[-1]
dataarray = psutils.bandpass_butterworth(data=xcdata,
dt=xcout._get_xcorr_dt(),
periodmin=tmin,
periodmax=tmax)
# limits of y-axis = min/max of the cross-correlation
# AFTER the beginning of the signal window
mask = (xcout.timearray >= min(self.dist() / vmax, xlim[1])) & \
(xcout.timearray <= xlim[1])
ylim = (dataarray[mask].min(), dataarray[mask].max())
ax.plot(xcout.timearray, dataarray)
# signal window
for t in tsignal:
ax.plot(2 * [t], ylim, color='k', lw=2)
# noise window
ax.fill_between(x=tnoise, y1=[ylim[1], ylim[1]],
y2=[ylim[0], ylim[0]], color='k', alpha=0.2)
# inserting text, e.g., "10 - 20 s, SNR = 10.1"
SNR = float(xcout.SNR(periodbands=[(tmin, tmax)],
vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size))
ax.text(x=xlim[1],
y=ylim[0] + 0.85 * (ylim[1] - ylim[0]),
s="{} - {} s, SNR = {:.1f}".format(tmin, tmax, SNR),
fontsize=9,
horizontalalignment='right',
bbox={'color': 'k', 'facecolor': 'white'})
if lastplot:
# adding label to signalwindows
ax.text(x=self.dist() * (1.0 / vmin + 1.0 / vmax) / 2.0,
y=ylim[0] + 0.1 * (ylim[1] - ylim[0]),
s="Signal window",
horizontalalignment='center',
fontsize=8,
bbox={'color': 'k', 'facecolor': 'white'})
# adding label to noise windows
ax.text(x=sum(tnoise) / 2,
y=ylim[0] + 0.1 * (ylim[1] - ylim[0]),
s="Noise window",
horizontalalignment='center',
fontsize=8,
bbox={'color': 'k', 'facecolor': 'white'})
# formatting axes
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.grid(True)
if lastplot:
ax.set_xlabel('Time (s)')
# formatting labels
if not lastplot:
ax.set_xticklabels([])
ax.get_figure().canvas.draw()
labels = [l.get_text() for l in ax.get_yticklabels()]
labels[0] = labels[-1] = ''
labels[2:-2] = [''] * (len(labels) - 4)
ax.set_yticklabels(labels)
if outfile:
axlist[0].gcf().savefig(outfile, dpi=300, transparent=True)
if fig:
fig.show()
    def FTAN(self, whiten=False, phase_corr=None, months=None, vgarray_init=None,
             optimize_curve=None, strength_smoothing=STRENGTH_SMOOTHING,
             use_inst_freq=USE_INSTANTANEOUS_FREQ, vg_at_nominal_freq=None,
             debug=False):
        """
        Frequency-time analysis of a cross-correlation function.

        Calculates the Fourier transform of the cross-correlation,
        calculates the analytic signal in the frequency domain,
        applies Gaussian bandpass filters centered around given
        center periods, calculates the filtered analytic
        signal back in time domain and extracts the group velocity
        dispersion curve.

        Options:
        - set *whiten*=True to whiten the spectrum of the cross-corr.
        - provide a function of frequency in *phase_corr* to include a
          phase correction.
        - provide a list of (int, int) in *months* to restrict the FTAN
          to a subset of month-year
        - provide an initial guess of dispersion curve (in *vgarray_init*)
          to accelerate the group velocity curve extraction
        - set *optimize_curve*=True to further optimize the dispersion
          curve, i.e., find the curve that really minimizes the penalty
          function (which seeks to maximize the traversed amplitude while
          penalizing jumps) -- but not necessarily rides through
          local maxima any more. Default is True for the raw FTAN (no phase
          corr provided), False for the clean FTAN (phase corr provided)
        - set the strength of the smoothing term of the dispersion curve
          in *strength_smoothing*
        - set *use_inst_freq*=True to replace the nominal frequency with
          the instantaneous frequency in the dispersion curve.
        - if an array is provided in *vg_at_nominal_freq*, then it is filled
          with the vg curve BEFORE the nominal freqs are replaced with
          instantaneous freqs

        Returns (1) the amplitude matrix A(T0,v), (2) the phase matrix
        phi(T0,v) (that is, the amplitude and phase function of velocity
        v of the analytic signal filtered around period T0) and (3) the
        group velocity disperion curve extracted from the amplitude
        matrix.

        Raises CannotCalculateInstFreq if the calculation of instantaneous
        frequencies only gives bad values.

        FTAN periods in variable *RAWFTAN_PERIODS* and *CLEANFTAN_PERIODS*
        FTAN velocities in variable *FTAN_VELOCITIES*

        See. e.g., Levshin & Ritzwoller, "Automated detection,
        extraction, and measurement of regional surface waves",
        Pure Appl. Geoph. (2001) and Bensen et al., "Processing
        seismic ambient noise data to obtain reliable broad-band
        surface wave dispersion measurements", Geophys. J. Int. (2007).

        @type whiten: bool
        @type phase_corr: L{scipy.interpolate.interpolate.interp1d}
        @type months: list of (L{MonthYear} or (int, int))
        @type vgarray_init: L{numpy.ndarray}
        @type vg_at_nominal_freq: L{numpy.ndarray}
        @rtype: (L{numpy.ndarray}, L{numpy.ndarray}, L{DispersionCurve})
        """
        # no phase correction given <=> raw FTAN
        raw_ftan = phase_corr is None
        if optimize_curve is None:
            optimize_curve = raw_ftan
        ftan_periods = RAWFTAN_PERIODS if raw_ftan else CLEANFTAN_PERIODS
        # getting the symmetrized cross-correlation
        xcout = self.symmetrize(inplace=False)
        # whitening cross-correlation
        if whiten:
            xcout = xcout.whiten(inplace=False)
        # cross-corr of desired months
        xcdata = xcout._get_monthyears_xcdataarray(months=months)
        if xcdata is None:
            raise Exception('No data to perform FTAN in selected months')
        # FTAN analysis: amplitute and phase function of
        # center periods T0 and time t
        # (calls the module-level FTAN() function, not this method)
        ampl, phase = FTAN(x=xcdata,
                           dt=xcout._get_xcorr_dt(),
                           periods=ftan_periods,
                           alpha=FTAN_ALPHA,
                           phase_corr=phase_corr)
        # re-interpolating amplitude and phase as functions
        # of center periods T0 and velocities v
        tne0 = xcout.timearray != 0.0
        x = ftan_periods # x = periods
        y = (self.dist() / xcout.timearray[tne0])[::-1] # y = velocities
        # force y to be strictly increasing!
        # NOTE(review): offending samples are bumped by a fixed +1 km/s
        # above their neighbor, which distorts the velocity axis locally
        for i in range(1, len(y)):
            if y[i] < y[i-1]:
                y[i] = y[i-1] + 1
        zampl = ampl[:, tne0][:, ::-1] # z = amplitudes
        zphase = phase[:, tne0][:, ::-1] # z = phases
        # spline interpolation
        ampl_interp_func = RectBivariateSpline(x, y, zampl)
        phase_interp_func = RectBivariateSpline(x, y, zphase)
        # re-sampling over periods and velocities
        ampl_resampled = ampl_interp_func(ftan_periods, FTAN_VELOCITIES)
        phase_resampled = phase_interp_func(ftan_periods, FTAN_VELOCITIES)
        # extracting the group velocity curve from the amplitude matrix,
        # that is, the velocity curve that maximizes amplitude and best
        # avoids jumps
        vgarray = extract_dispcurve(amplmatrix=ampl_resampled,
                                    velocities=FTAN_VELOCITIES,
                                    varray_init=vgarray_init,
                                    optimizecurve=optimize_curve,
                                    strength_smoothing=strength_smoothing)
        if not vg_at_nominal_freq is None:
            # filling array with group velocities before replacing
            # nominal freqs with instantaneous freqs
            vg_at_nominal_freq[...] = vgarray
        # if *use_inst_freq*=True, we replace nominal freq with instantaneous
        # freq, i.e., we consider that ampl[iT, :], phase[iT, :] and vgarray[iT]
        # actually correspond to period 2.pi/|dphi/dt|(t=arrival time), with
        # phi(.) = phase[iT, :] and arrival time = dist / vgarray[iT],
        # and we re-interpolate them along periods of *ftan_periods*
        nom2inst_periods = None
        if use_inst_freq:
            # array of arrival times
            tarray = xcout.dist() / vgarray
            # indices of arrival times in time array
            it = xcout.timearray.searchsorted(tarray)
            it = np.minimum(len(xcout.timearray) - 1, np.maximum(1, it))
            # instantaneous freq: omega = |dphi/dt|(t=arrival time),
            # with phi = phase of FTAN
            # (finite difference between samples it-1 and it of each row)
            dt = xcout.timearray[it] - xcout.timearray[it-1]
            nT = phase.shape[0]
            omega = np.abs((phase[range(nT), it] - phase[range(nT), it-1]) / dt)
            # -> instantaneous period = 2.pi/omega
            inst_periods = 2.0 * np.pi / omega
            assert isinstance(inst_periods, np.ndarray) # just to enable autocompletion
            if debug:
                plt.plot(ftan_periods, inst_periods)
            # removing outliers (inst periods too small or too different from nominal)
            reldiffs = np.abs((inst_periods - ftan_periods) / ftan_periods)
            discard = (inst_periods < MIN_INST_PERIOD) | \
                      (reldiffs > MAX_RELDIFF_INST_NOMINAL_PERIOD)
            inst_periods = np.where(discard, np.nan, inst_periods)
            # despiking curve of inst freqs (by removing values too
            # different from the running median)
            n = np.size(inst_periods)
            median_periods = []
            for i in range(n):
                sl = slice(max(i - HALFWINDOW_MEDIAN_PERIOD, 0),
                           min(i + HALFWINDOW_MEDIAN_PERIOD + 1, n))
                mask = ~np.isnan(inst_periods[sl])
                if np.any(mask):
                    med = np.median(inst_periods[sl][mask])
                    median_periods.append(med)
                else:
                    median_periods.append(np.nan)
            reldiffs = np.abs((inst_periods - np.array(median_periods)) / inst_periods)
            mask = ~np.isnan(reldiffs)
            inst_periods[mask] = np.where(reldiffs[mask] > MAX_RELDIFF_INST_MEDIAN_PERIOD,
                                          np.nan,
                                          inst_periods[mask])
            # filling holes by linear interpolation
            masknan = np.isnan(inst_periods)
            if masknan.all():
                # not a single correct value of inst period!
                s = "Not a single correct value of instantaneous period!"
                raise pserrors.CannotCalculateInstFreq(s)
            if masknan.any():
                inst_periods[masknan] = np.interp(x=masknan.nonzero()[0],
                                                  xp=(~masknan).nonzero()[0],
                                                  fp=inst_periods[~masknan])
            # looking for the increasing curve that best-fits
            # calculated instantaneous periods
            def fun(periods):
                # misfit wrt calculated instantaneous periods
                return np.sum((periods - inst_periods)**2)
            # constraints = positive increments
            constraints = [{'type': 'ineq', 'fun': lambda p, i=i: p[i+1] - p[i]}
                           for i in range(len(inst_periods) - 1)]
            res = minimize(fun, x0=ftan_periods, method='SLSQP', constraints=constraints)
            inst_periods = res['x']
            if debug:
                plt.plot(ftan_periods, inst_periods)
                plt.show()
            # re-interpolating amplitude, phase and dispersion curve
            # along periods of array *ftan_periods* -- assuming that
            # their are currently evaluated along *inst_periods*
            vgarray = np.interp(x=ftan_periods,
                                xp=inst_periods,
                                fp=vgarray,
                                left=np.nan,
                                right=np.nan)
            for iv in range(len(FTAN_VELOCITIES)):
                ampl_resampled[:, iv] = np.interp(x=ftan_periods,
                                                  xp=inst_periods,
                                                  fp=ampl_resampled[:, iv],
                                                  left=np.nan,
                                                  right=np.nan)
                phase_resampled[:, iv] = np.interp(x=ftan_periods,
                                                   xp=inst_periods,
                                                   fp=phase_resampled[:, iv],
                                                   left=np.nan,
                                                   right=np.nan)
            # list of (nominal period, inst period)
            # (Python 2: zip() returns a list here)
            nom2inst_periods = zip(ftan_periods, inst_periods)
        vgcurve = pstomo.DispersionCurve(periods=ftan_periods,
                                         v=vgarray,
                                         station1=self.station1,
                                         station2=self.station2,
                                         nom2inst_periods=nom2inst_periods)
        return ampl_resampled, phase_resampled, vgcurve
def FTAN_complete(self, whiten=False, months=None, add_SNRs=True,
vmin=SIGNAL_WINDOW_VMIN, vmax=SIGNAL_WINDOW_VMAX,
signal2noise_trail=SIGNAL2NOISE_TRAIL,
noise_window_size=NOISE_WINDOW_SIZE,
optimize_curve=None,
strength_smoothing=STRENGTH_SMOOTHING,
use_inst_freq=USE_INSTANTANEOUS_FREQ,
**kwargs):
"""
Frequency-time analysis including phase-matched filter and
seasonal variability:
(1) Performs a FTAN of the raw cross-correlation signal,
(2) Uses the raw group velocities to calculate the phase corr.
(3) Performs a FTAN with the phase correction
("phase matched filter")
(4) Repeats the procedure for all 12 trimesters if no
list of months is given
Optionally, adds spectral SNRs at the periods of the clean
vg curve. In this case, parameters *vmin*, *vmax*,
*signal2noise_trail*, *noise_window_size* control the location
of the signal window and the noise window
(see function xc.SNR()).
Options:
- set *whiten*=True to whiten the spectrum of the cross-corr.
- provide a list of (int, int) in *months* to restrict the FTAN
to a subset of month-year
- set *add_SNRs* to calculate the SNR function of period associated
with the disperions curves
- adjust the signal window and the noise window of the SNR through
*vmin*, *vmax*, *signal2noise_trail*, *noise_window_size*
- set *optimize_curve*=True to further optimize the dispersion
curve, i.e., find the curve that really minimizes the penalty
function (which seeks to maximize the traversed amplitude while
preserving smoothness) -- but not necessarily rides through
local maxima. Default is True for the raw FTAN, False for the
clean FTAN
- set the strength of the smoothing term of the dispersion curve
in *strength_smoothing*
- other *kwargs* sent to CrossCorrelation.FTAN()
Returns raw ampl, raw vg, cleaned ampl, cleaned vg.
See. e.g., Levshin & Ritzwoller, "Automated detection,
extraction, and measurement of regional surface waves",
Pure Appl. Geoph. (2001) and Bensen et al., "Processing
seismic ambient noise data to obtain reliable broad-band
surface wave dispersion measurements", Geophys. J. Int. (2007).
@type whiten: bool
@type months: list of (L{MonthYear} or (int, int))
@type add_SNRs: bool
@rtype: (L{numpy.ndarray}, L{numpy.ndarray},
L{numpy.ndarray}, L{DispersionCurve})
"""
# symmetrized, whitened cross-corr
xc = self.symmetrize(inplace=False)
if whiten:
xc = xc.whiten(inplace=False)
# raw FTAN (no need to whiten any more)
rawvg_init = np.zeros_like(RAWFTAN_PERIODS)
try:
rawampl, _, rawvg = xc.FTAN(whiten=False,
months=months,
optimize_curve=optimize_curve,
strength_smoothing=strength_smoothing,
use_inst_freq=use_inst_freq,
vg_at_nominal_freq=rawvg_init,
**kwargs)
except pserrors.CannotCalculateInstFreq:
# pb with instantaneous frequency: returnin NaNs
print "Warning: could not calculate instantenous frequencies in raw FTAN!"
rawampl = np.nan * np.zeros((len(RAWFTAN_PERIODS), len(FTAN_VELOCITIES)))
cleanampl = np.nan * np.zeros((len(CLEANFTAN_PERIODS), len(FTAN_VELOCITIES)))
rawvg = pstomo.DispersionCurve(periods=RAWFTAN_PERIODS,
v=np.nan * np.zeros(len(RAWFTAN_PERIODS)),
station1=self.station1,
station2=self.station2)
cleanvg = pstomo.DispersionCurve(periods=CLEANFTAN_PERIODS,
v=np.nan * np.zeros(len(CLEANFTAN_PERIODS)),
station1=self.station1,
station2=self.station2)
return cleanvg
# phase function from raw vg curve
phase_corr = xc.phase_func(vgcurve=rawvg)
# clean FTAN
cleanvg_init = np.zeros_like(CLEANFTAN_PERIODS)
try:
cleanampl, _, cleanvg = xc.FTAN(whiten=False,
phase_corr=phase_corr,
months=months,
optimize_curve=optimize_curve,
strength_smoothing=strength_smoothing,
use_inst_freq=use_inst_freq,
vg_at_nominal_freq=cleanvg_init,
**kwargs)
except pserrors.CannotCalculateInstFreq:
# pb with instantaneous frequency: returnin NaNs
print "Warning: could not calculate instantenous frequencies in clean FTAN!"
cleanampl = np.nan * np.zeros((len(CLEANFTAN_PERIODS), len(FTAN_VELOCITIES)))
cleanvg = pstomo.DispersionCurve(periods=CLEANFTAN_PERIODS,
v=np.nan * np.zeros(len(CLEANFTAN_PERIODS)),
station1=self.station1,
station2=self.station2)
return rawampl, rawvg, cleanampl, cleanvg
# adding spectral SNRs associated with the periods of the
# clean vg curve
if add_SNRs:
cleanvg.add_SNRs(xc, months=months,
vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size)
if months is None:
# set of available months (without year)
available_months = set(mxc.month.m for mxc in xc.monthxcs)
# extracting clean vg curves for all 12 trimesters:
# Jan-Feb-March, Feb-March-Apr ... Dec-Jan-Feb
for trimester_start in range(1, 13):
# months of trimester, e.g. [1, 2, 3], [2, 3, 4] ... [12, 1, 2]
trimester_months = [(trimester_start + i - 1) % 12 + 1
for i in range(3)]
# do we have data in all months?
if any(month not in available_months for month in trimester_months):
continue
# list of month-year whose month belong to current trimester
months_of_xc = [mxc.month for mxc in xc.monthxcs
if mxc.month.m in trimester_months]
# raw-clean FTAN on trimester data, using the vg curve
# extracted from all data as initial guess
try:
_, _, rawvg_trimester = xc.FTAN(
whiten=False,
months=months_of_xc,
vgarray_init=rawvg_init,
optimize_curve=optimize_curve,
strength_smoothing=strength_smoothing,
use_inst_freq=use_inst_freq,
**kwargs)
phase_corr_trimester = xc.phase_func(vgcurve=rawvg_trimester)
_, _, cleanvg_trimester = xc.FTAN(
whiten=False,
phase_corr=phase_corr_trimester,
months=months_of_xc,
vgarray_init=cleanvg_init,
optimize_curve=optimize_curve,
strength_smoothing=strength_smoothing,
use_inst_freq=use_inst_freq,
**kwargs)
except pserrors.CannotCalculateInstFreq:
# skipping trimester in case of pb with instantenous frequency
continue
# adding spectral SNRs associated with the periods of the
# clean trimester vg curve
if add_SNRs:
cleanvg_trimester.add_SNRs(xc, months=months_of_xc,
vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size)
# adding trimester vg curve
cleanvg.add_trimester(trimester_start, cleanvg_trimester)
return rawampl, rawvg, cleanampl, cleanvg
def phase_func(self, vgcurve):
    """
    Builds the phase function phase(f) from the group velocity
    curve obtained with self.FTAN, using the relationship:

      k(f) = 2.pi.integral[ 1/vg(f'), f'=f0..f ]
      phase(f) = distance.k(f)

    Returns the function phase: freq -> phase(freq)

    @param vgcurve: group velocity curve
    @type vgcurve: L{DispersionCurve}
    @rtype: L{scipy.interpolate.interpolate.interp1d}
    """
    # frequencies in increasing order, with matching velocities
    freqs = 1.0 / vgcurve.periods[::-1]
    vels = vgcurve.v[::-1]
    valid = ~np.isnan(vels)
    f = freqs[valid]
    slowness = 1.0 / vels[valid]
    # wavenumber k(f): cumulative trapezoidal integration of the
    # slowness 1/vg over frequency (k = 0 at the first frequency)
    increments = np.diff(f) * (slowness[1:] + slowness[:-1]) / 2.0
    k = np.concatenate(([0.0], 2.0 * np.pi * np.cumsum(increments)))
    # phase = distance * k, wrapped in an interpolator of frequency
    return interp1d(x=f, y=k * self.dist())
def plot_FTAN(self, rawampl=None, rawvg=None, cleanampl=None, cleanvg=None,
              whiten=False, months=None, showplot=True, normalize_ampl=True,
              logscale=True, bbox=BBOX_SMALL, figsize=(16, 5), outfile=None,
              vmin=SIGNAL_WINDOW_VMIN, vmax=SIGNAL_WINDOW_VMAX,
              signal2noise_trail=SIGNAL2NOISE_TRAIL,
              noise_window_size=NOISE_WINDOW_SIZE,
              **kwargs):
    """
    Plots 4 panels related to frequency-time analysis:
    - 1st panel contains the cross-correlation (original, and bandpass
      filtered: see method self.plot_by_period_band)
    - 2nd panel contains an image of log(ampl^2) (or ampl) function of period
      T and group velocity vg, where ampl is the amplitude of the
      raw FTAN (basically, the amplitude of the envelope of the
      cross-correlation at time t = dist / vg, after applying a Gaussian
      bandpass filter centered at period T). The raw and clean dispersion
      curves (group velocity function of period) are also shown.
    - 3rd panel shows the same image, but for the clean FTAN (wherein the
      phase of the cross-correlation is corrected thanks to the raw
      dispersion curve). Also shown are the clean dispersion curve,
      the 3-month dispersion curves, the standard deviation of the
      group velocity calculated from these 3-month dispersion curves
      and the SNR function of period.
      Only the velocities passing the default selection criteria
      (defined in the configuration file) are plotted.
    - 4th panel shows a small map with the pair of stations, with
      bounding box *bbox* = (min lon, max lon, min lat, max lat),
      and, if applicable, a plot of instantaneous vs nominal period

    The raw amplitude, raw dispersion curve, clean amplitude and clean
    dispersion curve of the FTAN are given in *rawampl*, *rawvg*,
    *cleanampl*, *cleanvg* (normally from method self.FTAN_complete).
    If not given, the FTAN is performed by calling self.FTAN_complete().

    Options:
    - Parameters *vmin*, *vmax*, *signal2noise_trail*, *noise_window_size*
      control the location of the signal window and the noise window
      (see function self.SNR()).
    - Set whiten=True to whiten the spectrum of the cross-correlation.
    - Set normalize_ampl=True to normalize the plotted amplitude (so
      that the max amplitude = 1 at each period).
    - Set logscale=True to plot log(ampl^2) instead of ampl.
    - Give a list of months in parameter *months* to perform the FTAN
      for a particular subset of months.
    - additional kwargs sent to *self.FTAN_complete*

    The method returns the plot figure.

    @param rawampl: 2D array containing the amplitude of the raw FTAN
    @type rawampl: L{numpy.ndarray}
    @param rawvg: raw dispersion curve
    @type rawvg: L{DispersionCurve}
    @param cleanampl: 2D array containing the amplitude of the clean FTAN
    @type cleanampl: L{numpy.ndarray}
    @param cleanvg: clean dispersion curve
    @type cleanvg: L{DispersionCurve}
    @type showplot: bool
    @param whiten: set to True to whiten the spectrum of the cross-correlation
    @type whiten: bool
    @param normalize_ampl: set to True to normalize amplitude
    @type normalize_ampl: bool
    @param months: list of months on which perform the FTAN (set to None to
                   perform the FTAN on all months)
    @type months: list of (L{MonthYear} or (int, int))
    @param logscale: set to True to plot log(ampl^2), to False to plot ampl
    @type logscale: bool
    @rtype: L{matplotlib.figure.Figure}
    """
    # performing FTAN analysis if needed
    if any(obj is None for obj in [rawampl, rawvg, cleanampl, cleanvg]):
        rawampl, rawvg, cleanampl, cleanvg = self.FTAN_complete(
            whiten=whiten, months=months, add_SNRs=True,
            vmin=vmin, vmax=vmax,
            signal2noise_trail=signal2noise_trail,
            noise_window_size=noise_window_size,
            **kwargs)
    if normalize_ampl:
        # normalizing amplitude at each period before plotting it
        # (so that the max = 1)
        # NOTE(review): division is in place, so the caller's
        # *rawampl*/*cleanampl* arrays are modified
        for a in rawampl:
            a[...] /= a.max()
        for a in cleanampl:
            a[...] /= a.max()
    # preparing figure
    fig = plt.figure(figsize=figsize)
    # =======================================================
    # 1th panel: cross-correlation (original and band-passed)
    # =======================================================
    # one row per period band, plus one for the original signal
    gs1 = gridspec.GridSpec(len(PERIOD_BANDS) + 1, 1, wspace=0.0, hspace=0.0)
    axlist = [fig.add_subplot(ss) for ss in gs1]
    self.plot_by_period_band(axlist=axlist, plot_title=False,
                             whiten=whiten, months=months,
                             vmin=vmin, vmax=vmax,
                             signal2noise_trail=signal2noise_trail,
                             noise_window_size=noise_window_size)
    # ===================
    # 2st panel: raw FTAN
    # ===================
    gs2 = gridspec.GridSpec(1, 1, wspace=0.2, hspace=0)
    ax = fig.add_subplot(gs2[0, 0])
    extent = (min(RAWFTAN_PERIODS), max(RAWFTAN_PERIODS),
              min(FTAN_VELOCITIES), max(FTAN_VELOCITIES))
    # image of log(ampl^2) (or raw ampl), velocity along rows
    m = np.log10(rawampl.transpose() ** 2) if logscale else rawampl.transpose()
    ax.imshow(m, aspect='auto', origin='lower', extent=extent)
    # Period is instantaneous iif a list of (nominal period, inst period)
    # is associated with dispersion curve
    periodlabel = 'Instantaneous period (sec)' if rawvg.nom2inst_periods \
        else 'Nominal period (sec)'
    ax.set_xlabel(periodlabel)
    ax.set_ylabel("Velocity (km/sec)")
    # saving limits
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    # raw & clean vg curves: line if more than one non-NaN velocity,
    # isolated markers otherwise
    fmt = '--' if (~np.isnan(rawvg.v)).sum() > 1 else 'o'
    ax.plot(rawvg.periods, rawvg.v, fmt, color='blue',
            lw=2, label='raw disp curve')
    fmt = '-' if (~np.isnan(cleanvg.v)).sum() > 1 else 'o'
    ax.plot(cleanvg.periods, cleanvg.v, fmt, color='black',
            lw=2, label='clean disp curve')
    # plotting cut-off period (vertical line at dist/12)
    cutoffperiod = self.dist() / 12.0
    ax.plot([cutoffperiod, cutoffperiod], ylim, color='grey')
    # setting legend and initial extent
    ax.legend(fontsize=11, loc='upper right')
    x = (xlim[0] + xlim[1]) / 2.0
    y = ylim[0] + 0.05 * (ylim[1] - ylim[0])
    ax.text(x, y, "Raw FTAN", fontsize=12,
            bbox={'color': 'k', 'facecolor': 'white', 'lw': 0.5},
            horizontalalignment='center',
            verticalalignment='center')
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    # ===========================
    # 3nd panel: clean FTAN + SNR
    # ===========================
    gs3 = gridspec.GridSpec(1, 1, wspace=0.2, hspace=0)
    ax = fig.add_subplot(gs3[0, 0])
    extent = (min(CLEANFTAN_PERIODS), max(CLEANFTAN_PERIODS),
              min(FTAN_VELOCITIES), max(FTAN_VELOCITIES))
    m = np.log10(cleanampl.transpose() ** 2) if logscale else cleanampl.transpose()
    ax.imshow(m, aspect='auto', origin='lower', extent=extent)
    # Period is instantaneous iif a list of (nominal period, inst period)
    # is associated with dispersion curve
    periodlabel = 'Instantaneous period (sec)' if cleanvg.nom2inst_periods \
        else 'Nominal period (sec)'
    ax.set_xlabel(periodlabel)
    ax.set_ylabel("Velocity (km/sec)")
    # saving limits
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    # adding SNR function of period (on a separate y-axis)
    ax2 = ax.twinx()
    ax2.plot(cleanvg.periods, cleanvg.get_SNRs(xc=self), color='green', lw=2)
    # fake plot for SNR to appear in legend
    ax.plot([-1, 0], [0, 0], lw=2, color='green', label='SNR')
    ax2.set_ylabel('SNR', color='green')
    for tl in ax2.get_yticklabels():
        tl.set_color('green')
    # trimester vg curves
    ntrimester = len(cleanvg.v_trimesters)
    for i, vg_trimester in enumerate(cleanvg.filtered_trimester_vels()):
        # label only the first curve, so the legend shows one entry
        label = '3-month disp curves (n={})'.format(ntrimester) if i == 0 else None
        ax.plot(cleanvg.periods, vg_trimester, color='gray', label=label)
    # clean vg curve + error bars
    vels, sdevs = cleanvg.filtered_vels_sdevs()
    fmt = '-' if (~np.isnan(vels)).sum() > 1 else 'o'
    ax.errorbar(x=cleanvg.periods, y=vels, yerr=sdevs, fmt=fmt, color='black',
                lw=2, label='clean disp curve')
    # legend
    ax.legend(fontsize=11, loc='upper right')
    x = (xlim[0] + xlim[1]) / 2.0
    y = ylim[0] + 0.05 * (ylim[1] - ylim[0])
    ax.text(x, y, "Clean FTAN", fontsize=12,
            bbox={'color': 'k', 'facecolor': 'white', 'lw': 0.5},
            horizontalalignment='center',
            verticalalignment='center')
    # plotting cut-off period
    cutoffperiod = self.dist() / 12.0
    ax.plot([cutoffperiod, cutoffperiod], ylim, color='grey')
    # setting initial extent
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    # ===========================================
    # 4rd panel: tectonic provinces + pair (top),
    # instantaneous vs nominal period (bottom)
    # ===========================================
    # tectonic provinces and pairs
    gs4 = gridspec.GridSpec(1, 1, wspace=0.2, hspace=0.0)
    ax = fig.add_subplot(gs4[0, 0])
    psutils.basemap(ax, labels=False, axeslabels=False)
    x = (self.station1.coord[0], self.station2.coord[0])
    y = (self.station1.coord[1], self.station2.coord[1])
    s = (self.station1.name, self.station2.name)
    ax.plot(x, y, '^-', color='k', ms=10, mfc='w', mew=1)
    for lon, lat, label in zip(x, y, s):
        ax.text(lon, lat, label, ha='center', va='bottom', fontsize=7, weight='bold')
    ax.set_xlim(bbox[:2])
    ax.set_ylim(bbox[2:])
    # instantaneous vs nominal period (if applicable)
    gs5 = gridspec.GridSpec(1, 1, wspace=0.2, hspace=0.0)
    if rawvg.nom2inst_periods or cleanvg.nom2inst_periods:
        ax = fig.add_subplot(gs5[0, 0])
        if rawvg.nom2inst_periods:
            nomperiods, instperiods = zip(*rawvg.nom2inst_periods)
            ax.plot(nomperiods, instperiods, '-', label='raw FTAN')
        if cleanvg.nom2inst_periods:
            nomperiods, instperiods = zip(*cleanvg.nom2inst_periods)
            ax.plot(nomperiods, instperiods, '-', label='clean FTAN')
        ax.set_xlabel('Nominal period (s)')
        ax.set_ylabel('Instantaneous period (s)')
        ax.legend(fontsize=9, loc='lower right')
        ax.grid(True)
    # adjusting sizes (hard-coded horizontal layout of the 5 gridspecs)
    gs1.update(left=0.03, right=0.25)
    gs2.update(left=0.30, right=0.535)
    gs3.update(left=0.585, right=0.81)
    gs4.update(left=0.85, right=0.98, bottom=0.51)
    gs5.update(left=0.87, right=0.98, top=0.48)
    # figure title, e.g., 'BL.GNSB-IU.RCBR, dist=1781 km, ndays=208'
    title = self._FTANplot_title(months=months)
    fig.suptitle(title, fontsize=14)
    # exporting to file
    if outfile:
        fig.savefig(outfile, dpi=300, transparent=True)
    if showplot:
        plt.show()
    return fig
def _plottitle(self, prefix='', months=None):
    """
    Builds the standard plot title of this cross-correlation,
    e.g., 'SPB-ITAB (365 days from 01/01/2002 to 01/12/2002)'
    or 'SPB-ITAB (90 days in months 01-2002, 02-2002)'

    @param prefix: optional string prepended to the title
    @param months: restrict the day count to these months (None = all)
    @rtype: str
    """
    s = '{pref}{sta1}-{sta2} '
    s = s.format(pref=prefix, sta1=self.station1.name, sta2=self.station2.name)
    if not months:
        nday = self.nday
        s += '({} days from {} to {})'.format(
            nday, self.startday.strftime('%d/%m/%Y'),
            self.endday.strftime('%d/%m/%Y'))
    else:
        monthxcs = [mxc for mxc in self.monthxcs if mxc.month in months]
        nday = sum(monthxc.nday for monthxc in monthxcs)
        strmonths = ', '.join(str(m.month) for m in monthxcs)
        # BUG FIX: parentheses were missing around the day count,
        # unlike the no-months branch and the docstring example
        s += '({} days in months {})'.format(nday, strmonths)
    return s
def _FTANplot_title(self, months=None):
    """
    Builds the FTAN figure title, e.g.,
    'BL.GNSB-IU.RCBR, dist=1781 km, ndays=208'
    """
    # day count: whole data set, or only the requested months
    if months:
        ndays = sum(mxc.nday for mxc in self.monthxcs
                    if mxc.month in months)
    else:
        ndays = self.nday
    sta1 = '.'.join((self.station1.network, self.station1.name))
    sta2 = '.'.join((self.station2.network, self.station2.name))
    return u"{}-{}, dist={:.0f} km, ndays={}".format(
        sta1, sta2, self.dist(), ndays)
def _get_xcorr_dt(self):
    """
    Returns the sampling interval of the time array, taken from its
    first two samples. Warning: no check is made to ensure that the
    interval is constant across the array.

    @rtype: float
    """
    t = self.timearray
    return t[1] - t[0]
def _get_xcorr_nmax(self):
    """
    Returns the max index of the time array:
    - self.timearray = [-t[nmax] ... t[0] ... t[nmax]] if not symmetrized
    -                = [t[0] ... t[nmax-1] t[nmax]]    if symmetrized

    @rtype: int
    """
    last = len(self.timearray) - 1
    if self.symmetrized:
        return int(last)
    # two-sided array: the positive branch holds half the samples
    return int(last * 0.5)
def _get_monthyears_xcdataarray(self, months=None):
    """
    Returns the summed cross-corr data arrays of the given list of
    (month, year) -- or the whole cross-corr if *months* is None.
    Returns None if no monthly cross-corr matches the requested months.

    @type months: list of (L{MonthYear} or (int, int))
    @rtype: L{numpy.ndarray}
    """
    if not months:
        return self.dataarray
    selected = [mxc.dataarray for mxc in self.monthxcs
                if mxc.month in months]
    return sum(selected) if selected else None
class CrossCorrelationCollection(AttribDict):
"""
Collection of cross-correlations
= AttribDict{station1.name: AttribDict {station2.name: instance of CrossCorrelation}}
AttribDict is a dict (defined in obspy.core) whose keys are also
attributes. This means that a cross-correlation between a pair
of stations STA01-STA02 can be accessed both ways:
- self['STA01']['STA02'] (easier in regular code)
- self.STA01.STA02 (easier in an interactive session)
"""
def __init__(self):
    """
    Initializes the (empty) collection as an AttribDict.
    """
    super(CrossCorrelationCollection, self).__init__()
def __repr__(self):
    # summary line showing the number of station pairs in the collection
    npairs = len(self.pairs())
    return ('(AttribDict)<Collection of cross-correlation '
            'between {0} pairs>'.format(npairs))
def pairs(self, sort=False, minday=1, minSNR=None, mindist=None,
          withnets=None, onlywithnets=None, pairs_subset=None,
          **kwargs):
    """
    Returns the pairs of stations of the cross-correlation
    collection that satisfy the given conditions.
    Additional arguments in *kwargs* are sent to xc.SNR().

    @type sort: bool
    @type minday: int
    @type minSNR: float
    @type mindist: float
    @type withnets: list of str
    @type onlywithnets: list of str
    @type pairs_subset: list of (str, str)
    @rtype: list of (str, str)
    """
    selected = [(s1, s2) for s1 in self for s2 in self[s1]]
    if sort:
        selected.sort()
    # restricting to a subset of pairs (order-insensitive comparison)
    if pairs_subset:
        wanted = [set(pair) for pair in pairs_subset]
        selected = [pair for pair in selected if set(pair) in wanted]
    # keeping pairs with enough days of cross-correlation
    selected = [(s1, s2) for (s1, s2) in selected
                if self[s1][s2].nday >= minday]
    # keeping pairs with large enough SNR
    if minSNR:
        selected = [(s1, s2) for (s1, s2) in selected
                    if self[s1][s2].SNR(**kwargs) >= minSNR]
    # keeping pairs of stations distant enough from each other
    if mindist:
        selected = [(s1, s2) for (s1, s2) in selected
                    if self[s1][s2].dist() >= mindist]
    # at least one station of the pair belongs to one of the networks
    if withnets:
        selected = [(s1, s2) for (s1, s2) in selected
                    if self[s1][s2].station1.network in withnets or
                    self[s1][s2].station2.network in withnets]
    # both stations of the pair belong to one of the networks
    if onlywithnets:
        selected = [(s1, s2) for (s1, s2) in selected
                    if self[s1][s2].station1.network in onlywithnets and
                    self[s1][s2].station2.network in onlywithnets]
    return selected
def pairs_and_SNRarrays(self, pairs_subset=None, minspectSNR=None,
whiten=False, verbose=False,
vmin=SIGNAL_WINDOW_VMIN, vmax=SIGNAL_WINDOW_VMAX,
signal2noise_trail=SIGNAL2NOISE_TRAIL,
noise_window_size=NOISE_WINDOW_SIZE):
"""
Returns pairs and spectral SNR array whose spectral SNRs
are all >= minspectSNR
Parameters *vmin*, *vmax*, *signal2noise_trail*, *noise_window_size*
control the location of the signal window and the noise window
(see function self.SNR()).
Returns {pair1: SNRarray1, pair2: SNRarray2 etc.}
@type pairs_subset: list of (str, str)
@type minspectSNR: float
@type whiten: bool
@type verbose: bool
@rtype: dict from (str, str) to L{numpy.ndarray}
"""
if verbose:
print "Estimating spectral SNR of pair:",
# initial list of pairs
pairs = pairs_subset if pairs_subset else self.pairs()
# filetring by min spectral SNR
SNRarraydict = {}
for (s1, s2) in pairs:
if verbose:
print '{0}-{1}'.format(s1, s2),
SNRarray = self[s1][s2].SNR(periodbands=PERIOD_BANDS, whiten=whiten,
vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size)
if not minspectSNR or min(SNRarray) >= minspectSNR:
SNRarraydict[(s1, s2)] = SNRarray
if verbose:
print
return SNRarraydict
def add(self, tracedict, stations, xcorr_tmax,
        xcorrdict=None, date=None, verbose=False):
    """
    Stacks cross-correlations between pairs of stations
    from a dict of {station.name: Trace} (in *tracedict*).

    You can provide pre-calculated cross-correlations in *xcorrdict*
    = dict {(station1.name, station2.name): numpy array containing cross-corr}

    Initializes self[station1][station2] as an instance of CrossCorrelation
    if the pair station1-station2 is not in self

    @type tracedict: dict from str to L{obspy.core.trace.Trace}
    @type stations: list of L{pysismo.psstation.Station}
    @type xcorr_tmax: float
    @type verbose: bool
    """
    if not xcorrdict:
        xcorrdict = {}
    # all unordered pairs of (station name, trace); sorting makes the
    # pair order deterministic (s1name < s2name)
    stationtrace_pairs = it.combinations(sorted(tracedict.items()), 2)
    for (s1name, tr1), (s2name, tr2) in stationtrace_pairs:
        if verbose:
            print "{s1}-{s2}".format(s1=s1name, s2=s2name),
        # checking that sampling rates are equal
        assert tr1.stats.sampling_rate == tr2.stats.sampling_rate
        # looking for s1 and s2 in the list of stations
        # (raises StopIteration if a station name is missing)
        station1 = next(s for s in stations if s.name == s1name)
        station2 = next(s for s in stations if s.name == s2name)
        # initializing self[s1] if s1 not in self
        # (avoiding setdefault() since behavior in unknown with AttribDict)
        if s1name not in self:
            self[s1name] = AttribDict()
        # initializing self[s1][s2] if s2 not in self[s1]
        if s2name not in self[s1name]:
            self[s1name][s2name] = CrossCorrelation(
                station1=station1,
                station2=station2,
                xcorr_dt=1.0 / tr1.stats.sampling_rate,
                xcorr_tmax=xcorr_tmax)
        # stacking cross-correlation
        try:
            # getting pre-calculated cross-corr, if provided
            xcorr = xcorrdict.get((s1name, s2name), None)
            # NOTE(review): the four stacking steps below run in sequence;
            # the phase-weighted stack presumably depends on the phase
            # stack computed just before -- confirm before reordering
            self[s1name][s2name].add(tr1, tr2, xcorr=xcorr)
            self[s1name][s2name].phase_stack(tr1, tr2, xcorr=xcorr)
            self[s1name][s2name].phase_weighted_stack()
            self[s1name][s2name].SNR_table(date=date)
        except pserrors.NaNError:
            # got NaN: skip this pair for this date, keep going
            s = "Warning: got NaN in cross-corr between {s1}-{s2} -> skipping"
            print s.format(s1=s1name, s2=s2name)
    if verbose:
        print
def plot_pws(self, xlim=None, norm=True,
whiten=False, sym=False, minSNR=None, minday=1,
withnets=None, onlywithnets=None, figsize=(21.0, 12.0),
outfile=None, dpi=300, showplot=True, stack_style='linear'):
"""
Method for plotting phase-weighted stacking, one plot per station
pair is produced.
"""
# preparing pairs
pairs = self.pairs(minday=minday, minSNR=minSNR, withnets=withnets,
onlywithnets=onlywithnets)
npair = len(pairs)
if not npair:
print "Nothing to plot!"
return
plt.figure()
# one plot for each pair
nrow = int(np.sqrt(npair))
if np.sqrt(npair) != nrow:
nrow += 1
ncol = int(npair / nrow)
if npair % nrow != 0:
ncol += 1
# sorting pairs alphabetically
pairs.sort()
for iplot, (s1, s2) in enumerate(pairs):
plt.figure(iplot)
# symmetrizing cross-corr if necessary
xcplot = self[s1][s2]
# plotting
plt.plot(xcplot.timearray, xcplot.dataarray)
if xlim:
plt.xlim(xlim)
# title
s = '{s1}-{s2}: {nday} stacks from {t1} to {t2} {cnf}.png'
#remove microseconds in time string
title = s.format(s1=s1, s2=s2,
nday=xcplot.nday,
t1=str(xcplot.startday)[:-11],
t2=str(xcplot.endday)[:-11])
plt.title(title)
# x-axis label
#if iplot + 1 == npair:
plt.xlabel('Time (s)')
out = os.path.abspath(os.path.join(outfile, os.pardir))
outfile_individual = os.path.join(out, title)
if os.path.exists(outfile_individual):
# backup
shutil.copyfile(outfile_individual, \
outfile_individual + '~')
fig = plt.gcf()
fig.set_size_inches(figsize)
print(outfile_individual)
fig.savefig(outfile_individual, dpi=dpi)
def process_SNR(self, outfile=None, dpi=180,
                figsize=(21.0, 12.0),
                stat_list=[], verbose=False):
    # process all SNR information together to determine mean SNR and
    # whether or not a station pair works
    # NOTE(review): *outfile* is used as a directory via os.path.join
    # below, so the default None would raise -- confirm callers always
    # provide it. *stat_list*, *figsize* and *verbose* are unused here.
    for item in self.items():
        s1 = item[0]
        SNRarrays = []
        timearrays = []
        for s2 in item[1].keys():
            if s1 != s2:
                info_array = np.asarray(self[s1][s2].SNR_lin)
                if len(info_array) > 1:
                    # column 0 -> SNR values, column 1 -> times
                    # (same layout as in plot_SNR)
                    SNRarray, timearray = info_array[:,0], info_array[:,1]
                    SNRarrays.append(SNRarray)
                    timearrays.append(timearray)
                    # NOTE(review): the pickle name keeps only the *last*
                    # s2 of the loop although the dumped data aggregates
                    # all s2 of station s1; *outfile_pickle* stays unbound
                    # if no pair of s1 had data -- verify intent
                    pickle_name = 'SNR_{}-{}.pickle'.format(s1, s2)
                    outfile_pickle = os.path.join(outfile, pickle_name)
        # flattening the per-pair arrays into single sequences
        SNRarrays = list(it.chain(*SNRarrays))
        timearrays = list(it.chain(*timearrays))
        SNR_output = np.column_stack((timearrays, SNRarrays))
        # import CONFIG class initalised in ./configs/tmp_config.pickle
        #SNR_pickle = 'configs/tmp_config.pickle'
        #f = open(name=config_pickle, mode='rb')
        #CONFIG = pickle.load(f)
        #f.close()
        file_name = 'SNR_distribution_{}-{}.png'.format(s1, s2)
        outfile_individual = os.path.join(outfile, file_name)
        fig = plt.figure()
        plt.xlabel("Signal-To-Noise Ratio")
        plt.ylabel("Probability Density")
        SNRarrays = sorted(SNRarrays)
        SNRmean = np.mean(SNRarrays)
        SNRstd = np.std(SNRarrays)
        # calculate IQR
        q75, q25 = np.percentile(SNRarrays, [75 ,25])
        iqr = q75 - q25
        print "Interquartile Range: ", iqr
        # normal pdf evaluated at the sorted SNR samples
        pdf = stats.norm.pdf(SNRarrays, SNRmean, SNRstd)
        pdf = pdf / 1.0
        # NOTE(review): iqr_y / iqr_x feed only the commented-out plot below
        iqr_y = 2 * [np.max(pdf) / 2.0]
        iqr_x = [q25, q75]
        pdf_max = np.max(pdf)
        print "Probability Density Function Max: ", pdf_max
        plt.title("%s-%s SNR Distribution\n IQR: %0.2f" %(s1, s2, iqr))
        #plt.boxplot(SNRarrays)
        plt.plot(SNRarrays, pdf)
        #plt.plot(iqr_x, iqr_y, color='red')
        #plt.ylim([0, 1.0])
        fig.savefig(outfile_individual, dpi=dpi)
        with open(outfile_pickle, 'wb') as f:
            print "\nExporting total station pair SNR information to: " + f.name
            pickle.dump(SNR_output, f, protocol=2)
    # NOTE(review): saves the *last* station's figure under a "total"
    # name -- original indentation was ambiguous; confirm placement
    fig.savefig(os.path.join(outfile, "total_distribution.png"), dpi=dpi)
def plot_SNR(self, plot_type='all', figsize=(21.0, 12.0),
outfile=None, dpi=300, showplot=True, stat_list=[],
verbose=False):
# preparing pairs
pairs = self.pairs()
npair = len(pairs)
if not npair:
raise Exception("So SNR pairs to plot!")
return
if plot_type == 'individual':
# plot all individual SNR vs. time curves on seperate figures
for item in self.items():
s1 = item[0]
for s2 in item[1].keys():
#print '{}-{}'.format(s1, s2)
#try:
fig = plt.figure()
info_array = np.asarray(self[s1][s2].SNR_lin)
if len(info_array) > 0:
SNRarray, timearray = info_array[:,0], info_array[:,1]
plt.plot(timearray, SNRarray, c='k')
s = '{s1}-{s2}: SNR vs. time for {nstacks}\n \
stacks from {t1} to {t2}'
title = s.format(s1=s1, s2=s2,
nstacks=len(SNRarray),
t1=timearray[0],
t2=timearray[-1]
)
plt.title(title)
plt.ylabel('SNR (Max. Signal Amp. / Noise Std')
plt.xlabel('Time (UTC)')
file_name = '{}-{}-SNR.png'.format(s1, s2)
outfile_individual = os.path.join(outfile, file_name)
if os.path.exists(outfile_individual):
# backup
shutil.copyfile(outfile_individual, \
outfile_individual + '~')
fig = plt.gcf()
fig.set_size_inches(figsize)
print '{s1}-{s2}'.format(s1=s1, s2=s2),
fig.savefig(outfile_individual, dpi=dpi)
fig.clf()
#except Exception as err:
# continue
elif plot_type == 'all':
# plot all individual SNR vs. time curves on single figure
fig = plt.figure()
title = 'Total Database Pairs SNR vs. time \n \
stacks from {} to {}'.format(FIRSTDAY, LASTDAY)
plt.title(title)
plt.xlabel('Time (UTC)')
plt.ylabel('SNR (Max. Signal Amp. / Noise Std')
for item in self.items():
s1 = item[0]
for s2 in item[1].keys():
if s1 != s2:
info_array = np.asarray(self[s1][s2].SNR_lin)
if len(info_array) > 1:
if verbose:
print '{}-{}'.format(s1, s2)
print "info_array", info_array
SNRarray, timearray = info_array[:,0], info_array[:,1]
plt.scatter(timearray, SNRarray, alpha=0.3, c='b')
#except Exception as err:
# print err
file_name = 'SNR_total.png'
outfile_individual = os.path.join(outfile, file_name)
if os.path.exists(outfile_individual):
# backup
shutil.copyfile(outfile_individual, \
outfile_individual + '~')
fig.savefig(outfile_individual, dpi=dpi)
    def plot(self, plot_type='distance', xlim=None, norm=True, whiten=False,
             sym=False, minSNR=None, minday=1, withnets=None, onlywithnets=None,
             figsize=(21.0, 12.0), outfile=None, dpi=300, showplot=True,
             stack_type='linear', fill=False, absolute=False,
             freq_central=None):
        """
        Plots the cross-correlations of the collection.

        *plot_type*='classic' saves one figure per station pair, next to
        the parent folder of *outfile*; *plot_type*='distance' draws a
        single section plot in which each trace is y-shifted by its
        inter-station distance (pairs beyond 2500 km are dropped), and
        saves it to *outfile*.

        @param xlim: timelag-axis limits, or None
        @param norm: normalize each trace by its max amplitude
        @param whiten: spectrally whiten each cross-corr before plotting
        @param sym: symmetrize the cross-corr (used by the distance plot only)
        @param minSNR, minday, withnets, onlywithnets: pair selection
               criteria, forwarded to self.pairs()
        @param stack_type: stack to draw: 'linear', 'PWS', 'SNR' or 'combined'
        @param fill: fill between each trace and its distance baseline
        @param absolute: plot absolute amplitudes
        @param freq_central: currently unused (the bandpass code that used
               it is commented out below)
        """
        # preparing pairs
        pairs = self.pairs(minday=minday, minSNR=minSNR, withnets=withnets,
                           onlywithnets=onlywithnets)
        npair = len(pairs)
        if not npair:
            print "Nothing to plot!"
            return
        plt.figure()
        # classic plot = one plot for each pair
        if plot_type == 'classic':
            # grid layout computed but unused (the subplot call is commented out)
            nrow = int(np.sqrt(npair))
            if np.sqrt(npair) != nrow:
                nrow += 1
            ncol = int(npair / nrow)
            if npair % nrow != 0:
                ncol += 1
            # sorting pairs alphabetically
            pairs.sort()
            print 'Now saving cross-correlation plots for all station pairs ...'
            for iplot, (s1, s2) in enumerate(pairs):
                plt.figure(iplot)
                # symmetrizing cross-corr if necessary
                #xcplot = self[s1][s2].symmetrize(inplace=False) \
                #if sym else self[s1][s2]
                xcplot = self[s1][s2]
                # spectral whitening
                if whiten:
                    xcplot = xcplot.whiten(inplace=False)
                # subplot
                #plt.subplot(nrow, ncol, iplot + 1)
                # wrapping the selected stack in a Trace (presumably obspy's
                # Trace -- confirm import) so a sampling rate can be attached
                if stack_type == 'PWS':
                    filter_trace = Trace(data=xcplot.pws)
                elif stack_type == 'SNR':
                    filter_trace = Trace(data=xcplot.SNR_stack)
                elif stack_type == 'combined':
                    filter_trace = Trace(data=xcplot.comb_stack)
                elif stack_type == 'linear':
                    filter_trace = Trace(data=xcplot.dataarray)
                else:
                    # unknown stack_type falls back on the linear stack
                    filter_trace = Trace(data=xcplot.dataarray)
                # number of seconds in time array
                n_secs = 2.0 * xcplot.timearray[-1]
                sample_rate = int(len(filter_trace) / n_secs)
                filter_trace.stats.sampling_rate = sample_rate
                # NOTE: the selected stack overwrites the pair's dataarray in place
                xcplot.dataarray = filter_trace.data
                #print "xcplot.dataarray: ", xcplot.dataarray
                nrm = np.max(np.abs(xcplot.dataarray)) if norm else 1.0
                # normalizing factor
                #pws_nrm = np.max(np.abs(xcplot.pws))
                #plt.figure(1)
                #plt.plot(xcplot.timearray, xcplot.pws / pws_nrm, c='r', alpha=0.5)
                #plt.plot(xcplot.timearray, lin / lin_nrm, c='b')
                #plt.show()
                #plt.clf()
                # plotting (drop last sample when x and y lengths disagree)
                if xcplot.timearray.shape != xcplot.dataarray.shape:
                    plt.plot(xcplot.timearray[:-1], xcplot.dataarray / nrm)
                else:
                    plt.plot(xcplot.timearray, xcplot.dataarray / nrm)
                if xlim:
                    plt.xlim(xlim)
                # title
                locs1 = ','.join(sorted(["'{0}'".format(loc) \
                                         for loc in xcplot.locs1]))
                locs2 = ','.join(sorted(["'{0}'".format(loc) \
                                         for loc in xcplot.locs2]))
                #remove microseconds in time string
                s = '{s1}-{s2}: {nday} stacks from {t1} to {t2} {sta} stack.png'
                title = s.format(s1=s1, s2=s2,
                                 nday=xcplot.nday,
                                 t1=str(xcplot.startday)[:-11],
                                 t2=str(xcplot.endday)[:-11], sta=stack_type)
                # individual figures are saved in the parent dir of *outfile*
                out = os.path.abspath(os.path.join(outfile, os.pardir))
                outfile_individual = os.path.join(out, title)
                plt.title(title)
                # x-axis label
                #if iplot + 1 == npair:
                plt.xlabel('Time (s)')
                print outfile_individual
                if os.path.exists(outfile_individual):
                    # backup
                    shutil.copyfile(outfile_individual, \
                                    outfile_individual + '~')
                fig = plt.gcf()
                fig.set_size_inches(figsize)
                #print(outfile_individual)
                print '{s1}-{s2}'.format(s1=s1, s2=s2),
                fig.savefig(outfile_individual, dpi=dpi)
        #enter number of cross-correlations to be plotted as to not crowd the image
        # distance plot = one plot for all pairs, y-shifted according to pair distance
        elif plot_type == 'distance':
            # set max dist. to 2500km
            maxdist = max(self[x][y].dist() for (x, y) in pairs)
            if maxdist > 2500.0:
                maxdist = 2500.0
            # amplitude-to-km scaling of the wiggles
            corr2km = maxdist / 10.0
            cc = mpl.rcParams['axes.color_cycle'] # color cycle
            #filter out every station pair with more than 2500km distance
            print "pair length 1: ", len(pairs)
            #for (s1, s2) in pairs:
            #    if self[s1][s2].dist() >= 2500.0:
            dist_filter = np.array([self[s1][s2].dist() < 2500.0 for (s1, s2) in pairs])
            print "dist_filter: ", dist_filter
            pairs = np.asarray(pairs)
            pairs = list(pairs[dist_filter])
            print "pairs: ", pairs
            print "pair length 2: ", len(pairs)
            # sorting pairs by distance
            pairs.sort(key=lambda (s1, s2): self[s1][s2].dist())
            pairs
            pairs.reverse()
            pairs_copy = pairs #create a pairs list of equi-distant station pairs no longer than plot_number
            pairs_list = []
            plot_number = len(pairs) #gives maximum number of xcorrs that can fit on a page
            instance_number = len(pairs) / plot_number #gives number of instances to skip
            #gives every instance number inside a list e.g. every 7th instance in pairs
            # NOTE(review): plot_number == len(pairs) makes instance_number 1,
            # so the decimation below keeps every pair EXCEPT the last one
            # (excluded by 'i < len(pairs) - 1') -- confirm the off-by-one
            for i, pair in enumerate(pairs_copy):
                if i < len(pairs) - 1 and (i == 0 or i%instance_number == 0):
                    pairs_list.append(pair)
                    if plot_number <= len(pairs_list):
                        break
                    else:
                        continue
                else:
                    pass
            for ipair, (s1, s2) in enumerate(pairs_list):
                # symmetrizing cross-corr if necessary
                xcplot = self[s1][s2].symmetrize(inplace=False) if sym else self[s1][s2]
                #xc = self[s1][s2]
                # spectral whitening
                if whiten:
                    xcplot = xcplot.whiten(inplace=False)
                color = cc[ipair % len(cc)]
                #color = 'k'
                # normalizing factor
                # NOTE(review): unlike the classic branch this uses max(), not
                # max(abs()) -- a trace with negative peak amplitude is scaled
                # differently; confirm intended
                nrm = max(xcplot.dataarray) if norm else 1.0
                filter_trace = Trace(data=xcplot.dataarray)
                if stack_type == 'PWS':
                    filter_trace = Trace(data=xcplot.pws)
                if stack_type == 'SNR':
                    filter_trace = Trace(data=xcplot.SNR_stack)
                if stack_type == 'combined':
                    filter_trace = Trace(data=xcplot.comb_stack)
                elif stack_type == 'linear':
                    filter_trace = Trace(data=xcplot.dataarray)
                else:
                    filter_trace = Trace(data=xcplot.dataarray)
                # number of seconds in time array
                #n_secs = 2.0 * xcplot.timearray[-1]
                #sample_rate = int(len(xcplot.dataarray) / n_secs)
                #filter_trace.stats.sampling_rate = sample_rate
                #freq_min = freq_central - freq_central / 10.0
                #freq_max = freq_central + freq_central / 10.0
                #print "freq_min: ", freq_min
                #print "freq_max: ", freq_max
                #filter_trace = filter_trace.filter(type="bandpass",
                #                                   freqmin=freq_min,
                #                                   freqmax=freq_max,
                #                                   corners=2,
                #                                   zerophase=True)
                xcplot.dataarray = filter_trace.data
                # plotting
                xarray = xcplot.timearray
                #get absolute value of data array for more visual plot
                print "y shape: ", xcplot.dataarray.shape
                print "x shape: ", xcplot.timearray.shape
                # drop a trailing singleton axis so x and y shapes agree
                if xcplot.dataarray.shape > xcplot.timearray.shape:
                    xcplot.dataarray.shape = xcplot.dataarray.shape[:-1]
                elif xcplot.dataarray.shape < xcplot.timearray.shape:
                    xcplot.timearray.shape = xcplot.timearray.shape[:-1]
                print "y shape: ", xcplot.dataarray.shape
                print "x shape: ", xcplot.timearray.shape
                # trace drawn at y = inter-station distance, wiggles scaled by corr2km
                if fill and absolute:
                    yarray = corr2km * abs(xcplot.dataarray) / nrm + xcplot.dist()
                    #for point in yarray:
                    #    if point - xcplot.dist() < 400: point = xcplot.dist();
                    plt.fill_between(xarray, xcplot.dist(), yarray, color=color)
                elif fill and not absolute:
                    yarray = xcplot.dist() + corr2km * (xcplot.dataarray / nrm)
                    plt.fill_between(xarray, xcplot.dist(), yarray, color=color)
                elif not fill and absolute:
                    yarray = xcplot.dist() + corr2km * abs(xcplot.dataarray) / nrm
                    plt.plot(xarray, yarray, color = color)
                else:
                    yarray = xcplot.dist() + corr2km * xcplot.dataarray / nrm
                    plt.plot(xarray, yarray, color = color)
                #d = [0]*len(yarray)
                if xlim:
                    plt.xlim(xlim)
                # adding annotation @ xytest, annotation line @ xyarrow
                xmin, xmax = plt.xlim()
                xextent = plt.xlim()[1] - plt.xlim()[0]
                ymin = -0.1 * maxdist
                ymax = 1.1 * maxdist
                # # all annotations on the right side
                # x = xmax - xextent / 10.0
                # y = maxdist if npair == 1 else ymin + ipair*(ymax-ymin)/(npair-1)
                # xytext = (x, y)
                # xyarrow = (x - xextent / 30.0, xcplot.dist())
                # align = 'left'
                # relpos = (0, 0.5)
                # NOTE(review): xytext/xyarrow/align/relpos are defined only
                # when npair <= 1, yet they are used unconditionally below --
                # with npair > 1 the annotate() call raises NameError; the
                # condition looks inverted (confirm)
                if npair <= 1:
                    # alternating right/left
                    sign = 2 * (ipair % 2 - 0.5)
                    x = xmin + xextent / 10.0 if sign > 0 else xmax - xextent / 10.0
                    y = ymin + ipair / 2 * (ymax - ymin) / (npair / 2 - 1.0)
                    xytext = (x, y)
                    xyarrow = (x + sign * xextent / 30.0, xcplot.dist())
                    align = 'right' if sign > 0 else 'left'
                    relpos = (1, 0.5) if sign > 0 else (0, 0.5)
                bbox = {'color': color, 'facecolor': 'white', 'alpha': 0.9}
                arrowprops = {'arrowstyle': "-", 'relpos': relpos, 'color': color}
                # NOTE(review): 's' is referenced here but only assigned a few
                # lines below -- the first loop iteration raises NameError
                # (ordering bug; the annotation text was presumably meant to
                # be built before this call)
                plt.annotate(s=s, xy=xyarrow, xytext=xytext, fontsize=9,
                             color='k', horizontalalignment=align,
                             bbox=bbox, arrowprops=arrowprops)
                net1 = xcplot.station1.network
                net2 = xcplot.station2.network
                locs1 = ','.join(sorted(["'{0}'".format(loc) for loc in xcplot.locs1]))
                locs2 = ','.join(sorted(["'{0}'".format(loc) for loc in xcplot.locs2]))
                s = '{net1}.{s1}[{locs1}]-{net2}.{s2}[{locs2}]: {nday} days {t1}-{t2}'
                s = s.format(net1=net1, s1=s1, locs1=locs1, net2=net2, s2=s2,
                             locs2=locs2, nday=xcplot.nday,
                             t1=xcplot.startday.strftime('%d/%m/%y'),
                             t2=xcplot.endday.strftime('%d/%m/%y'))
                print s
                # axes cosmetics (repeated once per trace, then once more below)
                plt.grid()
                plt.xlabel('Time (s)')
                plt.ylabel('Distance (km)')
                plt.ylim((0, plt.ylim()[1]))
            plt.grid()
            plt.xlabel('Time (s)')
            plt.ylabel('Distance (km)')
            plt.ylim((0, plt.ylim()[1]))
        print "outfile: ", outfile
        # saving figure
        if plot_type == 'distance':
            if outfile:
                if os.path.exists(outfile):
                    # backup
                    shutil.copyfile(outfile, outfile + '~')
                fig = plt.gcf()
                fig.set_size_inches(figsize)
                fig.savefig(outfile, dpi=dpi)
        else:
            pass
        if showplot:
            # showing plot
            plt.show()
        plt.close()
def plot_spectral_SNR(self, whiten=False, minSNR=None, minspectSNR=None,
minday=1, mindist=None, withnets=None, onlywithnets=None,
vmin=SIGNAL_WINDOW_VMIN, vmax=SIGNAL_WINDOW_VMAX,
signal2noise_trail=SIGNAL2NOISE_TRAIL,
noise_window_size=NOISE_WINDOW_SIZE):
"""
Plots spectral SNRs
"""
# filtering pairs
pairs = self.pairs(minday=minday, minSNR=minSNR, mindist=mindist,
withnets=withnets, onlywithnets=onlywithnets,
vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size)
#SNRarrays = dict{(station1,station2): SNR array}
SNRarrays = self.pairs_and_SNRarrays(
pairs_subset=pairs, minspectSNR=minspectSNR,
whiten=whiten, verbose=True,
vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size)
npair = len(SNRarrays)
if not npair:
print 'Nothing to plot!!!'
return
# min-max SNR
minSNR = min([SNR for SNRarray in SNRarrays.values() for SNR in SNRarray])
maxSNR = max([SNR for SNRarray in SNRarrays.values() for SNR in SNRarray])
# sorting SNR arrays by increasing first value
SNRarrays = OrderedDict(sorted(SNRarrays.items(), key=lambda (k, v): v[0]))
# array of mid of time bands
periodarray = [(tmin + tmax) / 2.0 for (tmin, tmax) in PERIOD_BANDS]
minperiod = min(periodarray)
# color cycle
cc = mpl.rcParams['axes.color_cycle']
# plotting SNR arrays
plt.figure()
for ipair, ((s1, s2), SNRarray) in enumerate(SNRarrays.items()):
xc = self[s1][s2]
color = cc[ipair % len(cc)]
# SNR vs period
plt.plot(periodarray, SNRarray, color=color)
# annotation
xtext = minperiod - 4
ytext = minSNR * 0.5 + ipair * (maxSNR - minSNR * 0.5) / (npair - 1)
xytext = (xtext, ytext)
xyarrow = (minperiod - 1, SNRarray[0])
relpos = (1, 0.5)
net1 = xc.station1.network
net2 = xc.station2.network
s = '{i}: {net1}.{s1}-{net2}.{s2}: {dist:.1f} km, {nday} days'
s = s.format(i=ipair, net1=net1, s1=s1, net2=net2, s2=s2,
dist=xc.dist(), nday=xc.nday)
bbox = {'color': color, 'facecolor': 'white', 'alpha': 0.9}
arrowprops = {'arrowstyle': '-', 'relpos': relpos, 'color': color}
plt.annotate(s=s, xy=xyarrow, xytext=xytext, fontsize=9,
color='k', horizontalalignment='right',
bbox=bbox, arrowprops=arrowprops)
plt.xlim((0.0, plt.xlim()[1]))
plt.xlabel('Period (s)')
plt.ylabel('SNR')
plt.title(u'{0} pairs'.format(npair))
plt.grid()
plt.show()
def plot_pairs(self, minSNR=None, minspectSNR=None, minday=1, mindist=None,
withnets=None, onlywithnets=None, pairs_subset=None, whiten=False,
stationlabel=False, bbox=BBOX_LARGE, xsize=10, plotkwargs=None,
SNRkwargs=None):
"""
Plots pairs of stations on a map
@type bbox: tuple
"""
if not plotkwargs:
plotkwargs = {}
if not SNRkwargs:
SNRkwargs = {}
# filtering pairs
pairs = self.pairs(minday=minday, minSNR=minSNR, mindist=mindist,
withnets=withnets, onlywithnets=onlywithnets,
pairs_subset=pairs_subset, **SNRkwargs)
if minspectSNR:
# plotting only pairs with all spect SNR >= minspectSNR
SNRarraydict = self.pairs_and_SNRarrays(
pairs_subset=pairs, minspectSNR=minspectSNR,
whiten=whiten, verbose=True, **SNRkwargs)
pairs = SNRarraydict.keys()
# nb of pairs
npair = len(pairs)
if not npair:
print 'Nothing to plot!!!'
return
# initializing figure
aspectratio = (bbox[3] - bbox[2]) / (bbox[1] - bbox[0])
plt.figure(figsize=(xsize, aspectratio * xsize))
# plotting coasts and tectonic provinces
psutils.basemap(plt.gca(), bbox=bbox)
# plotting pairs
for s1, s2 in pairs:
x, y = zip(self[s1][s2].station1.coord, self[s1][s2].station2.coord)
if not plotkwargs:
plotkwargs = dict(color='grey', lw=0.5)
plt.plot(x, y, '-', **plotkwargs)
# plotting stations
x, y = zip(*[s.coord for s in self.stations(pairs)])
plt.plot(x, y, '^', color='k', ms=10, mfc='w', mew=1)
if stationlabel:
# stations label
for station in self.stations(pairs):
plt.text(station.coord[0], station.coord[1], station.name,
ha='center', va='bottom', fontsize=10, weight='bold')
# setting axes
plt.title(u'{0} pairs'.format(npair))
plt.xlim(bbox[:2])
plt.ylim(bbox[2:])
plt.show()
def export(self, outprefix, stations=None, verbose=False):
"""
Exports cross-correlations to picke file and txt file
@type outprefix: str or unicode
@type stations: list of L{Station}
"""
self._to_picklefile(outprefix, verbose=verbose)
self._to_ascii(outprefix, verbose=verbose)
self._pairsinfo_to_ascii(outprefix, verbose=verbose)
self._stationsinfo_to_ascii(outprefix, stations=stations, verbose=verbose)
    def FTAN_pairs(self, prefix=None, suffix='', whiten=False,
                   normalize_ampl=True, logscale=True, mindist=None,
                   minSNR=None, minspectSNR=None, monthyears=None,
                   vmin=SIGNAL_WINDOW_VMIN, vmax=SIGNAL_WINDOW_VMAX,
                   signal2noise_trail=SIGNAL2NOISE_TRAIL,
                   noise_window_size=NOISE_WINDOW_SIZE,
                   **kwargs):
        """
        Prepares an FTAN batch: builds the output path from the selection
        options (same naming scheme as FTANs()), backs up any existing
        pdf, filters the station pairs by SNR criteria and returns
        (pairs, outputpath) for the caller (e.g. a pool of
        FTAN_parallel workers) to process.

        @type prefix: str or unicode
        @type suffix: str or unicode
        @rtype: (list of (str, str), unicode)
        """
        # setting default prefix if not given
        if not prefix:
            parts = [os.path.join(FTAN_DIR, 'FTAN')]
            if whiten:
                parts.append('whitenedxc')
            if mindist:
                parts.append('mindist={}'.format(mindist))
            if minSNR:
                parts.append('minSNR={}'.format(minSNR))
            if minspectSNR:
                parts.append('minspectSNR={}'.format(minspectSNR))
            if monthyears:
                parts.extend('{:02d}-{}'.format(m, y) for m, y in monthyears)
        else:
            parts = [prefix]
        if suffix:
            parts.append(suffix)
        # path of output files (without extension)
        outputpath = u'_'.join(parts)
        pdfpath = u'{}.pdf'.format(outputpath)
        if os.path.exists(pdfpath):
            # backup
            shutil.copyfile(pdfpath, pdfpath + '~')
        # opening pdf file
        # NOTE(review): this PdfPages handle is never written to, never
        # closed and not returned -- it truncates/creates the pdf file and
        # leaks the handle; confirm whether it is still needed here
        pdf = PdfPages(pdfpath)
        # filtering pairs
        pairs = self.pairs(sort=True, minSNR=minSNR, mindist=mindist,
                           vmin=vmin, vmax=vmax,
                           signal2noise_trail=signal2noise_trail,
                           noise_window_size=noise_window_size)
        #print "\nNo. of pairs before distance filter: ", len(pairs)
        # filter stations by maximum possible attenuation distance!
        #dist_filter = np.array([self[s1][s2].dist() < 1.2 *
        #                        max(self[s1][s2].timearray)
        #                        for (s1, s2) in pairs])
        #pairs = list(np.asarray(pairs)[dist_filter])
        #print "\nNo. of pairs after distance filter: ", len(pairs)
        # pairs filtered by max. pair distance
        #dist_pairs = []
        #for pair in pairs:
        #    if self[pair[0]][pair[1]].dist() < 1.2*max(self[pair[0]][pair[1]].timearray)*vmax:
        #        dist_pairs.append(pair)
        #pairs = dist_pairs
        if minspectSNR:
            # plotting only pairs with all spect SNR >= minspectSNR
            SNRarraydict = self.pairs_and_SNRarrays(
                pairs_subset=pairs, minspectSNR=minspectSNR,
                whiten=whiten, verbose=True,
                vmin=vmin, vmax=vmax,
                signal2noise_trail=signal2noise_trail,
                noise_window_size=noise_window_size)
            pairs = sorted(SNRarraydict.keys())
        s = ("Exporting FTANs of {0} pairs to file {1}.pdf\n"
             "and dispersion curves to file {1}.pickle\n")
        print s.format(len(pairs), outputpath)
        return pairs, outputpath
def FTAN_parallel(self, pair, prefix=None, suffix='', whiten=False,
normalize_ampl=True, logscale=True, mindist=None,
minSNR=None, minspectSNR=None, monthyears=None,
vmin=SIGNAL_WINDOW_VMIN, vmax=SIGNAL_WINDOW_VMAX,
signal2noise_trail=SIGNAL2NOISE_TRAIL,
noise_window_size=NOISE_WINDOW_SIZE,
savefigs=False, outputpath=None,
**kwargs):
s1, s2 = pair
# appending FTAN plot of pair s1-s2 to pdf
#print "\nProcessing ...",
xc = self[s1][s2]
assert isinstance(xc, CrossCorrelation)
#try:
print "{}-{} ".format(s1, s2),
# complete FTAN analysis
try:
rawampl, rawvg, cleanampl, cleanvg = xc.FTAN_complete(
whiten=whiten, months=monthyears,
vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size,
**kwargs)
return cleanvg
except:
return None
#FTAN_output = (rawampl, rawvg, cleanampl, cleanvg)
# save individual FTAN output files if set
#if savefigs:
# FTAN_folder = os.path.join(FTAN_DIR, "FIGURES")
# if not os.path.exists(FTAN_folder): os.mkdir(FTAN_folder)
# FTAN_file = "{}-{}_FTAN.pdf".format(s1, s2)
# FTAN_figure = os.path.join(FTAN_folder, FTAN_file)
# if os.path.exists(FTAN_figure):
# backup
# shutil.copyfile(FTAN_figure, FTAN_figure + '~')
# fig = xc.plot_FTAN(rawampl, rawvg, cleanampl, cleanvg,
# whiten=False,
# normalize_ampl=True,
# logscale=True,
# showplot=False,
# vmin=vmin,
# vmax=vmax,
# signal2noise_trail=signal2noise_trail,
# noise_window_size=noise_window_size)
# opening pdf file
# pdf = PdfPages(FTAN_figure)
# pdf.savefig(fig)
# plt.close()
#print "Complete."
#except Exception as err:
# something went wrong with this FTAN
# err = err
# save individual FTAN output files
#if savefigs and fig:
# FTAN_folder = os.path.join(FTAN_DIR, "FIGURES")
# if not os.path.exists(FTAN_folder): os.mkdir(FTAN_folder)
# FTAN_file = "{}-{}_FTAN.pdf".format(s1, s2)
# FTAN_figure = os.path.join(FTAN_folder, FTAN_file)
# if os.path.exists(FTAN_figure):
# backup
# shutil.copyfile(FTAN_figure, FTAN_figure + '~')
# opening pdf file
# pdf = PdfPages(FTAN_figure)
# pdf.savefig(fig)
# plt.close()
    def FTANs(self, prefix=None, suffix='', whiten=False,
              normalize_ampl=True, logscale=True, mindist=None,
              minSNR=None, minspectSNR=None, monthyears=None,
              vmin=SIGNAL_WINDOW_VMIN, vmax=SIGNAL_WINDOW_VMAX,
              signal2noise_trail=SIGNAL2NOISE_TRAIL,
              noise_window_size=NOISE_WINDOW_SIZE,
              **kwargs):
        """
        Exports raw-clean FTAN plots to pdf (one page per pair)
        and clean dispersion curves to pickle file by calling
        plot_FTAN() for each cross-correlation.
        pdf is exported to *prefix*[_*suffix*].pdf
        dispersion curves are exported to *prefix*[_*suffix*].pickle
        If *prefix* is not given, then it is automatically set up as:
        *FTAN_DIR*/FTAN[_whitenedxc][_mindist=...][_minsSNR=...]
        [_minspectSNR=...][_month-year_month-year]
        e.g.: ./output/FTAN/FTAN_whitenedxc_minspectSNR=10
        Options:
        - Parameters *vmin*, *vmax*, *signal2noise_trail*, *noise_window_size*
          control the location of the signal window and the noise window
          (see function xc.SNR()).
        - Set whiten=True to whiten the spectrum of the cross-correlation.
        - Set normalize_ampl=True to normalize the plotted amplitude (so
          that the max amplitude = 1 at each period).
        - Set logscale=True to plot log(ampl^2) instead of ampl.
        - additional kwargs sent to FTAN_complete() and plot_FTAN()
        See. e.g., Levshin & Ritzwoller, "Automated detection,
        extraction, and measurement of regional surface waves",
        Pure Appl. Geoph. (2001) and Bensen et al., "Processing
        seismic ambient noise data to obtain reliable broad-band
        surface wave dispersion measurements", Geophys. J. Int. (2007).
        @type prefix: str or unicode
        @type suffix: str or unicode
        @type minSNR: float
        @type mindist: float
        @type minspectSNR: float
        @type whiten: bool
        @type monthyears: list of (int, int)
        """
        # setting default prefix if not given
        if not prefix:
            parts = [os.path.join(FTAN_DIR, 'FTAN')]
            if whiten:
                parts.append('whitenedxc')
            if mindist:
                parts.append('mindist={}'.format(mindist))
            if minSNR:
                parts.append('minSNR={}'.format(minSNR))
            if minspectSNR:
                parts.append('minspectSNR={}'.format(minspectSNR))
            if monthyears:
                parts.extend('{:02d}-{}'.format(m, y) for m, y in monthyears)
        else:
            parts = [prefix]
        if suffix:
            parts.append(suffix)
        # path of output files (without extension)
        outputpath = u'_'.join(parts)
        pdfpath = u'{}.pdf'.format(outputpath)
        if os.path.exists(pdfpath):
            # backup
            shutil.copyfile(pdfpath, pdfpath + '~')
        # opening pdf file
        pdf = PdfPages(pdfpath)
        # filtering pairs
        pairs = self.pairs(sort=True, minSNR=minSNR, mindist=mindist,
                           vmin=vmin, vmax=vmax,
                           signal2noise_trail=signal2noise_trail,
                           noise_window_size=noise_window_size)
        # pairs filtered by max. pair distance
        # (keep only pairs closer than 1.2 x the largest distance the
        # signal window can span at group velocity vmax)
        dist_pairs = []
        for pair in pairs:
            if self[pair[0]][pair[1]].dist() < 1.2*max(self[pair[0]][pair[1]].timearray)*vmax:
                dist_pairs.append(pair)
        pairs = dist_pairs
        if minspectSNR:
            # plotting only pairs with all spect SNR >= minspectSNR
            SNRarraydict = self.pairs_and_SNRarrays(
                pairs_subset=pairs, minspectSNR=minspectSNR,
                whiten=whiten, verbose=True,
                vmin=vmin, vmax=vmax,
                signal2noise_trail=signal2noise_trail,
                noise_window_size=noise_window_size)
            pairs = sorted(SNRarraydict.keys())
        s = ("Exporting FTANs of {0} pairs to file {1}.pdf\n"
             "and dispersion curves to file {1}.pickle\n")
        print s.format(len(pairs), outputpath)
        cleanvgcurves = []
        print "Appending FTAN of pair:",
        # NOTE(review): the try/except around the per-pair FTAN is commented
        # out below, so a single failing pair aborts the whole export --
        # confirm this is intended (FTAN_parallel keeps the best-effort path)
        for i, (s1, s2) in enumerate(pairs):
            # appending FTAN plot of pair s1-s2 to pdf
            print "[{}] {}-{}".format(i + 1, s1, s2),
            xc = self[s1][s2]
            assert isinstance(xc, CrossCorrelation)
            #try:
            # complete FTAN analysis
            rawampl, rawvg, cleanampl, cleanvg = xc.FTAN_complete(
                whiten=whiten, months=monthyears,
                vmin=vmin, vmax=vmax,
                signal2noise_trail=signal2noise_trail,
                noise_window_size=noise_window_size,
                **kwargs)
            #print "cleanvg: ", cleanvg
            # plotting raw-clean FTAN
            fig = xc.plot_FTAN(rawampl, rawvg, cleanampl, cleanvg,
                               whiten=whiten,
                               normalize_ampl=normalize_ampl,
                               logscale=logscale,
                               showplot=False,
                               vmin=vmin, vmax=vmax,
                               signal2noise_trail=signal2noise_trail,
                               noise_window_size=noise_window_size,
                               **kwargs)
            pdf.savefig(fig)
            plt.close()
            # appending clean vg curve
            cleanvgcurves.append(cleanvg)
        #except Exception as err:
        # something went wrong with this FTAN
        #    print "\nGot unexpected error:\n\n{}\n\nSKIPPING PAIR!".format(err)
        print "\nSaving files..."
        # closing pdf
        pdf.close()
        # exporting vg curves to pickle file
        f = psutils.openandbackup(outputpath + '.pickle', mode='wb')
        pickle.dump(cleanvgcurves, f, protocol=2)
        f.close()
def stations(self, pairs, sort=True):
"""
Returns a list of unique stations corresponding
to a list of pairs (of station name).
@type pairs: list of (str, str)
@rtype: list of L{pysismo.psstation.Station}
"""
stations = []
for s1, s2 in pairs:
if self[s1][s2].station1 not in stations:
stations.append(self[s1][s2].station1)
if self[s1][s2].station2 not in stations:
stations.append(self[s1][s2].station2)
if sort:
stations.sort(key=lambda obj: obj.name)
return stations
def _to_picklefile(self, outprefix, verbose=False):
"""
Dumps cross-correlations to (binary) pickle file
@type outprefix: str or unicode
"""
if verbose:
s = "Exporting cross-correlations in binary format to file: {}.pickle"
print s.format(outprefix)
f = psutils.openandbackup(outprefix + '.pickle', mode='wb')
pickle.dump(self, f, protocol=2)
f.close()
def _to_ascii(self, outprefix, verbose=False):
"""
Exports cross-correlations to txt file
@type outprefix: str or unicode
"""
if verbose:
s = "Exporting cross-correlations in ascci format to file: {}.txt"
print s.format(outprefix)
# writing data file: time array (1st column)
# and cross-corr array (one column per pair)
f = psutils.openandbackup(outprefix + '.txt', mode='w')
pairs = [(s1, s2) for (s1, s2) in self.pairs(sort=True) if self[s1][s2].nday]
# writing header
header = ['time'] + ["{0}-{1}".format(s1, s2) for s1, s2 in pairs]
f.write('\t'.join(header) + '\n')
# writing line = ith [time, cross-corr 1st pair, cross-corr 2nd pair etc]
data = zip(self._get_timearray(), *[self[s1][s2].dataarray for s1, s2 in pairs])
for fields in data:
line = [str(fld) for fld in fields]
f.write('\t'.join(line) + '\n')
f.close()
def _pairsinfo_to_ascii(self, outprefix, verbose=False):
"""
Exports pairs information to txt file
@type outprefix: str or unicode
"""
if verbose:
s = "Exporting pairs information to file: {}.stats.txt"
print s.format(outprefix)
# writing file: coord, locations, ids etc. for each pair
pairs = self.pairs(sort=True)
f = psutils.openandbackup(outprefix + '.stats.txt', mode='w')
# header
header = ['pair', 'lon1', 'lat1', 'lon2', 'lat2',
'locs1', 'locs2', 'ids1', 'ids2',
'distance', 'startday', 'endday', 'nday']
f.write('\t'.join(header) + '\n')
# fields
for (s1, s2) in pairs:
fields = [
'{0}-{1}'.format(s1, s2),
self[s1][s2].station1.coord[0],
self[s1][s2].station1.coord[1],
self[s1][s2].station2.coord[0],
self[s1][s2].station2.coord[1],
','.join(sorted("'{}'".format(l) for l in self[s1][s2].locs1)),
','.join(sorted("'{}'".format(l) for l in self[s1][s2].locs2)),
','.join(sorted(sid for sid in self[s1][s2].ids1)),
','.join(sorted(sid for sid in self[s1][s2].ids2)),
self[s1][s2].dist(),
self[s1][s2].startday,
self[s1][s2].endday,
self[s1][s2].nday
]
line = [str(fld) if (fld or fld == 0) else 'none' for fld in fields]
f.write('\t'.join(line) + '\n')
f.close()
def _stationsinfo_to_ascii(self, outprefix, stations=None, verbose=False):
"""
Exports information on cross-correlated stations
to txt file
@type outprefix: str or unicode
@type stations: list of {Station}
"""
if verbose:
s = "Exporting stations information to file: {}.stations.txt"
print s.format(outprefix)
if not stations:
# extracting the list of stations from cross-correlations
# if not provided
stations = self.stations(self.pairs(minday=0), sort=True)
# opening stations file and writing:
# station name, network, lon, lat, nb of pairs, total days of cross-corr
f = psutils.openandbackup(outprefix + '.stations.txt', mode='w')
header = ['name', 'network', 'lon', 'lat', 'npair', 'nday']
f.write('\t'.join(header) + '\n')
for station in stations:
# pairs in which station appears
pairs = [(s1, s2) for s1, s2 in self.pairs()
if station in [self[s1][s2].station1, self[s1][s2].station2]]
# total nb of days of pairs
nday = sum(self[s1][s2].nday for s1, s2 in pairs)
# writing fields
fields = [
station.name,
station.network,
str(station.coord[0]),
str(station.coord[1]),
str(len(pairs)),
str(nday)
]
f.write('\t'.join(fields) + '\n')
f.close()
def _get_timearray(self):
"""
Returns time array of cross-correlations
@rtype: L{numpy.ndarray}
"""
pairs = self.pairs()
# reference time array
s1, s2 = pairs[0]
reftimearray = self[s1][s2].timearray
# checking that all time arrays are equal to reference time array
for (s1, s2) in pairs:
if np.any(self[s1][s2].timearray != reftimearray):
s = 'Cross-corr collection does not have a unique timelag array'
raise Exception(s)
return reftimearray
def load_pickled_xcorr(pickle_file):
    """
    Loads pickle-dumped cross-correlations

    @type pickle_file: str or unicode
    @rtype: L{CrossCorrelationCollection}
    """
    # 'with' guarantees the file is closed even if unpickling fails;
    # the filename is passed positionally (the previous 'name=' keyword
    # is a CPython-2-only detail of open())
    with open(pickle_file, 'rb') as f:
        xc = pickle.load(f)
    return xc
def load_pickled_xcorr_interactive(xcorr_dir=CROSSCORR_DIR, xcorr_files='xcorr*.pickle*'):
"""
Loads interactively pickle-dumped cross-correlations, by giving the user
a choice among a list of file matching xcorrFiles
@type xcorr_dir: str or unicode
@type xcorr_files: str or unicode
@rtype: L{CrossCorrelationCollection}
"""
# looking for files that match xcorrFiles
pathxcorr = os.path.join(xcorr_dir, xcorr_files)
flist = glob.glob(pathname=pathxcorr)
flist.sort()
pickle_file = None
if len(flist) == 1:
pickle_file = flist[0]
print 'Reading cross-correlation from file ' + pickle_file
elif len(flist) > 0:
print 'Select file containing cross-correlations:'
print '\n'.join('{i} - {f}'.format(i=i, f=os.path.basename(f))
for (i, f) in enumerate(flist))
i = int(raw_input('\n'))
pickle_file = flist[i]
# loading cross-correlations
xc = load_pickled_xcorr(pickle_file=pickle_file)
return xc
def FTAN(x, dt, periods, alpha, phase_corr=None):
    """
    Frequency-time analysis of a time series: computes the analytic
    signal of *x* in the frequency domain, applies one Gaussian
    bandpass filter per center period and transforms each filtered
    analytic signal back to the time domain.

    Returns the matrices A(T0, t) and phi(T0, t): amplitude and phase,
    as functions of time t, of the analytic signal filtered around
    each center period T0 = 1 / f0.

    See. e.g., Levshin & Ritzwoller, "Automated detection,
    extraction, and measurement of regional surface waves",
    Pure Appl. Geoph. (2001) and Bensen et al., "Processing
    seismic ambient noise data to obtain reliable broad-band
    surface wave dispersion measurements", Geophys. J. Int. (2007).

    @param x: data array
    @type x: L{numpy.ndarray}
    @param dt: sample spacing
    @type dt: float
    @param periods: center periods of the Gaussian bandpass filters
    @type periods: L{numpy.ndarray} or list
    @param alpha: smoothing parameter of the Gaussian filter
    @type alpha: float
    @param phase_corr: phase correction, function of freq
    @type phase_corr: L{scipy.interpolate.interpolate.interp1d}
    @rtype: (L{numpy.ndarray}, L{numpy.ndarray})
    """
    # one row per filter period: amplitude/phase as functions of time
    ampl = np.zeros(shape=(len(periods), len(x)))
    phase_matrix = np.zeros(shape=(len(periods), len(x)))
    # analytic signal in the frequency domain:
    #            | 2X(f)  for f > 0
    #   Xa(f) =  | X(f)   for f = 0   , with X = fft(x)
    #            | 0      for f < 0
    spec = fft(x)
    freq = fftfreq(len(spec), d=dt)
    spec[freq < 0] = 0.0
    spec[freq > 0] *= 2.0
    if phase_corr:
        # replacing the phase over the domain of definition of
        # phase_corr(f): Xa(f) <- |Xa(f)|.exp(-i.phase_corr(f)),
        # cosine-tapered inside the band and zeroed outside
        band = (freq >= phase_corr.x.min()) & (freq <= phase_corr.x.max())
        spec[band] = np.abs(spec[band]) * np.exp(-1j * phase_corr(freq[band]))
        spec[band] *= cosTaper(npts=band.sum(), p=0.05)
        spec[~band] = 0.0
    # narrow Gaussian bandpass around each center period, then back to
    # the time domain
    for row, T0 in enumerate(periods):
        f0 = 1.0 / T0
        analytic = ifft(spec * np.exp(-alpha * ((freq - f0) / f0) ** 2))
        ampl[row, :] = np.abs(analytic)
        phase_matrix[row, :] = np.angle(analytic)
    return ampl, phase_matrix
def extract_dispcurve(amplmatrix, velocities, periodmask=None, varray_init=None,
optimizecurve=True, strength_smoothing=STRENGTH_SMOOTHING):
"""
Extracts a disperion curve (velocity vs period) from an amplitude
matrix *amplmatrix*, itself obtained from FTAN.
Among the curves that ride along local maxima of amplitude,
the selected group velocity curve v(T) maximizes the sum of
amplitudes, while preserving some smoothness (minimizing of
*dispcurve_penaltyfunc*).
The curve can be furthered optimized using a minimization
algorithm, which then seek the curve that really minimizes
the penalty function -- but does not necessarily ride through
the local maxima any more.
If an initial vel array is given (*varray_init*) and
*optimizecurve*=True then only the optimization algorithm
is applied, using *varray_init* as starting point.
*strength_smoothing* controls the relative strength of the
smoothing term in the penalty function.
amplmatrix[i, j] = amplitude at period nb i and velocity nb j
@type amplmatrix: L{numpy.ndarray}
@type velocities: L{numpy.ndarray}
@type varray_init: L{numpy.ndarray}
@rtype: L{numpy.ndarray}
"""
if not varray_init is None and optimizecurve:
# if an initial guess for vg array is given, we simply apply
# the optimization procedure using it as starting guess
return optimize_dispcurve(amplmatrix=amplmatrix,
velocities=velocities,
vg0=varray_init,
strength_smoothing=strength_smoothing)[0]
nperiods = amplmatrix.shape[0]
# building list of possible (v, ampl) curves at all periods
v_ampl_arrays = None
def extract_periods(iperiod, amplmatrix=amplmatrix, velocities=velocities,
v_ampl_arrays=v_ampl_arrays, nperiods=nperiods):
# local maxima of amplitude at period nb *iperiod*
argsmax = psutils.local_maxima_indices(amplmatrix[iperiod, :])
if not v_ampl_arrays:
# initialzing the list of possible (v, ampl) curves with local maxima
# at current period, and nan elsewhere
v_ampl_arrays = [(np.zeros(nperiods) * np.nan, np.zeros(nperiods) * np.nan)
for _ in range(len(argsmax))]
for argmax, (varray, amplarray) in zip(argsmax, v_ampl_arrays):
varray[iperiod] = velocities[argmax]
amplarray[iperiod] = amplmatrix[iperiod, argmax]
continue
# inserting the velocities that locally maximizes amplitude
# to the correct curves
for argmax in argsmax:
# velocity that locally maximizes amplitude
v = velocities[argmax]
# we select the (v, ampl) curve for which the jump wrt previous
# v (not nan) is minimum
lastv = lambda varray: varray[:iperiod][~np.isnan(varray[:iperiod])][-1]
vjump = lambda (varray, amplarray): np.abs(lastv(varray) - v)
varray, amplarray = min(v_ampl_arrays, key=vjump)
# if the curve already has a vel attributed at this period, we
# duplicate it
if not np.isnan(varray[iperiod]):
varray, amplarray = copy.copy(varray), copy.copy(amplarray)
v_ampl_arrays.append((varray, amplarray))
# inserting (vg, ampl) at current period to the selected curve
varray[iperiod] = v
amplarray[iperiod] = amplmatrix[iperiod, argmax]
# filling curves without (vg, ampl) data at the current period
unfilledcurves = [(varray, amplarray) for varray, amplarray in v_ampl_arrays
if np.isnan(varray[iperiod])]
for varray, amplarray in unfilledcurves:
# inserting vel (which locally maximizes amplitude) for which
# the jump wrt the previous (not nan) v of the curve is minimum
lastv = varray[:iperiod][~np.isnan(varray[:iperiod])][-1]
vjump = lambda arg: np.abs(lastv - velocities[arg])
argmax = min(argsmax, key=vjump)
varray[iperiod] = velocities[argmax]
amplarray[iperiod] = amplmatrix[iperiod, argmax]
return varray
for iperiod in range(nperiods):
# local maxima of amplitude at period nb *iperiod*
argsmax = psutils.local_maxima_indices(amplmatrix[iperiod, :])
if not argsmax:
# no local minimum => leave nan in (v, ampl) curves
continue
if not v_ampl_arrays:
# initialzing the list of possible (v, ampl) curves with local maxima
# at current period, and nan elsewhere
v_ampl_arrays = [(np.zeros(nperiods) * np.nan, np.zeros(nperiods) * np.nan)
for _ in range(len(argsmax))]
for argmax, (varray, amplarray) in zip(argsmax, v_ampl_arrays):
varray[iperiod] = velocities[argmax]
amplarray[iperiod] = amplmatrix[iperiod, argmax]
continue
# inserting the velocities that locally maximizes amplitude
# to the correct curves
for argmax in argsmax:
# velocity that locally maximizes amplitude
v = velocities[argmax]
# we select the (v, ampl) curve for which the jump wrt previous
# v (not nan) is minimum
lastv = lambda varray: varray[:iperiod][~np.isnan(varray[:iperiod])][-1]
vjump = lambda (varray, amplarray): np.abs(lastv(varray) - v)
# method 1
#t0 = dt.datetime.now()
#vjumps = np.array(map(vjump, v_ampl_arrays))
#varray1, amplarray1 = v_ampl_arrays[np.argmin(vjumps)]
#delta = (dt.datetime.now() - t0).total_seconds()
#print "\n{} seconds.".format(delta)
# method 2
#t0 = dt.datetime.now()
varray, amplarray = min(v_ampl_arrays, key=vjump)
#delta = (dt.datetime.now() - t0).total_seconds()
#print "{} seconds.".format(delta)
#print varray2 is varray1, amplarray2 is amplarray1
# method 3
#t0 = dt.datetime.now()
#for varray, ampl_array in v_ampl_arrays:
# v1 = lastv(varray) - v
# print v1
#v_ampl_arrays = np.array(v_ampl_arrays)
#varray3 = v_ampl_arrays[:,0][:iperiod][~np.isnan(v_ampl_arrays[:,0][:iperiod])][-1]
#print varray3
#delta = (dt.datetime.now() - t0).total_seconds()
#print "{} seconds.".format(delta)
#quit()
#print varray2 is varray3, amplarray2 is amplarray3
#v_ampl_arrays = list(v_ampl_arrays)
# if the curve already has a vel attributed at this period, we
# duplicate it
if not np.isnan(varray[iperiod]):
varray, amplarray = copy.copy(varray), copy.copy(amplarray)
v_ampl_arrays.append((varray, amplarray))
# inserting (vg, ampl) at current period to the selected curve
varray[iperiod] = v
amplarray[iperiod] = amplmatrix[iperiod, argmax]
# filling curves without (vg, ampl) data at the current period
unfilledcurves = [(varray, amplarray) for varray, amplarray in v_ampl_arrays
if np.isnan(varray[iperiod])]
for varray, amplarray in unfilledcurves:
# inserting vel (which locally maximizes amplitude) for which
# the jump wrt the previous (not nan) v of the curve is minimum
lastv = varray[:iperiod][~np.isnan(varray[:iperiod])][-1]
vjump = lambda arg: np.abs(lastv - velocities[arg])
argmax = min(argsmax, key=vjump)
varray[iperiod] = velocities[argmax]
amplarray[iperiod] = amplmatrix[iperiod, argmax]
# print "varray 1: ", varray
# varrays = map(extract_periods, range(nperiods))
# print "varrays: ", varrays
# amongst possible vg curves, we select the one that maximizes amplitude,
# while preserving some smoothness
def funcmin((varray, amplarray)):
if not periodmask is None:
return dispcurve_penaltyfunc(varray[periodmask],
amplarray[periodmask],
strength_smoothing=strength_smoothing)
else:
return dispcurve_penaltyfunc(varray, amplarray,
strength_smoothing=strength_smoothing)
varray, _ = min(v_ampl_arrays, key=funcmin)
# filling holes of vg curve
masknan = np.isnan(varray)
if masknan.any():
varray[masknan] = np.interp(x=masknan.nonzero()[0],
xp=(~masknan).nonzero()[0],
fp=varray[~masknan])
# further optimizing curve using a minimization algorithm
if optimizecurve:
# first trying with initial guess = the one above
varray1, funcmin1 = optimize_dispcurve(amplmatrix=amplmatrix,
velocities=velocities,
vg0=varray,
periodmask=periodmask,
strength_smoothing=strength_smoothing)
# then trying with initial guess = constant velocity 3 km/s
varray2, funcmin2 = optimize_dispcurve(amplmatrix=amplmatrix,
velocities=velocities,
vg0=3.0 * np.ones(nperiods),
periodmask=periodmask,
strength_smoothing=strength_smoothing)
varray = varray1 if funcmin1 <= funcmin2 else varray2
return varray
def optimize_dispcurve(amplmatrix, velocities, vg0, periodmask=None,
                       strength_smoothing=STRENGTH_SMOOTHING):
    """
    Optimizing vel curve, i.e., looking for curve that really
    minimizes *dispcurve_penaltyfunc* -- and does not necessarily
    ride any more through local maxima.

    Returns optimized vel curve and the corresponding
    value of the objective function to minimize.

    @param amplmatrix: amplitude matrix, shape (nperiods, nvelocities)
    @param velocities: velocity grid associated with amplmatrix's columns
    @param vg0: initial guess for the group-velocity curve (no NaN allowed)
    @param periodmask: optional boolean mask restricting the periods over
                       which the penalty is evaluated
    @param strength_smoothing: weight of the smoothing term of the penalty
    @type amplmatrix: L{numpy.ndarray}
    @type velocities: L{numpy.ndarray}
    @rtype: L{numpy.ndarray}, float
    """
    if np.any(np.isnan(vg0)):
        raise Exception("Init velocity array cannot contain NaN")

    nperiods = amplmatrix.shape[0]

    # bilinear interpolator (kx=ky=1) giving the amplitude traversed by
    # a given vel curve, evaluated at every period index
    ixperiods = np.arange(nperiods)
    amplcurvefunc2d = RectBivariateSpline(ixperiods, velocities, amplmatrix, kx=1, ky=1)
    amplcurvefunc = lambda vgcurve: amplcurvefunc2d.ev(ixperiods, vgcurve)

    def funcmin(varray):
        """Objective function to minimize (penalty of the candidate curve)."""
        # amplitude curve corresponding to vel curve, optionally masked
        if periodmask is not None:
            return dispcurve_penaltyfunc(varray[periodmask],
                                         amplcurvefunc(varray)[periodmask],
                                         strength_smoothing=strength_smoothing)
        return dispcurve_penaltyfunc(varray,
                                     amplcurvefunc(varray),
                                     strength_smoothing=strength_smoothing)

    # keeping the curve strictly inside the velocity grid so the
    # interpolator never has to extrapolate
    bounds = nperiods * [(np.min(velocities) + 0.1, np.max(velocities) - 0.1)]
    method = 'SLSQP'  # methods with bounds: L-BFGS-B, TNC, SLSQP
    resmin = minimize(fun=funcmin, x0=vg0, method=method, bounds=bounds)

    return resmin['x'], resmin['fun']
def dispcurve_penaltyfunc(vgarray, amplarray, strength_smoothing=STRENGTH_SMOOTHING):
    """
    Objective function that the vg dispersion curve must minimize.

    The function is composed of two terms:
    - the first term, - sum(amplitude), seeks to maximize the amplitudes
      traversed by the curve
    - the second term, sum(dvg**2) (with dvg the difference between
      consecutive velocities), is a smoothing term penalizing
      discontinuities

    *vgarray* is the velocity curve function of period, *amplarray*
    gives the amplitudes traversed by the curve and *strength_smoothing*
    is the strength of the smoothing term.

    @type vgarray: L{numpy.ndarray}
    @type amplarray: L{numpy.ndarray}
    """
    # removing nans from BOTH arrays, keeping them aligned
    notnan = ~(np.isnan(vgarray) | np.isnan(amplarray))
    vgarray = vgarray[notnan]
    # BUG FIX: amplarray was previously left unfiltered, so a single NaN
    # amplitude made amplarray.sum() -- and the whole penalty -- NaN
    amplarray = amplarray[notnan]

    # jumps between consecutive velocities (smoothness penalty)
    dvg = vgarray[1:] - vgarray[:-1]
    sumdvg2 = np.sum(dvg**2)

    # amplitude traversed by the curve (to be maximized)
    sumamplitude = amplarray.sum()

    # vg curve must maximize amplitude and minimize jumps
    return -sumamplitude + strength_smoothing * sumdvg2
if __name__ == '__main__':
# loading pickled cross-correlations
xc = load_pickled_xcorr_interactive()
print "Cross-correlations available in variable 'xc':"
| gpl-3.0 |
UDST/urbansim | urbansim/utils/sampling.py | 4 | 7852 | import math
import numpy as np
import pandas as pd
def get_probs(data, prob_column=None):
    """
    Checks for presence of a probability column and returns the result
    as a numpy array. If the probabilities are weights (i.e. they don't
    sum to 1), then this will be recalculated.

    Parameters
    ----------
    data: pandas.DataFrame
        Table to sample from.
    prob_column: string, optional, default None
        Name of the column in the data to provide probabilities or weights.

    Returns
    -------
    numpy.array
    """
    # no column given -> uniform (unweighted) sampling downstream
    if prob_column is None:
        return None

    # treat missing weights as zero
    weights = data[prob_column].fillna(0).values
    total = weights.sum()

    # all-zero weights degenerate to a uniform distribution
    if total == 0:
        weights = np.ones(len(weights))
        total = weights.sum()

    # re-normalize unless the weights already sum to 1 (within tolerance)
    if abs(total - 1.0) > 1e-8:
        weights = weights / (1.0 * total)

    return weights
def accounting_sample_replace(total, data, accounting_column, prob_column=None, max_iterations=50):
    """
    Sample rows with accounting with replacement.

    Iteratively adds (or removes) rows until the sum of the accounting
    column over the sample matches the control total, or the iteration
    budget is exhausted, in which case the closest attempt seen is
    returned.

    Parameters
    ----------
    total : int
        The control total the sampled rows will attempt to match.
    data: pandas.DataFrame
        Table to sample from.
    accounting_column: string
        Name of column with accounting totals/quantities to apply towards the control.
    prob_column: string, optional, default None
        Name of the column in the data to provide probabilities or weights.
    max_iterations: int, optional, default 50
        When using an accounting attribute, the maximum number of sampling iterations
        that will be applied.

    Returns
    -------
    sample_rows : pandas.DataFrame
        Table containing the sample.
    matched: bool
        Indicates if the total was matched exactly.
    """
    # check for probabilities (None -> uniform sampling)
    p = get_probs(data, prob_column)

    # determine avg number of accounting items per sample (e.g. persons per household);
    # used to estimate how many rows to add/remove per iteration
    per_sample = data[accounting_column].sum() / (1.0 * len(data.index.values))
    curr_total = 0
    remaining = total

    # closest/closest_remain track the best (smallest residual) sample seen,
    # so something sensible is returned even if we never match exactly
    sample_rows = pd.DataFrame()
    closest = None
    closest_remain = total
    matched = False

    for i in range(0, max_iterations):
        # stop if we've hit the control
        if remaining == 0:
            matched = True
            break

        # if sampling with probabilities, re-calc the # of items per sample
        # after the initial sample; this way the sample size reflects the
        # probabilities (weighted draws skew the avg accounting quantity)
        if p is not None and i == 1:
            per_sample = sample_rows[accounting_column].sum() / (1.0 * len(sample_rows))

        # update the sample: estimate the number of rows needed to cover
        # the remaining quantity
        num_samples = int(math.ceil(math.fabs(remaining) / per_sample))

        if remaining > 0:
            # we're short, add to the sample (with replacement, so duplicate
            # index values may appear)
            curr_ids = np.random.choice(data.index.values, num_samples, p=p)
            sample_rows = pd.concat([sample_rows, data.loc[curr_ids]])
        else:
            # we've overshot, remove from existing samples (FIFO)
            sample_rows = sample_rows.iloc[num_samples:].copy()

        # update the total and check for the closest result so far
        curr_total = sample_rows[accounting_column].sum()
        remaining = total - curr_total
        if abs(remaining) < closest_remain:
            closest_remain = abs(remaining)
            closest = sample_rows

    # NOTE(review): if max_iterations == 0 this returns (None, False) --
    # callers appear to always pass a positive iteration count
    return closest, matched
def accounting_sample_no_replace(total, data, accounting_column, prob_column=None):
    """
    Samples rows with accounting without replacement.

    Shuffles the rows (weighted shuffle if probabilities are provided),
    takes the longest prefix whose accounting sum does not exceed the
    control total, then scans the remaining rows for ones small enough
    to fill the shortage.

    Parameters
    ----------
    total : int
        The control total the sampled rows will attempt to match.
    data: pandas.DataFrame
        Table to sample from.
    accounting_column: string
        Name of column with accounting totals/quantities to apply towards the control.
    prob_column: string, optional, default None
        Name of the column in the data to provide probabilities or weights.

    Returns
    -------
    sample_rows : pandas.DataFrame
        Table containing the sample.
    matched: bool
        Indicates if the total was matched exactly.
    """
    # make sure this is even feasible
    if total > data[accounting_column].sum():
        raise ValueError('Control total exceeds the available samples')

    # check for probabilities
    p = get_probs(data, prob_column)

    # shuffle the rows
    if p is None:
        # random shuffle
        shuff_idx = np.random.permutation(data.index.values)
    else:
        # weighted shuffle: key u**(1/w) sorted descending
        # (Efraimidis-Spirakis weighted sampling without replacement)
        ran_p = pd.Series(np.power(np.random.rand(len(p)), 1.0 / p), index=data.index)
        # BUG FIX: sort_values returns a new Series; previously its result
        # was discarded, so the weighted shuffle never actually happened
        ran_p = ran_p.sort_values(ascending=False)
        shuff_idx = ran_p.index.values

    # get the initial sample: longest prefix with accounting sum <= total
    shuffle = data.loc[shuff_idx]
    csum = np.cumsum(shuffle[accounting_column].values)
    pos = np.searchsorted(csum, total, 'right')
    sample = shuffle.iloc[:pos]

    # refine the sample
    sample_idx = sample.index.values
    sample_total = sample[accounting_column].sum()
    shortage = total - sample_total

    for idx, row in shuffle.iloc[pos:].iterrows():
        if shortage == 0:
            # we've matched
            break
        # add the current element if it doesn't exceed the total
        cnt = row[accounting_column]
        if cnt <= shortage:
            sample_idx = np.append(sample_idx, idx)
            shortage -= cnt

    # BUG FIX: the match flag was previously set only when the loop broke
    # early, so a total matched by the prefix alone (or by the last scanned
    # row) was reported as unmatched
    matched = shortage == 0

    return shuffle.loc[sample_idx].copy(), matched
def sample_rows(total, data, replace=True, accounting_column=None,
                max_iterations=50, prob_column=None, return_status=False):
    """
    Samples and returns rows from a data frame while matching a desired control total. The total
    may represent a simple row count or may attempt to match a sum/quantity from an accounting
    column.

    Parameters
    ----------
    total : int
        The control total the sampled rows will attempt to match.
    data: pandas.DataFrame
        Table to sample from.
    replace: bool, optional, default True
        Indicates if sampling with or without replacement.
    accounting_column: string, optional
        Name of column with accounting totals/quantities to apply towards the control.
        If not provided then row counts will be used for accounting.
    max_iterations: int, optional, default 50
        When using an accounting attribute, the maximum number of sampling iterations
        that will be applied. Only applicable when sampling with replacement.
    prob_column: string, optional, default None
        If provided, name of the column in the data frame to provide probabilities
        or weights. If not provided, the sampling is random.
    return_status: bool, optional, default False
        If True, will also return a bool indicating if the total was matched exactly.

    Returns
    -------
    sample_rows : pandas.DataFrame
        Table containing the sample.
    matched: bool
        If return_status is True, returns True if total is matched exactly.
    """
    if not data.index.is_unique:
        raise ValueError('Data must have a unique index')

    # simplest case, just return n random rows
    if accounting_column is None:
        if replace is False and total > len(data.index.values):
            raise ValueError('Control total exceeds the available samples')
        # BUG FIX: get_probs takes the data frame as its first argument;
        # previously only the column name was passed (as `data`), so
        # prob_column defaulted to None and weights were silently ignored
        p = get_probs(data, prob_column)
        rows = data.loc[np.random.choice(
            data.index.values, int(total), replace=replace, p=p)].copy()
        matched = True

    # sample with accounting
    else:
        if replace:
            rows, matched = accounting_sample_replace(
                total, data, accounting_column, prob_column, max_iterations)
        else:
            rows, matched = accounting_sample_no_replace(
                total, data, accounting_column, prob_column)

    # return the results
    if return_status:
        return rows, matched
    return rows
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.