repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
harshaneelhg/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 138 | 14048 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.cross_validation import train_test_split, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
# Scorer names grouped by the kind of estimator they can be evaluated on;
# test_scorer_sample_weight uses these groups to pick a sensibly fitted model
# for each named scorer.
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
                      'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
               'roc_auc', 'average_precision', 'precision',
               'precision_weighted', 'precision_macro', 'precision_micro',
               'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
               'log_loss',
               'adjusted_rand_score'  # not really, but works
               ]
# These scorers are only meaningful for multilabel (indicator-matrix) targets.
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
    """Stand-in object that deliberately lacks a ``fit`` method.

    Used to verify that ``check_scoring`` rejects objects that do not
    implement the estimator interface.
    """
    pass
class EstimatorWithFit(BaseEstimator):
    """Minimal estimator exposing only ``fit`` (no ``score``/``predict``)."""

    def fit(self, X, y):
        # Fitting is a deliberate no-op; return self to allow chaining.
        return self
class EstimatorWithFitAndScore(object):
    """Minimal estimator providing ``fit`` and a constant ``score``."""

    def fit(self, X, y):
        # Nothing is learned; return self so calls can be chained.
        return self

    def score(self, X, y):
        # Constant score, independent of the data.
        return 1.0
class EstimatorWithFitAndPredict(object):
    """Minimal estimator that memorizes ``y`` in fit and replays it in predict."""

    def fit(self, X, y):
        # Remember the training targets verbatim.
        self.y = y
        return self

    def predict(self, X):
        # Echo back whatever targets were seen during fit, ignoring X.
        return self.y
class DummyScorer(object):
    """Scorer stand-in whose ``__call__`` always yields the constant 1."""

    def __call__(self, est, X, y):
        # Ignore everything and return 1; lets tests check that a scorer
        # object is actually invoked.
        return 1
def test_check_scoring():
    # Exercise every branch of check_scoring.
    # An object without a fit method is rejected outright.
    no_fit = EstimatorWithoutFit()
    msg = (r"estimator should a be an estimator implementing 'fit' method,"
           r" .* was passed")
    assert_raises_regexp(TypeError, msg, check_scoring, no_fit)
    # An estimator with its own score method: the passthrough scorer is used.
    with_score = EstimatorWithFitAndScore()
    with_score.fit([[1]], [1])
    passthrough = check_scoring(with_score)
    assert_true(passthrough is _passthrough_scorer)
    assert_almost_equal(passthrough(with_score, [[1]], [1]), 1.0)
    # predict but no score, and no scoring given: informative TypeError.
    with_predict = EstimatorWithFitAndPredict()
    with_predict.fit([[1]], [1])
    msg = (r"If no scoring is specified, the estimator passed should have"
           r" a 'score' method\. The estimator .* does not\.")
    assert_raises_regexp(TypeError, msg, check_scoring, with_predict)
    # With an explicit scoring string, prediction-based scoring works.
    accuracy = check_scoring(with_predict, "accuracy")
    assert_almost_equal(accuracy(with_predict, [[1]], [1]), 1.0)
    # A fit-only estimator plus a scoring string yields a _PredictScorer.
    fit_only = EstimatorWithFit()
    assert_true(isinstance(check_scoring(fit_only, "accuracy"),
                           _PredictScorer))
    # allow_none lets check_scoring return None when nothing is specified.
    fit_only = EstimatorWithFit()
    assert_true(check_scoring(fit_only, allow_none=True) is None)
def test_check_scoring_gridsearchcv():
    # Non-regression: check_scoring must accept meta-estimators such as
    # GridSearchCV and Pipeline, which delegate fitting.
    search = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
    assert_true(isinstance(check_scoring(search, "f1"), _PredictScorer))
    pipeline = make_pipeline(LinearSVC())
    assert_true(isinstance(check_scoring(pipeline, "f1"), _PredictScorer))
    # cross_val_score must rely solely on the provided scorer and require
    # nothing from the estimator beyond a fit method.
    cv_scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]],
                                [1, 0, 1], scoring=DummyScorer())
    assert_array_equal(cv_scores, 1)
def test_make_scorer():
    """Sanity check on the make_scorer factory function.

    Requesting both decision-threshold output and probability output at
    once is contradictory, so make_scorer must raise a ValueError.
    """
    # A named def instead of a lambda assignment (PEP 8 E731); the metric
    # body is never reached because argument validation fails first.
    def dummy_metric(*args):
        return 0

    assert_raises(ValueError, make_scorer, dummy_metric, needs_threshold=True,
                  needs_proba=True)
def test_classification_scores():
    """String-named classification scorers must match direct metric calls."""
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LinearSVC(random_state=0)
    clf.fit(X_train, y_train)

    for prefix, metric in [('f1', f1_score), ('precision', precision_score),
                           ('recall', recall_score)]:
        # Each averaging variant of the named scorer must agree with the
        # metric called directly with the matching ``average`` argument.
        # (The original repeated this stanza verbatim for each average.)
        for average in ('weighted', 'macro', 'micro'):
            score1 = get_scorer('%s_%s' % (prefix, average))(clf, X_test,
                                                             y_test)
            score2 = metric(y_test, clf.predict(X_test), pos_label=None,
                            average=average)
            assert_almost_equal(score1, score2)
        # The bare name uses the default binary behavior (pos_label=1).
        score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
        score2 = metric(y_test, clf.predict(X_test), pos_label=1)
        assert_almost_equal(score1, score2)

    # test fbeta score that takes an argument
    scorer = make_scorer(fbeta_score, beta=2)
    score1 = scorer(clf, X_test, y_test)
    score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
    assert_almost_equal(score1, score2)

    # test that custom scorer can be pickled
    unpickled_scorer = pickle.loads(pickle.dumps(scorer))
    score3 = unpickled_scorer(clf, X_test, y_test)
    assert_almost_equal(score1, score3)

    # Smoke test the repr of the scorer object itself.  The previous code
    # called repr on the plain fbeta_score function, which exercised
    # nothing scorer-specific.
    repr(scorer)
def test_regression_scorers():
    # The 'r2' scorer must agree with r2_score computed by hand.
    data = load_diabetes()
    X_train, X_test, y_train, y_test = train_test_split(
        data.data, data.target, random_state=0)
    model = Ridge()
    model.fit(X_train, y_train)
    via_scorer = get_scorer('r2')(model, X_test, y_test)
    direct = r2_score(y_test, model.predict(X_test))
    assert_almost_equal(via_scorer, direct)
def test_thresholded_scorers():
    # Test scorers that take thresholds.
    # With LogisticRegression (has both decision_function and predict_proba)
    # the 'roc_auc' scorer must match roc_auc_score computed from either.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf = LogisticRegression(random_state=0)
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    assert_almost_equal(score1, score3)
    # 'log_loss' is a loss, so the scorer returns its negation.
    logscore = get_scorer('log_loss')(clf, X_test, y_test)
    logloss = log_loss(y_test, clf.predict_proba(X_test))
    assert_almost_equal(-logscore, logloss)
    # same for an estimator without decision_function
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
    assert_almost_equal(score1, score2)
    # test with a regressor (no decision_function)
    # The scorer falls back to the raw predict output.
    reg = DecisionTreeRegressor()
    reg.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(reg, X_test, y_test)
    score2 = roc_auc_score(y_test, reg.predict(X_test))
    assert_almost_equal(score1, score2)
    # Test that an exception is raised on more than two classes
    # (reuses the DecisionTreeRegressor bound to ``clf`` above after a
    # refit on 3-class data).
    X, y = make_blobs(random_state=0, centers=3)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    clf.fit(X_train, y_train)
    assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
    """Thresholded scorers must handle multilabel-indicator targets.

    Covers multi-output multi-class classifiers exposing predict_proba or
    decision_function, and multilabel OneVsRest classifiers.
    """
    X, y = make_multilabel_classification(allow_unlabeled=False,
                                          random_state=0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    # Multi-output multi-class predict_proba
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    y_proba = clf.predict_proba(X_test)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    # np.vstack requires a sequence; passing a bare generator is deprecated
    # (and removed) in modern numpy, so build the list explicitly.
    score2 = roc_auc_score(y_test, np.vstack([p[:, -1] for p in y_proba]).T)
    assert_almost_equal(score1, score2)

    # Multi-output multi-class decision_function
    # TODO Is there any yet?
    clf = DecisionTreeClassifier()
    clf.fit(X_train, y_train)
    clf._predict_proba = clf.predict_proba
    clf.predict_proba = None
    clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
    y_proba = clf.decision_function(X_test)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    # y_proba is already a list of arrays; stack it directly instead of
    # wrapping it in a generator.
    score2 = roc_auc_score(y_test, np.vstack(y_proba).T)
    assert_almost_equal(score1, score2)

    # Multilabel predict_proba
    clf = OneVsRestClassifier(DecisionTreeClassifier())
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
    assert_almost_equal(score1, score2)

    # Multilabel decision function
    clf = OneVsRestClassifier(LinearSVC(random_state=0))
    clf.fit(X_train, y_train)
    score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, clf.decision_function(X_test))
    assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
    # Clustering scorers are evaluated against a gold-standard labeling;
    # there are no truly unsupervised scorers yet.
    X, y = make_blobs(random_state=0, centers=2)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
    km = KMeans(n_clusters=3)
    km.fit(X_train)
    from_scorer = get_scorer('adjusted_rand_score')(km, X_test, y_test)
    from_metric = adjusted_rand_score(y_test, km.predict(X_test))
    assert_almost_equal(from_scorer, from_metric)
@ignore_warnings
def test_raises_on_score_list():
    # A scorer returning a list of per-class scores (average=None) cannot
    # be used for model selection and must raise a ValueError.
    X, y = make_blobs(random_state=0)
    per_class_f1 = make_scorer(f1_score, average=None)
    tree = DecisionTreeClassifier()
    assert_raises(ValueError, cross_val_score, tree, X, y,
                  scoring=per_class_f1)
    search = GridSearchCV(tree, scoring=per_class_f1,
                          param_grid={'max_depth': [1, 2]})
    assert_raises(ValueError, search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
    # Test that scorers support sample_weight or raise sensible errors
    # Unlike the metrics invariance test, in the scorer case it's harder
    # to ensure that, on the classifier output, weighted and unweighted
    # scores really should be unequal.
    X, y = make_classification(random_state=0)
    _, y_ml = make_multilabel_classification(n_samples=X.shape[0],
                                             random_state=0)
    split = train_test_split(X, y, y_ml, random_state=0)
    X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split

    # Zero out the first 10 test samples: a scorer honoring sample_weight
    # must then score the same as when those samples are simply dropped.
    sample_weight = np.ones_like(y_test)
    sample_weight[:10] = 0

    # get sensible estimators for each metric
    sensible_regr = DummyRegressor(strategy='median')
    sensible_regr.fit(X_train, y_train)
    sensible_clf = DecisionTreeClassifier(random_state=0)
    sensible_clf.fit(X_train, y_train)
    sensible_ml_clf = DecisionTreeClassifier(random_state=0)
    sensible_ml_clf.fit(X_train, y_ml_train)
    # Map each scorer name to the estimator type it can be evaluated on.
    estimator = dict([(name, sensible_regr)
                      for name in REGRESSION_SCORERS] +
                     [(name, sensible_clf)
                      for name in CLF_SCORERS] +
                     [(name, sensible_ml_clf)
                      for name in MULTILABEL_ONLY_SCORERS])

    for name, scorer in SCORERS.items():
        if name in MULTILABEL_ONLY_SCORERS:
            target = y_ml_test
        else:
            target = y_test
        try:
            # Three evaluations: weighted, weight-zero samples dropped,
            # and fully unweighted.
            weighted = scorer(estimator[name], X_test, target,
                              sample_weight=sample_weight)
            ignored = scorer(estimator[name], X_test[10:], target[10:])
            unweighted = scorer(estimator[name], X_test, target)
            # Weights must actually change the score...
            assert_not_equal(weighted, unweighted,
                             msg="scorer {0} behaves identically when "
                             "called with sample weights: {1} vs "
                             "{2}".format(name, weighted, unweighted))
            # ...and zero weight must be equivalent to dropping the sample.
            assert_almost_equal(weighted, ignored,
                                err_msg="scorer {0} behaves differently when "
                                "ignoring samples and setting sample_weight to"
                                " 0: {1} vs {2}".format(name, weighted,
                                                        ignored))
        except TypeError as e:
            # Scorers that do not support sample_weight must at least say so.
            assert_true("sample_weight" in str(e),
                        "scorer {0} raises unhelpful exception when called "
                        "with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
mgraupe/acq4 | acq4/pyqtgraph/widgets/MatplotlibWidget.py | 30 | 1442 | from ..Qt import QtGui, QtCore, USE_PYSIDE, USE_PYQT5
import matplotlib
if not USE_PYQT5:
if USE_PYSIDE:
matplotlib.rcParams['backend.qt4']='PySide'
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
else:
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
class MatplotlibWidget(QtGui.QWidget):
    """
    Implements a Matplotlib figure inside a QWidget.
    Use getFigure() and draw() to interact with matplotlib.
    Example::
        mw = MatplotlibWidget()
        subplot = mw.getFigure().add_subplot(111)
        subplot.plot(x,y)
        mw.draw()
    """
    def __init__(self, size=(5.0, 4.0), dpi=100):
        # size: figure size in inches (width, height); dpi: figure resolution.
        QtGui.QWidget.__init__(self)
        self.fig = Figure(size, dpi=dpi)
        self.canvas = FigureCanvas(self.fig)
        # Reparent the canvas into this widget so Qt manages its lifetime.
        self.canvas.setParent(self)
        self.toolbar = NavigationToolbar(self.canvas, self)
        # Stack the toolbar above the canvas.
        self.vbox = QtGui.QVBoxLayout()
        self.vbox.addWidget(self.toolbar)
        self.vbox.addWidget(self.canvas)
        self.setLayout(self.vbox)

    def getFigure(self):
        # Accessor for the underlying matplotlib Figure.
        return self.fig

    def draw(self):
        # Re-render the canvas after the figure has been modified.
        self.canvas.draw()
| mit |
aminert/scikit-learn | sklearn/linear_model/tests/test_randomized_l1.py | 214 | 4690 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model.base import center_data
# Shared fixture: standardized diabetes data restricted to a small subset
# of features, used by all randomized-l1 regression tests below.
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# Univariate F-scores, used as the reference ranking of the features.
F, _ = f_regression(X, y)
def test_lasso_stability_path():
    # The stability path must rank the three strongest features (according
    # to the univariate F-test) at the top.
    path_coefs, path_scores = lasso_stability_path(X, y, scaling=0.3,
                                                   random_state=42,
                                                   n_resampling=30)
    assert_array_equal(np.argsort(F)[-3:],
                       np.argsort(np.sum(path_scores, axis=1))[-3:])
def test_randomized_lasso():
    # Randomized lasso must recover the top features found by f_regression.
    scaling = 0.3
    threshold = 0.5
    # A single alpha value.
    model = RandomizedLasso(verbose=False, alpha=1, random_state=42,
                            scaling=scaling, selection_threshold=threshold)
    scores = model.fit(X, y).scores_
    assert_array_equal(np.argsort(F)[-3:], np.argsort(scores)[-3:])
    # Several alpha values at once.
    model = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
                            scaling=scaling, selection_threshold=threshold)
    scores = model.fit(X, y).scores_
    assert_equal(model.all_scores_.shape, (X.shape[1], 2))
    assert_array_equal(np.argsort(F)[-3:], np.argsort(scores)[-3:])
    # transform keeps only the selected features; inverse_transform restores
    # the original shape.
    X_reduced = model.transform(X)
    X_restored = model.inverse_transform(X_reduced)
    assert_equal(X_reduced.shape[1], np.sum(scores > threshold))
    assert_equal(X_restored.shape, X.shape)
    # alpha='aic' selects every feature on this easy problem.
    model = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
                            scaling=scaling)
    scores = model.fit(X, y).scores_
    assert_array_equal(scores, X.shape[1] * [1.])
    # Out-of-range scaling values are rejected at fit time.
    model = RandomizedLasso(verbose=False, scaling=-0.1)
    assert_raises(ValueError, model.fit, X, y)
    model = RandomizedLasso(verbose=False, scaling=1.1)
    assert_raises(ValueError, model.fit, X, y)
def test_randomized_logistic():
    # Randomized sparse logistic regression should reproduce the feature
    # ordering given by the univariate F-test on a binary iris subproblem.
    iris = load_iris()
    X_bin = iris.data[:, [0, 2]]
    y_bin = iris.target
    binary_mask = y_bin != 2
    X_bin = X_bin[binary_mask]
    y_bin = y_bin[binary_mask]
    F_bin, _ = f_classif(X_bin, y_bin)
    scaling = 0.3
    model = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
                                         scaling=scaling, n_resampling=50,
                                         tol=1e-3)
    X_before = X_bin.copy()
    scores = model.fit(X_bin, y_bin).scores_
    assert_array_equal(X_bin, X_before)  # fit does not modify X
    assert_array_equal(np.argsort(F_bin), np.argsort(scores))
    # A list of C values must give the same ranking.
    model = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
                                         random_state=42, scaling=scaling,
                                         n_resampling=50, tol=1e-3)
    scores = model.fit(X_bin, y_bin).scores_
    assert_array_equal(np.argsort(F_bin), np.argsort(scores))
def test_randomized_logistic_sparse():
    """Randomized logistic regression must give identical scores for sparse
    and dense representations of the same (centered) data."""
    iris = load_iris()
    X = iris.data[:, [0, 2]]
    y = iris.target
    # Keep only two classes to make the problem binary.
    X = X[y != 2]
    y = y[y != 2]
    # center here because sparse matrices are usually not centered
    X, y, _, _, _ = center_data(X, y, True, True)
    X_sp = sparse.csr_matrix(X)
    # (An unused ``F, _ = f_classif(X, y)`` computation was removed: this
    # test only compares sparse vs dense scores and never uses F.)
    scaling = 0.3
    clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
                                       scaling=scaling, n_resampling=50,
                                       tol=1e-3)
    feature_scores = clf.fit(X, y).scores_
    clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
                                       scaling=scaling, n_resampling=50,
                                       tol=1e-3)
    feature_scores_sp = clf.fit(X_sp, y).scores_
    assert_array_equal(feature_scores, feature_scores_sp)
| bsd-3-clause |
0asa/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 3 | 22131 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.cross_validation import KFold
# Shared fixtures: a shuffled 200-sample subset of diabetes for regression
# tests and a sparse CSR copy of iris for classification tests.
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
# Identity / CSR-converting filters let each `_test_*` helper run on dense
# and sparse inputs alike (see test_dense_sparse).
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
    """Ridge regression convergence test using score
    TODO: for this test to be robust, we should use a dataset instead
    of np.random.
    """
    rng = np.random.RandomState(0)
    alpha = 1.0
    # Every solver must reach a reasonable score in both shape regimes.
    for solver in ("svd", "sparse_cg", "cholesky", "lsqr"):
        # With more samples than features
        n_samples, n_features = 6, 5
        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)
        ridge = Ridge(alpha=alpha, solver=solver)
        ridge.fit(X, y)
        assert_equal(ridge.coef_.shape, (X.shape[1], ))
        assert_greater(ridge.score(X, y), 0.47)
        if solver == "cholesky":
            # Currently the only solver to support sample_weight.
            ridge.fit(X, y, sample_weight=np.ones(n_samples))
            # Uniform weights must not change the quality of the fit.
            assert_greater(ridge.score(X, y), 0.47)
        # With more features than samples
        n_samples, n_features = 5, 10
        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)
        ridge = Ridge(alpha=alpha, solver=solver)
        ridge.fit(X, y)
        assert_greater(ridge.score(X, y), .9)
        if solver == "cholesky":
            # Currently the only solver to support sample_weight.
            ridge.fit(X, y, sample_weight=np.ones(n_samples))
            assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
    # Primal and kernel (dual) ridge solutions must be related by
    # w = X^T alpha for the same regularization strength.
    targets = y_diabetes.reshape(-1, 1)
    primal_coef = _solve_cholesky(X_diabetes, targets, alpha=[1e-2])
    gram = np.dot(X_diabetes, X_diabetes.T)
    dual_coef = _solve_cholesky_kernel(gram, targets, alpha=[1e-2])
    primal_from_dual = np.dot(X_diabetes.T, dual_coef).T
    assert_array_almost_equal(primal_coef, primal_from_dual)
def test_ridge_singular():
    # Ridge with alpha=0 must still fit well on a rank-deficient design
    # matrix (duplicated rows make X singular).
    rng = np.random.RandomState(0)
    n_samples, n_features = 6, 6
    y_half = rng.randn(n_samples // 2)
    X_half = rng.randn(n_samples // 2, n_features)
    y = np.concatenate((y_half, y_half))
    X = np.concatenate((X_half, X_half), axis=0)
    ridge = Ridge(alpha=0)
    ridge.fit(X, y)
    assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
    """Sample-weighted ridge must equal rescaled unweighted ridge, and the
    intercept-fitting path is cross-checked against a one-step Newton solve."""
    rng = np.random.RandomState(0)
    for solver in ("cholesky", ):
        for n_samples, n_features in ((6, 5), (5, 10)):
            for alpha in (1.0, 1e-2):
                y = rng.randn(n_samples)
                X = rng.randn(n_samples, n_features)
                sample_weight = 1 + rng.rand(n_samples)

                coefs = ridge_regression(X, y,
                                         alpha=alpha,
                                         sample_weight=sample_weight,
                                         solver=solver)
                # Sample weight can be implemented via a simple rescaling
                # for the square loss.
                coefs2 = ridge_regression(
                    X * np.sqrt(sample_weight)[:, np.newaxis],
                    y * np.sqrt(sample_weight),
                    alpha=alpha, solver=solver)
                assert_array_almost_equal(coefs, coefs2)

                # Test for fit_intercept = True
                est = Ridge(alpha=alpha, solver=solver)
                est.fit(X, y, sample_weight=sample_weight)

                # Check using Newton's Method
                # Quadratic function should be solved in a single step.
                # Initialize
                sample_weight = np.sqrt(sample_weight)
                # Augment X with a column of ones so the intercept is the
                # first entry of the solved coefficient vector.
                X_weighted = sample_weight[:, np.newaxis] * (
                    np.column_stack((np.ones(n_samples), X)))
                y_weighted = y * sample_weight

                # Gradient is (X*coef-y)*X + alpha*coef_[1:]
                # Remove coef since it is initialized to zero.
                grad = -np.dot(y_weighted, X_weighted)

                # Hessian is (X.T*X) + alpha*I except that the first
                # diagonal element should be zero, since there is no
                # penalization of intercept.
                diag = alpha * np.ones(n_features + 1)
                diag[0] = 0.
                hess = np.dot(X_weighted.T, X_weighted)
                # Add the penalty along the diagonal in place.
                hess.flat[::n_features + 2] += diag
                coef_ = - np.dot(linalg.inv(hess), grad)
                assert_almost_equal(coef_[0], est.intercept_)
                assert_array_almost_equal(coef_[1:], est.coef_)
def test_ridge_shapes():
    """coef_ and intercept_ shapes must follow the shape of y."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 5, 10
    X = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples)
    ridge = Ridge()
    # 1-D target: flat coef_ and scalar intercept_.
    ridge.fit(X, y)
    assert_equal(ridge.coef_.shape, (n_features,))
    assert_equal(ridge.intercept_.shape, ())
    # Column-vector target: one row of coefficients.
    ridge.fit(X, y[:, np.newaxis])
    assert_equal(ridge.coef_.shape, (1, n_features))
    assert_equal(ridge.intercept_.shape, (1, ))
    # Two stacked targets: one row per target.
    ridge.fit(X, np.c_[y, 1 + y])
    assert_equal(ridge.coef_.shape, (2, n_features))
    assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
    """Intercepts for multiple targets must match single-target fits (GH #708)."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 5, 10
    X = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples)
    ridge = Ridge()
    ridge.fit(X, y)
    single_intercept = ridge.intercept_
    # Fitting [y, y + 1] should shift only the second intercept by one.
    ridge.fit(X, np.c_[y, 1. + y])
    assert_almost_equal(ridge.intercept_[0], single_intercept)
    assert_almost_equal(ridge.intercept_[1], single_intercept + 1.)
def test_toy_ridge_object():
    """Unregularized Ridge recovers an exact linear relation on toy data.

    TODO: test also n_samples > n_features
    """
    X = np.array([[1], [2]])
    targets = np.array([1, 2])
    clf = Ridge(alpha=0.0)
    clf.fit(X, targets)
    X_test = [[1], [2], [3], [4]]
    # y = x is recovered exactly with no regularization.
    assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
    assert_equal(len(clf.coef_.shape), 1)
    assert_equal(type(clf.intercept_), np.float64)
    # With a 2-D target, coef_ gains a dimension and intercept_ is an array.
    clf.fit(X, np.vstack((targets, targets)).T)
    assert_equal(len(clf.coef_.shape), 2)
    assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
    """On alpha=0., Ridge and OLS yield the same solution."""
    rng = np.random.RandomState(0)
    # we need more samples than features
    n_samples, n_features = 5, 4
    y = rng.randn(n_samples)
    X = rng.randn(n_samples, n_features)
    ridge = Ridge(alpha=0., fit_intercept=False)
    ols = LinearRegression(fit_intercept=False)
    ridge.fit(X, y)
    ols.fit(X, y)
    assert_almost_equal(ridge.coef_, ols.coef_)
    # NOTE(review): the following three lines repeat the previous block
    # verbatim; at best this checks that refitting is deterministic.
    # Possibly a leftover from an edit — confirm intent before removing.
    ridge.fit(X, y)
    ols.fit(X, y)
    assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
    """A per-target alpha vector must match fitting each target separately."""
    rng = np.random.RandomState(42)
    n_samples, n_features, n_targets = 20, 10, 5
    X = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples, n_targets)
    penalties = np.arange(n_targets)
    # Reference: one cholesky fit per (alpha, target) pair.
    coef_cholesky = np.array([
        Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
        for alpha, target in zip(penalties, y.T)])
    # Every solver must reproduce the reference when handed the full
    # penalty vector at once.
    for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky']:
        coef_vectorized = Ridge(alpha=penalties, solver=solver,
                                tol=1e-6).fit(X, y).coef_
        assert_array_almost_equal(coef_cholesky, coef_vectorized)
    # Test error is raised when number of targets and penalties do not match.
    ridge = Ridge(alpha=penalties[:3])
    assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
    # test that can work with both dense or sparse matrices
    # Cross-checks _RidgeGCV's efficient leave-one-out machinery against a
    # brute-force LOO loop, in both its eigendecomposition and SVD variants,
    # then smoke-tests custom scorers and multi-output fitting.
    n_samples = X_diabetes.shape[0]

    ret = []

    ridge_gcv = _RidgeGCV(fit_intercept=False)
    ridge = Ridge(alpha=1.0, fit_intercept=False)

    # generalized cross-validation (efficient leave-one-out)
    decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
    errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
    values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)

    # brute-force leave-one-out: remove one example at a time
    errors2 = []
    values2 = []
    for i in range(n_samples):
        sel = np.arange(n_samples) != i
        X_new = X_diabetes[sel]
        y_new = y_diabetes[sel]
        ridge.fit(X_new, y_new)
        value = ridge.predict([X_diabetes[i]])[0]
        error = (y_diabetes[i] - value) ** 2
        errors2.append(error)
        values2.append(value)

    # check that efficient and brute-force LOO give same results
    assert_almost_equal(errors, errors2)
    assert_almost_equal(values, values2)

    # generalized cross-validation (efficient leave-one-out,
    # SVD variation)
    decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
    errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
    values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)

    # check that efficient and SVD efficient LOO give same results
    assert_almost_equal(errors, errors3)
    assert_almost_equal(values, values3)

    # check best alpha
    ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
    alpha_ = ridge_gcv.alpha_
    ret.append(alpha_)

    # check that we get same best alpha with custom loss_func
    # ignore_warnings is used as a plain wrapper here to silence
    # deprecation warnings from the scoring paths.
    f = ignore_warnings
    scoring = make_scorer(mean_squared_error, greater_is_better=False)
    ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
    f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
    assert_equal(ridge_gcv2.alpha_, alpha_)

    # check that we get same best alpha with custom score_func
    func = lambda x, y: -mean_squared_error(x, y)
    scoring = make_scorer(func)
    ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
    f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
    assert_equal(ridge_gcv3.alpha_, alpha_)

    # check that we get same best alpha with a scorer
    scorer = get_scorer('mean_squared_error')
    ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
    ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
    assert_equal(ridge_gcv4.alpha_, alpha_)

    # check that we get same best alpha with sample weights
    ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
                  sample_weight=np.ones(n_samples))
    assert_equal(ridge_gcv.alpha_, alpha_)

    # simulate several responses
    # Two identical stacked targets must predict the single-target
    # solution twice.
    Y = np.vstack((y_diabetes, y_diabetes)).T

    ridge_gcv.fit(filter_(X_diabetes), Y)
    Y_pred = ridge_gcv.predict(filter_(X_diabetes))
    ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
    y_pred = ridge_gcv.predict(filter_(X_diabetes))

    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
                              Y_pred, decimal=5)

    # Return the selected alpha so dense and sparse runs can be compared.
    return ret
def _test_ridge_cv(filter_):
    # RidgeCV smoke test: both the default efficient LOO scheme and an
    # explicit KFold object must yield 1-D coefs and a float intercept.
    n_samples = X_diabetes.shape[0]
    ridge_cv = RidgeCV()
    for cv in (None, KFold(n_samples, 5)):
        if cv is not None:
            ridge_cv.set_params(cv=cv)
        ridge_cv.fit(filter_(X_diabetes), y_diabetes)
        ridge_cv.predict(filter_(X_diabetes))
        assert_equal(len(ridge_cv.coef_.shape), 1)
        assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
    # Fit intercept-free ridge on diabetes and return the rounded R^2
    # score so dense and sparse runs can be compared exactly.
    model = Ridge(fit_intercept=False)
    model.fit(filter_(X_diabetes), y_diabetes)
    return np.round(model.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
    # Fitting two identical stacked targets must predict the single-target
    # solution twice.
    stacked_targets = np.vstack((y_diabetes, y_diabetes)).T
    n_features = X_diabetes.shape[1]
    model = Ridge(fit_intercept=False)
    model.fit(filter_(X_diabetes), stacked_targets)
    assert_equal(model.coef_.shape, (2, n_features))
    multi_pred = model.predict(filter_(X_diabetes))
    model.fit(filter_(X_diabetes), y_diabetes)
    single_pred = model.predict(filter_(X_diabetes))
    assert_array_almost_equal(np.vstack((single_pred, single_pred)).T,
                              multi_pred, decimal=3)
def _test_ridge_classifiers(filter_):
    # Both ridge classifiers should reach roughly 80% training accuracy on
    # iris, with one coefficient row per class.
    n_classes = np.unique(y_iris).shape[0]
    n_features = X_iris.shape[1]
    for clf in (RidgeClassifier(), RidgeClassifierCV()):
        clf.fit(filter_(X_iris), y_iris)
        assert_equal(clf.coef_.shape, (n_classes, n_features))
        predictions = clf.predict(filter_(X_iris))
        assert_greater(np.mean(y_iris == predictions), .79)
    # The CV variant must also accept an explicit cross-validation object.
    cv = KFold(X_iris.shape[0], 5)
    clf = RidgeClassifierCV(cv=cv)
    clf.fit(filter_(X_iris), y_iris)
    predictions = clf.predict(filter_(X_iris))
    assert_true(np.mean(y_iris == predictions) >= 0.8)
def _test_tolerance(filter_):
    # A looser solver tolerance must never yield a better score than a
    # tighter one.
    tight = Ridge(tol=1e-5)
    tight.fit(filter_(X_diabetes), y_diabetes)
    tight_score = tight.score(filter_(X_diabetes), y_diabetes)
    loose = Ridge(tol=1e-3)
    loose.fit(filter_(X_diabetes), y_diabetes)
    loose_score = loose.score(filter_(X_diabetes), y_diabetes)
    assert_true(tight_score >= loose_score)
def test_dense_sparse():
    # Run every helper on dense and sparse inputs and compare whatever
    # results they return (helpers returning None are only smoke-tested).
    checks = (_test_ridge_loo,
              _test_ridge_cv,
              _test_ridge_diabetes,
              _test_multi_ridge_diabetes,
              _test_ridge_classifiers,
              _test_tolerance)
    for check in checks:
        dense_result = check(DENSE_FILTER)
        sparse_result = check(SPARSE_FILTER)
        if dense_result is not None and sparse_result is not None:
            assert_array_almost_equal(dense_result, sparse_result, decimal=3)
def test_ridge_cv_sparse_svd():
    """RidgeCV with gcv_mode='svd' must reject sparse input with a TypeError."""
    X = sp.csr_matrix(X_diabetes)
    ridge = RidgeCV(gcv_mode="svd")
    # Pass the target as well: without it the TypeError would come from the
    # missing required ``y`` argument, making the test pass vacuously
    # without ever exercising the sparse/svd incompatibility check.
    assert_raises(TypeError, ridge.fit, X, y_diabetes)
def test_ridge_sparse_svd():
    # The plain SVD solver does not support sparse matrices either.
    X_sparse = sp.csc_matrix(rng.rand(100, 10))
    targets = rng.rand(100)
    ridge = Ridge(solver='svd')
    assert_raises(TypeError, ridge.fit, X_sparse, targets)
def test_class_weights():
    """
    Test class weights.
    """
    # Hand-placed 2-D points: three in class 1, two in class -1; the
    # decision boundary near [0.2, -1.0] is sensitive to class weights.
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                  [1.0, 1.0], [1.0, 0.0]])
    y = [1, 1, 1, -1, -1]
    clf = RidgeClassifier(class_weight=None)
    clf.fit(X, y)
    assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))

    # we give a small weights to class 1
    clf = RidgeClassifier(class_weight={1: 0.001})
    clf.fit(X, y)

    # now the hyperplane should rotate clock-wise and
    # the prediction on this point should shift
    assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))

    # check if class_weight = 'auto' can handle negative labels.
    clf = RidgeClassifier(class_weight='auto')
    clf.fit(X, y)
    assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))

    # class_weight = 'auto', and class_weight = None should return
    # same values when y has equal number of all labels
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
    y = [1, 1, -1, -1]
    clf = RidgeClassifier(class_weight=None)
    clf.fit(X, y)
    clfa = RidgeClassifier(class_weight='auto')
    clfa.fit(X, y)
    assert_equal(len(clfa.classes_), 2)
    assert_array_almost_equal(clf.coef_, clfa.coef_)
    assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weights_cv():
    """
    Test class weights for cross validated ridge classifier.
    """
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                  [1.0, 1.0], [1.0, 0.0]])
    y = [1, 1, 1, -1, -1]
    clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
    clf.fit(X, y)
    # we give a small weights to class 1
    clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
    clf.fit(X, y)
    # down-weighting class 1 flips the prediction at this point
    assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
    """
    Test RidgeCV's store_cv_values attribute.

    With store_cv_values=True the fitted estimator must expose per-sample
    cross-validation values in ``cv_values_``, shaped (n_samples, n_alphas)
    for 1-d targets and (n_samples, n_targets, n_alphas) for 2-d targets.
    """
    # fix the seed for reproducibility (was a duplicated `rng = rng = ...`)
    rng = np.random.RandomState(42)
    n_samples = 8
    n_features = 5
    x = rng.randn(n_samples, n_features)
    alphas = [1e-1, 1e0, 1e1]
    n_alphas = len(alphas)
    r = RidgeCV(alphas=alphas, store_cv_values=True)
    # with len(y.shape) == 1
    y = rng.randn(n_samples)
    r.fit(x, y)
    assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
    # with len(y.shape) == 2
    n_responses = 3
    y = rng.randn(n_samples, n_responses)
    r.fit(x, y)
    assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridge_sample_weights_in_feature_space():
    """Check that Cholesky solver in feature space applies sample_weights
    correctly.

    The weighted dual (sample-space) solution mapped back through X.T must
    agree with the weighted primal (feature-space) solution.
    """
    rng = np.random.RandomState(42)
    # cover under- and over-determined problems, single and multi target
    n_samples_list = [5, 6, 7] * 2
    n_features_list = [7, 6, 5] * 2
    n_targets_list = [1, 1, 1, 2, 2, 2]
    noise = 1.
    alpha = 2.
    alpha = np.atleast_1d(alpha)
    for n_samples, n_features, n_targets in zip(n_samples_list,
                                                n_features_list,
                                                n_targets_list):
        X = rng.randn(n_samples, n_features)
        beta = rng.randn(n_features, n_targets)
        Y = X.dot(beta)
        Y_noisy = Y + rng.randn(*Y.shape) * np.sqrt((Y ** 2).sum(0)) * noise
        K = X.dot(X.T)
        # strictly positive, widely spread sample weights
        sample_weights = 1. + (rng.randn(n_samples) ** 2) * 10
        coef_sample_space = _solve_cholesky_kernel(K, Y_noisy, alpha,
                                                   sample_weight=sample_weights)
        coef_feature_space = _solve_cholesky(X, Y_noisy, alpha,
                                             sample_weight=sample_weights)
        # dual coefficients lifted to feature space must match the primal ones
        assert_array_almost_equal(X.T.dot(coef_sample_space),
                                  coef_feature_space.T)
def test_raises_value_error_if_sample_weights_greater_than_1d():
    """Sample weights must be either scalar or 1D"""
    n_sampless = [2, 3]
    n_featuress = [3, 2]
    rng = np.random.RandomState(42)
    for n_samples, n_features in zip(n_sampless, n_featuress):
        X = rng.randn(n_samples, n_features)
        y = rng.randn(n_samples)
        sample_weights_OK = rng.randn(n_samples) ** 2 + 1
        sample_weights_OK_1 = 1.
        sample_weights_OK_2 = 2.
        # 2-d weights (column vector / row vector) must be rejected
        sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
        sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
        ridge = Ridge(alpha=1)
        # make sure the "OK" sample weights actually work
        ridge.fit(X, y, sample_weights_OK)
        ridge.fit(X, y, sample_weights_OK_1)
        ridge.fit(X, y, sample_weights_OK_2)
        def fit_ridge_not_ok():
            ridge.fit(X, y, sample_weights_not_OK)
        def fit_ridge_not_ok_2():
            ridge.fit(X, y, sample_weights_not_OK_2)
        assert_raise_message(ValueError,
                             "Sample weights must be 1D array or scalar",
                             fit_ridge_not_ok)
        assert_raise_message(ValueError,
                             "Sample weights must be 1D array or scalar",
                             fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
    """Sample weights must work with sparse matrices"""
    n_sampless = [2, 3]
    n_featuress = [3, 2]
    rng = np.random.RandomState(42)
    # every sparse format accepted by Ridge should reproduce the dense result
    sparse_matrix_converters = [sp.coo_matrix,
                                sp.csr_matrix,
                                sp.csc_matrix,
                                sp.lil_matrix,
                                sp.dok_matrix
                                ]
    sparse_ridge = Ridge(alpha=1., fit_intercept=False)
    dense_ridge = Ridge(alpha=1., fit_intercept=False)
    for n_samples, n_features in zip(n_sampless, n_featuress):
        X = rng.randn(n_samples, n_features)
        y = rng.randn(n_samples)
        sample_weights = rng.randn(n_samples) ** 2 + 1
        for sparse_converter in sparse_matrix_converters:
            X_sparse = sparse_converter(X)
            sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
            dense_ridge.fit(X, y, sample_weight=sample_weights)
            # weighted sparse fit must match the weighted dense fit
            assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
                                      decimal=6)
def test_raises_value_error_if_solver_not_supported():
    """ridge_regression must raise ValueError for an unrecognized solver name."""
    bogus_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
    expected_message = "Solver %s not understood" % bogus_solver
    def fit_with_bogus_solver():
        X = np.eye(3)
        y = np.ones(3)
        ridge_regression(X, y, alpha=1., solver=bogus_solver)
    assert_raise_message(ValueError, expected_message, fit_with_bogus_solver)
def test_sparse_cg_max_iter():
    """Ridge(solver='sparse_cg') must still produce a fit when max_iter=1."""
    model = Ridge(solver="sparse_cg", max_iter=1)
    model.fit(X_diabetes, y_diabetes)
    # a coefficient per feature must exist even though CG stopped early
    assert_equal(model.coef_.shape[0], X_diabetes.shape[1])
| bsd-3-clause |
lzamparo/SdA_reduce | utils/lle_neighbours_pipeline.py | 1 | 4247 | """
==========
LLE neighbours parameter CV pipeline
==========
Use a pipeline to find the best neighbourhood size parameter for locally
linear embedding (LLE).
Adapted from:
http://scikit-learn.org/stable/auto_examples/decomposition/plot_kernel_pca.html#example-decomposition-plot-kernel-pca-py
http://scikit-learn.org/stable/auto_examples/grid_search_digits.html#example-grid-search-digits-py
"""
import numpy as np
import pickle
from optparse import OptionParser
from tables import *
from sklearn.manifold import LocallyLinearEmbedding
from sklearn.cluster import KMeans
from sklearn.metrics import v_measure_score, make_scorer, homogeneity_score
from extract_datasets import extract_labeled_chunkrange
from sklearn.preprocessing import scale
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
# Fix the global RNG seed so the stratified sampling below is reproducible.
np.random.seed(0)
# parse commandline arguments
op = OptionParser()
op.add_option("--h5file",
              dest="inputfile", help="Read data input from this hdf5 file.")
op.add_option("--size",
              dest="size", type="int", help="Extract the first size chunks of the data set and labels.")
op.add_option("--sample-size",
              dest="samplesize", type="int", help="The max size of the samples")
op.add_option("--output",
              dest="outfile", help="Write the estimator model to this file.")
op.add_option("--num-jobs",
              dest="jobs", type="int", help="Use these number of jobs in parallel for GridSearchCV")
(opts, args) = op.parse_args()
###############################################################################
# Load a training set from the given .h5 file
datafile = openFile(opts.inputfile, mode = "r", title = "Data is stored here")
# Extract some of the dataset from the datafile
X, labels = extract_labeled_chunkrange(datafile, opts.size)
# Sample from the dataset, stratified by phenotype class.
# Column 0 of labels carries the class id (0, 1, 2 -- judging by the variable
# names: wild type, foci, abnormal nuclei).
wt_points = np.nonzero(labels[:,0] == 0)[0]
foci_points = np.nonzero(labels[:,0] == 1)[0]
ab_nuclei_points = np.nonzero(labels[:,0] == 2)[0]
# columns 0-4 are dropped -- presumably metadata, not features (TODO confirm)
wt_data = X[wt_points,5:]
foci_data = X[foci_points,5:]
ab_nuclei_data = X[ab_nuclei_points,5:]
wt_labels = labels[wt_points,0]
foci_labels = labels[foci_points,0]
ab_nuclei_labels = labels[ab_nuclei_points,0]
# Figure out the sample sizes based on the shape of the *_labels arrays and the
# sample size argument
wt_samplesize = min(opts.samplesize,wt_data.shape[0])
foci_samplesize = min(opts.samplesize,foci_data.shape[0])
ab_nuclei_samplesize = min(opts.samplesize, ab_nuclei_data.shape[0])
# Use np.random.permutation(array)[0:size,:] to sample uniformly at random
# from each stratum.
wt_data_sample = np.random.permutation(wt_data)[0:wt_samplesize,:]
foci_data_sample = np.random.permutation(foci_data)[0:foci_samplesize,:]
ab_nuclei_sample = np.random.permutation(ab_nuclei_data)[0:ab_nuclei_samplesize,:]
# Stack the strata back together and standardize the features.
D = np.vstack((wt_data_sample,foci_data_sample,ab_nuclei_sample))
D_labels = np.hstack((wt_labels[0:wt_samplesize],foci_labels[0:foci_samplesize],ab_nuclei_labels[0:ab_nuclei_samplesize]))
D_scaled = scale(D)
datafile.close()
##################
# Range of parameters to consider for neighbours
neighbours = np.arange(5,50,5)
# Set up the LLE -> k-means pipeline.  The n_neighbors given here is only a
# placeholder: GridSearchCV overrides it via lle__n_neighbors below.
lle = LocallyLinearEmbedding(n_neighbors=5, n_components=30)
kmeans = KMeans(n_clusters=3)
# Make a scoring function for the pipeline.
# NOTE(review): v_measure_scorer is built but never used -- the grid search
# scores with homogeneity only.  Remove it or wire it in.
v_measure_scorer = make_scorer(v_measure_score)
homogeneity_scorer = make_scorer(homogeneity_score)
pipe = Pipeline(steps=[('lle', lle), ('kmeans', kmeans)])
# Set the model parameters to cycle over, using '__' as the step/param separator
estimator = GridSearchCV(pipe, dict(lle__n_neighbors=neighbours), scoring=homogeneity_scorer, n_jobs=opts.jobs)
estimator.fit(D_scaled,D_labels)
# Dump the fitted grid-search estimator to a file for later inspection.
# Use open() in a `with` block rather than the Python-2-only `file()` builtin,
# so the script also runs under Python 3 and the handle is closed on error.
with open(opts.outfile, 'wb') as f:
    pickle.dump(estimator, f)
# Report the best parameter values
print("Best estimator found on test data set:")
print()
print(estimator.best_estimator_)
print()
# fixed typo: "fond" -> "found"
print("Best parameters found on test data set:")
print()
print(estimator.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in estimator.grid_scores_:
    # NOTE(review): the +/- term prints std/2; the sklearn grid-search example
    # this was adapted from prints 2*std -- confirm which interval is intended.
    print("%0.3f (+/-%0.03f) for %r"
          % (mean_score, scores.std() / 2, params))
print()
| bsd-3-clause |
VigneshMohan1/spark-branch-2.3 | python/pyspark/sql/session.py | 21 | 25693 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
from functools import reduce
from threading import RLock
if sys.version >= '3':
basestring = unicode = str
else:
from itertools import imap as map
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.sql.catalog import Catalog
from pyspark.sql.conf import RuntimeConfig
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import Row, DataType, StringType, StructType, _make_type_verifier, \
_infer_schema, _has_nulltype, _merge_type, _create_converter, _parse_datatype_string
from pyspark.sql.utils import install_exception_handler
__all__ = ["SparkSession"]
def _monkey_patch_RDD(sparkSession):
    # Attach a toDF() convenience method to every RDD so users can write
    # rdd.toDF(...) instead of sparkSession.createDataFrame(rdd, ...).
    def toDF(self, schema=None, sampleRatio=None):
        """
        Converts current :class:`RDD` into a :class:`DataFrame`
        This is a shorthand for ``spark.createDataFrame(rdd, schema, sampleRatio)``
        :param schema: a :class:`pyspark.sql.types.StructType` or list of names of columns
        :param sampleRatio: the sample ratio of rows used for inferring
        :return: a DataFrame
        >>> rdd.toDF().collect()
        [Row(name=u'Alice', age=1)]
        """
        return sparkSession.createDataFrame(self, schema, sampleRatio)
    RDD.toDF = toDF
class SparkSession(object):
"""The entry point to programming Spark with the Dataset and DataFrame API.
A SparkSession can be used create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
To create a SparkSession, use the following builder pattern:
>>> spark = SparkSession.builder \\
... .master("local") \\
... .appName("Word Count") \\
... .config("spark.some.config.option", "some-value") \\
... .getOrCreate()
"""
    class Builder(object):
        """Builder for :class:`SparkSession`.
        """
        _lock = RLock()
        # NOTE(review): _options is a *class-level* dict, so every Builder
        # instance shares (and mutates) the same option map under _lock.
        # Confirm this sharing is intended before relying on per-instance
        # isolation of options.
        _options = {}
        @since(2.0)
        def config(self, key=None, value=None, conf=None):
            """Sets a config option. Options set using this method are automatically propagated to
            both :class:`SparkConf` and :class:`SparkSession`'s own configuration.
            For an existing SparkConf, use `conf` parameter.
            >>> from pyspark.conf import SparkConf
            >>> SparkSession.builder.config(conf=SparkConf())
            <pyspark.sql.session...
            For a (key, value) pair, you can omit parameter names.
            >>> SparkSession.builder.config("spark.some.config.option", "some-value")
            <pyspark.sql.session...
            :param key: a key name string for configuration property
            :param value: a value for configuration property
            :param conf: an instance of :class:`SparkConf`
            """
            with self._lock:
                if conf is None:
                    # single (key, value) pair; values are stored as strings
                    self._options[key] = str(value)
                else:
                    # copy every entry from the given SparkConf
                    for (k, v) in conf.getAll():
                        self._options[k] = v
                return self
        @since(2.0)
        def master(self, master):
            """Sets the Spark master URL to connect to, such as "local" to run locally, "local[4]"
            to run locally with 4 cores, or "spark://master:7077" to run on a Spark standalone
            cluster.
            :param master: a url for spark master
            """
            return self.config("spark.master", master)
        @since(2.0)
        def appName(self, name):
            """Sets a name for the application, which will be shown in the Spark web UI.
            If no application name is set, a randomly generated name will be used.
            :param name: an application name
            """
            return self.config("spark.app.name", name)
        @since(2.0)
        def enableHiveSupport(self):
            """Enables Hive support, including connectivity to a persistent Hive metastore, support
            for Hive serdes, and Hive user-defined functions.
            """
            return self.config("spark.sql.catalogImplementation", "hive")
        @since(2.0)
        def getOrCreate(self):
            """Gets an existing :class:`SparkSession` or, if there is no existing one, creates a
            new one based on the options set in this builder.
            This method first checks whether there is a valid global default SparkSession, and if
            yes, return that one. If no valid global default SparkSession exists, the method
            creates a new SparkSession and assigns the newly created SparkSession as the global
            default.
            >>> s1 = SparkSession.builder.config("k1", "v1").getOrCreate()
            >>> s1.conf.get("k1") == s1.sparkContext.getConf().get("k1") == "v1"
            True
            In case an existing SparkSession is returned, the config options specified
            in this builder will be applied to the existing SparkSession.
            >>> s2 = SparkSession.builder.config("k2", "v2").getOrCreate()
            >>> s1.conf.get("k1") == s2.conf.get("k1")
            True
            >>> s1.conf.get("k2") == s2.conf.get("k2")
            True
            """
            with self._lock:
                from pyspark.context import SparkContext
                from pyspark.conf import SparkConf
                session = SparkSession._instantiatedSession
                # reuse the cached global session only if its SparkContext is alive
                if session is None or session._sc._jsc is None:
                    sparkConf = SparkConf()
                    for key, value in self._options.items():
                        sparkConf.set(key, value)
                    sc = SparkContext.getOrCreate(sparkConf)
                    # This SparkContext may be an existing one.
                    for key, value in self._options.items():
                        # we need to propagate the confs
                        # before we create the SparkSession. Otherwise, confs like
                        # warehouse path and metastore url will not be set correctly (
                        # these confs cannot be changed once the SparkSession is created).
                        sc._conf.set(key, value)
                    session = SparkSession(sc)
                # apply the builder's options to the (new or reused) session
                for key, value in self._options.items():
                    session._jsparkSession.sessionState().conf().setConfString(key, value)
                for key, value in self._options.items():
                    session.sparkContext._conf.set(key, value)
                return session
builder = Builder()
_instantiatedSession = None
    @ignore_unicode_prefix
    def __init__(self, sparkContext, jsparkSession=None):
        """Creates a new SparkSession.
        >>> from datetime import datetime
        >>> spark = SparkSession(sc)
        >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
        ...     b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
        ...     time=datetime(2014, 8, 1, 14, 1, 5))])
        >>> df = allTypes.toDF()
        >>> df.createOrReplaceTempView("allTypes")
        >>> spark.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
        ...     'from allTypes where b and i > 0').collect()
        [Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
        dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
        >>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
        [(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
        """
        from pyspark.sql.context import SQLContext
        self._sc = sparkContext
        self._jsc = self._sc._jsc
        self._jvm = self._sc._jvm
        if jsparkSession is None:
            # create a fresh JVM-side session bound to this SparkContext
            jsparkSession = self._jvm.SparkSession(self._jsc.sc())
        self._jsparkSession = jsparkSession
        self._jwrapped = self._jsparkSession.sqlContext()
        self._wrapped = SQLContext(self._sc, self, self._jwrapped)
        # give every RDD a toDF() shorthand bound to this session
        _monkey_patch_RDD(self)
        install_exception_handler()
        # If we had an instantiated SparkSession attached with a SparkContext
        # which is stopped now, we need to renew the instantiated SparkSession.
        # Otherwise, we will use invalid SparkSession when we call Builder.getOrCreate.
        if SparkSession._instantiatedSession is None \
                or SparkSession._instantiatedSession._sc._jsc is None:
            SparkSession._instantiatedSession = self
    def _repr_html_(self):
        # Rich HTML representation used by Jupyter/IPython notebooks.
        return """
            <div>
                <p><b>SparkSession - {catalogImplementation}</b></p>
                {sc_HTML}
            </div>
        """.format(
            catalogImplementation=self.conf.get("spark.sql.catalogImplementation"),
            sc_HTML=self.sparkContext._repr_html_()
        )
    @since(2.0)
    def newSession(self):
        """
        Returns a new SparkSession as new session, that has separate SQLConf,
        registered temporary views and UDFs, but shared SparkContext and
        table cache.
        :return: :class:`SparkSession`
        """
        return self.__class__(self._sc, self._jsparkSession.newSession())
    @property
    @since(2.0)
    def sparkContext(self):
        """Returns the underlying :class:`SparkContext`.
        :return: :class:`SparkContext`
        """
        return self._sc
    @property
    @since(2.0)
    def version(self):
        """The version of Spark on which this application is running.
        :return: version string, as reported by the JVM-side session
        """
        return self._jsparkSession.version()
    @property
    @since(2.0)
    def conf(self):
        """Runtime configuration interface for Spark.
        This is the interface through which the user can get and set all Spark and Hadoop
        configurations that are relevant to Spark SQL. When getting the value of a config,
        this defaults to the value set in the underlying :class:`SparkContext`, if any.
        """
        # created lazily and cached on first access
        if not hasattr(self, "_conf"):
            self._conf = RuntimeConfig(self._jsparkSession.conf())
        return self._conf
    @property
    @since(2.0)
    def catalog(self):
        """Interface through which the user may create, drop, alter or query underlying
        databases, tables, functions etc.
        :return: :class:`Catalog`
        """
        # created lazily and cached on first access
        if not hasattr(self, "_catalog"):
            self._catalog = Catalog(self)
        return self._catalog
    @property
    @since(2.0)
    def udf(self):
        """Returns a :class:`UDFRegistration` for UDF registration.
        :return: :class:`UDFRegistration`
        """
        # note: a fresh UDFRegistration wrapper is built on every access
        from pyspark.sql.context import UDFRegistration
        return UDFRegistration(self._wrapped)
    @since(2.0)
    def range(self, start, end=None, step=1, numPartitions=None):
        """
        Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
        ``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
        step value ``step``.
        :param start: the start value
        :param end: the end value (exclusive)
        :param step: the incremental step (default: 1)
        :param numPartitions: the number of partitions of the DataFrame
        :return: :class:`DataFrame`
        >>> spark.range(1, 7, 2).collect()
        [Row(id=1), Row(id=3), Row(id=5)]
        If only one argument is specified, it will be used as the end value.
        >>> spark.range(3).collect()
        [Row(id=0), Row(id=1), Row(id=2)]
        """
        if numPartitions is None:
            numPartitions = self._sc.defaultParallelism
        if end is None:
            # single-argument form: range(n) means range(0, n)
            jdf = self._jsparkSession.range(0, int(start), int(step), int(numPartitions))
        else:
            jdf = self._jsparkSession.range(int(start), int(end), int(step), int(numPartitions))
        return DataFrame(jdf, self._wrapped)
    def _inferSchemaFromList(self, data):
        """
        Infer schema from list of Row or tuple.
        :param data: list of Row or tuple
        :return: :class:`pyspark.sql.types.StructType`
        :raises ValueError: if the list is empty or some field type stays null
        """
        if not data:
            raise ValueError("can not infer schema from empty dataset")
        first = data[0]
        if type(first) is dict:
            warnings.warn("inferring schema from dict is deprecated,"
                          "please use pyspark.sql.Row instead")
        # infer a schema per row, then merge them into one unified schema
        schema = reduce(_merge_type, map(_infer_schema, data))
        if _has_nulltype(schema):
            raise ValueError("Some of types cannot be determined after inferring")
        return schema
    def _inferSchema(self, rdd, samplingRatio=None):
        """
        Infer schema from an RDD of Row or tuple.
        :param rdd: an RDD of Row or tuple
        :param samplingRatio: sampling ratio, or no sampling (default)
        :return: :class:`pyspark.sql.types.StructType`
        :raises ValueError: if the RDD is empty or field types stay undetermined
        """
        first = rdd.first()
        if not first:
            raise ValueError("The first row in RDD is empty, "
                             "can not infer schema")
        if type(first) is dict:
            warnings.warn("Using RDD of dict to inferSchema is deprecated. "
                          "Use pyspark.sql.Row instead")
        if samplingRatio is None:
            # no sampling: start from the first row and merge in up to 99 more
            # rows only while some field type is still null/undetermined
            schema = _infer_schema(first)
            if _has_nulltype(schema):
                for row in rdd.take(100)[1:]:
                    schema = _merge_type(schema, _infer_schema(row))
                    if not _has_nulltype(schema):
                        break
                else:
                    raise ValueError("Some of types cannot be determined by the "
                                     "first 100 rows, please try again with sampling")
        else:
            # sampling path: scan a (possibly down-sampled) pass over the RDD
            if samplingRatio < 0.99:
                rdd = rdd.sample(False, float(samplingRatio))
            schema = rdd.map(_infer_schema).reduce(_merge_type)
        return schema
    def _createFromRDD(self, rdd, schema, samplingRatio):
        """
        Create an RDD for DataFrame from an existing RDD, returns the RDD and schema.
        :param rdd: source RDD of records
        :param schema: None, a list/tuple of column names, or a StructType
        :param samplingRatio: passed through to schema inference
        :return: (RDD of internal rows, StructType)
        """
        if schema is None or isinstance(schema, (list, tuple)):
            # infer the field types, then overwrite the field names if the
            # caller supplied a list/tuple of column names
            struct = self._inferSchema(rdd, samplingRatio)
            converter = _create_converter(struct)
            rdd = rdd.map(converter)
            if isinstance(schema, (list, tuple)):
                for i, name in enumerate(schema):
                    struct.fields[i].name = name
                    struct.names[i] = name
            schema = struct
        elif not isinstance(schema, StructType):
            raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
        # convert python objects to sql data
        rdd = rdd.map(schema.toInternal)
        return rdd, schema
    def _createFromLocal(self, data, schema):
        """
        Create an RDD for DataFrame from a list or pandas.DataFrame, returns
        the RDD and schema.
        :param data: local collection of records (any iterable)
        :param schema: None, a list/tuple of column names, or a StructType
        :return: (RDD of internal rows, StructType)
        """
        # make sure data could consumed multiple times
        if not isinstance(data, list):
            data = list(data)
        if schema is None or isinstance(schema, (list, tuple)):
            # infer field types locally, then apply caller-supplied column names
            struct = self._inferSchemaFromList(data)
            converter = _create_converter(struct)
            data = map(converter, data)
            if isinstance(schema, (list, tuple)):
                for i, name in enumerate(schema):
                    struct.fields[i].name = name
                    struct.names[i] = name
            schema = struct
        elif not isinstance(schema, StructType):
            raise TypeError("schema should be StructType or list or None, but got: %s" % schema)
        # convert python objects to sql data
        data = [schema.toInternal(row) for row in data]
        return self._sc.parallelize(data), schema
    @since(2.0)
    @ignore_unicode_prefix
    def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
        """
        Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
        When ``schema`` is a list of column names, the type of each column
        will be inferred from ``data``.
        When ``schema`` is ``None``, it will try to infer the schema (column names and types)
        from ``data``, which should be an RDD of :class:`Row`,
        or :class:`namedtuple`, or :class:`dict`.
        When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string, it must match
        the real data, or an exception will be thrown at runtime. If the given schema is not
        :class:`pyspark.sql.types.StructType`, it will be wrapped into a
        :class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
        each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determined the ratio of
        rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
        :param data: an RDD of any kind of SQL data representation(e.g. row, tuple, int, boolean,
            etc.), or :class:`list`, or :class:`pandas.DataFrame`.
        :param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
            column names, default is ``None``.  The data type string format equals to
            :class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
            omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
            ``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`. We can also use
            ``int`` as a short name for ``IntegerType``.
        :param samplingRatio: the sample ratio of rows used for inferring
        :param verifySchema: verify data types of every row against schema.
        :return: :class:`DataFrame`
        .. versionchanged:: 2.1
           Added verifySchema.
        >>> l = [('Alice', 1)]
        >>> spark.createDataFrame(l).collect()
        [Row(_1=u'Alice', _2=1)]
        >>> spark.createDataFrame(l, ['name', 'age']).collect()
        [Row(name=u'Alice', age=1)]
        >>> d = [{'name': 'Alice', 'age': 1}]
        >>> spark.createDataFrame(d).collect()
        [Row(age=1, name=u'Alice')]
        >>> rdd = sc.parallelize(l)
        >>> spark.createDataFrame(rdd).collect()
        [Row(_1=u'Alice', _2=1)]
        >>> df = spark.createDataFrame(rdd, ['name', 'age'])
        >>> df.collect()
        [Row(name=u'Alice', age=1)]
        >>> from pyspark.sql import Row
        >>> Person = Row('name', 'age')
        >>> person = rdd.map(lambda r: Person(*r))
        >>> df2 = spark.createDataFrame(person)
        >>> df2.collect()
        [Row(name=u'Alice', age=1)]
        >>> from pyspark.sql.types import *
        >>> schema = StructType([
        ...    StructField("name", StringType(), True),
        ...    StructField("age", IntegerType(), True)])
        >>> df3 = spark.createDataFrame(rdd, schema)
        >>> df3.collect()
        [Row(name=u'Alice', age=1)]
        >>> spark.createDataFrame(df.toPandas()).collect()  # doctest: +SKIP
        [Row(name=u'Alice', age=1)]
        >>> spark.createDataFrame(pandas.DataFrame([[1, 2]])).collect()  # doctest: +SKIP
        [Row(0=1, 1=2)]
        >>> spark.createDataFrame(rdd, "a: string, b: int").collect()
        [Row(a=u'Alice', b=1)]
        >>> rdd = rdd.map(lambda row: row[1])
        >>> spark.createDataFrame(rdd, "int").collect()
        [Row(value=1)]
        >>> spark.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
            ...
        Py4JJavaError: ...
        """
        if isinstance(data, DataFrame):
            raise TypeError("data is already a DataFrame")
        if isinstance(schema, basestring):
            # DDL-formatted schema string, e.g. "a: string, b: int"
            schema = _parse_datatype_string(schema)
        try:
            import pandas
            has_pandas = True
        except Exception:
            has_pandas = False
        if has_pandas and isinstance(data, pandas.DataFrame):
            # convert a pandas DataFrame into a list of record lists,
            # defaulting the column names from the frame itself
            if schema is None:
                schema = [str(x) for x in data.columns]
            data = [r.tolist() for r in data.to_records(index=False)]
        if isinstance(schema, StructType):
            verify_func = _make_type_verifier(schema) if verifySchema else lambda _: True
            def prepare(obj):
                verify_func(obj)
                return obj
        elif isinstance(schema, DataType):
            # wrap an atomic type into a one-field struct named "value"
            dataType = schema
            schema = StructType().add("value", schema)
            verify_func = _make_type_verifier(
                dataType, name="field value") if verifySchema else lambda _: True
            def prepare(obj):
                verify_func(obj)
                # wrap each record in a 1-tuple so it converts like a row
                return obj,
        else:
            if isinstance(schema, list):
                schema = [x.encode('utf-8') if not isinstance(x, str) else x for x in schema]
            prepare = lambda obj: obj
        if isinstance(data, RDD):
            rdd, schema = self._createFromRDD(data.map(prepare), schema, samplingRatio)
        else:
            rdd, schema = self._createFromLocal(map(prepare, data), schema)
        # hand the python rows to the JVM and apply the schema there
        jrdd = self._jvm.SerDeUtil.toJavaArray(rdd._to_java_object_rdd())
        jdf = self._jsparkSession.applySchemaToPythonRDD(jrdd.rdd(), schema.json())
        df = DataFrame(jdf, self._wrapped)
        df._schema = schema
        return df
    @ignore_unicode_prefix
    @since(2.0)
    def sql(self, sqlQuery):
        """Returns a :class:`DataFrame` representing the result of the given query.
        :param sqlQuery: a SQL query string
        :return: :class:`DataFrame`
        >>> df.createOrReplaceTempView("table1")
        >>> df2 = spark.sql("SELECT field1 AS f1, field2 as f2 from table1")
        >>> df2.collect()
        [Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
        """
        return DataFrame(self._jsparkSession.sql(sqlQuery), self._wrapped)
    @since(2.0)
    def table(self, tableName):
        """Returns the specified table as a :class:`DataFrame`.
        :param tableName: name of the table or temporary view
        :return: :class:`DataFrame`
        >>> df.createOrReplaceTempView("table1")
        >>> df2 = spark.table("table1")
        >>> sorted(df.collect()) == sorted(df2.collect())
        True
        """
        return DataFrame(self._jsparkSession.table(tableName), self._wrapped)
    @property
    @since(2.0)
    def read(self):
        """
        Returns a :class:`DataFrameReader` that can be used to read data
        in as a :class:`DataFrame`.
        :return: :class:`DataFrameReader`
        """
        # note: a fresh reader is returned on every access
        return DataFrameReader(self._wrapped)
    @property
    @since(2.0)
    def readStream(self):
        """
        Returns a :class:`DataStreamReader` that can be used to read data streams
        as a streaming :class:`DataFrame`.
        .. note:: Evolving.
        :return: :class:`DataStreamReader`
        """
        # note: a fresh reader is returned on every access
        return DataStreamReader(self._wrapped)
    @property
    @since(2.0)
    def streams(self):
        """Returns a :class:`StreamingQueryManager` that allows managing all the
        :class:`StreamingQuery` StreamingQueries active on `this` context.
        .. note:: Evolving.
        :return: :class:`StreamingQueryManager`
        """
        from pyspark.sql.streaming import StreamingQueryManager
        return StreamingQueryManager(self._jsparkSession.streams())
    @since(2.0)
    def stop(self):
        """Stop the underlying :class:`SparkContext`.
        """
        self._sc.stop()
        # clear the cached global session so Builder.getOrCreate builds a new one
        SparkSession._instantiatedSession = None
    @since(2.0)
    def __enter__(self):
        """
        Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
        :return: this :class:`SparkSession`
        """
        return self
    @since(2.0)
    def __exit__(self, exc_type, exc_val, exc_tb):
        """
        Enable 'with SparkSession.builder.(...).getOrCreate() as session: app' syntax.
        Specifically stop the SparkSession on exit of the with block.
        Exceptions are not suppressed (nothing truthy is returned).
        """
        self.stop()
def _test():
    """Run this module's doctests against a local 4-core SparkContext."""
    import os
    import sys
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row
    import pyspark.sql.session
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.sql.session.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    # fixtures referenced by the doctests in this module
    globs['sc'] = sc
    globs['spark'] = SparkSession(sc)
    globs['rdd'] = rdd = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")])
    globs['df'] = rdd.toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.session, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    globs['sc'].stop()
    if failure_count:
        # use sys.exit rather than the site-module exit(), which is not
        # guaranteed to exist (e.g. under `python -S`)
        sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
AllenDowney/ThinkStats2 | code/chap13soln.py | 68 | 2961 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import thinkplot
import thinkstats2
import survival
def CleanData(resp):
    """Cleans respondent data in place.

    Adds notdivorced, duration, durationsofar and decade columns.

    resp: DataFrame of respondents
    """
    # codes 9998/9999 mark divorce dates that were not ascertained
    resp.cmdivorcx.replace([9998, 9999], np.nan, inplace=True)

    # 1 if the marriage is still intact (no divorce date), else 0
    resp['notdivorced'] = resp.cmdivorcx.isnull().astype(int)

    # durations in years: completed (to divorce) and censored (to interview)
    resp['duration'] = (resp.cmdivorcx - resp.cmmarrhx) / 12.0
    resp['durationsofar'] = (resp.cmintvw - resp.cmmarrhx) / 12.0

    # cm* fields count months since December 1899; map birth month to a
    # decade-of-birth index (0 = 1900s, 1 = 1910s, ...)
    epoch = pandas.to_datetime('1899-12-15')
    birth_dates = [epoch + pandas.DateOffset(months=cm)
                   for cm in resp.cmbirth]
    resp['decade'] = (pandas.DatetimeIndex(birth_dates).year - 1900) // 10
def ResampleDivorceCurve(resps):
    """Plots divorce curves based on resampled data.
    resps: list of respondent DataFrames
    """
    # NOTE(review): PlotDivorceCurveByDecade is not defined in this module and
    # is not imported, so calling this function raises NameError.  Confirm
    # whether it should be survival.PlotDivorceCurveByDecade or a local helper.
    for _ in range(41):
        # draw a weighted bootstrap sample from each cohort and pool them
        samples = [thinkstats2.ResampleRowsWeighted(resp)
                   for resp in resps]
        sample = pandas.concat(samples, ignore_index=True)
        PlotDivorceCurveByDecade(sample, color='#225EA8', alpha=0.1)
    thinkplot.Show(xlabel='years',
                   axis=[0, 28, 0, 1])
def ResampleDivorceCurveByDecade(resps):
    """Plots divorce curves for each birth cohort.
    resps: list of respondent DataFrames
    """
    # 41 resampling iterations, overplotted with low alpha to show variability
    for i in range(41):
        samples = [thinkstats2.ResampleRowsWeighted(resp)
                   for resp in resps]
        sample = pandas.concat(samples, ignore_index=True)
        groups = sample.groupby('decade')
        # add the legend labels only once, on the first iteration
        if i == 0:
            survival.AddLabelsByDecade(groups, alpha=0.7)
        EstimateSurvivalByDecade(groups, alpha=0.1)
    thinkplot.Save(root='survival7',
                   xlabel='years',
                   axis=[0, 28, 0, 1])
def EstimateSurvivalByDecade(groups, **options):
    """Groups respondents by decade and plots survival curves.
    groups: GroupBy object
    options: keyword args passed through to thinkplot.Plot
    """
    thinkplot.PrePlot(len(groups))
    for name, group in groups:
        print(name, len(group))
        # plot only the survival function; the hazard function is discarded
        _, sf = EstimateSurvival(group)
        thinkplot.Plot(sf, **options)
def EstimateSurvival(resp):
    """Estimates the survival curve.
    resp: DataFrame of respondents
    returns: pair of HazardFunction, SurvivalFunction
    """
    # completed durations: marriages that ended in divorce;
    # ongoing durations: still-married cases, censored at interview time
    complete = resp[resp.notdivorced == 0].duration
    ongoing = resp[resp.notdivorced == 1].durationsofar
    hf = survival.EstimateHazardFunction(complete, ongoing)
    sf = hf.MakeSurvival()
    return hf, sf
def main():
    """Loads both NSFG cycles, cleans them and plots divorce curves."""
    # 2002 NSFG cycle: keep only ever-married respondents
    resp6 = survival.ReadFemResp2002()
    CleanData(resp6)
    married6 = resp6[resp6.evrmarry==1]
    # 2006-2010 NSFG cycle: same filtering
    resp7 = survival.ReadFemResp2010()
    CleanData(resp7)
    married7 = resp7[resp7.evrmarry==1]
    ResampleDivorceCurveByDecade([married6, married7])
if __name__ == '__main__':
    main()
| gpl-3.0 |
printedheart/opennars | nars_gui/src/main/python/nef_minimal/nef_minimal.py | 1 | 10003 | # A Minimal Example of the Neural Engineering Framework
#
# The NEF is a method for building large-scale neural models using realistic
# neurons. It is a neural compiler: you specify the high-level computations
# the model needs to compute, and the properties of the neurons themselves,
# and the NEF determines the neural connections needed to perform those
# operations.
#
# The standard software for building NEF models is Nengo (http://nengo.ca).
# Nengo is a cross-platform Java application that provides both a drag-and-drop
# graphical user environment and a Python scripting interface for
# creating these neural models. It has been used to model a wide variety of
# behaviour, including motor control, visual attention, serial recall, action
# selection, working memory, attractor networks, inductive reasoning, path
# integration, and planning with problem solving.
#
# However, given the complexity of Nengo, and due to the fact that this is a
# fairly non-standard approach to neural modelling, we feel it is also useful
# to have a simple example that shows exactly how the NEF works, from
# beginning to end. That is the goal of this script.
#
# This script shows how to build a simple feed-forward network of leaky
# integrate-and-fire neurons where each population encodes a one-dimensional
# value and the connection weights between the populations are optimized to
# compute some arbitrary function. This same approach is used in Nengo,
# extended to multi-dimensional representation, multiple populations of
# neurons, and recurrent connections.
#
# To change the input to the system, change 'input'
# To change the function computed by the weights, change 'function'
#
# The size of the populations and their neural properties can also be adjusted
# by changing the parameters below.
#
# This script requires Python (http://www.python.org/) and Numpy
# (http://numpy.scipy.org/) to run, and Matplotlib (http://matplotlib.org/) to
# produce the output graphs.
#
# For more information on the Neural Engineering Framework and the Nengo
# software, please see http://nengo.ca
import random
import math
#################################################
# Parameters
#################################################
dt = 0.001 # simulation time step (seconds)
t_rc = 0.02 # membrane RC time constant
t_ref = 0.002 # refractory period
t_pstc = 0.1 # post-synaptic time constant
N_A = 50 # number of neurons in first population
N_B = 40 # number of neurons in second population
N_samples = 100 # number of sample points to use when finding decoders
rate_A = 25, 75 # range of maximum firing rates for population A
rate_B = 50, 100 # range of maximum firing rates for population B
# the input to the system over time
# NOTE: intentionally shadows the builtin input(); the builtin is not
# used anywhere in this script
def input(t):
    """Return the scalar input signal at time t (seconds)."""
    return math.sin(t)
# the function to compute between A and B
def function(x):
    """The function the A->B connection weights are optimized to compute."""
    return x*x
#################################################
# Step 1: Initialization
#################################################
# create random encoders for the two populations
# (each neuron's preferred direction is +1 or -1 on the 1-D axis)
encoder_A = [random.choice([-1,1]) for i in range(N_A)]
encoder_B = [random.choice([-1,1]) for i in range(N_B)]
def generate_gain_and_bias(count, intercept_low, intercept_high, rate_low, rate_high):
    """Pick random LIF gain/bias pairs producing the desired tuning.

    For each of `count` neurons, draws a random intercept (the x value at
    which the neuron starts firing) and a random maximum firing rate, then
    solves the LIF response equation for the gain and bias that realize
    them.

    returns: (gain, bias) as two parallel lists of length `count`
    """
    gains = []
    biases = []
    for _ in range(count):
        # desired intercept (x value at which the neuron starts firing)
        intercept = random.uniform(intercept_low, intercept_high)
        # desired maximum rate (firing rate when x is maximum)
        rate = random.uniform(rate_low, rate_high)
        # invert the LIF rate equation (specific to LIF neurons): find
        # the normalized current z at the maximum rate, then fit the
        # line through (intercept, threshold) with that slope
        z = 1.0 / (1 - math.exp((t_ref - (1.0 / rate)) / t_rc))
        slope = (1 - z) / (intercept - 1.0)
        gains.append(slope)
        biases.append(1 - slope * intercept)
    return gains, biases
# random gain and bias for the two populations
gain_A, bias_A = generate_gain_and_bias(N_A, -1, 1, rate_A[0], rate_A[1])
gain_B, bias_B = generate_gain_and_bias(N_B, -1, 1, rate_B[0], rate_B[1])
# a simple leaky integrate-and-fire model, scaled so that v=0 is resting
# voltage and v=1 is the firing threshold
def run_neurons(input, v, ref):
    """Advance a population of LIF neurons by one time step.

    input: list of input currents, one per neuron
    v: list of membrane voltages, modified in place (0=rest, 1=threshold)
    ref: list of remaining refractory times, modified in place

    returns: list of booleans, True where the neuron spiked this step
    """
    spikes = []
    for i, current in enumerate(input):
        # leaky integration of the input current (LIF voltage equation)
        v[i] += dt * (current - v[i]) / t_rc
        if v[i] < 0:
            v[i] = 0  # voltage cannot go below resting level
        if ref[i] > 0:
            # in the refractory period: clamp to rest and count down
            v[i] = 0
            ref[i] -= dt
        if v[i] > 1:
            # threshold crossed: spike, reset, start refractory period
            spikes.append(True)
            v[i] = 0
            ref[i] = t_ref
        else:
            spikes.append(False)
    return spikes
# measure the spike rate of a whole population for a given represented value x
def compute_response(x, encoder, gain, bias, time_limit=0.5):
    """Measure each neuron's firing rate (Hz) for a represented value x.

    Drives the population with the current corresponding to x for
    `time_limit` seconds of simulated time and counts spikes.
    """
    n_neurons = len(encoder)
    voltage = [0] * n_neurons
    refractory = [0] * n_neurons
    # input current for each neuron given the represented value x;
    # start each neuron at a random voltage so phases are decorrelated
    current = []
    for i in range(n_neurons):
        current.append(x * encoder[i] * gain[i] + bias[i])
        voltage[i] = random.uniform(0, 1)
    spike_count = [0] * n_neurons
    # simulate for time_limit seconds, accumulating spike counts
    elapsed = 0
    while elapsed < time_limit:
        fired = run_neurons(current, voltage, refractory)
        for i, did_spike in enumerate(fired):
            if did_spike:
                spike_count[i] += 1
        elapsed += dt
    # convert counts to rates in Hz
    return [c / time_limit for c in spike_count]
# compute the tuning curves for a population
def compute_tuning_curves(encoder, gain, bias):
    """Sample each neuron's firing rate across the represented range.

    returns: (x_values, A) where A[j][i] is neuron i's measured rate
    at x_values[j]; x values are evenly spaced in [-1, 1)
    """
    x_values = [i * 2.0 / N_samples - 1.0 for i in range(N_samples)]
    # one row of neural responses per sample point (the tuning curves)
    A = [compute_response(x, encoder, gain, bias) for x in x_values]
    return x_values, A
# compute decoders
import numpy
def compute_decoder(encoder, gain, bias, function=lambda x:x):
    """Find the optimal linear decoder for the given population.

    Solves the least-squares problem of decoding function(x) from the
    population's sampled tuning curves, using a pseudo-inverse for
    numerical robustness; the result is divided by dt.

    returns: decoder as an (N, 1) numpy array
    """
    # sample the tuning curves
    x_values, activities = compute_tuning_curves(encoder, gain, bias)
    # the desired decoded value at each sample point
    target = numpy.array([[function(x)] for x in x_values])
    # least-squares solution: decoder = pinv(A A^T) (A target) / dt
    A = numpy.array(activities).T
    gamma = numpy.dot(A, A.T)
    upsilon = numpy.dot(A, target)
    return numpy.dot(numpy.linalg.pinv(gamma), upsilon) / dt
# find the decoders for A and B
# (A decodes function(x); B decodes the identity, i.e. its own value)
decoder_A=compute_decoder(encoder_A, gain_A, bias_A, function=function)
decoder_B=compute_decoder(encoder_B, gain_B, bias_B)
# compute the weight matrix
# outer product: (N_A x 1) dot (1 x N_B) -> full (N_A x N_B) connection matrix
weights=numpy.dot(decoder_A, [encoder_B])
#################################################
# Step 2: Running the simulation
#################################################
v_A = [0.0]*N_A # voltage for population A
ref_A = [0.0]*N_A # refractory period for population A
input_A = [0.0]*N_A # input for population A
v_B = [0.0]*N_B # voltage for population B
ref_B = [0.0]*N_B # refractory period for population B
input_B = [0.0]*N_B # input for population B
# scaling factor for the post-synaptic filter (exponential decay per step)
pstc_scale=1.0-math.exp(-dt/t_pstc)
# for storing simulation data to plot afterward
inputs=[]
times=[]
outputs=[]
ideal=[]
output=0.0 # the decoded output value from population B
# main simulation loop: 10 seconds of simulated time in steps of dt
t=0
while t<10.0:
    # call the input function to determine the input value
    x=input(t)
    # convert the input value into an input current for each neuron
    for i in range(N_A):
        input_A[i]=x*encoder_A[i]*gain_A[i]+bias_A[i]
    # run population A and determine which neurons spike
    spikes_A=run_neurons(input_A, v_A, ref_A)
    # decay all of the inputs (implementing the post-synaptic filter)
    for j in range(N_B):
        input_B[j]*=(1.0-pstc_scale)
    # for each neuron that spikes, increase the input current
    # of all the neurons it is connected to by the synaptic
    # connection weight
    for i,s in enumerate(spikes_A):
        if s:
            for j in range(N_B):
                input_B[j]+=weights[i][j]*pstc_scale
    # compute the total input into each neuron in population B
    # (taking into account gain and bias)
    total_B=[0]*N_B
    for j in range(N_B):
        total_B[j]=gain_B[j]*input_B[j]+bias_B[j]
    # run population B and determine which neurons spike
    spikes_B=run_neurons(total_B, v_B, ref_B)
    # for each neuron in B that spikes, update our decoded value
    # (also applying the same post-synaptic filter)
    output*=(1.0-pstc_scale)
    for j,s in enumerate(spikes_B):
        if s:
            output+=decoder_B[j][0]*pstc_scale
    # progress output; was the Python-2-only statement "print t, output",
    # which made the otherwise version-neutral script fail under Python 3
    print(t, output)
    times.append(t)
    inputs.append(x)
    outputs.append(output)
    ideal.append(function(x))
    t+=dt
#################################################
# Step 3: Plot the results
#################################################
# re-measure the tuning curves for display (uses fresh random voltages)
x,A = compute_tuning_curves(encoder_A, gain_A, bias_A)
x,B = compute_tuning_curves(encoder_B, gain_B, bias_B)
import pylab
pylab.figure()
pylab.plot(x, A)
pylab.title('Tuning curves for population A')
pylab.figure()
pylab.plot(x, B)
pylab.title('Tuning curves for population B')
# input signal, ideal function(x) and the decoded output over time
pylab.figure()
pylab.plot(times, inputs, label='input')
pylab.plot(times, ideal, label='ideal')
pylab.plot(times, outputs, label='output')
pylab.title('Simulation results')
pylab.legend()
pylab.show()
| agpl-3.0 |
vishank94/vishank94.github.io | markdown_generator/publications.py | 197 | 3887 |
# coding: utf-8
# # Publications markdown generator for academicpages
#
# Takes a TSV of publications with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook, with the core python code in publications.py. Run either from the `markdown_generator` folder after replacing `publications.tsv` with one that fits your format.
#
# TODO: Make this work with BibTex and other databases of citations, rather than Stuart's non-standard TSV format and citation style.
#
# ## Data format
#
# The TSV needs to have the following columns: pub_date, title, venue, excerpt, citation, site_url, and paper_url, with a header at the top.
#
# - `excerpt` and `paper_url` can be blank, but the others must have values.
# - `pub_date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper. The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/publications/YYYY-MM-DD-[url_slug]`
# ## Import pandas
#
# We are using the very handy pandas library for dataframes.
# In[2]:
import pandas as pd
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
publications = pd.read_csv("publications.tsv", sep="\t", header=0)
# bare expression: a notebook-style display cell; has no effect as a script
publications
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivilents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
# Map of YAML/HTML-special characters to their entities. As written in
# the source, every character mapped to itself (the entity text had been
# lost), which made html_escape a no-op; restore the real entities.
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;",
}
def html_escape(text):
    """Produce entities within text so it is safe in YAML front matter."""
    # single per-character pass, so replacement order cannot double-escape
    return "".join(html_escape_table.get(c, c) for c in text)
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatentate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page. If you don't want something to appear (like the "Recommended citation")
# In[5]:
import os
# one markdown file per publication row, written into ../_publications/
for row, item in publications.iterrows():
    md_filename = str(item.pub_date) + "-" + item.url_slug + ".md"
    html_filename = str(item.pub_date) + "-" + item.url_slug
    # NOTE: year is computed but never used below
    year = item.pub_date[:4]
    ## YAML variables
    md = "---\ntitle: \"" + item.title + '"\n'
    md += """collection: publications"""
    md += """\npermalink: /publication/""" + html_filename
    # len(str(...)) > 5 doubles as a NaN guard: missing cells become the
    # 3-character string 'nan' and are skipped
    if len(str(item.excerpt)) > 5:
        md += "\nexcerpt: '" + html_escape(item.excerpt) + "'"
    md += "\ndate: " + str(item.pub_date)
    md += "\nvenue: '" + html_escape(item.venue) + "'"
    if len(str(item.paper_url)) > 5:
        md += "\npaperurl: '" + item.paper_url + "'"
    md += "\ncitation: '" + html_escape(item.citation) + "'"
    md += "\n---"
    ## Markdown description for individual page
    if len(str(item.paper_url)) > 5:
        md += "\n\n<a href='" + item.paper_url + "'>Download paper here</a>\n"
    if len(str(item.excerpt)) > 5:
        md += "\n" + html_escape(item.excerpt) + "\n"
    md += "\nRecommended citation: " + item.citation
    md_filename = os.path.basename(md_filename)
    with open("../_publications/" + md_filename, 'w') as f:
        f.write(md)
| mit |
yyjiang/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three examplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as the weights of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
# three base classifiers plus a soft-voting ensemble that weights the
# random forest 5x as heavily as the other two
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
                        voting='soft',
                        weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
# (the ensemble's slot is zero-padded here and drawn separately below so
# it can be given its own colors)
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
# dashed line separates the base classifiers from the ensemble
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
                    'GaussianNB\nweight 1',
                    'RandomForestClassifier\nweight 5',
                    'VotingClassifier\n(average probabilities)'],
                   rotation=40,
                   ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
rtrwalker/geotecha | examples/speccon/speccon1d_vr_mimic_terzaghi_with_pumping_at_mid_depth.py | 1 | 10319 | # speccon1d_vr example (if viewing this in docs, plots are at bottom of page)
# Mimic Terzaghi pervious top pervious bottom 1D vertical consolidation
# by specifying a time dependent pumping at mid-depth.
# A surcharge of 100kPa is applied. At mid depth a pumping vleocity is
# specified to keep the pore pressure zero. With PTPB drainage
# conditions and 'zero' pore pressure at mid-depth we expect 4 instances of
# terzaghi 1d PTIB isochrones (with drainage path equal to 1/4 H) stacked on
# top of each other. The pumping is draining the middle two of the four 1/4 H
# sections so the pumping velocity should be half the consolidation rate of
# terzaghi 1d consolidation (with drainage path H=1).
# This file should be run with python. It will not work if run with the
# speccon1d_vr.exe script program.
from __future__ import division, print_function
import numpy as np
from geotecha.speccon.speccon1d_vr import Speccon1dVR
import matplotlib.pyplot as plt
# TERZ1D_T = time values
# TERZ1D_AVP = Terzaghi 1d average excess pore pressures corresponding to
# times in TERZ1D_T. Properties are: cv=1, H=1, u0=1
# TERZ1D_Z = z/H values
# TERZ1D_POR = excess p.pressure values at depths TERZ1D_Z and times TERZ1D_T
TERZ1D_T = np.array([0.008, 0.018, 0.031, 0.049, 0.071, 0.096, 0.126,
0.159, 0.197, 0.239, 0.286, 0.34, 0.403, 0.477, 0.567,
0.684, 0.848, 1.129, 1.781])
TERZ1D_AVP = np.array(
[[ 0.8990747 , 0.84861205, 0.80132835, 0.75022262, 0.69933407,
0.65038539, 0.59948052, 0.55017049, 0.49966188, 0.44989787,
0.40039553, 0.35035814, 0.2998893 , 0.24983377, 0.20008097,
0.14990996, 0.10002108, 0.05000091, 0.01000711]])
TERZ1D_Z = np.array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6,
0.7, 0.8, 0.9, 1. ])
TERZ1D_POR = np.array(
[[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ],
[ 0.5708047 , 0.40183855, 0.31202868, 0.25060581, 0.209277 ,
0.18051017, 0.15777238, 0.1401947 , 0.12492869, 0.11139703,
0.09868545, 0.08618205, 0.07371295, 0.0613951 , 0.04916581,
0.03683692, 0.02457785, 0.01228656, 0.00245901],
[ 0.8861537 , 0.70815945, 0.57815202, 0.47709676, 0.40440265,
0.35188372, 0.30934721, 0.27584089, 0.24631156, 0.21986645,
0.19487593, 0.17022241, 0.145606 , 0.12127752, 0.09712088,
0.07276678, 0.04855051, 0.02427058, 0.00485748],
[ 0.98229393, 0.8861537 , 0.77173068, 0.66209592, 0.57402972,
0.50633278, 0.44919934, 0.40274312, 0.36079264, 0.322593 ,
0.28615206, 0.25003642, 0.21390512, 0.17817202, 0.14268427,
0.10690487, 0.07132769, 0.03565698, 0.00713634],
[ 0.9984346 , 0.96498502, 0.89182244, 0.79866319, 0.71151086,
0.63842889, 0.57300943, 0.5173496 , 0.46536864, 0.41697458,
0.37024076, 0.32365106, 0.27692667, 0.23067729, 0.18473404,
0.13841059, 0.09234855, 0.04616539, 0.00923947],
[ 0.99992277, 0.99159201, 0.95536184, 0.8897753 , 0.81537699,
0.74554825, 0.67795464, 0.61693194, 0.55750293, 0.50070214,
0.44507671, 0.38925529, 0.33311924, 0.27750057, 0.22223477,
0.16650815, 0.11109548, 0.05553705, 0.0111151 ],
[ 0.9999979 , 0.9984346 , 0.9840325 , 0.94470726, 0.88846498,
0.82769841, 0.76271322, 0.69962982, 0.63517948, 0.57181325,
0.50885214, 0.44524423, 0.38110176, 0.3174894 , 0.25426314,
0.19050572, 0.12710688, 0.0635412 , 0.01271704],
[ 0.99999997, 0.99977515, 0.99506515, 0.97461982, 0.93621426,
0.88684221, 0.82720628, 0.76436582, 0.69689722, 0.62871883,
0.5600537 , 0.49025645, 0.41969701, 0.34965996, 0.28003063,
0.2098124 , 0.13998847, 0.06998076, 0.01400585],
[ 1. , 0.99997517, 0.99868444, 0.9892702 , 0.96479424,
0.92594095, 0.87215551, 0.81066205, 0.74161724, 0.67020692,
0.59748729, 0.52320368, 0.44795959, 0.37322105, 0.29890288,
0.2239528 , 0.14942309, 0.07469716, 0.01494978],
[ 1. , 0.99999789, 0.99968908, 0.99551731, 0.97956541,
0.94796078, 0.89856843, 0.83840947, 0.76868357, 0.69543129,
0.62029292, 0.54329327, 0.46519818, 0.3875934 , 0.31041531,
0.23257876, 0.15517842, 0.07757427, 0.0155256 ],
[ 1. , 0.99999973, 0.99988166, 0.9971974 , 0.98407824,
0.95504225, 0.90726835, 0.8476479 , 0.77774256, 0.70389411,
0.62795246, 0.55004364, 0.47099154, 0.39242376, 0.31428453,
0.23547787, 0.15711273, 0.07854125, 0.01571913]])
# flow_t = times values for time-dependent bottom gradient boundary
# flow_v = velocity values for time-dependent bottom gradient boundary
flow_t = np.array([ 0, 0.00000000e+00, 1.00000000e-05, 1.32571137e-05,
1.75751062e-05, 2.32995181e-05, 3.08884360e-05,
4.09491506e-05, 5.42867544e-05, 7.19685673e-05,
9.54095476e-05, 1.26485522e-04, 1.67683294e-04,
2.22299648e-04, 2.94705170e-04, 3.90693994e-04,
5.17947468e-04, 6.86648845e-04, 9.10298178e-04,
1.20679264e-03, 1.59985872e-03, 2.12095089e-03,
2.81176870e-03, 3.72759372e-03, 4.94171336e-03,
6.55128557e-03, 8.68511374e-03, 1.15139540e-02,
1.52641797e-02, 2.02358965e-02, 2.68269580e-02,
3.55648031e-02, 4.71486636e-02, 6.25055193e-02,
8.28642773e-02, 1.09854114e-01, 1.45634848e-01,
1.93069773e-01, 2.55954792e-01, 3.39322177e-01,
4.49843267e-01, 5.96362332e-01, 7.90604321e-01,
1.04811313e+00, 1.38949549e+00, 1.84206997e+00,
2.44205309e+00, 3.23745754e+00, 4.29193426e+00,
5.68986603e+00, 7.54312006e+00, 1.00000000e+01])
# flow_v comes from geotecha.consolidation.terzahgi module:
# terzaghi_1d_flowrate(z=np.array([0.0]), t=flow_t, kv=10, mv=1, gamw=10, ui=100, nterms=500)
flow_v = -np.array([ 0.00000000e+00, 1.00000000e+05, 1.78412412e+04,
1.54953209e+04, 1.34578624e+04, 1.16883065e+04,
1.01514272e+04, 8.81663000e+03, 7.65734340e+03,
6.65048985e+03, 5.77602610e+03, 5.01654435e+03,
4.35692582e+03, 3.78403963e+03, 3.28648146e+03,
2.85434652e+03, 2.47903242e+03, 2.15306785e+03,
1.86996392e+03, 1.62408493e+03, 1.41053624e+03,
1.22506677e+03, 1.06398442e+03, 9.24082570e+02,
8.02576220e+02, 6.97046575e+02, 6.05392880e+02,
5.25790600e+02, 4.56655118e+02, 3.96610163e+02,
3.44460438e+02, 2.99167808e+02, 2.59830644e+02,
2.25665819e+02, 1.95991124e+02, 1.70184572e+02,
1.47532018e+02, 1.26954815e+02, 1.07034205e+02,
8.66871910e+01, 6.59246745e+01, 4.59181293e+01,
2.84338280e+01, 1.50624045e+01, 6.48748315e+00,
2.12376806e+00, 4.83256782e-01, 6.78952680e-02,
5.03366995e-03, 1.59915607e-04, 1.65189842e-06,
3.84807183e-09])
# adjust depth values for PTPB: stack four quarter-height PTIB profiles
tslice = slice(5,-2) #restrict times
zslice = slice(None,None) # restrict zvals
t = TERZ1D_T[tslice]
z = np.append(0.25*TERZ1D_Z[zslice], [0.5 - 0.25*TERZ1D_Z[zslice][::-1], 0.5 + 0.25*TERZ1D_Z[zslice], 1 - 0.25 * TERZ1D_Z[zslice][::-1]])
# expected results: four mirrored copies of the PTIB pore-pressure profile
por = 100 * np.vstack((TERZ1D_POR[zslice, tslice], TERZ1D_POR[zslice, tslice][::-1,:], TERZ1D_POR[zslice, tslice], TERZ1D_POR[zslice, tslice][::-1,:]))
avp = 100 * TERZ1D_AVP[:, tslice]
settle = 100 * (1 - TERZ1D_AVP[:,tslice])
# the reader string is a template with {} indicating where parameters will be
# inserted. Use double curly braces {{}} if you need curly braces in your
# string.
reader = ("""\
H = 1
drn = 0
dTv = 0.1 /16
neig = 40
mvref = 2.0
mv = PolyLine([0,1], [0.5,0.5])
kv = PolyLine([0,1], [5,5])
surcharge_vs_time = PolyLine([0, 0.0, 10], [0,100,100])
surcharge_vs_depth = PolyLine([0, 1], [1,1])
pumping = (0.5, PolyLine(np.%s, np.%s))
ppress_z = np.%s
avg_ppress_z_pairs = [[0,1]]
settlement_z_pairs = [[0,1]]
tvals = np.%s
""" % (repr(flow_t), repr(flow_v/2), repr(z),repr(t)))
# we use 2*flow_v/4 because flow_v on its own is the flowrate for
# terzaghi PTIB with H = 1. Here we basically have 4 layers each of
# H=0.25, hence dTv is divided by 16. Because our pump extracts over a
# quarter of the height we divide the original flow_v by 4, but a single
# pump drains both the top and bottom halves so we multiply by 2.
# This gives us 2*flow_v/4 = flow_v/2.
a = Speccon1dVR(reader)
a.make_all()
# custom plots: expected (Terzaghi) vs calculated (speccon) results
title = ("Mimic Terzaghi 1D by using time dependent pumping "
         "at mid-depth")
fig = plt.figure(figsize=(12,5))
fig.suptitle(title)
#z vs u
# pass integer subplot codes: string codes like "131" are not accepted
# by all matplotlib versions, while ints work in every version
ax1 = fig.add_subplot(131)
ax1.set_xlabel('Excess pore pressure, kPa')
ax1.set_ylabel('Normalised depth')
ax1.invert_yaxis()
ax1.plot(por, z,
         ls="None", color='Blue', marker="+", ms=5,
         label='expected')
ax1.plot(a.por, z,
         ls='-', color='red', marker='o', ms=5, markerfacecolor='None',
         markeredgecolor='red',
         label='calculated')
# avp vs t
ax2 = fig.add_subplot(132)
ax2.set_xlabel('Time')
ax2.set_ylabel('Average excess pore pressure, kPa')
ax2.set_xscale('log')
ax2.set_xlim((0.01, 10))
ax2.plot(t, avp[0],
         ls="None", color='Blue', marker="+", ms=5,
         label='expected')
ax2.plot(t, a.avp[0],
         ls='-', color='red', marker='o', ms=5, markerfacecolor='None',
         markeredgecolor='red',
         label='calculated')
leg = ax2.legend()
# NOTE(review): Legend.draggable() was removed in matplotlib >= 3.3;
# switch to leg.set_draggable(True) if targeting modern matplotlib.
leg.draggable()
# settlement vs t
ax3 = fig.add_subplot(133)
ax3.set_xlabel('Time')
ax3.set_ylabel('Settlement')
ax3.invert_yaxis()
ax3.set_xscale('log')
ax3.set_xlim((0.01, 10))
ax3.plot(t, settle[0],
         ls="None", color='Blue', marker="+", ms=5,
         label='expected')
ax3.plot(t, a.set[0],
         ls='-', color='red', marker='o', ms=5, markerfacecolor='None',
         markeredgecolor='red',
         label='calculated')
fig.subplots_adjust(top=0.90, bottom=0.15, left=0.1, right=0.94, wspace=0.4)
#fig.tight_layout()
plt.show()
| gpl-3.0 |
Winand/pandas | doc/sphinxext/numpydoc/plot_directive.py | 89 | 20530 | """
A special directive for generating a matplotlib plot.
.. warning::
This is a hacked version of plot_directive.py from Matplotlib.
It's very much subject to change!
Usage
-----
Can be used like this::
.. plot:: examples/example.py
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3], [4,5,6])
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
The content is interpreted as doctest formatted if it has a line starting
with ``>>>``.
The ``plot`` directive supports the options
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. Default can be changed in conf.py
and the ``image`` directive options ``alt``, ``height``, ``width``,
``scale``, ``align``, ``class``.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory, to which plot:: file names are relative to.
(If None or empty, file names are relative to the directoly where
the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
TODO
----
* Refactor Latex output; now it's plain images, but it would be nice
to make them appear side-by-side, or in floats.
"""
from __future__ import division, absolute_import, print_function
import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback
import sphinx
# As written, both branches imported io.StringIO, making the version
# check dead code; on Python 2 the StringIO module should be used
# instead (io.StringIO is unicode-only there).
if sys.version_info[0] >= 3:
    from io import StringIO
else:
    from StringIO import StringIO
import warnings
warnings.warn("A plot_directive module is also available under "
"matplotlib.sphinxext; expect this numpydoc.plot_directive "
"module to be deprecated after relevant features have been "
"integrated there.",
FutureWarning, stacklevel=2)
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def setup(app):
    """Sphinx extension entry point: register config values and the
    ``plot`` directive.

    Also stashes the app, its config and confdir on the function object
    so module-level helpers can reach them later.
    """
    setup.app = app
    setup.config = app.config
    setup.confdir = app.confdir
    # (name, default); all values trigger a rebuild when changed
    config_defaults = [
        ('plot_pre_code', ''),
        ('plot_include_source', False),
        ('plot_formats', ['png', 'hires.png', 'pdf']),
        ('plot_basedir', None),
        ('plot_html_show_formats', True),
    ]
    for name, default in config_defaults:
        app.add_config_value(name, default, True)
    app.add_directive('plot', plot_directive, True, (0, 1, False),
                      **plot_directive_options)
#------------------------------------------------------------------------------
# plot:: directive
#------------------------------------------------------------------------------
from docutils.parsers.rst import directives
from docutils import nodes
def plot_directive(name, arguments, options, content, lineno,
                   content_offset, block_text, state, state_machine):
    # thin docutils-directive shim: all real work happens in run()
    return run(arguments, content, options, state_machine, state, lineno)
# expose the module docstring as the directive's documentation
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
    """Parse a directive option as a boolean; a bare flag means True."""
    if not arg or not arg.strip():
        # option given with no argument: treat it as a flag
        return True
    normalized = arg.strip().lower()
    if normalized in ('no', '0', 'false'):
        return False
    if normalized in ('yes', '1', 'true'):
        return True
    raise ValueError('"%s" unknown boolean' % arg)
def _option_format(arg):
    """Validate the ``format`` directive option.

    The documented choices are 'python' and 'doctest' (see the module
    docstring and the is_doctest handling in run()); the original
    ('python', 'lisp') tuple rejected the documented 'doctest' value.
    """
    return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
    """Validate the ``align`` image option against the allowed keywords."""
    return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
                                   "right"))
plot_directive_options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
}
#------------------------------------------------------------------------------
# Generating output
#------------------------------------------------------------------------------
from docutils import nodes, utils
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{%- for option in options %}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{% endfor %}
{{ only_latex }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endfor %}
"""
class ImageFile(object):
    """A generated output image: a base name plus the formats produced."""

    def __init__(self, basename, dirname):
        self.basename = basename  # file name without extension
        self.dirname = dirname    # directory the images are written to
        self.formats = []         # e.g. ['png', 'pdf']; filled in later

    def filename(self, format):
        """Full path of this image in the given format."""
        return os.path.join(self.dirname, self.basename + '.' + format)

    def filenames(self):
        """Full paths for every generated format."""
        return list(map(self.filename, self.formats))
def run(arguments, content, options, state_machine, state, lineno):
    """Body of the ``plot::`` directive.

    Runs the referenced or inline plot code, renders the resulting
    figures under the sphinx build directory, and inserts the generated
    reStructuredText (source listing plus figure directives) into
    *state_machine*.  Returns a list of docutils system messages —
    empty on success, one error message if plotting failed.
    """
    if arguments and content:
        raise RuntimeError("plot:: directive can't have both args and content")
    document = state_machine.document
    config = document.settings.env.config
    options.setdefault('include-source', config.plot_include_source)
    # determine input
    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)
    if arguments:
        # External script: resolve relative to the rst file or plot_basedir.
        if not config.plot_basedir:
            source_file_name = os.path.join(rst_dir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))
        code = open(source_file_name, 'r').read()
        output_base = os.path.basename(source_file_name)
    else:
        # Inline code: number outputs per-document with a running counter.
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)
    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''
    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')
    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if 'format' in options:
        # An explicit :format: option overrides the auto-detection.
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True
    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]
    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    if not os.path.exists(build_dir):
        os.makedirs(build_dir)
    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))
    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    source_link = dest_dir_link + '/' + output_base + source_ext
    # make figures
    try:
        results = makefig(code, source_file_name, build_dir, output_base,
                          config)
        errors = []
    except PlotError as err:
        # Report the failure as a docutils system message (severity 2 =
        # warning) and fall back to rendering the code without images.
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s: %s" % (output_base, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]
    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += ['    %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""
        opts = [':%s: %s' % (key, val) for key, val in list(options.items())
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
        only_html = ".. only:: html"
        only_latex = ".. only:: latex"
        # Only the first chunk carries the link back to the source script.
        if j == 0:
            src_link = source_link
        else:
            src_link = None
        result = format_template(
            TEMPLATE,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats)
        total_lines.extend(result.split("\n"))
        # NOTE: extend("\n") appends a single "\n" element (str is iterable),
        # acting as a blank separator line between chunks.
        total_lines.extend("\n")
    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)
    # copy image files to builder's output directory
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                shutil.copyfile(fn, os.path.join(dest_dir,
                                                 os.path.basename(fn)))
    # copy script (if necessary)
    if source_file_name == rst_file:
        target_name = os.path.join(dest_dir, output_base + source_ext)
        f = open(target_name, 'w')
        f.write(unescape_doctest(code))
        f.close()
    return errors
#------------------------------------------------------------------------------
# Run code and capture figures
#------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.image as image
from matplotlib import _pylab_helpers
import exceptions
def contains_doctest(text):
    """Return True if *text* looks like doctest input rather than plain
    Python source.

    Text that compiles as stand-alone Python is never treated as a
    doctest; otherwise it is scanned for a ``>>>`` prompt at the start
    of a line.
    """
    try:
        compile(text, '<string>', 'exec')
    except SyntaxError:
        # Not valid Python as-is -- look for an interpreter prompt.
        return re.search(r'^\s*>>>', text, re.M) is not None
    return False
def unescape_doctest(text):
    """Extract runnable code from *text*, which holds either plain Python
    or a doctest transcript.

    Plain Python is returned unchanged.  In a doctest, prompt lines
    (``>>>`` / ``...``) are unwrapped to bare code, other non-blank
    lines (expected output) become comments, and blank lines are kept.
    """
    if not contains_doctest(text):
        return text
    prompt = re.compile(r'^\s*(>>>|\.\.\.) (.*)$')
    pieces = []
    for line in text.split("\n"):
        m = prompt.match(line)
        if m:
            pieces.append(m.group(2) + "\n")
        elif line.strip():
            pieces.append("# " + line.strip() + "\n")
        else:
            pieces.append("\n")
    return "".join(pieces)
def split_code_at_show(text):
    """Split *text* into chunks at each ``plt.show()`` call.

    Each returned chunk ends with its ``plt.show()`` line (doctest
    prompts are honoured); a trailing chunk with no show call is kept
    only if it contains something other than whitespace.
    """
    show_line = ('>>> plt.show()' if contains_doctest(text)
                 else 'plt.show()')
    parts = []
    current = []
    for line in text.split("\n"):
        current.append(line)
        if line.strip() == show_line:
            parts.append("\n".join(current))
            current = []
    tail = "\n".join(current)
    if tail.strip():
        parts.append(tail)
    return parts
class PlotError(RuntimeError):
    """Raised when running a plot script or saving its figures fails."""
    pass
def run_code(code, code_path, ns=None):
    """Execute *code* (optionally doctest-formatted) and return the
    namespace it ran in.

    The working directory, ``sys.path``, ``sys.argv`` and ``sys.stdout``
    are redirected around the ``exec`` and restored afterwards.  Any
    exception (including SystemExit) is re-raised as :class:`PlotError`
    carrying the formatted traceback.
    """
    # Change the working directory to the directory of the example, so
    # it can get at its data files, if any.
    pwd = os.getcwd()
    old_sys_path = list(sys.path)
    if code_path is not None:
        dirname = os.path.abspath(os.path.dirname(code_path))
        os.chdir(dirname)
        sys.path.insert(0, dirname)
    # Redirect stdout
    stdout = sys.stdout
    sys.stdout = StringIO()
    # Reset sys.argv
    old_sys_argv = sys.argv
    sys.argv = [code_path]
    try:
        try:
            code = unescape_doctest(code)
            if ns is None:
                ns = {}
            if not ns:
                # Run the configured setup code only into a fresh namespace.
                exec(setup.config.plot_pre_code, ns)
            exec(code, ns)
        except (Exception, SystemExit) as err:
            raise PlotError(traceback.format_exc())
    finally:
        # Always restore the interpreter state, even on failure.
        os.chdir(pwd)
        sys.argv = old_sys_argv
        sys.path[:] = old_sys_path
        sys.stdout = stdout
    return ns
#------------------------------------------------------------------------------
# Generating figures
#------------------------------------------------------------------------------
def out_of_date(original, derived):
    """Return True if *derived* is missing or older than *original*.

    Both arguments are full file paths; *original* is assumed to exist.
    """
    if not os.path.exists(derived):
        return True
    return os.stat(derived).st_mtime < os.stat(original).st_mtime
def makefig(code, code_path, output_dir, output_base, config):
    """
    Run a pyplot script *code* and save the images under *output_dir*
    with file names derived from *output_base*

    Returns a list of ``(code_piece, [ImageFile, ...])`` pairs, one per
    chunk of *code* split at ``plt.show()``.  Up-to-date images already
    on disk are reused instead of re-running the code.  Raises
    :class:`PlotError` on bad format config or failed execution/saving.
    """
    # -- Parse format list
    default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
    formats = []
    for fmt in config.plot_formats:
        if isinstance(fmt, str):
            formats.append((fmt, default_dpi.get(fmt, 80)))
        elif type(fmt) in (tuple, list) and len(fmt)==2:
            formats.append((str(fmt[0]), int(fmt[1])))
        else:
            raise PlotError('invalid image format "%r" in plot_formats' % fmt)
    # -- Try to determine if all images already exist
    code_pieces = split_code_at_show(code)
    # Look for single-figure output files first
    all_exists = True
    img = ImageFile(output_base, output_dir)
    for format, dpi in formats:
        if out_of_date(code_path, img.filename(format)):
            all_exists = False
            break
        img.formats.append(format)
    if all_exists:
        return [(code, [img])]
    # Then look for multi-figure output files
    results = []
    all_exists = True
    for i, code_piece in enumerate(code_pieces):
        images = []
        # Probe numbered outputs until one is missing (1000 is a cap on
        # the number of figures a single chunk may produce).
        for j in range(1000):
            img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
            for format, dpi in formats:
                if out_of_date(code_path, img.filename(format)):
                    all_exists = False
                    break
                img.formats.append(format)
            # assume that if we have one, we have them all
            if not all_exists:
                all_exists = (j > 0)
                break
            images.append(img)
        if not all_exists:
            break
        results.append((code_piece, images))
    if all_exists:
        return results
    # -- We didn't find the files, so build them
    results = []
    # The namespace is shared across chunks so later pieces see earlier
    # definitions, mirroring a single script execution.
    ns = {}
    for i, code_piece in enumerate(code_pieces):
        # Clear between runs
        plt.close('all')
        # Run code
        run_code(code_piece, code_path, ns)
        # Collect images
        images = []
        fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
        for j, figman in enumerate(fig_managers):
            if len(fig_managers) == 1 and len(code_pieces) == 1:
                img = ImageFile(output_base, output_dir)
            else:
                img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
                                output_dir)
            images.append(img)
            for format, dpi in formats:
                try:
                    figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
                except exceptions.BaseException as err:
                    raise PlotError(traceback.format_exc())
                img.formats.append(format)
        # Results
        results.append((code_piece, images))
    return results
#------------------------------------------------------------------------------
# Relative pathnames
#------------------------------------------------------------------------------
# Use the stdlib os.path.relpath when available (Python >= 2.6); otherwise
# install the implementation copied from Python 2.7's posixpath/ntpath.
try:
    from os.path import relpath
except ImportError:
    # Copied from Python 2.7
    if 'posix' in sys.builtin_module_names:
        def relpath(path, start=os.path.curdir):
            """Return a relative version of a path"""
            from os.path import sep, curdir, join, abspath, commonprefix, \
                pardir
            if not path:
                raise ValueError("no path specified")
            start_list = abspath(start).split(sep)
            path_list = abspath(path).split(sep)
            # Work out how much of the filepath is shared by start and path.
            i = len(commonprefix([start_list, path_list]))
            rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
            if not rel_list:
                return curdir
            return join(*rel_list)
    elif 'nt' in sys.builtin_module_names:
        def relpath(path, start=os.path.curdir):
            """Return a relative version of a path"""
            from os.path import sep, curdir, join, abspath, commonprefix, \
                pardir, splitunc
            if not path:
                raise ValueError("no path specified")
            start_list = abspath(start).split(sep)
            path_list = abspath(path).split(sep)
            # On Windows, paths on different drives (or mixing UNC with
            # drive letters) have no relative form.
            if start_list[0].lower() != path_list[0].lower():
                unc_path, rest = splitunc(path)
                unc_start, rest = splitunc(start)
                if bool(unc_path) ^ bool(unc_start):
                    raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                                                                        % (path, start))
                else:
                    raise ValueError("path is on drive %s, start on drive %s"
                                                        % (path_list[0], start_list[0]))
            # Work out how much of the filepath is shared by start and path.
            for i in range(min(len(start_list), len(path_list))):
                if start_list[i].lower() != path_list[i].lower():
                    break
            else:
                i += 1
            rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
            if not rel_list:
                return curdir
            return join(*rel_list)
    else:
        raise RuntimeError("Unsupported platform (no relpath available!)")
| bsd-3-clause |
xzh86/scikit-learn | sklearn/feature_extraction/image.py | 263 | 17600 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..utils.fixes import astype
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
===========
n_x: integer
The size of the grid in the x direction.
n_y: integer
The size of the grid in the y direction.
n_z: integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
              return_as=sparse.coo_matrix, dtype=None):
    """Auxiliary function for img_to_graph and grid_to_graph

    Builds the (n_voxels, n_voxels) symmetric adjacency matrix of a 3D
    grid, weighted by the image gradient when *img* is given, and
    restricted to the voxels selected by *mask* when given.
    """
    edges = _make_edges_3d(n_x, n_y, n_z)
    if dtype is None:
        # Default dtype: int for a pure connectivity graph, else the image's.
        if img is None:
            dtype = np.int
        else:
            dtype = img.dtype
    if img is not None:
        img = np.atleast_3d(img)
        weights = _compute_gradient_3d(edges, img)
        if mask is not None:
            edges, weights = _mask_edges_weights(mask, edges, weights)
            diag = img.squeeze()[mask]
        else:
            diag = img.ravel()
        n_voxels = diag.size
    else:
        if mask is not None:
            mask = astype(mask, dtype=np.bool, copy=False)
            mask = np.asarray(mask, dtype=np.bool)
            edges = _mask_edges_weights(mask, edges)
            n_voxels = np.sum(mask)
        else:
            n_voxels = n_x * n_y * n_z
        # Unweighted graph: unit weight per edge and per diagonal entry.
        weights = np.ones(edges.shape[1], dtype=dtype)
        diag = np.ones(n_voxels, dtype=dtype)
    diag_idx = np.arange(n_voxels)
    # Duplicate each edge in both directions to symmetrize the matrix,
    # and place `diag` on the main diagonal.
    i_idx = np.hstack((edges[0], edges[1]))
    j_idx = np.hstack((edges[1], edges[0]))
    graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
                              (np.hstack((i_idx, diag_idx)),
                               np.hstack((j_idx, diag_idx)))),
                              (n_voxels, n_voxels),
                              dtype=dtype)
    if return_as is np.ndarray:
        return graph.toarray()
    return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
    """Graph of the pixel-to-pixel gradient connections
    Edges are weighted with the gradient values.
    Read more in the :ref:`User Guide <image_feature_extraction>`.
    Parameters
    ----------
    img : ndarray, 2D or 3D
        2D or 3D image
    mask : ndarray of booleans, optional
        An optional mask of the image, to consider only part of the
        pixels.
    return_as : np.ndarray or a sparse matrix class, optional
        The class to use to build the returned adjacency matrix.
    dtype : None or dtype, optional
        The data of the returned sparse matrix. By default it is the
        dtype of img

    Returns
    -------
    graph : ndarray or sparse matrix
        The (n_pixels, n_pixels) adjacency matrix, as requested by
        `return_as`.

    Notes
    -----
    For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
    by returning a dense np.matrix instance.  Going forward, np.ndarray
    returns an np.ndarray, as expected.
    For compatibility, user code relying on this method should wrap its
    calls in ``np.asarray`` to avoid type issues.
    """
    img = np.atleast_3d(img)
    n_x, n_y, n_z = img.shape
    # Delegate to the shared builder, passing the image so that edges are
    # weighted by the intensity gradient.
    return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
                  dtype=np.int):
    """Graph of the pixel-to-pixel connections
    Edges exist if 2 voxels are connected.
    Parameters
    ----------
    n_x : int
        Dimension in x axis
    n_y : int
        Dimension in y axis
    n_z : int, optional, default 1
        Dimension in z axis
    mask : ndarray of booleans, optional
        An optional mask of the image, to consider only part of the
        pixels.
    return_as : np.ndarray or a sparse matrix class, optional
        The class to use to build the returned adjacency matrix.
    dtype : dtype, optional, default int
        The data of the returned sparse matrix. By default it is int

    Returns
    -------
    graph : ndarray or sparse matrix
        The (n_voxels, n_voxels) unweighted adjacency matrix, as
        requested by `return_as`.

    Notes
    -----
    For sklearn versions 0.14.1 and prior, return_as=np.ndarray was handled
    by returning a dense np.matrix instance.  Going forward, np.ndarray
    returns an np.ndarray, as expected.
    For compatibility, user code relying on this method should wrap its
    calls in ``np.asarray`` to avoid type issues.
    """
    # No image is passed, so the shared builder produces a purely
    # structural (unit-weight) connectivity graph.
    return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
                     dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
The image with
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
def extract_patches(arr, patch_shape=8, extraction_step=1):
    """Extracts patches of any n-dimensional array in place using strides.

    Given an n-dimensional array it will return a 2n-dimensional array with
    the first n dimensions indexing patch position and the last n indexing
    the patch content. This operation is immediate (O(1)). A reshape
    performed on the first n dimensions will cause numpy to copy data, leading
    to a list of extracted patches.
    Read more in the :ref:`User Guide <image_feature_extraction>`.

    Parameters
    ----------
    arr : ndarray
        n-dimensional array of which patches are to be extracted
    patch_shape : integer or tuple of length arr.ndim
        Indicates the shape of the patches to be extracted. If an
        integer is given, the shape will be a hypercube of
        sidelength given by its value.
    extraction_step : integer or tuple of length arr.ndim
        Indicates step size at which extraction shall be performed.
        If integer is given, then the step is uniform in all dimensions.

    Returns
    -------
    patches : strided ndarray
        2n-dimensional array indexing patches on first n dimensions and
        containing patches on the last n dimensions. These dimensions
        are fake, but this way no data is copied. A simple reshape invokes
        a copying operation to obtain a list of patches:
        result.reshape([-1] + list(patch_shape))
    """
    arr_ndim = arr.ndim
    # Broadcast scalar shape/step arguments to every dimension.
    if isinstance(patch_shape, numbers.Number):
        patch_shape = tuple([patch_shape] * arr_ndim)
    if isinstance(extraction_step, numbers.Number):
        extraction_step = tuple([extraction_step] * arr_ndim)
    patch_strides = arr.strides
    # Index with a *tuple* of slices: indexing an ndarray with a list of
    # slices was deprecated (and later removed) in NumPy, where it is
    # interpreted as fancy indexing instead of a multidimensional slice.
    slices = tuple(slice(None, None, st) for st in extraction_step)
    # Strides of the subsampled array give the byte step between patch
    # starting positions along each dimension.
    indexing_strides = arr[slices].strides
    patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
                           np.array(extraction_step)) + 1
    shape = tuple(list(patch_indices_shape) + list(patch_shape))
    strides = tuple(list(indexing_strides) + list(patch_strides))
    # Zero-copy view: first n axes locate the patch, last n axes index
    # inside it.
    patches = as_strided(arr, shape=shape, strides=strides)
    return patches
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
    """Reshape a 2D image into a collection of patches
    The resulting patches are allocated in a dedicated array.
    Read more in the :ref:`User Guide <image_feature_extraction>`.
    Parameters
    ----------
    image : array, shape = (image_height, image_width) or
        (image_height, image_width, n_channels)
        The original image data. For color images, the last dimension specifies
        the channel: a RGB image would have `n_channels=3`.
    patch_size : tuple of ints (patch_height, patch_width)
        the dimensions of one patch
    max_patches : integer or float, optional default is None
        The maximum number of patches to extract. If max_patches is a float
        between 0 and 1, it is taken to be a proportion of the total number
        of patches.
    random_state : int or RandomState
        Pseudo number generator state used for random sampling to use if
        `max_patches` is not None.
    Returns
    -------
    patches : array, shape = (n_patches, patch_height, patch_width) or
        (n_patches, patch_height, patch_width, n_channels)
        The collection of patches extracted from the image, where `n_patches`
        is either `max_patches` or the total number of patches that can be
        extracted.
    Examples
    --------
    >>> from sklearn.feature_extraction import image
    >>> one_image = np.arange(16).reshape((4, 4))
    >>> one_image
    array([[ 0,  1,  2,  3],
           [ 4,  5,  6,  7],
           [ 8,  9, 10, 11],
           [12, 13, 14, 15]])
    >>> patches = image.extract_patches_2d(one_image, (2, 2))
    >>> print(patches.shape)
    (9, 2, 2)
    >>> patches[0]
    array([[0, 1],
           [4, 5]])
    >>> patches[1]
    array([[1, 2],
           [5, 6]])
    >>> patches[8]
    array([[10, 11],
           [14, 15]])
    """
    i_h, i_w = image.shape[:2]
    p_h, p_w = patch_size
    # The requested patch must fit inside the image in both dimensions.
    if p_h > i_h:
        raise ValueError("Height of the patch should be less than the height"
                         " of the image.")
    if p_w > i_w:
        raise ValueError("Width of the patch should be less than the width"
                         " of the image.")
    image = check_array(image, allow_nd=True)
    # Add a trailing channel axis so grayscale and color images share one
    # code path.
    image = image.reshape((i_h, i_w, -1))
    n_colors = image.shape[-1]
    extracted_patches = extract_patches(image,
                                        patch_shape=(p_h, p_w, n_colors),
                                        extraction_step=1)
    n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
    if max_patches:
        # Sample patch top-left corners uniformly at random.
        rng = check_random_state(random_state)
        i_s = rng.randint(i_h - p_h + 1, size=n_patches)
        j_s = rng.randint(i_w - p_w + 1, size=n_patches)
        patches = extracted_patches[i_s, j_s, 0]
    else:
        patches = extracted_patches
    # Copy the strided view into a dense (n_patches, p_h, p_w, n_colors)
    # array.
    patches = patches.reshape(-1, p_h, p_w, n_colors)
    # remove the color dimension if useless
    if patches.shape[-1] == 1:
        return patches.reshape((n_patches, p_h, p_w))
    else:
        return patches
def reconstruct_from_patches_2d(patches, image_size):
    """Reconstruct the image from all of its patches.

    Patches are assumed to overlap and the image is constructed by
    filling in the patches from left to right, top to bottom, averaging
    the overlapping regions.
    Read more in the :ref:`User Guide <image_feature_extraction>`.

    Parameters
    ----------
    patches : array, shape = (n_patches, patch_height, patch_width) or
        (n_patches, patch_height, patch_width, n_channels)
        The complete set of patches. If the patches contain colour
        information, channels are indexed along the last dimension: RGB
        patches would have `n_channels=3`.
    image_size : tuple of ints (image_height, image_width) or
        (image_height, image_width, n_channels)
        the size of the image that will be reconstructed

    Returns
    -------
    image : array, shape = image_size
        the reconstructed image
    """
    i_h, i_w = image_size[:2]
    p_h, p_w = patches.shape[1:3]
    img = np.zeros(image_size)
    # Number of patch positions along each axis.
    n_h = i_h - p_h + 1
    n_w = i_w - p_w + 1
    positions = product(range(n_h), range(n_w))
    for patch, (i, j) in zip(patches, positions):
        img[i:i + p_h, j:j + p_w] += patch
    for i in range(i_h):
        for j in range(i_w):
            # Each pixel was covered by this many overlapping patches.
            overlap = float(min(i + 1, p_h, i_h - i) *
                            min(j + 1, p_w, i_w - j))
            img[i, j] /= overlap
    return img
class PatchExtractor(BaseEstimator):
    """Extracts patches from a collection of images
    Read more in the :ref:`User Guide <image_feature_extraction>`.

    Parameters
    ----------
    patch_size : tuple of ints (patch_height, patch_width)
        the dimensions of one patch
    max_patches : integer or float, optional default is None
        The maximum number of patches per image to extract. If max_patches is a
        float in (0, 1), it is taken to mean a proportion of the total number
        of patches.
    random_state : int or RandomState
        Pseudo number generator state used for random sampling.
    """
    def __init__(self, patch_size=None, max_patches=None, random_state=None):
        self.patch_size = patch_size
        self.max_patches = max_patches
        self.random_state = random_state

    def fit(self, X, y=None):
        """Do nothing and return the estimator unchanged
        This method is just there to implement the usual API and hence
        work in pipelines.
        """
        return self

    def transform(self, X):
        """Transforms the image samples in X into a matrix of patch data.

        Parameters
        ----------
        X : array, shape = (n_samples, image_height, image_width) or
            (n_samples, image_height, image_width, n_channels)
            Array of images from which to extract patches. For color images,
            the last dimension specifies the channel: a RGB image would have
            `n_channels=3`.

        Returns
        -------
        patches: array, shape = (n_patches, patch_height, patch_width) or
            (n_patches, patch_height, patch_width, n_channels)
            The collection of patches extracted from the images, where
            `n_patches` is either `n_samples * max_patches` or the total
            number of patches that can be extracted.
        """
        # BUGFIX: use a *local* RNG instead of overwriting
        # self.random_state.  Mutating a parameter set in __init__ breaks
        # the scikit-learn estimator contract (cloning, get_params /
        # set_params round-trips, repeated transform calls).
        random_state = check_random_state(self.random_state)
        n_images, i_h, i_w = X.shape[:3]
        # Normalize to a trailing channel axis so grayscale and color
        # inputs share one code path.
        X = np.reshape(X, (n_images, i_h, i_w, -1))
        n_channels = X.shape[-1]
        if self.patch_size is None:
            # Default patch: one tenth of the image in each dimension.
            patch_size = i_h // 10, i_w // 10
        else:
            patch_size = self.patch_size
        # compute the dimensions of the patches array
        p_h, p_w = patch_size
        n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
        patches_shape = (n_images * n_patches,) + patch_size
        if n_channels > 1:
            patches_shape += (n_channels,)
        # extract the patches
        patches = np.empty(patches_shape)
        for ii, image in enumerate(X):
            patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
                image, patch_size, self.max_patches, random_state)
        return patches
| bsd-3-clause |
jeffmkw/DAT210x-Lab | Module4/assignment2_helper.py | 1 | 3825 | import math
import pandas as pd
from sklearn import preprocessing
# A Note on SKLearn .transform() calls:
#
# Any time you transform your data, you lose the column header names.
# This actually makes complete sense. There are essentially two types
# of transformations, those that change the scale of your features,
# and those that change your features entirely. Changing the scale would
# be like changing centimeters to inches. Changing the features would
# be like using PCA to reduce 300 columns to 30. In either case, the
# original column's units have been altered or no longer exist, so it's
# up to you to rename your columns after ANY transformation. Due to
# this, SKLearn returns an NDArray from *transform() calls.
def scaleFeatures(df):
    """Standardize every column of *df* to zero mean and ~unit variance.

    PCA requires standardized data: mean 0 and roughly unit variance.
    Of SKLearn's scalers (RobustScaler, Normalizer, MinMaxScaler,
    MaxAbsScaler, StandardScaler, ...) the regular Normalizer does not
    zero out the mean, and MinMaxScaler / MaxAbsScaler do not produce
    unit variance, so StandardScaler is used here.  See:

    http://scikit-learn.org/stable/modules/preprocessing.html#preprocessing-scaler

    http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html#sklearn.preprocessing.StandardScaler

    Note that SKLearn's ``*transform()`` calls return NDArrays and drop
    the column headers.  Feature scaling changes only the scale, not the
    number of features, so the original column names are restored here
    -- keeping in mind that the _units_ have been altered.

    Returns the scaled data as a new DataFrame.
    """
    standardized = preprocessing.StandardScaler().fit_transform(df)
    result = pd.DataFrame(standardized, columns=df.columns)
    print("New Variances:\n", result.var())
    print("New Describe:\n", result.describe())
    return result
def drawVectors(transformed_features, components_, columns, plt, scaled):
    """Overlay the original feature axes on a 2D PCA scatter plot.

    Draws one labeled arrow per original column, scaled by the extent
    of the transformed data, prints the features sorted by projected
    length, and returns the matplotlib axes.  If *scaled* is falsy the
    projection would be misleading, so bare axes are returned instead.
    """
    if not scaled:
        return plt.axes() # No cheating ;-)
    num_columns = len(columns)
    # This function will project your *original* feature (columns)
    # onto your principal component feature-space, so that you can
    # visualize how "important" each one was in the
    # multi-dimensional scaling
    # Scale the principal components by the max value in
    # the transformed set belonging to that component
    xvector = components_[0] * max(transformed_features[:,0])
    yvector = components_[1] * max(transformed_features[:,1])
    ## visualize projections
    # Sort each column by its length. These are your *original*
    # columns, not the principal components.
    important_features = { columns[i] : math.sqrt(xvector[i]**2 + yvector[i]**2) for i in range(num_columns) }
    important_features = sorted(zip(important_features.values(), important_features.keys()), reverse=True)
    print("Features by importance:\n", important_features)
    ax = plt.axes()
    for i in range(num_columns):
        # Use an arrow to project each original feature as a
        # labeled vector on your principal component axes
        plt.arrow(0, 0, xvector[i], yvector[i], color='b', width=0.0005, head_width=0.02, alpha=0.75)
        plt.text(xvector[i]*1.2, yvector[i]*1.2, list(columns)[i], color='b', alpha=0.75)
    return ax
| mit |
Myasuka/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt

###############################################################################
# Generate sample data: a noiseless sine curve sampled at 40 random points.
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()

###############################################################################
# Add noise to targets (every 5th sample gets uniform noise in [-1.5, 1.5])
y[::5] += 3 * (0.5 - np.random.rand(8))

###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)

###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
# BUGFIX: the plt.hold('on') call was removed -- pyplot.hold was deprecated
# in matplotlib 2.0 and removed in 3.0 (it would raise AttributeError now);
# overplotting on the current axes is the default behavior anyway.
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
mhue/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
    import pylab as pl
    # Number of problem sizes to benchmark.
    n_iter = 40
    time_ridge = np.empty(n_iter)
    time_ols = np.empty(n_iter)
    time_lasso = np.empty(n_iter)
    # X axis for the plot.  NOTE(review): the loop below actually builds
    # problems of size 10 * i + 3, not 500 * (i + 1) -- these axis values
    # do not match the benchmarked sizes; confirm intent.
    dimensions = 500 * np.arange(1, n_iter + 1)
    for i in range(n_iter):
        print('Iteration %s of %s' % (i, n_iter))
        # Square random problem: as many features as samples.
        n_samples, n_features = 10 * i + 3, 10 * i + 3
        X = np.random.randn(n_samples, n_features)
        Y = np.random.randn(n_samples)
        # Time Ridge fit.
        start = datetime.now()
        ridge = linear_model.Ridge(alpha=1.)
        ridge.fit(X, Y)
        time_ridge[i] = total_seconds(datetime.now() - start)
        # Time ordinary least squares fit.
        start = datetime.now()
        ols = linear_model.LinearRegression()
        ols.fit(X, Y)
        time_ols[i] = total_seconds(datetime.now() - start)
        # Time LassoLars fit.
        start = datetime.now()
        lasso = linear_model.LassoLars()
        lasso.fit(X, Y)
        time_lasso[i] = total_seconds(datetime.now() - start)
    # Plot fit time against problem size for the three models.
    pl.figure('scikit-learn GLM benchmark results')
    pl.xlabel('Dimensions')
    pl.ylabel('Time (s)')
    pl.plot(dimensions, time_ridge, color='r')
    pl.plot(dimensions, time_ols, color='g')
    pl.plot(dimensions, time_lasso, color='b')
    pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
    pl.axis('tight')
    pl.show()
| bsd-3-clause |
winklerand/pandas | pandas/core/indexes/datetimes.py | 1 | 82224 | # pylint: disable=E1101
from __future__ import division
import operator
import warnings
from datetime import time, datetime, timedelta
import numpy as np
from pytz import utc
from pandas.core.base import _shared_docs
from pandas.core.dtypes.common import (
_NS_DTYPE, _INT64_DTYPE,
is_object_dtype, is_datetime64_dtype,
is_datetimetz, is_dtype_equal,
is_integer, is_float,
is_integer_dtype,
is_datetime64_ns_dtype,
is_period_dtype,
is_bool_dtype,
is_string_dtype,
is_string_like,
is_list_like,
is_scalar,
pandas_dtype,
_ensure_int64)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import isna
import pandas.core.dtypes.concat as _concat
from pandas.errors import PerformanceWarning
from pandas.core.common import _values_from_object, _maybe_box
from pandas.core.algorithms import checked_add_with_arr
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core.indexes.numeric import Int64Index, Float64Index
import pandas.compat as compat
from pandas.tseries.frequencies import (
to_offset, get_period_alias,
Resolution)
from pandas.core.indexes.datetimelike import (
DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin)
from pandas.tseries.offsets import (
DateOffset, generate_range, Tick, CDay, prefix_mapping)
from pandas.core.tools.datetimes import (
parse_time_string, normalize_date, to_time)
from pandas.core.tools.timedeltas import to_timedelta
from pandas.util._decorators import (Appender, cache_readonly,
deprecate_kwarg, Substitution)
import pandas.core.common as com
import pandas.tseries.offsets as offsets
import pandas.core.tools.datetimes as tools
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timestamp, period as libperiod)
from pandas._libs.tslibs import timezones, conversion, fields
# -------- some conversion wrapper functions
def _field_accessor(name, field, docstring=None):
    """Build a read-only property extracting datetime field *field*.

    Parameters
    ----------
    name : str
        Attribute name the generated property is exposed under.
    field : str
        Field code understood by the ``fields`` extraction helpers.
    docstring : str, optional
        Docstring attached to the generated property.
    """
    def f(self):
        values = self.asi8
        if self.tz is not None:
            if self.tz is not utc:
                # field extraction operates on local wall-clock values
                values = self._local_timestamps()

        if field in self._bool_ops:
            if field in ['is_month_start', 'is_month_end',
                         'is_quarter_start', 'is_quarter_end',
                         'is_year_start', 'is_year_end']:
                # start/end fields depend on the anchoring month of the freq
                # (default 12 when no freq is set)
                month_kw = (self.freq.kwds.get('startingMonth',
                                               self.freq.kwds.get('month', 12))
                            if self.freq else 12)

                result = fields.get_start_end_field(values, field,
                                                    self.freqstr, month_kw)
            else:
                result = fields.get_date_field(values, field)

            # these return a boolean by-definition
            return result

        if field in self._object_ops:
            result = fields.get_date_name_field(values, field)
            result = self._maybe_mask_results(result)

        else:
            result = fields.get_date_field(values, field)
            # NaT positions are masked to NaN, hence the float64 conversion
            result = self._maybe_mask_results(result, convert='float64')

        return Index(result, name=self.name)

    f.__name__ = name
    f.__doc__ = docstring
    return property(f)
def _dt_index_cmp(opname, cls, nat_result=False):
    """
    Wrap comparison operations to convert datetime-like to datetime64.

    *nat_result* is the boolean placed at positions that compare against
    NaT (True for ``__ne__``, False otherwise).
    """
    def wrapper(self, other):
        func = getattr(super(DatetimeIndex, self), opname)
        if (isinstance(other, datetime) or
                isinstance(other, compat.string_types)):
            # scalar path: coerce to a tz-matched datetime64 first
            other = _to_m8(other, tz=self.tz)
            result = func(other)
            if isna(other):
                result.fill(nat_result)
        else:
            # array-like path
            if isinstance(other, list):
                other = DatetimeIndex(other)
            elif not isinstance(other, (np.ndarray, Index, ABCSeries)):
                other = _ensure_datetime64(other)
            result = func(np.asarray(other))
            result = _values_from_object(result)

            # locate NaT slots in the other operand and force nat_result there
            if isinstance(other, Index):
                o_mask = other.values.view('i8') == libts.iNaT
            else:
                o_mask = other.view('i8') == libts.iNaT

            if o_mask.any():
                result[o_mask] = nat_result

        if self.hasnans:
            result[self._isnan] = nat_result

        # support of bool dtype indexers
        if is_bool_dtype(result):
            return result
        return Index(result)

    return compat.set_function_name(wrapper, opname, cls)
def _ensure_datetime64(other):
if isinstance(other, np.datetime64):
return other
raise TypeError('%s type object %s' % (type(other), str(other)))
# canonical midnight wall-clock time, used to detect "normalized" timestamps
_midnight = time(0, 0)
def _new_DatetimeIndex(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__ """
# data are already in UTC
# so need to localize
tz = d.pop('tz', None)
result = cls.__new__(cls, verify_integrity=False, **d)
if tz is not None:
result = result.tz_localize('UTC').tz_convert(tz)
return result
class DatetimeIndex(DatelikeOps, TimelikeOps, DatetimeIndexOpsMixin,
Int64Index):
"""
Immutable ndarray of datetime64 data, represented internally as int64, and
which can be boxed to Timestamp objects that are subclasses of datetime and
carry metadata such as frequency information.
Parameters
----------
data : array-like (1-dimensional), optional
Optional datetime-like data to construct index with
copy : bool
Make a copy of input ndarray
freq : string or pandas offset object, optional
One of pandas date offset strings or corresponding objects
start : starting value, datetime-like, optional
If data is None, start is used as the start point in generating regular
timestamp data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end time, datetime-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
tz : pytz.timezone or dateutil.tz.tzfile
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False signifies a
non-DST time (note that this flag is only applicable for ambiguous
times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous times
infer_dst : boolean, default False
.. deprecated:: 0.15.0
Attempt to infer fall dst-transition hours based on order
name : object
Name to be stored in the index
Attributes
----------
year
month
day
hour
minute
second
microsecond
nanosecond
date
time
dayofyear
weekofyear
week
dayofweek
weekday
weekday_name
quarter
tz
freq
freqstr
is_month_start
is_month_end
is_quarter_start
is_quarter_end
is_year_start
is_year_end
is_leap_year
inferred_freq
Methods
-------
normalize
strftime
snap
tz_convert
tz_localize
round
floor
ceil
to_datetime
to_period
to_perioddelta
to_pydatetime
to_series
to_frame
Notes
-----
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
See Also
---------
Index : The base pandas Index type
TimedeltaIndex : Index of timedelta64 data
PeriodIndex : Index of Period data
"""
_typ = 'datetimeindex'
_join_precedence = 10
def _join_i8_wrapper(joinf, **kwargs):
    # wrap a libjoin int64 join function so its output is re-viewed
    # as datetime64[ns]
    return DatetimeIndexOpsMixin._join_i8_wrapper(joinf, dtype='M8[ns]',
                                                  **kwargs)
_inner_indexer = _join_i8_wrapper(libjoin.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(libjoin.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(libjoin.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
libjoin.left_join_indexer_unique_int64, with_indexers=False)
_arrmap = None
@classmethod
def _add_comparison_methods(cls):
    """Attach datetime-aware rich-comparison operators to the class."""
    # __ne__ is the only operator where a NaT comparison yields True
    cls.__eq__ = _dt_index_cmp('__eq__', cls)
    cls.__ne__ = _dt_index_cmp('__ne__', cls, nat_result=True)
    for op_name in ('__lt__', '__gt__', '__le__', '__ge__'):
        setattr(cls, op_name, _dt_index_cmp(op_name, cls))
_engine_type = libindex.DatetimeEngine
tz = None
offset = None
_comparables = ['name', 'freqstr', 'tz']
_attributes = ['name', 'freq', 'tz']
# define my properties & methods for delegation
_bool_ops = ['is_month_start', 'is_month_end',
'is_quarter_start', 'is_quarter_end', 'is_year_start',
'is_year_end', 'is_leap_year']
_object_ops = ['weekday_name', 'freq', 'tz']
_field_ops = ['year', 'month', 'day', 'hour', 'minute', 'second',
'weekofyear', 'week', 'weekday', 'dayofweek',
'dayofyear', 'quarter', 'days_in_month',
'daysinmonth', 'microsecond',
'nanosecond']
_other_ops = ['date', 'time']
_datetimelike_ops = _field_ops + _object_ops + _bool_ops + _other_ops
_datetimelike_methods = ['to_period', 'tz_localize',
'tz_convert',
'normalize', 'strftime', 'round', 'floor',
'ceil']
_is_numeric_dtype = False
_infer_as_myclass = True
@deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
                 mapping={True: 'infer', False: 'raise'})
def __new__(cls, data=None,
            freq=None, start=None, end=None, periods=None,
            copy=False, name=None, tz=None,
            verify_integrity=True, normalize=False,
            closed=None, ambiguous='raise', dtype=None, **kwargs):
    """Construct a DatetimeIndex from *data*, or from a start/end/periods
    specification (delegated to ``_generate``) when *data* is None."""

    # This allows to later ensure that the 'copy' parameter is honored:
    if isinstance(data, Index):
        ref_to_data = data._data
    else:
        ref_to_data = data

    if name is None and hasattr(data, 'name'):
        name = data.name

    dayfirst = kwargs.pop('dayfirst', None)
    yearfirst = kwargs.pop('yearfirst', None)

    freq_infer = False
    if not isinstance(freq, DateOffset):

        # if a passed freq is None, don't infer automatically
        if freq != 'infer':
            freq = to_offset(freq)
        else:
            freq_infer = True
            freq = None

    if periods is not None:
        if is_float(periods):
            periods = int(periods)
        elif not is_integer(periods):
            msg = 'periods must be a number, got {periods}'
            raise TypeError(msg.format(periods=periods))

    if data is None and freq is None:
        raise ValueError("Must provide freq argument if no data is "
                         "supplied")

    # if dtype has an embeded tz, capture it
    if dtype is not None:
        try:
            dtype = DatetimeTZDtype.construct_from_string(dtype)
            dtz = getattr(dtype, 'tz', None)
            if dtz is not None:
                if tz is not None and str(tz) != str(dtz):
                    raise ValueError("cannot supply both a tz and a dtype"
                                     " with a tz")
                tz = dtz
        except TypeError:
            pass

    if data is None:
        # no data at all: build a regular range instead
        return cls._generate(start, end, periods, name, freq,
                             tz=tz, normalize=normalize, closed=closed,
                             ambiguous=ambiguous)

    if not isinstance(data, (np.ndarray, Index, ABCSeries)):
        if is_scalar(data):
            raise ValueError('DatetimeIndex() must be called with a '
                             'collection of some kind, %s was passed'
                             % repr(data))
        # other iterable of some kind
        if not isinstance(data, (list, tuple)):
            data = list(data)
        data = np.asarray(data, dtype='O')
    elif isinstance(data, ABCSeries):
        data = data._values

    # data must be Index or np.ndarray here
    if not (is_datetime64_dtype(data) or is_datetimetz(data) or
            is_integer_dtype(data)):
        data = tools.to_datetime(data, dayfirst=dayfirst,
                                 yearfirst=yearfirst)

    if issubclass(data.dtype.type, np.datetime64) or is_datetimetz(data):

        if isinstance(data, DatetimeIndex):
            if tz is None:
                tz = data.tz
            elif data.tz is None:
                data = data.tz_localize(tz, ambiguous=ambiguous)
            else:
                # the tz's must match
                if str(tz) != str(data.tz):
                    msg = ('data is already tz-aware {0}, unable to '
                           'set specified tz: {1}')
                    raise TypeError(msg.format(data.tz, tz))

            subarr = data.values

            if freq is None:
                # inherit freq from the source index; nothing to re-verify
                freq = data.offset
                verify_integrity = False
        else:
            if data.dtype != _NS_DTYPE:
                subarr = conversion.ensure_datetime64ns(data)
            else:
                subarr = data
    else:
        # must be integer dtype otherwise
        if isinstance(data, Int64Index):
            raise TypeError('cannot convert Int64Index->DatetimeIndex')
        if data.dtype != _INT64_DTYPE:
            data = data.astype(np.int64)
        subarr = data.view(_NS_DTYPE)

    if isinstance(subarr, DatetimeIndex):
        if tz is None:
            tz = subarr.tz
    else:
        if tz is not None:
            tz = timezones.maybe_get_tz(tz)

            if (not isinstance(data, DatetimeIndex) or
                    getattr(data, 'tz', None) is None):
                # Convert tz-naive to UTC
                ints = subarr.view('i8')
                subarr = conversion.tz_localize_to_utc(ints, tz,
                                                       ambiguous=ambiguous)

            subarr = subarr.view(_NS_DTYPE)

    subarr = cls._simple_new(subarr, name=name, freq=freq, tz=tz)
    if dtype is not None:
        if not is_dtype_equal(subarr.dtype, dtype):
            # dtype must be coerced to DatetimeTZDtype above
            if subarr.tz is not None:
                raise ValueError("cannot localize from non-UTC data")

    if verify_integrity and len(subarr) > 0:
        if freq is not None and not freq_infer:
            # verify the data actually conforms to the requested frequency
            inferred = subarr.inferred_freq
            if inferred != freq.freqstr:
                on_freq = cls._generate(subarr[0], None, len(subarr), None,
                                        freq, tz=tz, ambiguous=ambiguous)
                if not np.array_equal(subarr.asi8, on_freq.asi8):
                    raise ValueError('Inferred frequency {0} from passed '
                                     'dates does not conform to passed '
                                     'frequency {1}'
                                     .format(inferred, freq.freqstr))

    if freq_infer:
        inferred = subarr.inferred_freq
        if inferred:
            subarr.offset = to_offset(inferred)

    return subarr._deepcopy_if_needed(ref_to_data, copy)
@classmethod
def _generate(cls, start, end, periods, name, offset,
              tz=None, normalize=False, ambiguous='raise', closed=None):
    """Generate a regular DatetimeIndex from exactly two of
    start/end/periods at frequency *offset*, optionally localized to
    *tz* and trimmed according to *closed*."""
    if com._count_not_none(start, end, periods) != 2:
        raise ValueError('Of the three parameters: start, end, and '
                         'periods, exactly two must be specified')

    _normalized = True

    if start is not None:
        start = Timestamp(start)

    if end is not None:
        end = Timestamp(end)

    left_closed = False
    right_closed = False

    if start is None and end is None:
        if closed is not None:
            raise ValueError("Closed has to be None if not both of start"
                             "and end are defined")

    if closed is None:
        left_closed = True
        right_closed = True
    elif closed == "left":
        left_closed = True
    elif closed == "right":
        right_closed = True
    else:
        raise ValueError("Closed has to be either 'left', 'right' or None")

    try:
        inferred_tz = timezones.infer_tzinfo(start, end)
    except Exception:
        raise TypeError('Start and end cannot both be tz-aware with '
                        'different timezones')

    inferred_tz = timezones.maybe_get_tz(inferred_tz)

    # these may need to be localized
    tz = timezones.maybe_get_tz(tz)
    if tz is not None:
        date = start or end
        if date.tzinfo is not None and hasattr(tz, 'localize'):
            # pytz-style zone: resolve the correct utcoffset for this date
            tz = tz.localize(date.replace(tzinfo=None)).tzinfo

    if tz is not None and inferred_tz is not None:
        if not (timezones.get_timezone(inferred_tz) ==
                timezones.get_timezone(tz)):
            raise AssertionError("Inferred time zone not equal to passed "
                                 "time zone")

    elif inferred_tz is not None:
        tz = inferred_tz

    if start is not None:
        if normalize:
            start = normalize_date(start)
            _normalized = True
        else:
            _normalized = _normalized and start.time() == _midnight

    if end is not None:
        if normalize:
            end = normalize_date(end)
            _normalized = True
        else:
            _normalized = _normalized and end.time() == _midnight

    if hasattr(offset, 'delta') and offset != offsets.Day():
        # sub-daily (Tick) frequency other than calendar day:
        # localize the endpoints first, then generate
        if inferred_tz is None and tz is not None:
            # naive dates
            if start is not None and start.tz is None:
                start = start.tz_localize(tz, ambiguous=False)

            if end is not None and end.tz is None:
                end = end.tz_localize(tz, ambiguous=False)

        if start and end:
            if start.tz is None and end.tz is not None:
                start = start.tz_localize(end.tz, ambiguous=False)

            if end.tz is None and start.tz is not None:
                end = end.tz_localize(start.tz, ambiguous=False)

        if _use_cached_range(offset, _normalized, start, end):
            index = cls._cached_range(start, end, periods=periods,
                                      offset=offset, name=name)
        else:
            index = _generate_regular_range(start, end, periods, offset)

    else:
        # daily or coarser: generate tz-naive, localize afterwards
        if tz is not None:
            # naive dates
            if start is not None and start.tz is not None:
                start = start.replace(tzinfo=None)

            if end is not None and end.tz is not None:
                end = end.replace(tzinfo=None)

        if start and end:
            if start.tz is None and end.tz is not None:
                end = end.replace(tzinfo=None)

            if end.tz is None and start.tz is not None:
                start = start.replace(tzinfo=None)

        if _use_cached_range(offset, _normalized, start, end):
            index = cls._cached_range(start, end, periods=periods,
                                      offset=offset, name=name)
        else:
            index = _generate_regular_range(start, end, periods, offset)

        if tz is not None and getattr(index, 'tz', None) is None:
            index = conversion.tz_localize_to_utc(_ensure_int64(index), tz,
                                                  ambiguous=ambiguous)
            index = index.view(_NS_DTYPE)

            # index is localized datetime64 array -> have to convert
            # start/end as well to compare
            if start is not None:
                start = start.tz_localize(tz).asm8
            if end is not None:
                end = end.tz_localize(tz).asm8

    # trim endpoints that are excluded by the 'closed' specification
    if not left_closed and len(index) and index[0] == start:
        index = index[1:]
    if not right_closed and len(index) and index[-1] == end:
        index = index[:-1]

    index = cls._simple_new(index, name=name, freq=offset, tz=tz)
    return index
@property
def _box_func(self):
    """Return a callable boxing a raw value into a Timestamp that
    carries this index's freq and tz."""
    def _box(value):
        return Timestamp(value, freq=self.offset, tz=self.tz)
    return _box
def _convert_for_op(self, value):
    """ Convert value to be insertable to ndarray """
    # reject values whose timezone disagrees with the index's
    if not self._has_same_tz(value):
        raise ValueError('Passed item and index have different timezone')
    return _to_m8(value)
def _local_timestamps(self):
    """Return i8 values converted from UTC to this index's local zone."""
    if self.is_monotonic:
        return conversion.tz_convert(self.asi8, utc, self.tz)
    else:
        # tz_convert requires sorted input: convert a sorted copy,
        # then undo the sort with an inverse permutation
        values = self.asi8
        indexer = values.argsort()
        result = conversion.tz_convert(values.take(indexer), utc, self.tz)

        n = len(indexer)
        reverse = np.empty(n, dtype=np.int_)
        reverse.put(indexer, np.arange(n))
        return result.take(reverse)
@classmethod
def _simple_new(cls, values, name=None, freq=None, tz=None,
                dtype=None, **kwargs):
    """
    we require the we have a dtype compat for the values
    if we are passed a non-dtype compat, then coerce using the constructor
    """

    if getattr(values, 'dtype', None) is None:
        # empty, but with dtype compat
        if values is None:
            values = np.empty(0, dtype=_NS_DTYPE)
            return cls(values, name=name, freq=freq, tz=tz,
                       dtype=dtype, **kwargs)
        values = np.array(values, copy=False)

    if is_object_dtype(values):
        # object dtype cannot be viewed as M8[ns]; route through the
        # full constructor for parsing/coercion
        return cls(values, name=name, freq=freq, tz=tz,
                   dtype=dtype, **kwargs).values
    elif not is_datetime64_dtype(values):
        values = _ensure_int64(values).view(_NS_DTYPE)

    # fast path: allocate without re-running __new__ validation
    result = object.__new__(cls)
    result._data = values
    result.name = name
    result.offset = freq
    result.tz = timezones.maybe_get_tz(tz)
    result._reset_identity()
    return result
@property
def tzinfo(self):
    """
    Alias for tz attribute
    """
    return self.tz

@cache_readonly
def _timezone(self):
    """ Comparable timezone both for pytz / dateutil"""
    return timezones.get_timezone(self.tzinfo)

def _has_same_tz(self, other):
    """Return True when *other* carries the same timezone as this index."""
    zzone = self._timezone

    # vzone shouldn't be None if value is non-datetime like
    if isinstance(other, np.datetime64):
        # convert to Timestamp as np.datetime64 doesn't have tz attr
        other = Timestamp(other)
    vzone = timezones.get_timezone(getattr(other, 'tzinfo', '__no_tz__'))
    return zzone == vzone
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
                  name=None):
    """Slice a per-offset cached DatetimeIndex (spanning _CACHE_START to
    _CACHE_END) instead of regenerating the range from scratch."""
    if start is None and end is None:
        # I somewhat believe this should never be raised externally
        raise TypeError('Must specify either start or end.')
    if start is not None:
        start = Timestamp(start)
    if end is not None:
        end = Timestamp(end)
    if (start is None or end is None) and periods is None:
        raise TypeError(
            'Must either specify period or provide both start and end.')

    if offset is None:
        # This can't happen with external-facing code
        raise TypeError('Must provide offset.')

    drc = _daterange_cache
    if offset not in _daterange_cache:
        # first use of this offset: build and memoize the full range
        xdr = generate_range(offset=offset, start=_CACHE_START,
                             end=_CACHE_END)

        arr = tools.to_datetime(list(xdr), box=False)

        cachedRange = DatetimeIndex._simple_new(arr)
        cachedRange.offset = offset
        cachedRange.tz = None
        cachedRange.name = None
        drc[offset] = cachedRange
    else:
        cachedRange = drc[offset]

    if start is None:
        # anchor on end and count back *periods* steps
        if not isinstance(end, Timestamp):
            raise AssertionError('end must be an instance of Timestamp')

        end = offset.rollback(end)

        endLoc = cachedRange.get_loc(end) + 1
        startLoc = endLoc - periods
    elif end is None:
        # anchor on start and count forward *periods* steps
        if not isinstance(start, Timestamp):
            raise AssertionError('start must be an instance of Timestamp')

        start = offset.rollforward(start)

        startLoc = cachedRange.get_loc(start)
        endLoc = startLoc + periods
    else:
        # both endpoints given: snap each onto the offset grid
        if not offset.onOffset(start):
            start = offset.rollforward(start)

        if not offset.onOffset(end):
            end = offset.rollback(end)

        startLoc = cachedRange.get_loc(start)
        endLoc = cachedRange.get_loc(end) + 1

    indexSlice = cachedRange[startLoc:endLoc]
    indexSlice.name = name
    indexSlice.offset = offset

    return indexSlice
def _mpl_repr(self):
    # how to represent ourselves to matplotlib
    return libts.ints_to_pydatetime(self.asi8, self.tz)

@cache_readonly
def _is_dates_only(self):
    # True when no entry carries a time-of-day component (enables the
    # short date-only repr)
    from pandas.io.formats.format import _is_dates_only
    return _is_dates_only(self.values)

@property
def _formatter_func(self):
    """Return a callable formatting one value for repr output."""
    from pandas.io.formats.format import _get_format_datetime64
    formatter = _get_format_datetime64(is_dates_only=self._is_dates_only)
    return lambda x: "'%s'" % formatter(x, tz=self.tz)
def __reduce__(self):
    # we use a special reduce here because we need
    # to simply set the .tz (and not reinterpret it)
    d = dict(data=self._data)
    d.update(self._get_attributes_dict())
    return _new_DatetimeIndex, (self.__class__, d), None
def __setstate__(self, state):
    """Necessary for making this object picklable"""
    if isinstance(state, dict):
        super(DatetimeIndex, self).__setstate__(state)

    elif isinstance(state, tuple):

        # < 0.15 compat
        if len(state) == 2:
            nd_state, own_state = state
            data = np.empty(nd_state[1], dtype=nd_state[2])
            np.ndarray.__setstate__(data, nd_state)
            self.name = own_state[0]
            self.offset = own_state[1]
            self.tz = own_state[2]

            # provide numpy < 1.7 compat
            if nd_state[2] == 'M8[us]':
                new_state = np.ndarray.__reduce__(data.astype('M8[ns]'))
                np.ndarray.__setstate__(data, new_state[2])

        else:  # pragma: no cover
            data = np.empty(state)
            np.ndarray.__setstate__(data, state)

        self._data = data
        self._reset_identity()

    else:
        raise Exception("invalid pickle state")
# legacy alias used by older pickles
_unpickle_compat = __setstate__
def _add_datelike(self, other):
    """Adding a datetime-like to a DatetimeIndex is only defined for
    NaT (which propagates); any other datetime addition is an error."""
    if other is not libts.NaT:
        raise TypeError("cannot add {0} and {1}"
                        .format(type(self).__name__,
                                type(other).__name__))
    return self._nat_new(box=True)
def _sub_datelike(self, other):
    # subtract a datetime from myself, yielding a TimedeltaIndex
    from pandas import TimedeltaIndex
    if isinstance(other, DatetimeIndex):
        # require tz compat
        if not self._has_same_tz(other):
            raise TypeError("DatetimeIndex subtraction must have the same "
                            "timezones or no timezones")
        result = self._sub_datelike_dti(other)
    elif isinstance(other, (datetime, np.datetime64)):
        other = Timestamp(other)
        if other is libts.NaT:
            # NaT propagates: every difference becomes NaT
            result = self._nat_new(box=False)
        # require tz compat
        elif not self._has_same_tz(other):
            raise TypeError("Timestamp subtraction must have the same "
                            "timezones or no timezones")
        else:
            # overflow-checked i8 subtraction; NaT slots re-masked after
            i8 = self.asi8
            result = checked_add_with_arr(i8, -other.value,
                                          arr_mask=self._isnan)
            result = self._maybe_mask_results(result,
                                              fill_value=libts.iNaT)
    else:
        raise TypeError("cannot subtract DatetimeIndex and {typ}"
                        .format(typ=type(other).__name__))
    return TimedeltaIndex(result, name=self.name, copy=False)
def _sub_datelike_dti(self, other):
    """subtraction of two DatetimeIndexes"""
    if len(self) != len(other):
        raise ValueError("cannot add indices of unequal length")

    diff = self.asi8 - other.asi8
    if self.hasnans or other.hasnans:
        # any NaT on either side makes the difference NaT
        diff[self._isnan | other._isnan] = libts.iNaT
    return diff.view('i8')
def _maybe_update_attributes(self, attrs):
    """ Update Index attributes (e.g. freq) depending on op """
    # a known freq cannot be assumed to survive the op: ask for re-inference
    if attrs.get('freq', None) is not None:
        attrs['freq'] = 'infer'
    return attrs
def _add_delta(self, delta):
    """Add a timedelta-like scalar, TimedeltaIndex or DateOffset."""
    from pandas import TimedeltaIndex
    name = self.name

    if isinstance(delta, (Tick, timedelta, np.timedelta64)):
        new_values = self._add_delta_td(delta)
    elif isinstance(delta, TimedeltaIndex):
        new_values = self._add_delta_tdi(delta)
        # update name when delta is Index
        name = com._maybe_match_name(self, delta)
    elif isinstance(delta, DateOffset):
        new_values = self._add_offset(delta).asi8
    else:
        # fall back to elementwise object addition
        new_values = self.astype('O') + delta

    tz = 'UTC' if self.tz is not None else None
    result = DatetimeIndex(new_values, tz=tz, name=name, freq='infer')
    if self.tz is not None and self.tz is not utc:
        # i8 arithmetic was done in UTC; convert back to the original zone
        result = result.tz_convert(self.tz)
    return result
def _add_offset(self, offset):
    """Apply a DateOffset elementwise, vectorized when the offset
    supports it; otherwise fall back to slow object-dtype addition."""
    try:
        if self.tz is not None:
            # apply in local wall time, then re-localize
            values = self.tz_localize(None)
        else:
            values = self
        result = offset.apply_index(values)
        if self.tz is not None:
            result = result.tz_localize(self.tz)
        return result

    except NotImplementedError:
        warnings.warn("Non-vectorized DateOffset being applied to Series "
                      "or DatetimeIndex", PerformanceWarning)
        return self.astype('O') + offset
def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs):
    """Format values as strings for display; NaT rendered as *na_rep*."""
    from pandas.io.formats.format import _get_format_datetime64_from_values
    format = _get_format_datetime64_from_values(self, date_format)

    return libts.format_array_from_datetime(self.asi8,
                                            tz=self.tz,
                                            format=format,
                                            na_rep=na_rep)

def to_datetime(self, dayfirst=False):
    # NOTE(review): *dayfirst* is accepted for API compatibility but
    # is unused here -- the method just returns a copy of the index
    return self.copy()

@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
    dtype = pandas_dtype(dtype)
    if is_object_dtype(dtype):
        return self.asobject
    elif is_integer_dtype(dtype):
        # expose the raw i8 (nanosecond) values
        return Index(self.values.astype('i8', copy=copy), name=self.name,
                     dtype='i8')
    elif is_datetime64_ns_dtype(dtype):
        if self.tz is not None:
            # drop the timezone, keeping UTC wall times
            return self.tz_convert('UTC').tz_localize(None)
        elif copy is True:
            return self.copy()
        return self
    elif is_string_dtype(dtype):
        return Index(self.format(), name=self.name, dtype=object)
    elif is_period_dtype(dtype):
        return self.to_period(freq=dtype.freq)
    raise TypeError('Cannot cast DatetimeIndex to dtype %s' % dtype)

def _get_time_micros(self):
    # microseconds-since-midnight per entry, in local wall time
    values = self.asi8
    if self.tz is not None and self.tz is not utc:
        values = self._local_timestamps()
    return fields.get_time_micros(values)
def to_series(self, keep_tz=False):
    """
    Create a Series with both index and values equal to the index keys
    useful with map for returning an indexer based on an index

    Parameters
    ----------
    keep_tz : optional, defaults False.
        return the data keeping the timezone.

        If keep_tz is True:

          If the timezone is not set, the resulting
          Series will have a datetime64[ns] dtype.

          Otherwise the Series will have an datetime64[ns, tz] dtype; the
          tz will be preserved.

        If keep_tz is False:

          Series will have a datetime64[ns] dtype. TZ aware
          objects will have the tz removed.

    Returns
    -------
    Series
    """
    from pandas import Series
    return Series(self._to_embed(keep_tz),
                  index=self._shallow_copy(),
                  name=self.name)

def _to_embed(self, keep_tz=False, dtype=None):
    """
    return an array repr of this object, potentially casting to object

    This is for internal compat
    """
    if dtype is not None:
        # cast first, then re-enter with the requested tz handling
        return self.astype(dtype)._to_embed(keep_tz=keep_tz)

    if keep_tz and self.tz is not None:
        # preserve the tz & copy
        return self.copy(deep=True)

    return self.values.copy()

def to_pydatetime(self):
    """
    Return DatetimeIndex as object ndarray of datetime.datetime objects

    Returns
    -------
    datetimes : ndarray
    """
    return libts.ints_to_pydatetime(self.asi8, tz=self.tz)
def to_period(self, freq=None):
    """
    Cast to PeriodIndex at a particular frequency
    """
    from pandas.core.indexes.period import PeriodIndex

    if freq is None:
        # fall back to this index's own (or inferred) frequency
        freq = self.freqstr or self.inferred_freq

        if freq is None:
            msg = ("You must pass a freq argument as "
                   "current index has none.")
            raise ValueError(msg)

        freq = get_period_alias(freq)

    return PeriodIndex(self.values, name=self.name, freq=freq, tz=self.tz)
def snap(self, freq='S'):
    """
    Snap time stamps to nearest occurring frequency
    """
    # Superdumb, punting on any optimizing
    freq = to_offset(freq)

    snapped = np.empty(len(self), dtype=_NS_DTYPE)

    for idx, stamp in enumerate(self):
        rounded = stamp
        if not freq.onOffset(stamp):
            # pick whichever grid point (before/after) is closer
            prev_pt = freq.rollback(stamp)
            next_pt = freq.rollforward(stamp)
            rounded = (prev_pt if abs(stamp - prev_pt) < abs(next_pt - stamp)
                       else next_pt)
        snapped[idx] = rounded

    # we know it conforms; skip check
    return DatetimeIndex(snapped, freq=freq, verify_integrity=False)
def union(self, other):
    """
    Specialized union for DatetimeIndex objects. If combining
    overlapping ranges with the same DateOffset, this will be much
    faster than Index.union

    Parameters
    ----------
    other : DatetimeIndex or array-like

    Returns
    -------
    y : Index or DatetimeIndex
    """
    self._assert_can_do_setop(other)
    if not isinstance(other, DatetimeIndex):
        try:
            other = DatetimeIndex(other)
        except TypeError:
            pass

    this, other = self._maybe_utc_convert(other)

    if this._can_fast_union(other):
        return this._fast_union(other)
    else:
        result = Index.union(this, other)
        if isinstance(result, DatetimeIndex):
            result.tz = this.tz
            if (result.freq is None and
                    (this.freq is not None or other.freq is not None)):
                # freq was lost by the generic union; try to re-infer it
                result.offset = to_offset(result.inferred_freq)
        return result

def to_perioddelta(self, freq):
    """
    Calculates TimedeltaIndex of difference between index
    values and index converted to PeriodIndex at specified
    freq. Used for vectorized offsets

    Parameters
    ----------
    freq : Period frequency

    Returns
    -------
    y : TimedeltaIndex
    """
    return to_timedelta(self.asi8 - self.to_period(freq)
                        .to_timestamp().asi8)

def union_many(self, others):
    """
    A bit of a hack to accelerate unioning a collection of indexes
    """
    this = self

    for other in others:
        if not isinstance(this, DatetimeIndex):
            # accumulated result degenerated to a plain Index
            this = Index.union(this, other)
            continue

        if not isinstance(other, DatetimeIndex):
            try:
                other = DatetimeIndex(other)
            except TypeError:
                pass

        this, other = this._maybe_utc_convert(other)

        if this._can_fast_union(other):
            this = this._fast_union(other)
        else:
            # generic union drops the tz; restore it afterwards
            tz = this.tz
            this = Index.union(this, other)
            if isinstance(this, DatetimeIndex):
                this.tz = tz

    if this.freq is None:
        this.offset = to_offset(this.inferred_freq)
    return this

def join(self, other, how='left', level=None, return_indexers=False,
         sort=False):
    """
    See Index.join
    """
    if (not isinstance(other, DatetimeIndex) and len(other) > 0 and
            other.inferred_type not in ('floating', 'mixed-integer',
                                        'mixed-integer-float', 'mixed')):
        try:
            other = DatetimeIndex(other)
        except (TypeError, ValueError):
            pass

    this, other = self._maybe_utc_convert(other)
    return Index.join(this, other, how=how, level=level,
                      return_indexers=return_indexers, sort=sort)
def _maybe_utc_convert(self, other):
    """Return (self, other), both converted to UTC when their
    timezones differ; raises on mixing tz-naive and tz-aware."""
    this = self

    if isinstance(other, DatetimeIndex):
        if self.tz is not None:
            if other.tz is None:
                raise TypeError('Cannot join tz-naive with tz-aware '
                                'DatetimeIndex')
        elif other.tz is not None:
            raise TypeError('Cannot join tz-naive with tz-aware '
                            'DatetimeIndex')

        if self.tz != other.tz:
            this = self.tz_convert('UTC')
            other = other.tz_convert('UTC')
    return this, other
def _wrap_joined_index(self, joined, other):
    """Box join output: shallow-copy when both sides share an offset and
    can fast-union, otherwise rebuild via _simple_new."""
    name = self.name if self.name == other.name else None
    same_regular_range = (isinstance(other, DatetimeIndex) and
                          self.offset == other.offset and
                          self._can_fast_union(other))
    if same_regular_range:
        result = self._shallow_copy(joined)
        result.name = name
        return result
    tz = getattr(other, 'tz', None)
    return self._simple_new(joined, name, tz=tz)
def _can_fast_union(self, other):
    """Return True when both indexes are monotonic regular ranges with
    the same offset that overlap or adjoin (enables cheap union)."""
    if not isinstance(other, DatetimeIndex):
        return False

    offset = self.offset

    if offset is None or offset != other.offset:
        return False

    if not self.is_monotonic or not other.is_monotonic:
        return False

    if len(self) == 0 or len(other) == 0:
        return True

    # to make our life easier, "sort" the two ranges
    if self[0] <= other[0]:
        left, right = self, other
    else:
        left, right = other, self

    right_start = right[0]
    left_end = left[-1]

    # Only need to "adjoin", not overlap
    try:
        return (right_start == left_end + offset) or right_start in left
    except (ValueError):
        # if we are comparing an offset that does not propagate timezones
        # this will raise
        return False

def _fast_union(self, other):
    """Union two ranges already validated by _can_fast_union: either
    concatenate the tails or regenerate the combined range."""
    if len(other) == 0:
        return self.view(type(self))

    if len(self) == 0:
        return other.view(type(self))

    # to make our life easier, "sort" the two ranges
    if self[0] <= other[0]:
        left, right = self, other
    else:
        left, right = other, self

    left_start, left_end = left[0], left[-1]
    right_end = right[-1]

    if not self.offset._should_cache():
        # concatenate dates
        if left_end < right_end:
            loc = right.searchsorted(left_end, side='right')
            right_chunk = right.values[loc:]
            dates = _concat._concat_compat((left.values, right_chunk))
            return self._shallow_copy(dates)
        else:
            return left
    else:
        return type(self)(start=left_start,
                          end=max(left_end, right_end),
                          freq=left.offset)

def __iter__(self):
    """
    Return an iterator over the boxed values

    Returns
    -------
    Timestamps : ndarray
    """

    # convert in chunks of 10k for efficiency
    data = self.asi8
    length = len(self)
    chunksize = 10000
    chunks = int(length / chunksize) + 1
    for i in range(chunks):
        start_i = i * chunksize
        end_i = min((i + 1) * chunksize, length)
        converted = libts.ints_to_pydatetime(data[start_i:end_i],
                                             tz=self.tz, freq=self.freq,
                                             box="timestamp")
        for v in converted:
            yield v
def _wrap_union_result(self, other, result):
    """Box a raw union result, keeping the name only if both agree."""
    name = self.name if self.name == other.name else None
    if self.tz != other.tz:
        raise ValueError('Passed item and index have different timezone')
    # freq is dropped: a union of two indexes has no guaranteed frequency
    return self._simple_new(result, name=name, freq=None, tz=self.tz)
def intersection(self, other):
    """
    Specialized intersection for DatetimeIndex objects. May be much faster
    than Index.intersection

    Parameters
    ----------
    other : DatetimeIndex or array-like

    Returns
    -------
    y : Index or DatetimeIndex
    """
    self._assert_can_do_setop(other)
    if not isinstance(other, DatetimeIndex):
        # best-effort coercion; fall back to generic Index.intersection
        try:
            other = DatetimeIndex(other)
        except (TypeError, ValueError):
            pass
        result = Index.intersection(self, other)
        if isinstance(result, DatetimeIndex):
            if result.freq is None:
                # try to recover a regular frequency from the values
                result.offset = to_offset(result.inferred_freq)
        return result
    elif (other.offset is None or self.offset is None or
          other.offset != self.offset or
          not other.offset.isAnchored() or
          (not self.is_monotonic or not other.is_monotonic)):
        # offsets incompatible (or non-monotonic): generic path, then
        # re-wrap and re-infer the frequency
        result = Index.intersection(self, other)
        result = self._shallow_copy(result._values, name=result.name,
                                    tz=result.tz, freq=None)
        if result.freq is None:
            result.offset = to_offset(result.inferred_freq)
        return result
    # fast path: both regular with the same anchored offset and monotonic,
    # so the intersection is a contiguous slice
    if len(self) == 0:
        return self
    if len(other) == 0:
        return other
    # to make our life easier, "sort" the two ranges
    if self[0] <= other[0]:
        left, right = self, other
    else:
        left, right = other, self
    end = min(left[-1], right[-1])
    start = right[0]
    if end < start:
        # disjoint ranges -> empty index
        return type(self)(data=[])
    else:
        lslice = slice(*left.slice_locs(start, end))
        left_chunk = left.values[lslice]
        return self._shallow_copy(left_chunk)
def _parsed_string_to_bounds(self, reso, parsed):
    """
    Calculate datetime bounds for parsed time string and its resolution.

    Parameters
    ----------
    reso : Resolution
        Resolution provided by parsed string.
    parsed : datetime
        Datetime from parsed string.

    Returns
    -------
    lower, upper: pd.Timestamp
        Inclusive bounds covering the whole period named by the string.

    Raises
    ------
    KeyError
        If `reso` is not one of the supported resolutions.
    """
    if reso == 'year':
        return (Timestamp(datetime(parsed.year, 1, 1), tz=self.tz),
                Timestamp(datetime(parsed.year, 12, 31, 23,
                                   59, 59, 999999), tz=self.tz))
    elif reso == 'month':
        d = libts.monthrange(parsed.year, parsed.month)[1]
        return (Timestamp(datetime(parsed.year, parsed.month, 1),
                          tz=self.tz),
                Timestamp(datetime(parsed.year, parsed.month, d, 23,
                                   59, 59, 999999), tz=self.tz))
    elif reso == 'quarter':
        qe = (((parsed.month - 1) + 2) % 12) + 1  # two months ahead
        d = libts.monthrange(parsed.year, qe)[1]  # at end of month
        return (Timestamp(datetime(parsed.year, parsed.month, 1),
                          tz=self.tz),
                Timestamp(datetime(parsed.year, qe, d, 23, 59,
                                   59, 999999), tz=self.tz))
    elif reso == 'day':
        # for sub-month resolutions the upper bound is computed as
        # (start + one unit) - 1 nanosecond, i.e. the last instant inside
        st = datetime(parsed.year, parsed.month, parsed.day)
        return (Timestamp(st, tz=self.tz),
                Timestamp(Timestamp(st + offsets.Day(),
                                    tz=self.tz).value - 1))
    elif reso == 'hour':
        st = datetime(parsed.year, parsed.month, parsed.day,
                      hour=parsed.hour)
        return (Timestamp(st, tz=self.tz),
                Timestamp(Timestamp(st + offsets.Hour(),
                                    tz=self.tz).value - 1))
    elif reso == 'minute':
        st = datetime(parsed.year, parsed.month, parsed.day,
                      hour=parsed.hour, minute=parsed.minute)
        return (Timestamp(st, tz=self.tz),
                Timestamp(Timestamp(st + offsets.Minute(),
                                    tz=self.tz).value - 1))
    elif reso == 'second':
        st = datetime(parsed.year, parsed.month, parsed.day,
                      hour=parsed.hour, minute=parsed.minute,
                      second=parsed.second)
        return (Timestamp(st, tz=self.tz),
                Timestamp(Timestamp(st + offsets.Second(),
                                    tz=self.tz).value - 1))
    elif reso == 'microsecond':
        # microsecond is exact: lower == upper
        st = datetime(parsed.year, parsed.month, parsed.day,
                      parsed.hour, parsed.minute, parsed.second,
                      parsed.microsecond)
        return (Timestamp(st, tz=self.tz), Timestamp(st, tz=self.tz))
    else:
        raise KeyError
def _partial_date_slice(self, reso, parsed, use_lhs=True, use_rhs=True):
    """Resolve a partial date string (already parsed) to positions.

    Returns a slice for a monotonic index, otherwise an integer array of
    matching positions. Raises KeyError when partial slicing is not
    allowed for this resolution or the bounds fall outside the index.
    """
    is_monotonic = self.is_monotonic
    if (is_monotonic and reso in ['day', 'hour', 'minute', 'second'] and
            self._resolution >= Resolution.get_reso(reso)):
        # These resolution/monotonicity validations came from GH3931,
        # GH3452 and GH2369.
        # See also GH14826
        raise KeyError
    if reso == 'microsecond':
        # _partial_date_slice doesn't allow microsecond resolution, but
        # _parsed_string_to_bounds allows it.
        raise KeyError
    t1, t2 = self._parsed_string_to_bounds(reso, parsed)
    stamps = self.asi8
    if is_monotonic:
        # we are out of range
        if (len(stamps) and ((use_lhs and t1.value < stamps[0] and
                              t2.value < stamps[0]) or
                             ((use_rhs and t1.value > stamps[-1] and
                               t2.value > stamps[-1])))):
            raise KeyError
        # a monotonic (sorted) series can be sliced
        left = stamps.searchsorted(
            t1.value, side='left') if use_lhs else None
        right = stamps.searchsorted(
            t2.value, side='right') if use_rhs else None
        return slice(left, right)
    # non-monotonic: fall back to a boolean mask over all stamps
    lhs_mask = (stamps >= t1.value) if use_lhs else True
    rhs_mask = (stamps <= t2.value) if use_rhs else True
    # try to find a the dates
    return (lhs_mask & rhs_mask).nonzero()[0]
def _maybe_promote(self, other):
    """Promote a date-typed index to DatetimeIndex before a set operation."""
    if other.inferred_type == 'date':
        return self, DatetimeIndex(other)
    return self, other
def get_value(self, series, key):
    """
    Fast lookup of value from 1-dimensional ndarray. Only use this if you
    know what you're doing
    """
    if isinstance(key, datetime):
        # needed to localize naive datetimes
        if self.tz is not None:
            key = Timestamp(key, tz=self.tz)
        return self.get_value_maybe_box(series, key)
    if isinstance(key, time):
        # a bare time selects all values at that time of day
        locs = self.indexer_at_time(key)
        return series.take(locs)
    try:
        return _maybe_box(self, Index.get_value(self, series, key),
                          series, key)
    except KeyError:
        # not an exact label: try a partial string slice, then a
        # Timestamp-coerced lookup before giving up
        try:
            loc = self._get_string_slice(key)
            return series[loc]
        except (TypeError, ValueError, KeyError):
            pass
        try:
            return self.get_value_maybe_box(series, key)
        except (TypeError, ValueError, KeyError):
            raise KeyError(key)
def get_value_maybe_box(self, series, key):
    """Look up `key` in `series` via the engine, boxing the result."""
    # needed to localize naive datetimes
    if self.tz is not None:
        key = Timestamp(key, tz=self.tz)
    elif not isinstance(key, Timestamp):
        key = Timestamp(key)
    values = self._engine.get_value(_values_from_object(series),
                                    key, tz=self.tz)
    return _maybe_box(self, values, series, key)
def get_loc(self, key, method=None, tolerance=None):
    """
    Get integer location for requested label

    Returns
    -------
    loc : int
    """
    if tolerance is not None:
        # try converting tolerance now, so errors don't get swallowed by
        # the try/except clauses below
        tolerance = self._convert_tolerance(tolerance, np.asarray(key))
    if isinstance(key, datetime):
        # needed to localize naive datetimes
        key = Timestamp(key, tz=self.tz)
        return Index.get_loc(self, key, method, tolerance)
    if isinstance(key, time):
        if method is not None:
            raise NotImplementedError('cannot yet lookup inexact labels '
                                      'when key is a time object')
        return self.indexer_at_time(key)
    try:
        return Index.get_loc(self, key, method, tolerance)
    except (KeyError, ValueError, TypeError):
        # fallback 1: partial date string slicing
        try:
            return self._get_string_slice(key)
        except (TypeError, KeyError, ValueError):
            pass
        # fallback 2: coerce to Timestamp and retry the exact lookup
        try:
            stamp = Timestamp(key, tz=self.tz)
            return Index.get_loc(self, stamp, method, tolerance)
        except KeyError:
            raise KeyError(key)
        except ValueError as e:
            # list-like tolerance size must match target index size
            if 'list-like' in str(e):
                raise e
            raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
    """
    If label is a string, cast it to datetime according to resolution.

    Parameters
    ----------
    label : object
    side : {'left', 'right'}
    kind : {'ix', 'loc', 'getitem'}

    Returns
    -------
    label : object

    Notes
    -----
    Value of `side` parameter should be validated in caller.
    """
    assert kind in ['ix', 'loc', 'getitem', None]
    if is_float(label) or isinstance(label, time) or is_integer(label):
        # numbers and bare times are never valid datetime slice bounds
        self._invalid_indexer('slice', label)
    if isinstance(label, compat.string_types):
        freq = getattr(self, 'freqstr',
                       getattr(self, 'inferred_freq', None))
        _, parsed, reso = parse_time_string(label, freq)
        lower, upper = self._parsed_string_to_bounds(reso, parsed)
        # lower, upper form the half-open interval:
        # [parsed, parsed + 1 freq)
        # because label may be passed to searchsorted
        # the bounds need swapped if index is reverse sorted and has a
        # length > 1 (is_monotonic_decreasing gives True for empty
        # and length 1 index)
        if self._is_strictly_monotonic_decreasing and len(self) > 1:
            return upper if side == 'left' else lower
        return lower if side == 'left' else upper
    else:
        return label
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
    """Resolve a partial date string to positional indexer(s).

    Parses `key` against this index's (inferred) frequency and delegates
    to ``_partial_date_slice`` for the actual position lookup.
    """
    freq = getattr(self, 'freqstr', getattr(self, 'inferred_freq', None))
    _, parsed, reso = parse_time_string(key, freq)
    return self._partial_date_slice(reso, parsed,
                                    use_lhs=use_lhs, use_rhs=use_rhs)
def slice_indexer(self, start=None, end=None, step=None, kind=None):
    """
    Return indexer for specified label slice.
    Index.slice_indexer, customized to handle time slicing.

    In addition to functionality provided by Index.slice_indexer, does the
    following:

    - if both `start` and `end` are instances of `datetime.time`, it
      invokes `indexer_between_time`
    - if `start` and `end` are both either string or None perform
      value-based selection in non-monotonic cases.
    """
    # For historical reasons DatetimeIndex supports slices between two
    # instances of datetime.time as if it were applying a slice mask to
    # an array of (self.hour, self.minute, self.seconds, self.microsecond).
    if isinstance(start, time) and isinstance(end, time):
        if step is not None and step != 1:
            raise ValueError('Must have step size of 1 with time slices')
        return self.indexer_between_time(start, end)
    if isinstance(start, time) or isinstance(end, time):
        raise KeyError('Cannot mix time and non-time slice keys')
    try:
        return Index.slice_indexer(self, start, end, step, kind=kind)
    except KeyError:
        # For historical reasons DatetimeIndex by default supports
        # value-based partial (aka string) slices on non-monotonic arrays,
        # let's try that.
        if ((start is None or isinstance(start, compat.string_types)) and
                (end is None or isinstance(end, compat.string_types))):
            # build a boolean mask from the cast bounds
            mask = True
            if start is not None:
                start_casted = self._maybe_cast_slice_bound(
                    start, 'left', kind)
                mask = start_casted <= self
            if end is not None:
                end_casted = self._maybe_cast_slice_bound(
                    end, 'right', kind)
                mask = (self <= end_casted) & mask
            indexer = mask.nonzero()[0][::step]
            if len(indexer) == len(self):
                # everything selected: normalize to a full slice
                return slice(None)
            else:
                return indexer
        else:
            raise
@property
def freq(self):
    """get/set the frequency of the Index"""
    # `freq` is an alias for the underlying `offset` attribute
    return self.offset

@freq.setter
def freq(self, value):
    """get/set the frequency of the Index"""
    self.offset = value
# Datetime component accessors, generated via _field_accessor(name, field,
# docstring). Each yields an array of the named component per element.
year = _field_accessor('year', 'Y', "The year of the datetime")
month = _field_accessor('month', 'M',
                        "The month as January=1, December=12")
day = _field_accessor('day', 'D', "The days of the datetime")
hour = _field_accessor('hour', 'h', "The hours of the datetime")
minute = _field_accessor('minute', 'm', "The minutes of the datetime")
second = _field_accessor('second', 's', "The seconds of the datetime")
microsecond = _field_accessor('microsecond', 'us',
                              "The microseconds of the datetime")
nanosecond = _field_accessor('nanosecond', 'ns',
                             "The nanoseconds of the datetime")
weekofyear = _field_accessor('weekofyear', 'woy',
                             "The week ordinal of the year")
# `week` is a public alias of `weekofyear`
week = weekofyear
dayofweek = _field_accessor('dayofweek', 'dow',
                            "The day of the week with Monday=0, Sunday=6")
# `weekday` is a public alias of `dayofweek`
weekday = dayofweek
weekday_name = _field_accessor(
    'weekday_name',
    'weekday_name',
    "The name of day in a week (ex: Friday)\n\n.. versionadded:: 0.18.1")
dayofyear = _field_accessor('dayofyear', 'doy',
                            "The ordinal day of the year")
quarter = _field_accessor('quarter', 'q', "The quarter of the date")
days_in_month = _field_accessor(
    'days_in_month',
    'dim',
    "The number of days in the month")
# `daysinmonth` is a public alias of `days_in_month`
daysinmonth = days_in_month
# Boolean calendar-boundary accessors (frequency-aware)
is_month_start = _field_accessor(
    'is_month_start',
    'is_month_start',
    "Logical indicating if first day of month (defined by frequency)")
is_month_end = _field_accessor(
    'is_month_end',
    'is_month_end',
    "Logical indicating if last day of month (defined by frequency)")
is_quarter_start = _field_accessor(
    'is_quarter_start',
    'is_quarter_start',
    "Logical indicating if first day of quarter (defined by frequency)")
is_quarter_end = _field_accessor(
    'is_quarter_end',
    'is_quarter_end',
    "Logical indicating if last day of quarter (defined by frequency)")
is_year_start = _field_accessor(
    'is_year_start',
    'is_year_start',
    "Logical indicating if first day of year (defined by frequency)")
is_year_end = _field_accessor(
    'is_year_end',
    'is_year_end',
    "Logical indicating if last day of year (defined by frequency)")
is_leap_year = _field_accessor(
    'is_leap_year',
    'is_leap_year',
    "Logical indicating if the date belongs to a leap year")
@property
def time(self):
    """
    Returns numpy array of datetime.time. The time part of the Timestamps.
    """
    # map each boxed Timestamp to its .time(); NaT maps to np.nan
    return self._maybe_mask_results(libalgos.arrmap_object(
        self.asobject.values,
        lambda x: np.nan if x is libts.NaT else x.time()))
@property
def date(self):
    """
    Returns numpy array of python datetime.date objects (namely, the date
    part of Timestamps without timezone information).
    """
    # normalize to midnight first so the i8 values map cleanly to dates
    return libts.ints_to_pydatetime(self.normalize().asi8, box="date")
def normalize(self):
    """
    Return DatetimeIndex with times to midnight. Length is unaltered

    Returns
    -------
    normalized : DatetimeIndex
    """
    new_values = conversion.date_normalize(self.asi8, self.tz)
    # freq='infer': normalization may create or destroy a regular frequency
    return DatetimeIndex(new_values, freq='infer', name=self.name,
                         tz=self.tz)
@Substitution(klass='DatetimeIndex')
@Appender(_shared_docs['searchsorted'])
@deprecate_kwarg(old_arg_name='key', new_arg_name='value')
def searchsorted(self, value, side='left', sorter=None):
    # coerce the probe value(s) to datetime64[ns] before searching
    if isinstance(value, (np.ndarray, Index)):
        value = np.array(value, dtype=_NS_DTYPE, copy=False)
    else:
        value = _to_m8(value, tz=self.tz)
    # NOTE(review): `sorter` is accepted for API compatibility but is not
    # forwarded to the underlying searchsorted call
    return self.values.searchsorted(value, side=side)
def is_type_compatible(self, typ):
    """Return True if `typ` matches this index's inferred type or 'datetime'."""
    return typ in (self.inferred_type, 'datetime')
@property
def inferred_type(self):
    # b/c datetime is represented as microseconds since the epoch, make
    # sure we can't have ambiguous indexing
    return 'datetime64'
@cache_readonly
def dtype(self):
    # tz-naive -> plain datetime64[ns]; tz-aware -> extension dtype
    if self.tz is None:
        return _NS_DTYPE
    return DatetimeTZDtype('ns', self.tz)
@property
def is_all_dates(self):
    # every element of a DatetimeIndex is by construction a date
    return True
@cache_readonly
def is_normalized(self):
    """
    Returns True if all of the dates are at midnight ("no time")
    """
    return conversion.is_date_array_normalized(self.asi8, self.tz)
@cache_readonly
def _resolution(self):
    # finest populated time resolution of the stored stamps (cached)
    return libperiod.resolution(self.asi8, self.tz)
def insert(self, loc, item):
    """
    Make new Index inserting new item at location

    Parameters
    ----------
    loc : int
    item : object
        if not either a Python datetime or a numpy integer-like, returned
        Index dtype will be object rather than datetime.

    Returns
    -------
    new_index : Index
    """
    if is_scalar(item) and isna(item):
        # GH 18295: insert NaT as this index's NA value
        item = self._na_value
    freq = None
    if isinstance(item, (datetime, np.datetime64)):
        self._assert_can_do_op(item)
        if not self._has_same_tz(item):
            raise ValueError(
                'Passed item and index have different timezone')
        # check freq can be preserved on edge cases: inserting exactly one
        # step before the first or after the last element keeps the freq
        if self.size and self.freq is not None:
            if ((loc == 0 or loc == -len(self)) and
                    item + self.freq == self[0]):
                freq = self.freq
            elif (loc == len(self)) and item - self.freq == self[-1]:
                freq = self.freq
        item = _to_m8(item, tz=self.tz)
    try:
        new_dates = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
                                    self[loc:].asi8))
        if self.tz is not None:
            # asi8 is UTC-based; convert back to wall time for construction
            new_dates = conversion.tz_convert(new_dates, 'UTC', self.tz)
        return DatetimeIndex(new_dates, name=self.name, freq=freq,
                             tz=self.tz)
    except (AttributeError, TypeError):
        # fall back to object index
        if isinstance(item, compat.string_types):
            return self.asobject.insert(loc, item)
        raise TypeError(
            "cannot insert DatetimeIndex with incompatible label")
def delete(self, loc):
    """
    Make a new DatetimeIndex with passed location(s) deleted.

    Parameters
    ----------
    loc: int, slice or array of ints
        Indicate which sub-arrays to remove.

    Returns
    -------
    new_index : DatetimeIndex
    """
    new_dates = np.delete(self.asi8, loc)
    freq = None
    if is_integer(loc):
        # deleting the first or last element preserves the frequency
        if loc in (0, -len(self), -1, len(self) - 1):
            freq = self.freq
    else:
        if is_list_like(loc):
            loc = lib.maybe_indices_to_slice(
                _ensure_int64(np.array(loc)), len(self))
        # a unit-step slice anchored at either end also preserves freq
        if isinstance(loc, slice) and loc.step in (1, None):
            if (loc.start in (0, None) or loc.stop in (len(self), None)):
                freq = self.freq
    if self.tz is not None:
        # asi8 is UTC-based; convert back to wall time for construction
        new_dates = conversion.tz_convert(new_dates, 'UTC', self.tz)
    return DatetimeIndex(new_dates, name=self.name, freq=freq, tz=self.tz)
def tz_convert(self, tz):
    """
    Convert tz-aware DatetimeIndex from one time zone to another (using
    pytz/dateutil)

    Parameters
    ----------
    tz : string, pytz.timezone, dateutil.tz.tzfile or None
        Time zone for time. Corresponding timestamps would be converted to
        time zone of the TimeSeries.
        None will remove timezone holding UTC time.

    Returns
    -------
    normalized : DatetimeIndex

    Raises
    ------
    TypeError
        If DatetimeIndex is tz-naive.
    """
    tz = timezones.maybe_get_tz(tz)
    if self.tz is None:
        # tz naive, use tz_localize
        raise TypeError('Cannot convert tz-naive timestamps, use '
                        'tz_localize to localize')
    # No conversion since timestamps are all UTC to begin with
    return self._shallow_copy(tz=tz)
@deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
                 mapping={True: 'infer', False: 'raise'})
def tz_localize(self, tz, ambiguous='raise', errors='raise'):
    """
    Localize tz-naive DatetimeIndex to given time zone (using
    pytz/dateutil), or remove timezone from tz-aware DatetimeIndex

    Parameters
    ----------
    tz : string, pytz.timezone, dateutil.tz.tzfile or None
        Time zone for time. Corresponding timestamps would be converted to
        time zone of the TimeSeries.
        None will remove timezone holding local time.
    ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
        - 'infer' will attempt to infer fall dst-transition hours based on
          order
        - bool-ndarray where True signifies a DST time, False signifies a
          non-DST time (note that this flag is only applicable for
          ambiguous times)
        - 'NaT' will return NaT where there are ambiguous times
        - 'raise' will raise an AmbiguousTimeError if there are ambiguous
          times
    errors : 'raise', 'coerce', default 'raise'
        - 'raise' will raise a NonExistentTimeError if a timestamp is not
          valid in the specified timezone (e.g. due to a transition from
          or to DST time)
        - 'coerce' will return NaT if the timestamp can not be converted
          into the specified timezone

        .. versionadded:: 0.19.0

    infer_dst : boolean, default False
        .. deprecated:: 0.15.0
           Attempt to infer fall dst-transition hours based on order

    Returns
    -------
    localized : DatetimeIndex

    Raises
    ------
    TypeError
        If the DatetimeIndex is tz-aware and tz is not None.
    """
    if self.tz is not None:
        if tz is None:
            # de-localize: convert the UTC-based i8 values to wall time
            new_dates = conversion.tz_convert(self.asi8, 'UTC', self.tz)
        else:
            raise TypeError("Already tz-aware, use tz_convert to convert.")
    else:
        tz = timezones.maybe_get_tz(tz)
        # Convert to UTC
        new_dates = conversion.tz_localize_to_utc(self.asi8, tz,
                                                  ambiguous=ambiguous,
                                                  errors=errors)
    new_dates = new_dates.view(_NS_DTYPE)
    return self._shallow_copy(new_dates, tz=tz)
def indexer_at_time(self, time, asof=False):
    """
    Select values at particular time of day (e.g. 9:30AM)

    Parameters
    ----------
    time : datetime.time or string

    Returns
    -------
    values_at_time : TimeSeries
    """
    from dateutil.parser import parse

    if asof:
        raise NotImplementedError("'asof' argument is not supported")
    if isinstance(time, compat.string_types):
        time = parse(time).time()
    if time.tzinfo:
        # TODO
        raise NotImplementedError("argument 'time' with timezone info is "
                                  "not supported")
    # compare microseconds-since-midnight of each stamp against the target
    time_micros = self._get_time_micros()
    micros = _time_to_micros(time)
    return (micros == time_micros).nonzero()[0]
def indexer_between_time(self, start_time, end_time, include_start=True,
                         include_end=True):
    """
    Select values between particular times of day (e.g., 9:00-9:30AM).

    Return values of the index between two times. If start_time or
    end_time are strings then tseries.tools.to_time is used to convert to
    a time object.

    Parameters
    ----------
    start_time, end_time : datetime.time, str
        datetime.time or string in appropriate format ("%H:%M", "%H%M",
        "%I:%M%p", "%I%M%p", "%H:%M:%S", "%H%M%S", "%I:%M:%S%p",
        "%I%M%S%p")
    include_start : boolean, default True
    include_end : boolean, default True

    Returns
    -------
    values_between_time : TimeSeries
    """
    start_time = to_time(start_time)
    end_time = to_time(end_time)
    time_micros = self._get_time_micros()
    start_micros = _time_to_micros(start_time)
    end_micros = _time_to_micros(end_time)
    # choose strict/non-strict comparators per the inclusion flags
    if include_start and include_end:
        lop = rop = operator.le
    elif include_start:
        lop = operator.le
        rop = operator.lt
    elif include_end:
        lop = operator.lt
        rop = operator.le
    else:
        lop = rop = operator.lt
    # a window that wraps past midnight (start > end) is the OR of the
    # two half-windows instead of their intersection
    if start_time <= end_time:
        join_op = operator.and_
    else:
        join_op = operator.or_
    mask = join_op(lop(start_micros, time_micros),
                   rop(time_micros, end_micros))
    return mask.nonzero()[0]
def to_julian_date(self):
    """
    Convert DatetimeIndex to Float64Index of Julian Dates.
    0 Julian date is noon January 1, 4713 BC.
    http://en.wikipedia.org/wiki/Julian_day
    """
    # http://mysite.verizon.net/aesir_research/date/jdalg2.htm
    year = np.asarray(self.year)
    month = np.asarray(self.month)
    day = np.asarray(self.day)
    # Jan/Feb are treated as months 13/14 of the previous year so the
    # leap-day falls at the end of the counting year
    testarr = month < 3
    year[testarr] -= 1
    month[testarr] += 12
    return Float64Index(day +
                        np.fix((153 * month - 457) / 5) +
                        365 * year +
                        np.floor(year / 4) -
                        np.floor(year / 100) +
                        np.floor(year / 400) +
                        1721118.5 +
                        (self.hour +
                         self.minute / 60.0 +
                         self.second / 3600.0 +
                         self.microsecond / 3600.0 / 1e+6 +
                         self.nanosecond / 3600.0 / 1e+9
                         ) / 24.0)
# Attach generated arithmetic/comparison/logical method families to the
# class after its body is defined.
DatetimeIndex._add_comparison_methods()
DatetimeIndex._add_numeric_methods_disabled()
DatetimeIndex._add_logical_methods_disabled()
DatetimeIndex._add_datetimelike_methods()
def _generate_regular_range(start, end, periods, offset):
    """Generate a fixed-frequency DatetimeIndex from start/end/periods.

    Tick (fixed-duration) offsets use fast integer arithmetic on i8
    values; calendar offsets fall back to ``generate_range``.
    """
    if isinstance(offset, Tick):
        stride = offset.nanos
        if periods is None:
            b = Timestamp(start).value
            # cannot just use e = Timestamp(end) + 1 because arange breaks when
            # stride is too large, see GH10887
            e = (b + (Timestamp(end).value - b) // stride * stride +
                 stride // 2 + 1)
            # end.tz == start.tz by this point due to _generate implementation
            tz = start.tz
        elif start is not None:
            b = Timestamp(start).value
            e = b + np.int64(periods) * stride
            tz = start.tz
        elif end is not None:
            # count `periods` steps backwards from (and including) `end`
            e = Timestamp(end).value + stride
            b = e - np.int64(periods) * stride
            tz = end.tz
        else:
            raise ValueError("at least 'start' or 'end' should be specified "
                             "if a 'period' is given.")
        data = np.arange(b, e, stride, dtype=np.int64)
        data = DatetimeIndex._simple_new(data, None, tz=tz)
    else:
        # calendar offset: step through the range one offset at a time
        if isinstance(start, Timestamp):
            start = start.to_pydatetime()
        if isinstance(end, Timestamp):
            end = end.to_pydatetime()
        xdr = generate_range(start=start, end=end,
                             periods=periods, offset=offset)
        dates = list(xdr)
        # utc = len(dates) > 0 and dates[0].tzinfo is not None
        data = tools.to_datetime(dates)
    return data
def date_range(start=None, end=None, periods=None, freq='D', tz=None,
               normalize=False, name=None, closed=None, **kwargs):
    """
    Return a fixed frequency DatetimeIndex, with day (calendar) as the
    default frequency.

    Parameters
    ----------
    start : string or datetime-like, default None
        Left bound for generating dates
    end : string or datetime-like, default None
        Right bound for generating dates
    periods : integer, default None
        Number of periods to generate
    freq : string or DateOffset, default 'D' (calendar daily)
        Frequency strings can have multiples, e.g. '5H'
    tz : string, default None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Hong_Kong
    normalize : bool, default False
        Normalize start/end dates to midnight before generating date range
    name : string, default None
        Name of the resulting DatetimeIndex
    closed : string, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None)

    Notes
    -----
    Of the three parameters: ``start``, ``end``, and ``periods``, exactly
    two must be specified.

    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.

    Returns
    -------
    rng : DatetimeIndex
    """
    # This is a thin public wrapper: all range construction happens inside
    # the DatetimeIndex constructor.
    return DatetimeIndex(start=start, end=end, periods=periods, freq=freq,
                         tz=tz, normalize=normalize, name=name,
                         closed=closed, **kwargs)
def bdate_range(start=None, end=None, periods=None, freq='B', tz=None,
                normalize=True, name=None, weekmask=None, holidays=None,
                closed=None, **kwargs):
    """
    Return a fixed frequency DatetimeIndex, with business day as the default
    frequency

    Parameters
    ----------
    start : string or datetime-like, default None
        Left bound for generating dates
    end : string or datetime-like, default None
        Right bound for generating dates
    periods : integer, default None
        Number of periods to generate
    freq : string or DateOffset, default 'B' (business daily)
        Frequency strings can have multiples, e.g. '5H'
    tz : string or None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Beijing
    normalize : bool, default True
        Normalize start/end dates to midnight before generating date range
    name : string, default None
        Name of the resulting DatetimeIndex
    weekmask : string or None, default None
        Weekmask of valid business days, passed to ``numpy.busdaycalendar``,
        only used when custom frequency strings are passed. The default
        value None is equivalent to 'Mon Tue Wed Thu Fri'

        .. versionadded:: 0.21.0

    holidays : list-like or None, default None
        Dates to exclude from the set of valid business days, passed to
        ``numpy.busdaycalendar``, only used when custom frequency strings
        are passed

        .. versionadded:: 0.21.0

    closed : string, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None)

    Notes
    -----
    Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
    must be specified.

    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.

    Returns
    -------
    rng : DatetimeIndex
    """
    if is_string_like(freq) and freq.startswith('C'):
        # custom business frequency: build the offset from the calendar
        # arguments; KeyError/TypeError means `freq` is not a known prefix
        try:
            weekmask = weekmask or 'Mon Tue Wed Thu Fri'
            freq = prefix_mapping[freq](holidays=holidays, weekmask=weekmask)
        except (KeyError, TypeError):
            msg = 'invalid custom frequency string: {freq}'.format(freq=freq)
            raise ValueError(msg)
    elif holidays or weekmask:
        # calendar arguments are meaningless without a custom frequency
        msg = ('a custom frequency string is required when holidays or '
               'weekmask are passed, got frequency {freq}').format(freq=freq)
        raise ValueError(msg)
    return DatetimeIndex(start=start, end=end, periods=periods,
                         freq=freq, tz=tz, normalize=normalize, name=name,
                         closed=closed, **kwargs)
def cdate_range(start=None, end=None, periods=None, freq='C', tz=None,
                normalize=True, name=None, closed=None, **kwargs):
    """
    Return a fixed frequency DatetimeIndex, with CustomBusinessDay as the
    default frequency

    .. deprecated:: 0.21.0

    Parameters
    ----------
    start : string or datetime-like, default None
        Left bound for generating dates
    end : string or datetime-like, default None
        Right bound for generating dates
    periods : integer, default None
        Number of periods to generate
    freq : string or DateOffset, default 'C' (CustomBusinessDay)
        Frequency strings can have multiples, e.g. '5H'
    tz : string, default None
        Time zone name for returning localized DatetimeIndex, for example
        Asia/Beijing
    normalize : bool, default True
        Normalize start/end dates to midnight before generating date range
    name : string, default None
        Name of the resulting DatetimeIndex
    weekmask : string, Default 'Mon Tue Wed Thu Fri'
        weekmask of valid business days, passed to ``numpy.busdaycalendar``
    holidays : list
        list/array of dates to exclude from the set of valid business days,
        passed to ``numpy.busdaycalendar``
    closed : string, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None)

    Notes
    -----
    Of the three parameters: ``start``, ``end``, and ``periods``, exactly two
    must be specified.

    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.

    Returns
    -------
    rng : DatetimeIndex
    """
    warnings.warn("cdate_range is deprecated and will be removed in a future "
                  "version, instead use pd.bdate_range(..., freq='{freq}')"
                  .format(freq=freq), FutureWarning, stacklevel=2)

    if freq == 'C':
        # default custom business day: pull the calendar options out of
        # kwargs so they are not forwarded to DatetimeIndex
        holidays = kwargs.pop('holidays', [])
        weekmask = kwargs.pop('weekmask', 'Mon Tue Wed Thu Fri')
        freq = CDay(holidays=holidays, weekmask=weekmask)
    return DatetimeIndex(start=start, end=end, periods=periods, freq=freq,
                         tz=tz, normalize=normalize, name=name,
                         closed=closed, **kwargs)
def _to_m8(key, tz=None):
    """
    Timestamp-like => dt64
    """
    if isinstance(key, Timestamp):
        stamp = key
    else:
        # this also converts strings
        stamp = Timestamp(key, tz=tz)
    return np.int64(conversion.pydt_to_i8(stamp)).view(_NS_DTYPE)
# Window of dates for which generated ranges may be cached, plus the cache
# itself (keyed elsewhere in this module).
_CACHE_START = Timestamp(datetime(1950, 1, 1))
_CACHE_END = Timestamp(datetime(2030, 1, 1))

_daterange_cache = {}
def _naive_in_cache_range(start, end):
    """Return True when both bounds are tz-naive and inside the cache window."""
    if start is None or end is None:
        return False
    if start.tzinfo is not None or end.tzinfo is not None:
        return False
    return _in_range(start, end, _CACHE_START, _CACHE_END)
def _in_range(start, end, rng_start, rng_end):
return start > rng_start and end < rng_end
def _use_cached_range(offset, _normalized, start, end):
    """Return True when a generated range for these bounds may be cached."""
    # the offset must opt into caching, the normalization setting must be
    # compatible, and both bounds must be naive and inside the cache window
    return (offset._should_cache() and
            not (offset._normalize_cache and not _normalized) and
            _naive_in_cache_range(start, end))
def _time_to_micros(time):
seconds = time.hour * 60 * 60 + 60 * time.minute + time.second
return 1000000 * seconds + time.microsecond
| bsd-3-clause |
btabibian/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 3 | 5427 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import itertools
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.exceptions import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
from sklearn.covariance import fast_mcd
# Shared fixtures: the iris data matrix, its first column as a 1-D array,
# and the dataset dimensions.
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
    # Tests the FastMCD algorithm implementation
    # Each call: (n_samples, n_features, n_outliers,
    #             tol_location, tol_covariance, tol_support)

    # Small data set
    # test without outliers (random independent normal data)
    launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
    # test with a contaminated data set (medium contamination)
    launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
    # test with a contaminated data set (strong contamination)
    launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)

    # Medium data set
    launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)

    # Large data set
    launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)

    # 1D data set
    launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def test_fast_mcd_on_invalid_input():
    # fast_mcd must reject 1-D input with an informative error message.
    bad_X = np.arange(100)
    assert_raise_message(ValueError, 'Got X with X.ndim=1',
                         fast_mcd, bad_X)
def test_mcd_class_on_invalid_input():
    # The MinCovDet estimator must likewise reject 1-D input at fit time.
    bad_X = np.arange(100)
    estimator = MinCovDet()
    assert_raise_message(ValueError, 'Got X with X.ndim=1',
                         estimator.fit, bad_X)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
                          tol_support):
    """Fit MinCovDet on synthetic contaminated data and check estimates.

    Compares the robust location/covariance against empirical estimates on
    the uncontaminated points, within the given tolerances, and checks the
    support size.
    """
    rand_gen = np.random.RandomState(0)
    data = rand_gen.randn(n_samples, n_features)
    # add some outliers
    outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
    outliers_offset = 10. * \
        (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
    data[outliers_index] += outliers_offset
    inliers_mask = np.ones(n_samples).astype(bool)
    inliers_mask[outliers_index] = False

    pure_data = data[inliers_mask]
    # compute MCD by fitting an object
    mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
    T = mcd_fit.location_
    S = mcd_fit.covariance_
    H = mcd_fit.support_
    # compare with the estimates learnt from the inliers
    error_location = np.mean((pure_data.mean(0) - T) ** 2)
    assert(error_location < tol_loc)
    error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
    assert(error_cov < tol_cov)
    assert(np.sum(H) >= tol_support)
    # mahalanobis() on the training data must reproduce the stored dist_
    assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
    # Check that the code does not break with X.shape = (3, 1)
    # (i.e. n_support = n_samples)
    rnd = np.random.RandomState(0)
    X = rnd.normal(size=(3, 1))
    mcd = MinCovDet()
    # fitting must simply not raise
    mcd.fit(X)
def test_mcd_issue3367():
    """Regression test for issue #3367: MCD must complete when the
    covariance matrix is singular (all points lie in a plane)."""
    # Check that MCD completes when the covariance matrix is singular
    # i.e. one of the rows and columns are all zeros
    rand_gen = np.random.RandomState(0)
    # Think of these as the values for X and Y -> 10 values between -5 and 5
    data_values = np.linspace(-5, 5, 10).tolist()
    # Get the cartesian product of all possible coordinate pairs from above set
    data = np.array(list(itertools.product(data_values, data_values)))
    # Add a third column that's all zeros to make our data a set of point
    # within a plane, which means that the covariance matrix will be singular
    data = np.hstack((data, np.zeros((data.shape[0], 1))))
    # The below line of code should raise an exception if the covariance matrix
    # is singular. As a further test, since we have points in XYZ, the
    # principle components (Eigenvectors) of these directly relate to the
    # geometry of the points. Since it's a plane, we should be able to test
    # that the Eigenvector that corresponds to the smallest Eigenvalue is the
    # plane normal, specifically [0, 0, 1], since everything is in the XY plane
    # (as I've set it up above). To do this one would start by:
    #
    #     evals, evecs = np.linalg.eigh(mcd_fit.covariance_)
    #     normal = evecs[:, np.argmin(evals)]
    #
    # After which we need to assert that our `normal` is equal to [0, 0, 1].
    # Do note that there is floating point error associated with this, so it's
    # best to subtract the two and then compare some small tolerance (e.g.
    # 1e-12).
    MinCovDet(random_state=rand_gen).fit(data)
def test_outlier_detection():
    """EllipticEnvelope: unfitted use must raise NotFittedError, and the
    fitted decision values must be consistent with the Mahalanobis
    distances and the contamination-based threshold."""
    rnd = np.random.RandomState(0)
    X = rnd.randn(100, 10)
    clf = EllipticEnvelope(contamination=0.1)
    # Using the model before fit must raise NotFittedError.
    assert_raises(NotFittedError, clf.predict, X)
    assert_raises(NotFittedError, clf.decision_function, X)
    clf.fit(X)
    y_pred = clf.predict(X)
    decision = clf.decision_function(X, raw_values=True)
    decision_transformed = clf.decision_function(X, raw_values=False)
    # Raw decision values are exactly the Mahalanobis distances.
    assert_array_almost_equal(
        decision, clf.mahalanobis(X))
    assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
    # Accuracy against an all-inlier target equals the inlier fraction.
    assert_almost_equal(clf.score(X, np.ones(100)),
                        (100 - y_pred[y_pred == -1].size) / 100.)
    # Transformed decision is negative exactly where outliers are predicted.
    assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
abhishekkrthakur/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 247 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
#          Andreas Mueller <amueller@ais.uni-bonn.de>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
# -1 marks unlabeled points; exactly one point of each circle is labeled
# (shuffle=False keeps the outer circle first, the inner circle last)
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
# NOTE: alpha=1.0 — presumably no clamping to the initial labels;
# confirm against the LabelSpreading documentation.
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
# Left panel: the raw data with the two labeled seed points
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
                               X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
                               X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
           ('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
           numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
# Right panel: the label inferred for every point by label spreading
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
           'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
ChanderG/scikit-learn | sklearn/tests/test_cross_validation.py | 70 | 41943 | """Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy import stats
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer, LabelBinarizer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
    """Dummy classifier to test the cross-validation"""
    def __init__(self, a=0, allow_nd=False):
        # ``a`` only parametrizes the constant returned by ``score``;
        # ``allow_nd`` lets ``fit``/``predict`` accept >2-d inputs.
        self.a = a
        self.allow_nd = allow_nd
    def fit(self, X, Y=None, sample_weight=None, class_prior=None,
            sparse_sample_weight=None, sparse_param=None, dummy_int=None,
            dummy_str=None, dummy_obj=None, callback=None):
        """The dummy arguments are to test that this fit function can
        accept non-array arguments through cross-validation, such as:
            - int
            - str (this is actually array-like)
            - object
            - function
        """
        self.dummy_int = dummy_int
        self.dummy_str = dummy_str
        self.dummy_obj = dummy_obj
        if callback is not None:
            # Lets a test inspect the classifier state at fit time.
            callback(self)
        if self.allow_nd:
            X = X.reshape(len(X), -1)
        if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot be d')
        if sample_weight is not None:
            assert_true(sample_weight.shape[0] == X.shape[0],
                        'MockClassifier extra fit_param sample_weight.shape[0]'
                        ' is {0}, should be {1}'.format(sample_weight.shape[0],
                                                        X.shape[0]))
        if class_prior is not None:
            # NOTE(review): this reads the module-level ``y`` fixture,
            # not the ``Y`` argument — presumably on purpose, since the
            # fit_param is built from the full dataset; confirm if this
            # class is ever reused with a different target.
            assert_true(class_prior.shape[0] == len(np.unique(y)),
                        'MockClassifier extra fit_param class_prior.shape[0]'
                        ' is {0}, should be {1}'.format(class_prior.shape[0],
                                                        len(np.unique(y))))
        if sparse_sample_weight is not None:
            fmt = ('MockClassifier extra fit_param sparse_sample_weight'
                   '.shape[0] is {0}, should be {1}')
            assert_true(sparse_sample_weight.shape[0] == X.shape[0],
                        fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
        if sparse_param is not None:
            fmt = ('MockClassifier extra fit_param sparse_param.shape '
                   'is ({0}, {1}), should be ({2}, {3})')
            # Compared against the module-level ``P_sparse`` fixture.
            assert_true(sparse_param.shape == P_sparse.shape,
                        fmt.format(sparse_param.shape[0],
                                   sparse_param.shape[1],
                                   P_sparse.shape[0], P_sparse.shape[1]))
        return self
    def predict(self, T):
        # Deterministic "prediction": echo the first feature column.
        if self.allow_nd:
            T = T.reshape(len(T), -1)
        return T[:, 0]
    def score(self, X=None, Y=None):
        # Constant score in (0, 1] controlled by ``a``; ignores the data.
        return 1. / (1 + np.abs(self.a))
    def get_params(self, deep=False):
        return {'a': self.a, 'allow_nd': self.allow_nd}
# Shared fixtures used by the MockClassifier-based tests below.
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
# Single non-zero sample weight, stored as a sparse (10, 1) column.
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
                      shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
# Balanced 5-class target: [0, 0, 1, 1, 2, 2, 3, 3, 4, 4].
y = np.arange(10) // 2
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
    """Assert that *train* and *test* form a disjoint split and, when
    *n_samples* is given, that together they cover every index."""
    # Python sets give much more informative assertion failure messages.
    train_set = set(train)
    test_set = set(test)
    # The two sides of a split must never share a sample.
    assert_equal(train_set.intersection(test_set), set())
    if n_samples is not None:
        # Train and test together must cover the whole index range.
        assert_equal(train_set.union(test_set), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    """Check that *cv* yields the expected number of valid splits whose
    test folds jointly cover every sample index."""
    if expected_n_iter is not None:
        assert_equal(len(cv), expected_n_iter)
    else:
        expected_n_iter = len(cv)
    seen_test_samples = set()
    n_iterations = 0
    for train, test in cv:
        check_valid_split(train, test, n_samples=n_samples)
        n_iterations += 1
        seen_test_samples.update(test)
    # Every advertised iteration must actually have been produced...
    assert_equal(n_iterations, expected_n_iter)
    if n_samples is not None:
        # ...and the union of test folds must span the full index range.
        assert_equal(seen_test_samples, set(range(n_samples)))
def test_kfold_valueerrors():
    """KFold/StratifiedKFold must reject invalid n and n_folds values
    and warn on under-populated classes."""
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.KFold, 3, 4)
    # Check that a warning is raised if the least populated class has too few
    # members.
    y = [3, 3, -1, -1, 2]
    cv = assert_warns_message(Warning, "The least populated class",
                              cval.StratifiedKFold, y, 3)
    # Check that despite the warning the folds are still computed even
    # though all the classes are not necessarily represented at on each
    # side of the split at each split
    check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
    # Error when number of folds is <= 1
    assert_raises(ValueError, cval.KFold, 2, 0)
    assert_raises(ValueError, cval.KFold, 2, 1)
    assert_raises(ValueError, cval.StratifiedKFold, y, 0)
    assert_raises(ValueError, cval.StratifiedKFold, y, 1)
    # When n is not integer:
    assert_raises(ValueError, cval.KFold, 2.5, 2)
    # When n_folds is not integer:
    assert_raises(ValueError, cval.KFold, 5, 1.5)
    assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
    """KFold test folds must jointly return every sample index."""
    # Evenly divisible case.
    check_cv_coverage(cval.KFold(300, 3), expected_n_iter=3, n_samples=300)
    # Case where equal-sized folds are impossible.
    check_cv_coverage(cval.KFold(17, 3), expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
    """Without shuffling, KFold must preserve the sample ordering."""
    # Manually check that KFold preserves the data ordering on toy datasets
    splits = iter(cval.KFold(4, 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 1])
    assert_array_equal(train, [2, 3])
    train, test = next(splits)
    assert_array_equal(test, [2, 3])
    assert_array_equal(train, [0, 1])
    splits = iter(cval.KFold(5, 2))
    train, test = next(splits)
    # With 5 samples and 2 folds, the first test fold gets the extra sample.
    assert_array_equal(test, [0, 1, 2])
    assert_array_equal(train, [3, 4])
    train, test = next(splits)
    assert_array_equal(test, [3, 4])
    assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
    """Without shuffling, StratifiedKFold must keep the data ordering
    as stable as stratification allows."""
    # Manually check that StratifiedKFold preserves the data ordering as much
    # as possible on toy datasets in order to avoid hiding sample dependencies
    # when possible
    splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 2])
    assert_array_equal(train, [1, 3])
    train, test = next(splits)
    assert_array_equal(test, [1, 3])
    assert_array_equal(train, [0, 2])
    splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
    train, test = next(splits)
    assert_array_equal(test, [0, 1, 3, 4])
    assert_array_equal(train, [2, 5, 6])
    train, test = next(splits)
    assert_array_equal(test, [2, 5, 6])
    assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
    """StratifiedKFold must preserve the per-class ratios (10%/89%/1%)
    in every train and test fold, with and without shuffling."""
    # Check that stratified kfold preserves label ratios in individual splits
    # Repeat with shuffling turned off and on
    n_samples = 1000
    labels = np.array([4] * int(0.10 * n_samples) +
                      [0] * int(0.89 * n_samples) +
                      [1] * int(0.01 * n_samples))
    for shuffle in [False, True]:
        for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
            assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
                                2)
            assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
                                2)
            assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
                                2)
            assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
            assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
            assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
    """KFold fold sizes may differ by at most one sample and must sum
    to the total number of samples."""
    for n in range(11, 17):
        kf = cval.KFold(n, 5)
        fold_sizes = [len(test) for _, test in kf]
        assert_true(max(fold_sizes) - min(fold_sizes) <= 1)
        assert_equal(sum(fold_sizes), kf.n)
def test_stratifiedkfold_balance():
    """StratifiedKFold fold sizes may differ by at most one sample
    (when stratification is feasible), with and without shuffling."""
    labels = [0] * 3 + [1] * 14
    for shuffle in [False, True]:
        for i in range(11, 17):
            skf = cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
            fold_sizes = [len(test) for _, test in skf]
            assert_true(max(fold_sizes) - min(fold_sizes) <= 1)
            assert_equal(sum(fold_sizes), skf.n)
def test_shuffle_kfold():
    """Shuffled KFold must permute the indices while the test folds
    still jointly cover every sample exactly once."""
    # Check the indices are shuffled properly, and that all indices are
    # returned in the different test folds
    kf = cval.KFold(300, 3, shuffle=True, random_state=0)
    ind = np.arange(300)
    all_folds = None
    for train, test in kf:
        # NOTE(review): ``ind[train]`` has 200 entries while these
        # ``sorted_array`` ranges have 100/99; the shape-mismatched
        # ``!=`` only degrades to a scalar on old numpy, making the
        # assertions vacuous there and an error on modern numpy —
        # confirm intent.
        sorted_array = np.arange(100)
        assert_true(np.any(sorted_array != ind[train]))
        sorted_array = np.arange(101, 200)
        assert_true(np.any(sorted_array != ind[train]))
        sorted_array = np.arange(201, 300)
        assert_true(np.any(sorted_array != ind[train]))
        if all_folds is None:
            all_folds = ind[test].copy()
        else:
            all_folds = np.concatenate((all_folds, ind[test]))
    all_folds.sort()
    assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
    """Different random states must shuffle StratifiedKFold test folds
    differently while still covering every sample."""
    labels = [0] * 20 + [1] * 20
    kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
    kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
    for fold0, fold1 in zip(kf0, kf1):
        # fold[1] is the test side of the split.
        assert_true(set(fold0[1]) != set(fold1[1]))
    check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits():  # see #2372
    """Unshuffled (Stratified)KFold must expose sample dependencies in
    the digits dataset (lower score), while shuffled KFold hides them
    (overestimated score)."""
    # The digits samples are dependent: they are apparently grouped by authors
    # although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact be computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than than the non
    # shuffling variant (around 0.86).
    digits = load_digits()
    X, y = digits.data[:800], digits.target[:800]
    model = SVC(C=10, gamma=0.005)
    n = len(y)
    cv = cval.KFold(n, 5, shuffle=False)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)
    # Shuffling the data artificially breaks the dependency and hides the
    # overfitting of the model with regards to the writing style of the authors
    # by yielding a seriously overestimated score:
    cv = cval.KFold(n, 5, shuffle=True, random_state=0)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)
    cv = cval.KFold(n, 5, shuffle=True, random_state=1)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(mean_score, 0.95)
    # Similarly, StratifiedKFold should try to shuffle the data as little
    # as possible (while respecting the balanced class constraints)
    # and thus be able to detect the dependency by not overestimating
    # the CV score either. As the digits dataset is approximately balanced
    # the estimated mean score is close to the score measured with
    # non-shuffled KFold
    cv = cval.StratifiedKFold(y, 5)
    mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
    assert_greater(0.88, mean_score)
    assert_greater(mean_score, 0.85)
def test_shuffle_split():
    """Equivalent test_size specifications (float fraction, Python int,
    numpy int, every builtin integer type) must yield identical splits."""
    ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
    ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
    ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
    for typ in six.integer_types:
        ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
        for splits in zip(ss1, ss2, ss3, ss4):
            # Compare consecutive variants pairwise on both the train
            # (index 0) and test (index 1) side of each split.
            for side in (0, 1):
                arrays = [split[side] for split in splits]
                for left, right in zip(arrays[:-1], arrays[1:]):
                    assert_array_equal(left, right)
def test_stratified_shuffle_split_init():
    """StratifiedShuffleSplit must reject configurations where a class
    or a split side cannot be populated."""
    y = np.asarray([0, 1, 1, 1, 2, 2, 2])
    # Check that error is raised if there is a class with only one sample
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
    # Check that error is raised if the test set size is smaller than n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
    # Check that error is raised if the train set size is smaller than
    # n_classes
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
    y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
    # Check that errors are raised if there is not enough samples
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
    # Train size or test size too small
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
    assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
    """Each StratifiedShuffleSplit split must be disjoint, exhaustive,
    and approximately preserve the class proportions."""
    ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
          np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
          np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
          np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
          np.array([-1] * 800 + [1] * 50)
          ]
    for y in ys:
        sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
                                          random_state=0)
        for train, test in sss:
            assert_array_equal(np.unique(y[train]), np.unique(y[test]))
            # Checks if folds keep classes proportions
            p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
                       / float(len(y[train])))
            p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
                      / float(len(y[test])))
            assert_array_almost_equal(p_train, p_test, 1)
            assert_equal(y[train].size + y[test].size, y.size)
            assert_array_equal(np.lib.arraysetops.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    """Across many StratifiedShuffleSplit iterations, each index must be
    drawn with roughly equal (binomial) probability."""
    # Test the StratifiedShuffleSplit, indices are drawn with a
    # equal chance
    n_folds = 5
    n_iter = 1000
    def assert_counts_are_ok(idx_counts, p):
        # Here we test that the distribution of the counts
        # per index is close enough to a binomial
        # NOTE: ``n_splits`` is resolved lazily from the enclosing
        # scope; it is assigned in the loop below before this helper
        # is first called.
        threshold = 0.05 / n_splits
        bf = stats.binom(n_splits, p)
        for count in idx_counts:
            p = bf.pmf(count)
            assert_true(p > threshold,
                        "An index is not drawn with chance corresponding "
                        "to even draws")
    for n_samples in (6, 22):
        labels = np.array((n_samples // 2) * [0, 1])
        splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
                                             test_size=1. / n_folds,
                                             random_state=0)
        train_counts = [0] * n_samples
        test_counts = [0] * n_samples
        n_splits = 0
        for train, test in splits:
            n_splits += 1
            for counter, ids in [(train_counts, train), (test_counts, test)]:
                for id in ids:
                    counter[id] += 1
        assert_equal(n_splits, n_iter)
        # Sanity-check the last split and the splitter's bookkeeping.
        assert_equal(len(train), splits.n_train)
        assert_equal(len(test), splits.n_test)
        assert_equal(len(set(train).intersection(test)), 0)
        label_counts = np.unique(labels)
        assert_equal(splits.test_size, 1.0 / n_folds)
        assert_equal(splits.n_train + splits.n_test, len(labels))
        assert_equal(len(label_counts), 2)
        ex_test_p = float(splits.n_test) / n_samples
        ex_train_p = float(splits.n_train) / n_samples
        assert_counts_are_ok(train_counts, ex_train_p)
        assert_counts_are_ok(test_counts, ex_test_p)
def test_predefinedsplit_with_kfold_split():
    """PredefinedSplit built from KFold's fold ids must reproduce the
    exact same train/test partitions."""
    folds = -1 * np.ones(10)
    kf_train, kf_test = [], []
    # Record the KFold splits and mark each sample with its fold id.
    for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
        kf_train.append(train_ind)
        kf_test.append(test_ind)
        folds[test_ind] = i
    ps_train, ps_test = [], []
    for train_ind, test_ind in cval.PredefinedSplit(folds):
        ps_train.append(train_ind)
        ps_test.append(test_ind)
    assert_array_equal(ps_train, kf_train)
    assert_array_equal(ps_test, kf_test)
def test_leave_label_out_changing_labels():
    """Mutating the labels array after constructing LeaveOneLabelOut /
    LeavePLabelOut must not change the splits they yield."""
    labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
    labels_changing = np.array(labels, copy=True)
    lolo = cval.LeaveOneLabelOut(labels)
    lolo_changing = cval.LeaveOneLabelOut(labels_changing)
    lplo = cval.LeavePLabelOut(labels, p=2)
    lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
    # Mutate only after the CV objects have been built.
    labels_changing[:] = 0
    for unchanged_cv, mutated_cv in [(lolo, lolo_changing),
                                     (lplo, lplo_changing)]:
        for (train, test), (train_chan, test_chan) in zip(unchanged_cv,
                                                          mutated_cv):
            assert_array_equal(train, train_chan)
            assert_array_equal(test, test_chan)
def test_cross_val_score():
    """cross_val_score must handle dense/sparse X, multioutput y, list
    inputs, 3-d arrays, and reject unknown scoring strings."""
    clf = MockClassifier()
    for a in range(-10, 10):
        clf.a = a
        # Smoke test
        scores = cval.cross_val_score(clf, X, y)
        assert_array_equal(scores, clf.score(X, y))
        # test with multioutput y
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))
        scores = cval.cross_val_score(clf, X_sparse, y)
        assert_array_equal(scores, clf.score(X_sparse, y))
        # test with multioutput y (NOTE: exact duplicate of the check above)
        scores = cval.cross_val_score(clf, X_sparse, X)
        assert_array_equal(scores, clf.score(X_sparse, X))
    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
    clf = CheckingClassifier(check_y=list_check)
    scores = cval.cross_val_score(clf, X, y.tolist())
    # "sklearn" is not a valid scoring string
    assert_raises(ValueError, cval.cross_val_score, clf, X, y,
                  scoring="sklearn")
    # test with 3d X and
    X_3d = X[:, :, np.newaxis]
    clf = MockClassifier(allow_nd=True)
    scores = cval.cross_val_score(clf, X_3d, y)
    clf = MockClassifier(allow_nd=False)
    assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
    """cross_val_score must pass pandas (or mock) frame/series inputs
    through to the estimator without converting them."""
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        # pandas is optional; only the mock types are exercised then.
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
    """cross_val_score must accept boolean-mask CV folds and produce the
    same scores as the equivalent index-based folds."""
    svm = SVC(kernel="linear")
    iris = load_iris()
    X, y = iris.data, iris.target
    cv_indices = cval.KFold(len(y), 5)
    scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
    cv_indices = cval.KFold(len(y), 5)
    cv_masks = []
    for train, test in cv_indices:
        mask_train = np.zeros(len(y), dtype=np.bool)
        mask_test = np.zeros(len(y), dtype=np.bool)
        mask_train[train] = 1
        mask_test[test] = 1
        # Bug fix: append the boolean masks. Previously the integer
        # index arrays (train, test) were appended, so the mask code
        # path was never actually exercised by this test.
        cv_masks.append((mask_train, mask_test))
    scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
    assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
    """SVC with a precomputed linear kernel must score identically to
    SVC with kernel='linear'; invalid kernel matrices must raise."""
    # test for svm with precomputed kernel
    svm = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    linear_kernel = np.dot(X, X.T)
    score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
    svm = SVC(kernel="linear")
    score_linear = cval.cross_val_score(svm, X, y)
    assert_array_equal(score_precomputed, score_linear)
    # Error raised for non-square X
    svm = SVC(kernel="precomputed")
    assert_raises(ValueError, cval.cross_val_score, svm, X, y)
    # test error is raised when the precomputed kernel is not array-like
    # or sparse
    assert_raises(ValueError, cval.cross_val_score, svm,
                  linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
    """fit_params of every kind (arrays, sparse matrices, scalars,
    objects, callables) must be forwarded intact to the estimator."""
    clf = MockClassifier()
    n_samples = X.shape[0]
    n_classes = len(np.unique(y))
    DUMMY_INT = 42
    DUMMY_STR = '42'
    DUMMY_OBJ = object()
    def assert_fit_params(clf):
        # Function to test that the values are passed correctly to the
        # classifier arguments for non-array type
        assert_equal(clf.dummy_int, DUMMY_INT)
        assert_equal(clf.dummy_str, DUMMY_STR)
        assert_equal(clf.dummy_obj, DUMMY_OBJ)
    fit_params = {'sample_weight': np.ones(n_samples),
                  'class_prior': np.ones(n_classes) / n_classes,
                  'sparse_sample_weight': W_sparse,
                  'sparse_param': P_sparse,
                  'dummy_int': DUMMY_INT,
                  'dummy_str': DUMMY_STR,
                  'dummy_obj': DUMMY_OBJ,
                  'callback': assert_fit_params}
    # MockClassifier.fit performs the array-shaped checks itself.
    cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
    """A custom scorer must be invoked once per fold (3 by default) and
    its return values collected."""
    clf = MockClassifier()
    _score_func_args = []
    def score_func(y_test, y_predict):
        # Record every invocation so the call count can be verified.
        _score_func_args.append((y_test, y_predict))
        return 1.0
    with warnings.catch_warnings(record=True):
        scoring = make_scorer(score_func)
        score = cval.cross_val_score(clf, X, y, scoring=scoring)
    assert_array_equal(score, [1.0, 1.0, 1.0])
    assert len(_score_func_args) == 3
def test_cross_val_score_errors():
    """An object without a ``fit`` method must raise TypeError."""
    class BrokenEstimator:
        pass
    assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
    """train_test_split must reject missing arrays, inconsistent sizes,
    invalid test/train sizes and unknown keyword arguments."""
    assert_raises(ValueError, cval.train_test_split)
    assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
    assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
                  train_size=0.6)
    assert_raises(ValueError, cval.train_test_split, range(3),
                  test_size=np.float32(0.6), train_size=np.float32(0.6))
    assert_raises(ValueError, cval.train_test_split, range(3),
                  test_size="wrong_type")
    assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
                  train_size=4)
    assert_raises(TypeError, cval.train_test_split, range(3),
                  some_argument=1.1)
    # Arrays of different lengths cannot be split consistently.
    assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
    """train_test_split must keep X/y rows aligned, support sparse and
    list inputs, n-d arrays, and the ``stratify`` option."""
    X = np.arange(100).reshape((10, 10))
    X_s = coo_matrix(X)
    y = np.arange(10)
    # simple test
    split = cval.train_test_split(X, y, test_size=None, train_size=.5)
    X_train, X_test, y_train, y_test = split
    assert_equal(len(y_test), len(y_train))
    # test correspondence of X and y
    assert_array_equal(X_train[:, 0], y_train * 10)
    assert_array_equal(X_test[:, 0], y_test * 10)
    # conversion of lists to arrays (deprecated?)
    with warnings.catch_warnings(record=True):
        split = cval.train_test_split(X, X_s, y.tolist(), allow_lists=False)
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_array_equal(X_train, X_s_train.toarray())
    assert_array_equal(X_test, X_s_test.toarray())
    # don't convert lists to anything else by default
    split = cval.train_test_split(X, X_s, y.tolist())
    X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
    assert_true(isinstance(y_train, list))
    assert_true(isinstance(y_test, list))
    # allow nd-arrays
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
    split = cval.train_test_split(X_4d, y_3d)
    assert_equal(split[0].shape, (7, 5, 3, 2))
    assert_equal(split[1].shape, (3, 5, 3, 2))
    assert_equal(split[2].shape, (7, 7, 11))
    assert_equal(split[3].shape, (3, 7, 11))
    # test stratification option
    y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
    for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
                                        [2, 4, 2, 4, 6]):
        train, test = cval.train_test_split(y,
                                            test_size=test_size,
                                            stratify=y,
                                            random_state=0)
        assert_equal(len(test), exp_test_size)
        assert_equal(len(test) + len(train), len(y))
        # check the 1:1 ratio of ones and twos in the data is preserved
        assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    """Check that train_test_split preserves pandas DataFrame types."""
    # NOTE(review): name lacks the ``test_`` prefix, so test collectors
    # will skip this function — confirm whether that is intentional.
    # check train_test_split doesn't destroy pandas dataframe
    types = [MockDataFrame]
    try:
        from pandas import DataFrame
        types.append(DataFrame)
    except ImportError:
        pass
    for InputFeatureType in types:
        # X dataframe
        X_df = InputFeatureType(X)
        X_train, X_test = cval.train_test_split(X_df)
        assert_true(isinstance(X_train, InputFeatureType))
        assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
    """train_test_split must preserve MockDataFrame by default and
    return plain arrays when allow_lists=False."""
    # NOTE(review): name lacks the ``test_`` prefix, so test collectors
    # will skip this function — confirm whether that is intentional.
    # X mock dataframe
    X_df = MockDataFrame(X)
    X_train, X_test = cval.train_test_split(X_df)
    assert_true(isinstance(X_train, MockDataFrame))
    assert_true(isinstance(X_test, MockDataFrame))
    X_train_arr, X_test_arr = cval.train_test_split(X_df, allow_lists=False)
    assert_true(isinstance(X_train_arr, np.ndarray))
    assert_true(isinstance(X_test_arr, np.ndarray))
def test_cross_val_score_with_score_func_classification():
    """Default score, explicit 'accuracy', and 'f1_weighted' must all
    give the expected per-fold values on iris."""
    iris = load_iris()
    clf = SVC(kernel='linear')
    # Default score (should be the accuracy score)
    scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
    assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # Correct classification score (aka. zero / one score) - should be the
    # same as the default estimator score
    zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="accuracy", cv=5)
    assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (class are balanced so f1_score should be equal to zero/one
    # score
    f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
                                     scoring="f1_weighted", cv=5)
    assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
    """Default score, 'r2', 'mean_squared_error' (negated loss) and a
    custom explained-variance scorer must give the expected values."""
    X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
                           random_state=0)
    reg = Ridge()
    # Default score of the Ridge regression estimator
    scores = cval.cross_val_score(reg, X, y, cv=5)
    assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # R2 score (aka. determination coefficient) - should be the
    # same as the default estimator score
    r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
    assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
    # Mean squared error; this is a loss function, so "scores" are negative
    mse_scores = cval.cross_val_score(reg, X, y, cv=5,
                                      scoring="mean_squared_error")
    expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
    assert_array_almost_equal(mse_scores, expected_mse, 2)
    # Explained variance
    scoring = make_scorer(explained_variance_score)
    ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
    assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
    """permutation_test_score: real labels must give a high score with a
    small p-value, random labels a low score with a large p-value, and
    sparse input / label groups / custom scorers must be supported."""
    iris = load_iris()
    X = iris.data
    X_sparse = coo_matrix(X)
    y = iris.target
    svm = SVC(kernel='linear')
    cv = cval.StratifiedKFold(y, 2)
    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_greater(score, 0.9)
    assert_almost_equal(pvalue, 0.0, 1)
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
        labels=np.ones(y.size), random_state=0)
    # A single trivial label group must not change the result.
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)
    # check that we obtain the same results with a sparse representation
    svm_sparse = SVC(kernel='linear')
    cv_sparse = cval.StratifiedKFold(y, 2)
    score_label, _, pvalue_label = cval.permutation_test_score(
        svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
        scoring="accuracy", labels=np.ones(y.size), random_state=0)
    assert_true(score_label == score)
    assert_true(pvalue_label == pvalue)
    # test with custom scoring object
    def custom_score(y_true, y_pred):
        return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
                / y_true.shape[0])
    scorer = make_scorer(custom_score)
    score, _, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
    assert_almost_equal(score, .93, 2)
    assert_almost_equal(pvalue, 0.01, 3)
    # set random y
    y = np.mod(np.arange(len(y)), 3)
    score, scores, pvalue = cval.permutation_test_score(
        svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
    assert_less(score, 0.5)
    assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
    """Every CV generator must yield integer (non-boolean) index arrays
    usable for fancy indexing of X and y."""
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    # explicitly passing indices value is deprecated
    loo = cval.LeaveOneOut(4)
    lpo = cval.LeavePOut(4, 2)
    kf = cval.KFold(4, 2)
    skf = cval.StratifiedKFold(y, 2)
    lolo = cval.LeaveOneLabelOut(labels)
    lopo = cval.LeavePLabelOut(labels, 2)
    ps = cval.PredefinedSplit([1, 1, 2, 2])
    ss = cval.ShuffleSplit(2)
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
        for train, test in cv:
            # Fixed: the original asserted on ``train`` twice and never
            # checked ``test``.  Both halves must be index arrays, not
            # boolean masks.
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
            X[train], X[test]
            y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
    """Same as test_cross_val_generator_with_indices but relying on the
    generators' default (index-based) behavior."""
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    loo = cval.LeaveOneOut(4)
    lpo = cval.LeavePOut(4, 2)
    kf = cval.KFold(4, 2)
    skf = cval.StratifiedKFold(y, 2)
    lolo = cval.LeaveOneLabelOut(labels)
    lopo = cval.LeavePLabelOut(labels, 2)
    ss = cval.ShuffleSplit(2)
    ps = cval.PredefinedSplit([1, 1, 2, 2])
    for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
        for train, test in cv:
            # Fixed: the original asserted on ``train`` twice and never
            # checked ``test``.
            assert_not_equal(np.asarray(train).dtype.kind, 'b')
            assert_not_equal(np.asarray(test).dtype.kind, 'b')
            X[train], X[test]
            y[train], y[test]
def test_shufflesplit_errors():
    """Invalid test_size/train_size combinations must raise ValueError."""
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
    # Fractions summing past 1.0 are inconsistent.
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
                  train_size=0.95)
    # Integer sizes may not exceed (or equal) the number of samples.
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
    # Non-numeric sizes are rejected.
    assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
    assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
                  train_size=None)
def test_shufflesplit_reproducible():
    """A seeded ShuffleSplit yields identical splits on re-iteration."""
    # Check that iterating twice on the ShuffleSplit gives the same
    # sequence of train-test when the random_state is given
    ss = cval.ShuffleSplit(10, random_state=21)
    assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
    """_safe_split must slice a precomputed Gram matrix on both axes so the
    result equals the kernel recomputed from the raw-feature split."""
    clf = SVC()
    clfp = SVC(kernel="precomputed")
    iris = load_iris()
    X, y = iris.data, iris.target
    K = np.dot(X, X.T)
    cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
    tr, te = list(cv)[0]
    X_tr, y_tr = cval._safe_split(clf, X, y, tr)
    K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
    assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
    # Test rows must be indexed against the *training* columns.
    X_te, y_te = cval._safe_split(clf, X, y, te, tr)
    K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
    assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
def test_cross_val_score_allow_nans():
    # Check that cross_val_score allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    # The imputer replaces the NaN row before the classifier sees it.
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
    # Check that train_test_split allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    # Smoke test only: no exception means success.
    cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
    # Check that permutation_test_score allows input data with NaNs
    X = np.arange(200, dtype=np.float64).reshape(10, -1)
    X[2, :] = np.nan
    y = np.repeat([0, 1], X.shape[0] / 2)
    # The imputer replaces the NaN row before the classifier sees it.
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
    """check_cv picks StratifiedKFold for classification targets and plain
    KFold otherwise (regression, multilabel, multioutput)."""
    X = np.ones((9, 2))
    cv = cval.check_cv(3, X, classifier=False)
    assert_true(isinstance(cv, cval.KFold))
    y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
    cv = cval.check_cv(3, X, y_binary, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))
    y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
    cv = cval.check_cv(3, X, y_multiclass, classifier=True)
    assert_true(isinstance(cv, cval.StratifiedKFold))
    X = np.ones((5, 2))
    y_seq_of_seqs = [[], [1, 2], [3], [0, 1, 3], [2]]
    with warnings.catch_warnings(record=True):
        # deprecated sequence of sequence format
        cv = cval.check_cv(3, X, y_seq_of_seqs, classifier=True)
    assert_true(isinstance(cv, cval.KFold))
    # Multilabel indicator matrix cannot be stratified.
    y_indicator_matrix = LabelBinarizer().fit_transform(y_seq_of_seqs)
    cv = cval.check_cv(3, X, y_indicator_matrix, classifier=True)
    assert_true(isinstance(cv, cval.KFold))
    y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
    cv = cval.check_cv(3, X, y_multioutput, classifier=True)
    assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
    """Multilabel scoring through make_scorer with micro/macro/samples
    averaging inside cross_val_score."""
    X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
                  [-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
    y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
                  [0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
    clf = KNeighborsClassifier(n_neighbors=1)
    scoring_micro = make_scorer(precision_score, average='micro')
    scoring_macro = make_scorer(precision_score, average='macro')
    scoring_samples = make_scorer(precision_score, average='samples')
    score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
    score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
    score_samples = cval.cross_val_score(clf, X, y,
                                         scoring=scoring_samples, cv=5)
    # Expected per-fold precisions, computed by hand for this fixture.
    assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
    assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
    assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
    """cross_val_predict must match a manual fold-by-fold loop and handle
    default CV, LeaveOneOut, sparse input, unsupervised estimators, and
    reject CV schemes that are not a partition of the samples."""
    boston = load_boston()
    X, y = boston.data, boston.target
    cv = cval.KFold(len(boston.target))
    est = Ridge()
    # Naive loop (should be same as cross_val_predict):
    preds2 = np.zeros_like(y)
    for train, test in cv:
        est.fit(X[train], y[train])
        preds2[test] = est.predict(X[test])
    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_array_almost_equal(preds, preds2)
    preds = cval.cross_val_predict(est, X, y)
    assert_equal(len(preds), len(y))
    cv = cval.LeaveOneOut(len(y))
    preds = cval.cross_val_predict(est, X, y, cv=cv)
    assert_equal(len(preds), len(y))
    # Sparsify the input and check sparse matrices are accepted.
    Xsp = X.copy()
    Xsp *= (Xsp > np.median(Xsp))
    Xsp = coo_matrix(Xsp)
    preds = cval.cross_val_predict(est, Xsp, y)
    # Fixed: the original compared two scalar lengths with
    # assert_array_almost_equal; assert_equal states the intent.
    assert_equal(len(preds), len(y))
    # Unsupervised estimator (no y).
    preds = cval.cross_val_predict(KMeans(), X)
    assert_equal(len(preds), len(y))
    def bad_cv():
        # Overlapping, non-partition folds must be rejected.
        for i in range(4):
            yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
    assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
    """Smoke-test cross_val_predict with sparse, list, and 3-D inputs."""
    clf = Ridge()
    # Smoke test
    predictions = cval.cross_val_predict(clf, X, y)
    assert_equal(predictions.shape, (10,))
    # test with multioutput y
    predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_equal(predictions.shape, (10, 2))
    # NOTE(review): the two checks below duplicate the pair above, only
    # swapping assert_equal for assert_array_equal.
    predictions = cval.cross_val_predict(clf, X_sparse, y)
    assert_array_equal(predictions.shape, (10,))
    # test with multioutput y
    predictions = cval.cross_val_predict(clf, X_sparse, X)
    assert_array_equal(predictions.shape, (10, 2))
    # test with X and y as list
    list_check = lambda x: isinstance(x, list)
    clf = CheckingClassifier(check_X=list_check)
    predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
    clf = CheckingClassifier(check_y=list_check)
    predictions = cval.cross_val_predict(clf, X, y.tolist())
    # test with 3d X and
    X_3d = X[:, :, np.newaxis]
    check_3d = lambda x: x.ndim == 3
    clf = CheckingClassifier(check_X=check_3d)
    predictions = cval.cross_val_predict(clf, X_3d, y)
    assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
    """DataFrame/Series inputs must reach the estimator un-converted."""
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((Series, DataFrame))
    except ImportError:
        # pandas is optional; the mock frame covers the code path.
        pass
    for TargetType, InputFeatureType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)
        cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
    """Sparse matrices passed through fit_params must reach the estimator."""
    iris = load_iris()
    X, y = iris.data, iris.target
    clf = MockClassifier()
    fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
    a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
    assert_array_equal(a, np.ones(3))
def test_check_is_partition():
    """_check_is_partition is True only for a permutation of range(n)."""
    p = np.arange(100)
    assert_true(cval._check_is_partition(p, 100))
    assert_false(cval._check_is_partition(np.delete(p, 23), 100))
    # Introduce a duplicate entry -> no longer a partition.
    p[0] = 23
    assert_false(cval._check_is_partition(p, 100))
| bsd-3-clause |
Mutos/SoC-Test-001 | utils/heatsim/heatsim.py | 20 | 7285 | #!/usr/bin/env python
#######################################################
#
# SIM CODE
#
#######################################################
# Imports
from frange import *
import math
import matplotlib.pyplot as plt
def clamp(a, b, x):
    """Return *x* limited to the interval [a, b]."""
    bounded_below = max(a, x)
    return min(b, bounded_below)
class heatsim:
    """Lumped-parameter heat simulation of a NAEV ship firing weapons.

    Each weapon is a single thermal mass that gains heat when it fires,
    conducts heat into the hull, and the hull radiates the heat to space.
    Fire rate (and accuracy) degrade as temperatures rise.
    """
    def __init__(self, shipname="llama", weapname="laser", simulation=[60., 120.]):
        """Set up constants and load ship/weapon parameters.

        *simulation* is a sequence of times [s] at which firing toggles
        on/off; its last entry is the total simulated time.  (The mutable
        default is kept for interface compatibility; it is never mutated.)
        """
        # Physical constants and steel material properties.
        self.STEFAN_BOLZMANN = 5.67e-8  # W / (m^2 K^4)
        self.SPACE_TEMP = 250.          # ambient temperature [K]
        self.STEEL_COND = 54.           # conductivity
        self.STEEL_CAP = 0.49           # specific heat capacity
        self.STEEL_DENS = 7.88e3        # density [kg/m^3]
        # Temperature thresholds [K] plotted as degradation limits.
        self.ACCURACY_LIMIT = 500
        self.FIRERATE_LIMIT = 800
        self.shipname = shipname
        self.weapname = weapname
        # Simulation timestep: 50 ticks per second.
        self.sim_dt = 1. / 50.
        self.setSimulation(simulation)
        # Load ship and weapon data.
        self.ship_mass, self.ship_weaps = self.loadship(shipname)
        self.weap_mass, self.weap_delay, self.weap_energy = self.loadweap(weapname)
    def setSimulation(self, simulation):
        """Set the firing-toggle schedule; the last entry is total time."""
        self.simulation = simulation
        self.sim_total = simulation[-1]
    def loadship(self, shipname):
        "Returns mass, number of weaps."
        ships = {
            "llama": (80., 2),
            "lancelot": (180., 4),
            "pacifier": (730., 5),
            "hawking": (3750., 7),
            "peacemaker": (6200., 8),
        }
        try:
            return ships[shipname]
        except KeyError:
            raise ValueError
    def loadweap(self, weapname):
        "Returns mass, delay, energy."
        weapons = {
            "laser": (2., 0.9, 4.25),
            "plasma": (4., 0.675, 3.75),
            "ion": (6., 1.440, 15.),
            "laser turret": (16., 0.540, 6.12),
            "ion turret": (42., 0.765, 25.),
            "railgun turret": (60., 1.102, 66.),
        }
        try:
            return weapons[weapname]
        except KeyError:
            raise ValueError
    def prepare(self):
        """Reset all state and derive thermal parameters for a run."""
        self.time_data = []
        # Ship parameters: hull modeled as a solid steel sphere.
        ship_kg = self.ship_mass * 1000.
        self.ship_emis = 0.8  # emissivity
        self.ship_cond = self.STEEL_COND
        self.ship_C = self.STEEL_CAP * ship_kg
        # Surface area of a sphere with the ship's volume.
        self.ship_area = 4.*math.pi*pow(3./4.*ship_kg/self.STEEL_DENS/math.pi, 2./3.)
        self.ship_T = self.SPACE_TEMP
        self.ship_data = []
        # Weapon parameters: half-sphere contact area per weapon.
        weap_kg = self.weap_mass * 1000.
        self.weap_C = self.STEEL_CAP * weap_kg
        self.weap_area = 2.*math.pi*pow(3./4.*weap_kg/self.STEEL_DENS/math.pi, 2./3.)
        self.weap_list = []  # per-weapon countdown until next shot [s]
        self.weap_T = []     # per-weapon temperature [K]
        self.weap_data = []  # per-weapon temperature history
        for i in range(self.ship_weaps):
            # Stagger initial shots evenly across one fire period.
            self.weap_list.append(i * self.weap_delay / self.ship_weaps)
            self.weap_T.append(self.SPACE_TEMP)
            self.weap_data.append([])
    def __accMod(self, T):
        # Accuracy penalty factor: 0 below 500 K, 1 at/above 1100 K.
        # (clamp inlined so the class is self-contained.)
        return min(1., max(0., (T - 500.) / 600.))
    def __frMod(self, T):
        # Fire-rate factor: 1 below 800 K, 0 at/above 1100 K.
        return min(1., max(0., (1100. - T) / 300.))
    def simulate(self):
        "Begins the simulation."
        self.prepare()
        weap_on = True
        sim_index = 0
        dt = self.sim_dt
        sim_elapsed = 0.
        while sim_elapsed < self.sim_total:
            Q_cond = 0.
            for i in range(len(self.weap_list)):
                # Toggle firing when crossing a schedule boundary.
                # NOTE(review): this check sits inside the weapon loop, so
                # the boundary is consumed by the first weapon of the tick.
                if self.simulation[sim_index] < sim_elapsed:
                    weap_on = not weap_on
                    sim_index += 1
                # Fire if ready; the countdown slows as the weapon heats up.
                if weap_on:
                    self.weap_list[i] -= dt * self.__frMod(self.weap_T[i])
                    if self.weap_list[i] < 0.:
                        self.weap_T[i] += 1e4 * self.weap_energy / self.weap_C
                        self.weap_list[i] += self.weap_delay
                # Conduct heat between weapon and hull.
                Q = -self.ship_cond * (self.weap_T[i] - self.ship_T) * self.weap_area * dt
                self.weap_T[i] += Q / self.weap_C
                Q_cond += Q
                self.weap_data[i].append(self.weap_T[i])
            # Hull radiates to space and absorbs what the weapons conducted.
            Q_rad = self.STEFAN_BOLZMANN * self.ship_area * self.ship_emis * (pow(self.SPACE_TEMP, 4.) - pow(self.ship_T, 4.)) * dt
            Q = Q_rad - Q_cond
            self.ship_T += Q / self.ship_C
            self.time_data.append(sim_elapsed)
            self.ship_data.append(self.ship_T)
            sim_elapsed += dt
    def save(self, filename):
        """Save results to *filename*: one line per tick with time, ship
        temperature, then one column per weapon temperature.

        Fixes vs. the original: it opened an undefined ``self.filename``,
        iterated ``range(self.time_data)`` / ``range(self.weap_data)``
        (TypeError on lists), and indexed ``weap_data[i][j]`` with the
        axes swapped (weapon data is weapon-major).
        """
        with open(filename, 'w') as f:
            for i in range(len(self.time_data)):
                f.write(str(self.time_data[i]) + ' ' + str(self.ship_data[i]))
                for j in range(len(self.weap_data)):
                    f.write(' ' + str(self.weap_data[j][i]))
                f.write('\n')
    def display(self):
        """Print final ship and weapon temperatures.

        Fixed to use ``self`` instead of the global ``hs`` instance the
        original accidentally referenced.
        """
        print("Ship Temp: " + str(self.ship_T) + " K")
        for i in range(len(self.weap_list)):
            print("Outfit[" + str(i) + "] Temp: " + str(self.weap_T[i]) + " K")
    def plot(self, filename=None):
        """Plot ship (top) and first-weapon (bottom) temperature curves;
        show interactively, or save to *filename* if given."""
        plt.hold(False)  # NOTE: plt.hold is deprecated in modern matplotlib
        plt.figure(1)
        # Ship temperature subplot.
        plt.subplot(211)
        plt.plot(self.time_data, self.ship_data, '-')
        plt.axis([0, self.sim_total, 0, 1100])
        plt.title('NAEV Heat Simulation (' + self.shipname + ' with ' + self.weapname + ')')
        plt.legend(('Ship', 'Accuracy Limit', 'Fire Rate Limit'), loc='upper left')
        plt.ylabel('Temperature [K]')
        plt.grid(True)
        # Weapon temperature subplot with degradation limit lines.
        plt.subplot(212)
        plt.plot(self.time_data, self.weap_data[0], '-')
        plt.hold(True)
        n_samples = len(self.weap_data[0])
        plt.plot(self.time_data, [self.ACCURACY_LIMIT] * n_samples, '--')
        plt.plot(self.time_data, [self.FIRERATE_LIMIT] * n_samples, '-.')
        plt.hold(False)
        plt.axis([0, self.sim_total, 0, 1100])
        plt.legend(('Weapon', 'Accuracy Limit', 'Fire Rate Limit'), loc='upper right')
        plt.ylabel('Temperature [K]')
        plt.xlabel('Time [s]')
        plt.grid(True)
        # Fixed: identity comparison with ``is None`` instead of ``== None``.
        if filename is None:
            plt.show()
        else:
            plt.savefig(filename)
if __name__ == "__main__":
    print("NAEV HeatSim\n")
    # Representative ship/weapon pairings to simulate.
    shp_lst = { 'llama' : 'laser',
                'lancelot' : 'ion',
                'pacifier' : 'laser turret',
                'hawking' : 'ion turret',
                'peacemaker' : 'railgun turret' }
    for shp,wpn in shp_lst.items():
        hs = heatsim( shp, wpn, (60., 120.) )
        #hs = heatsim( shp, wpn, frange( 30., 600., 30. ) )
        hs.simulate()
        hs.plot( shp+'_'+wpn+'_60_60.png' )
        # Re-run the same pairing under two alternative firing schedules.
        hs.setSimulation( (30., 90.) )
        hs.simulate()
        hs.plot( shp+'_'+wpn+'_30_60.png' )
        hs.setSimulation( (30., 90., 120., 180.) )
        hs.simulate()
        hs.plot( shp+'_'+wpn+'_30_60_30_60.png' )
        print( ' '+shp+' with '+wpn+' done!' )
| gpl-3.0 |
jangorecki/h2o-3 | h2o-py/tests/testdir_misc/pyunit_export_file.py | 6 | 1657 | from __future__ import print_function
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
import string
import random
import pandas as pd
from pandas.util.testing import assert_frame_equal
'''
Export file with h2o.export_file and compare with Python counterpart when re importing file to check for parity.
'''
def export_file():
    """Export H2O predictions with h2o.export_file and verify the CSV
    re-imported by pandas matches the in-memory frame exactly."""
    pros_hex = h2o.upload_file(pyunit_utils.locate("smalldata/prostate/prostate.csv"))
    # Columns holding categorical codes must be converted to factors.
    pros_hex[1] = pros_hex[1].asfactor()
    pros_hex[3] = pros_hex[3].asfactor()
    pros_hex[4] = pros_hex[4].asfactor()
    pros_hex[5] = pros_hex[5].asfactor()
    pros_hex[8] = pros_hex[8].asfactor()
    # Random ~80/20 train/test split.
    p_sid = pros_hex.runif()
    pros_train = pros_hex[p_sid > .2, :]
    pros_test = pros_hex[p_sid <= .2, :]
    glm = H2OGeneralizedLinearEstimator(family="binomial")
    # NOTE(review): train() trains in place; ``myglm`` is unused.
    myglm = glm.train(x=list(range(2, pros_hex.ncol)), y=1, training_frame=pros_train)
    mypred = glm.predict(pros_test)
    def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
        # Random file-name prefix to avoid collisions between test runs.
        return ''.join(random.choice(chars) for _ in range(size))
    fname = id_generator() + "_prediction.csv"
    path = pyunit_utils.locate("results")
    dname = path + "/" + fname
    h2o.export_file(mypred,dname)
    py_pred = pd.read_csv(dname)
    print(py_pred.head())
    h_pred = mypred.as_data_frame(True)
    print(h_pred.head())
    #Test to check if py_pred & h_pred are identical
    assert_frame_equal(py_pred,h_pred)
# Run under the pyunit harness when executed directly, or immediately on
# import (the h2o test runner imports test modules).
if __name__ == "__main__":
    pyunit_utils.standalone_test(export_file)
else:
    export_file()
| apache-2.0 |
FNCS/ns-3.26 | src/core/examples/sample-rng-plot.py | 188 | 1246 | # -*- Mode:Python; -*-
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Demonstrate use of ns-3 as a random number generator integrated with
# plotting tools; adapted from Gustavo Carneiro's ns-3 tutorial
import numpy as np
import matplotlib.pyplot as plt
import ns.core
# mu, var = 100, 225
# Draw 10000 samples from ns-3's normal RNG (mean 100, variance 225,
# i.e. sigma = 15 as the plot annotation says).
rng = ns.core.NormalVariable(100.0, 225.0)
x = [rng.GetValue() for t in range(10000)]
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='g', alpha=0.75)
plt.title('ns-3 histogram')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
| gpl-2.0 |
balazssimon/ml-playground | udemy/lazyprogrammer/deep-reinforcement-learning-python/atari/dqn_theano.py | 1 | 10655 | # https://deeplearningcourses.com/c/deep-reinforcement-learning-in-python
# https://www.udemy.com/deep-reinforcement-learning-in-python
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import copy
import gym
import os
import sys
import random
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv2d
import matplotlib.pyplot as plt
from gym import wrappers
from datetime import datetime
from scipy.misc import imresize
##### testing only
# MAX_EXPERIENCES = 10000
# MIN_EXPERIENCES = 1000
MAX_EXPERIENCES = 500000  # replay buffer capacity
MIN_EXPERIENCES = 50000   # random transitions collected before training
TARGET_UPDATE_PERIOD = 10000  # steps between target-network syncs
IM_SIZE = 80  # side length of the downsampled square frame
K = 4 #env.action_space.n
def downsample_image(A):
    """Crop the Atari frame to the playfield, convert to grayscale and
    resize to IM_SIZE x IM_SIZE."""
    B = A[31:195] # select the important parts of the image
    B = B.mean(axis=2) # convert to grayscale
    # downsample image
    # changing aspect ratio doesn't significantly distort the image
    # nearest neighbor interpolation produces a much sharper image
    # than default bilinear
    B = imresize(B, size=(IM_SIZE, IM_SIZE), interp='nearest')
    return B
def update_state(state, obs):
    """Drop the oldest frame from the 4-frame stack and append the freshly
    downsampled observation."""
    newest_frame = np.expand_dims(downsample_image(obs), 0)
    return np.append(state[1:], newest_frame, axis=0)
def init_filter(shape):
    """He-style random weight initialization, returned as float32."""
    fan_in = np.prod(shape[1:])
    scale = np.sqrt(2.0 / fan_in)
    weights = np.random.randn(*shape) * scale
    return weights.astype(np.float32)
class ConvLayer(object):
    """Strided convolutional layer with shared weights and bias."""
    def __init__(self, mi, mo, filtsz=5, stride=2, f=T.nnet.relu):
        # mi = input feature map size
        # mo = output feature map size
        sz = (mo, mi, filtsz, filtsz)
        W0 = init_filter(sz)
        self.W = theano.shared(W0)
        b0 = np.zeros(mo, dtype=np.float32)
        self.b = theano.shared(b0)
        self.stride = (stride, stride)
        self.params = [self.W, self.b]
        self.f = f
    def forward(self, X):
        """Symbolic forward pass: convolution + bias + activation."""
        conv_out = conv2d(
            input=X,
            filters=self.W,
            subsample=self.stride,
            border_mode='half',
        )
        # cut off 1 pixel from each edge
        # to make the output the same size as input
        # like tensorflow
        return self.f(conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
class HiddenLayer:
    """Fully connected layer with shared weights and bias."""
    def __init__(self, M1, M2, f=T.nnet.relu):
        # He-style initialization scaled by fan-in M1.
        W = np.random.randn(M1, M2) * np.sqrt(2 / M1)
        self.W = theano.shared(W.astype(np.float32))
        self.b = theano.shared(np.zeros(M2).astype(np.float32))
        self.params = [self.W, self.b]
        self.f = f
    def forward(self, X):
        """Symbolic forward pass: affine transform + activation."""
        a = X.dot(self.W) + self.b
        return self.f(a)
class DQN:
    """Convolutional deep Q-network built with Theano, trained with RMSprop
    on the squared TD error of the selected actions."""
    def __init__(self, K, conv_layer_sizes, hidden_layer_sizes, gamma):
        # K = number of discrete actions (output units).
        self.K = K
        # RMSprop hyperparameters (``mu`` is defined but unused below).
        lr = np.float32(2.5e-4)
        mu = np.float32(0)
        decay = np.float32(0.99)
        eps = np.float32(1e-10)
        # inputs and targets
        X = T.ftensor4('X')
        G = T.fvector('G')
        actions = T.ivector('actions')
        # create the graph
        self.conv_layers = []
        num_input_filters = 4 # number of filters / color channels
        for num_output_filters, filtersz, stride in conv_layer_sizes:
            layer = ConvLayer(num_input_filters, num_output_filters, filtersz, stride)
            self.conv_layers.append(layer)
            num_input_filters = num_output_filters
        ##### debug #####
        # Z = X / 255.0
        # j = 0
        # for layer in self.conv_layers:
        # Z = layer.forward(Z)
        # out = Z
        # op = theano.function(inputs=[X], outputs=out, allow_input_downcast=True)
        # test = op(np.random.randn(1, 4, IM_SIZE, IM_SIZE))
        # print("output size after conv %d: %s" % (j, test.shape))
        # j += 1
        # get conv output size
        # Run one dummy batch through the conv stack to discover the
        # flattened feature size that feeds the fully connected layers.
        Z = X / 255.0
        for layer in self.conv_layers:
            Z = layer.forward(Z)
        conv_out = Z.flatten(ndim=2)
        conv_out_op = theano.function(inputs=[X], outputs=conv_out, allow_input_downcast=True)
        test = conv_out_op(np.random.randn(1, 4, IM_SIZE, IM_SIZE))
        flattened_ouput_size = test.shape[1]
        # build fully connected layers
        self.layers = []
        M1 = flattened_ouput_size
        for M2 in hidden_layer_sizes:
            layer = HiddenLayer(M1, M2)
            self.layers.append(layer)
            M1 = M2
        # final layer
        layer = HiddenLayer(M1, K, lambda x: x)
        self.layers.append(layer)
        # collect params for copy
        self.params = []
        for layer in (self.conv_layers + self.layers):
            self.params += layer.params
        # calculate final output and cost
        Z = conv_out
        for layer in self.layers:
            Z = layer.forward(Z)
        Y_hat = Z
        selected_action_values = Y_hat[T.arange(actions.shape[0]), actions]
        cost = T.mean((G - selected_action_values)**2)
        # create train function
        # we need to ensure cache is updated before parameter update
        # by creating a list of new_caches
        # and using them in the parameter update
        grads = T.grad(cost, self.params)
        caches = [theano.shared(np.ones_like(p.get_value())) for p in self.params]
        new_caches = [decay*c + (np.float32(1) - decay)*g*g for c, g in zip(caches, grads)]
        c_update = [(c, new_c) for c, new_c in zip(caches, new_caches)]
        g_update = [
            (p, p - lr*g / T.sqrt(new_c + eps)) for p, new_c, g in zip(self.params, new_caches, grads)
        ]
        updates = c_update + g_update
        # compile functions
        self.train_op = theano.function(
            inputs=[X, G, actions],
            updates=updates,
            allow_input_downcast=True
        )
        self.predict_op = theano.function(
            inputs=[X],
            outputs=Y_hat,
            allow_input_downcast=True
        )
    def copy_from(self, other):
        """Copy all parameter values from *other* (the online network)."""
        my_params = self.params
        other_params = other.params
        for p, q in zip(my_params, other_params):
            actual = q.get_value()
            p.set_value(actual)
    def predict(self, X):
        """Return Q-values for a batch of stacked-frame states."""
        return self.predict_op(X)
    def update(self, states, actions, targets):
        """Perform one RMSprop step.  Note: returns None."""
        self.train_op(states, targets, actions)
    def sample_action(self, x, eps):
        """Epsilon-greedy action selection for a single state *x*."""
        if np.random.random() < eps:
            return np.random.choice(self.K)
        else:
            return np.argmax(self.predict([x])[0])
def learn(model, target_model, experience_replay_buffer, gamma, batch_size):
    """One minibatch Q-learning step using the target network's bootstrap."""
    # Sample experiences
    samples = random.sample(experience_replay_buffer, batch_size)
    states, actions, rewards, next_states, dones = map(np.array, zip(*samples))
    # Calculate targets
    next_Qs = target_model.predict(next_states)
    next_Q = np.amax(next_Qs, axis=1)
    # Terminal transitions bootstrap with 0 (np.invert flips the done mask).
    targets = rewards + np.invert(dones).astype(np.float32) * gamma * next_Q
    # Update model
    # NOTE(review): DQN.update() has no return value, so ``loss`` is always
    # None here; the caller discards it.
    loss = model.update(states, actions, targets)
    return loss
def play_one(
    env,
    total_t,
    experience_replay_buffer,
    model,
    target_model,
    gamma,
    batch_size,
    epsilon,
    epsilon_change,
    epsilon_min):
    """Play one episode, training the online model every step and syncing
    the target network every TARGET_UPDATE_PERIOD global steps.  Returns
    the updated step counter, episode reward, wall-clock duration, step
    count, mean training time per step, and the annealed epsilon."""
    t0 = datetime.now()
    # Reset the environment
    obs = env.reset()
    obs_small = downsample_image(obs)
    state = np.stack([obs_small] * 4, axis=0)
    assert(state.shape == (4, 80, 80))
    loss = None
    total_time_training = 0
    num_steps_in_episode = 0
    episode_reward = 0
    done = False
    while not done:
        # Update target network
        if total_t % TARGET_UPDATE_PERIOD == 0:
            target_model.copy_from(model)
            print("Copied model parameters to target network. total_t = %s, period = %s" % (total_t, TARGET_UPDATE_PERIOD))
        # Take action
        action = model.sample_action(state, epsilon)
        obs, reward, done, _ = env.step(action)
        obs_small = downsample_image(obs)
        next_state = np.append(state[1:], np.expand_dims(obs_small, 0), axis=0)
        # assert(state.shape == (4, 80, 80))
        episode_reward += reward
        # Remove oldest experience if replay buffer is full
        if len(experience_replay_buffer) == MAX_EXPERIENCES:
            experience_replay_buffer.pop(0)
        # Save the latest experience
        experience_replay_buffer.append((state, action, reward, next_state, done))
        # Train the model, keep track of time
        t0_2 = datetime.now()
        loss = learn(model, target_model, experience_replay_buffer, gamma, batch_size)
        dt = datetime.now() - t0_2
        total_time_training += dt.total_seconds()
        num_steps_in_episode += 1
        state = next_state
        total_t += 1
        # Linearly anneal exploration down to epsilon_min.
        epsilon = max(epsilon - epsilon_change, epsilon_min)
    return total_t, episode_reward, (datetime.now() - t0), num_steps_in_episode, total_time_training/num_steps_in_episode, epsilon
if __name__ == '__main__':
    # hyperparams and initialize stuff
    conv_layer_sizes = [(32, 8, 4), (64, 4, 2), (64, 3, 1)]
    hidden_layer_sizes = [512]
    gamma = 0.99
    batch_sz = 32
    num_episodes = 10000
    total_t = 0
    experience_replay_buffer = []
    episode_rewards = np.zeros(num_episodes)
    step_counts = np.zeros(num_episodes)
    # epsilon
    # decays linearly until 0.1
    epsilon = 1.0
    epsilon_min = 0.1
    epsilon_change = (epsilon - epsilon_min) / 500000
    # Create environment
    env = gym.envs.make("Breakout-v0")
    # Create models
    model = DQN(
        K=K,
        conv_layer_sizes=conv_layer_sizes,
        hidden_layer_sizes=hidden_layer_sizes,
        gamma=gamma,
        # scope="model"
    )
    target_model = DQN(
        K=K,
        conv_layer_sizes=conv_layer_sizes,
        hidden_layer_sizes=hidden_layer_sizes,
        gamma=gamma,
        # scope="target_model"
    )
    print("Populating experience replay buffer...")
    obs = env.reset()
    obs_small = downsample_image(obs)
    state = np.stack([obs_small] * 4, axis=0)
    # assert(state.shape == (4, 80, 80))
    # Fill the buffer with random-policy transitions before learning starts.
    for i in range(MIN_EXPERIENCES):
        action = np.random.choice(K)
        obs, reward, done, _ = env.step(action)
        next_state = update_state(state, obs)
        # assert(state.shape == (4, 80, 80))
        experience_replay_buffer.append((state, action, reward, next_state, done))
        if done:
            obs = env.reset()
            obs_small = downsample_image(obs)
            state = np.stack([obs_small] * 4, axis=0)
            # assert(state.shape == (4, 80, 80))
        else:
            state = next_state
    # Play a number of episodes and learn!
    for i in range(num_episodes):
        total_t, episode_reward, duration, num_steps_in_episode, time_per_step, epsilon = play_one(
            env,
            total_t,
            experience_replay_buffer,
            model,
            target_model,
            gamma,
            batch_sz,
            epsilon,
            epsilon_change,
            epsilon_min,
        )
        episode_rewards[i] = episode_reward
        step_counts[i] = num_steps_in_episode
        # Trailing-100-episode running averages for progress reporting.
        last_100_avg = episode_rewards[max(0, i - 100):i + 1].mean()
        last_100_avg_steps = step_counts[max(0, i - 100):i + 1].mean()
        print("Episode:", i,
              "Duration:", duration,
              "Num steps:", num_steps_in_episode,
              "Reward:", episode_reward,
              "Training time per step:", "%.3f" % time_per_step,
              "Avg Reward (Last 100):", "%.3f" % last_100_avg,
              "Avg Steps (Last 100):", "%.1f" % last_100_avg_steps,
              "Epsilon:", "%.3f" % epsilon
        )
        sys.stdout.flush()
| apache-2.0 |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/matplotlib/backend_bases.py | 1 | 106921 | """
Abstract base classes define the primitives that renderers and
graphics contexts must implement to serve as a matplotlib backend
:class:`RendererBase`
An abstract base class to handle drawing/rendering operations.
:class:`FigureCanvasBase`
The abstraction layer that separates the
:class:`matplotlib.figure.Figure` from the backend specific
details like a user interface drawing area
:class:`GraphicsContextBase`
An abstract base class that provides color, line styles, etc...
:class:`Event`
The base class for all of the matplotlib event
handling. Derived classes suh as :class:`KeyEvent` and
:class:`MouseEvent` store the meta data like keys and buttons
pressed, x and y locations in pixel and
:class:`~matplotlib.axes.Axes` coordinates.
:class:`ShowBase`
The base class for the Show class of each interactive backend;
the 'show' callable is then set to Show.__call__, inherited from
ShowBase.
"""
import os
import warnings
import time
import io
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.colors as colors
import matplotlib.transforms as transforms
import matplotlib.widgets as widgets
#import matplotlib.path as path
from matplotlib import rcParams
from matplotlib import is_interactive
from matplotlib import get_backend
from matplotlib._pylab_helpers import Gcf
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
import matplotlib.tight_bbox as tight_bbox
import matplotlib.textpath as textpath
from matplotlib.path import Path
from matplotlib.cbook import mplDeprecation
try:
from PIL import Image
_has_pil = True
except ImportError:
_has_pil = False
# Registry mapping a file format to the backend class that renders it.
_backend_d = {}
def register_backend(format, backend_class):
    """Register *backend_class* as the handler for *format* (presumably a
    file-extension string such as 'ps' -- confirm with callers)."""
    _backend_d[format] = backend_class
class ShowBase(object):
    """
    Simple base class to generate a show() callable in backends.
    Subclass must override mainloop() method.
    """
    def __call__(self, block=None):
        """
        Show all figures.  If *block* is not None, then
        it is a boolean that overrides all other factors
        determining whether show blocks by calling mainloop().
        The other factors are:
        it does not block if run inside "ipython --pylab";
        it does not block in interactive mode.
        """
        managers = Gcf.get_all_fig_managers()
        if not managers:
            # Nothing to show.
            return
        for manager in managers:
            manager.show()
        if block is not None:
            if block:
                self.mainloop()
                return
            else:
                return
        # Hack: determine at runtime whether we are
        # inside ipython in pylab mode.
        from matplotlib import pyplot
        try:
            ipython_pylab = not pyplot.show._needmain
            # IPython versions >= 0.10 tack the _needmain
            # attribute onto pyplot.show, and always set
            # it to False, when in --pylab mode.
            ipython_pylab = ipython_pylab and get_backend() != 'WebAgg'
            # TODO: The above is a hack to get the WebAgg backend
            # working with `ipython --pylab` until proper integration
            # is implemented.
        except AttributeError:
            ipython_pylab = False
        # Leave the following as a separate step in case we
        # want to control this behavior with an rcParam.
        if ipython_pylab:
            return
        if not is_interactive() or get_backend() == 'WebAgg':
            self.mainloop()
    def mainloop(self):
        # No-op here; interactive backends override this to start their
        # GUI event loop.
        pass
class RendererBase:
"""An abstract base class to handle drawing/rendering operations.
The following methods *must* be implemented in the backend:
* :meth:`draw_path`
* :meth:`draw_image`
* :meth:`draw_text`
* :meth:`get_text_width_height_descent`
The following methods *should* be implemented in the backend for
optimization reasons:
* :meth:`draw_markers`
* :meth:`draw_path_collection`
* :meth:`draw_quad_mesh`
"""
    def __init__(self):
        # TeX manager cache; None until first needed -- TODO confirm where
        # subclasses/other methods populate it (not visible in this chunk).
        self._texmanager = None
        # Helper that converts text to vector paths.
        self._text2path = textpath.TextToPath()
    def open_group(self, s, gid=None):
        """
        Open a grouping element with label *s*. If *gid* is given, use
        *gid* as the id of the group. Is only currently used by
        :mod:`~matplotlib.backends.backend_svg`.
        """
        # Intentionally a no-op in the base class.
        pass
    def close_group(self, s):
        """
        Close a grouping element with label *s*
        Is only currently used by :mod:`~matplotlib.backends.backend_svg`
        """
        # Intentionally a no-op in the base class.
        pass
    def draw_path(self, gc, path, transform, rgbFace=None):
        """
        Draws a :class:`~matplotlib.path.Path` instance using the
        given affine transform.
        """
        # Backends must override this primitive.
        raise NotImplementedError
    def draw_markers(self, gc, marker_path, marker_trans, path,
                     trans, rgbFace=None):
        """
        Draws a marker at each of the vertices in path. This includes
        all vertices, including control points on curves. To avoid
        that behavior, those vertices should be removed before calling
        this function.
        *gc*
            the :class:`GraphicsContextBase` instance
        *marker_trans*
            is an affine transform applied to the marker.
        *trans*
             is an affine transform applied to the path.
        This provides a fallback implementation of draw_markers that
        makes multiple calls to :meth:`draw_path`.  Some backends may
        want to override this method in order to draw the marker only
        once and reuse it multiple times.
        """
        for vertices, codes in path.iter_segments(trans, simplify=False):
            if len(vertices):
                # The last vertex pair of the segment is the marker position.
                x, y = vertices[-2:]
                self.draw_path(gc, marker_path,
                               marker_trans +
                               transforms.Affine2D().translate(x, y),
                               rgbFace)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
"""
Draws a collection of paths selecting drawing properties from
the lists *facecolors*, *edgecolors*, *linewidths*,
*linestyles* and *antialiaseds*. *offsets* is a list of
offsets to apply to each of the paths. The offsets in
*offsets* are first transformed by *offsetTrans* before being
applied. *offset_position* may be either "screen" or "data"
depending on the space that the offsets are in.
This provides a fallback implementation of
:meth:`draw_path_collection` that makes multiple calls to
:meth:`draw_path`. Some backends may want to override this in
order to render each set of path data only once, and then
reference that path multiple times with the different offsets,
colors, styles etc. The generator methods
:meth:`_iter_collection_raw_paths` and
:meth:`_iter_collection` are provided to help with (and
standardize) the implementation across backends. It is highly
recommended to use those generators, so that changes to the
behavior of :meth:`draw_path_collection` can be made globally.
"""
path_ids = []
for path, transform in self._iter_collection_raw_paths(
master_transform, paths, all_transforms):
path_ids.append((path, transform))
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_ids, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
path, transform = path_id
transform = transforms.Affine2D(
transform.get_matrix()).translate(xo, yo)
self.draw_path(gc0, path, transform, rgbFace)
def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
coordinates, offsets, offsetTrans, facecolors,
antialiased, edgecolors):
"""
This provides a fallback implementation of
:meth:`draw_quad_mesh` that generates paths and then calls
:meth:`draw_path_collection`.
"""
from matplotlib.collections import QuadMesh
paths = QuadMesh.convert_mesh_to_paths(
meshWidth, meshHeight, coordinates)
if edgecolors is None:
edgecolors = facecolors
linewidths = np.array([gc.get_linewidth()], np.float_)
return self.draw_path_collection(
gc, master_transform, paths, [], offsets, offsetTrans, facecolors,
edgecolors, linewidths, [], [antialiased], [None], 'screen')
def draw_gouraud_triangle(self, gc, points, colors, transform):
"""
Draw a Gouraud-shaded triangle.
*points* is a 3x2 array of (x, y) points for the triangle.
*colors* is a 3x4 array of RGBA colors for each point of the
triangle.
*transform* is an affine transform to apply to the points.
"""
raise NotImplementedError
def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
transform):
"""
Draws a series of Gouraud triangles.
*points* is a Nx3x2 array of (x, y) points for the trianglex.
*colors* is a Nx3x4 array of RGBA colors for each point of the
triangles.
*transform* is an affine transform to apply to the points.
"""
transform = transform.frozen()
for tri, col in zip(triangles_array, colors_array):
self.draw_gouraud_triangle(gc, tri, col, transform)
def _iter_collection_raw_paths(self, master_transform, paths,
all_transforms):
"""
This is a helper method (along with :meth:`_iter_collection`) to make
it easier to write a space-efficent :meth:`draw_path_collection`
implementation in a backend.
This method yields all of the base path/transform
combinations, given a master transform, a list of paths and
list of transforms.
The arguments should be exactly what is passed in to
:meth:`draw_path_collection`.
The backend should take each yielded path and transform and
create an object that can be referenced (reused) later.
"""
Npaths = len(paths)
Ntransforms = len(all_transforms)
N = max(Npaths, Ntransforms)
if Npaths == 0:
return
transform = transforms.IdentityTransform()
for i in range(N):
path = paths[i % Npaths]
if Ntransforms:
transform = all_transforms[i % Ntransforms]
yield path, transform + master_transform
def _iter_collection(self, gc, master_transform, all_transforms,
path_ids, offsets, offsetTrans, facecolors,
edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
"""
This is a helper method (along with
:meth:`_iter_collection_raw_paths`) to make it easier to write
a space-efficent :meth:`draw_path_collection` implementation in a
backend.
This method yields all of the path, offset and graphics
context combinations to draw the path collection. The caller
should already have looped over the results of
:meth:`_iter_collection_raw_paths` to draw this collection.
The arguments should be the same as that passed into
:meth:`draw_path_collection`, with the exception of
*path_ids*, which is a list of arbitrary objects that the
backend will use to reference one of the paths created in the
:meth:`_iter_collection_raw_paths` stage.
Each yielded result is of the form::
xo, yo, path_id, gc, rgbFace
where *xo*, *yo* is an offset; *path_id* is one of the elements of
*path_ids*; *gc* is a graphics context and *rgbFace* is a color to
use for filling the path.
"""
Ntransforms = len(all_transforms)
Npaths = len(path_ids)
Noffsets = len(offsets)
N = max(Npaths, Noffsets)
Nfacecolors = len(facecolors)
Nedgecolors = len(edgecolors)
Nlinewidths = len(linewidths)
Nlinestyles = len(linestyles)
Naa = len(antialiaseds)
Nurls = len(urls)
if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:
return
if Noffsets:
toffsets = offsetTrans.transform(offsets)
gc0 = self.new_gc()
gc0.copy_properties(gc)
if Nfacecolors == 0:
rgbFace = None
if Nedgecolors == 0:
gc0.set_linewidth(0.0)
xo, yo = 0, 0
for i in range(N):
path_id = path_ids[i % Npaths]
if Noffsets:
xo, yo = toffsets[i % Noffsets]
if offset_position == 'data':
if Ntransforms:
transform = (all_transforms[i % Ntransforms] +
master_transform)
else:
transform = master_transform
xo, yo = transform.transform_point((xo, yo))
xp, yp = transform.transform_point((0, 0))
xo = -(xp - xo)
yo = -(yp - yo)
if not (np.isfinite(xo) and np.isfinite(yo)):
continue
if Nfacecolors:
rgbFace = facecolors[i % Nfacecolors]
if Nedgecolors:
if Nlinewidths:
gc0.set_linewidth(linewidths[i % Nlinewidths])
if Nlinestyles:
gc0.set_dashes(*linestyles[i % Nlinestyles])
fg = edgecolors[i % Nedgecolors]
if len(fg) == 4:
if fg[3] == 0.0:
gc0.set_linewidth(0)
else:
gc0.set_foreground(fg)
else:
gc0.set_foreground(fg)
if rgbFace is not None and len(rgbFace) == 4:
if rgbFace[3] == 0:
rgbFace = None
gc0.set_antialiased(antialiaseds[i % Naa])
if Nurls:
gc0.set_url(urls[i % Nurls])
yield xo, yo, path_id, gc0, rgbFace
gc0.restore()
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to :meth:`draw_image`.
Allows a backend to have images at a different resolution to other
artists.
"""
return 1.0
def draw_image(self, gc, x, y, im):
"""
Draw the image instance into the current axes;
*gc*
a GraphicsContext containing clipping information
*x*
is the distance in pixels from the left hand side of the canvas.
*y*
the distance from the origin. That is, if origin is
upper, y is the distance from top. If origin is lower, y
is the distance from bottom
*im*
the :class:`matplotlib._image.Image` instance
"""
raise NotImplementedError
def option_image_nocomposite(self):
"""
override this method for renderers that do not necessarily
want to rescale and composite raster images. (like SVG)
"""
return False
def option_scale_image(self):
"""
override this method for renderers that support arbitrary
scaling of image (most of the vector backend).
"""
return False
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
"""
"""
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
"""
Draw the text instance
*gc*
the :class:`GraphicsContextBase` instance
*x*
the x location of the text in display coords
*y*
the y location of the text baseline in display coords
*s*
the text string
*prop*
a :class:`matplotlib.font_manager.FontProperties` instance
*angle*
the rotation angle in degrees
*mtext*
a :class:`matplotlib.text.Text` instance
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py::
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be plotted along with
your text.
"""
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath)
def _get_text_path_transform(self, x, y, s, prop, angle, ismath):
"""
return the text path and transform
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
text2path = self._text2path
fontsize = self.points_to_pixels(prop.get_size_in_points())
if ismath == "TeX":
verts, codes = text2path.get_text_path(prop, s, ismath=False,
usetex=True)
else:
verts, codes = text2path.get_text_path(prop, s, ismath=ismath,
usetex=False)
path = Path(verts, codes)
angle = angle / 180. * 3.141592
if self.flipy():
transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
fontsize / text2path.FONT_SCALE)
transform = transform.rotate(angle).translate(x, self.height - y)
else:
transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
fontsize / text2path.FONT_SCALE)
transform = transform.rotate(angle).translate(x, y)
return path, transform
def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
"""
draw the text by converting them to paths using textpath module.
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
path, transform = self._get_text_path_transform(
x, y, s, prop, angle, ismath)
color = gc.get_rgb()
gc.set_linewidth(0.0)
self.draw_path(gc, path, transform, rgbFace=color)
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height, and the offset from the bottom to the
baseline (descent), in display coords of the string s with
:class:`~matplotlib.font_manager.FontProperties` prop
"""
if ismath == 'TeX':
# todo: handle props
size = prop.get_size_in_points()
texmanager = self._text2path.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
dpi = self.points_to_pixels(72)
if ismath:
dims = self._text2path.mathtext_parser.parse(s, dpi, prop)
return dims[0:3] # return width, height, descent
flags = self._text2path._get_hinting_flag()
font = self._text2path._get_font(prop)
size = prop.get_size_in_points()
font.set_size(size, dpi)
# the width and height of unrotated string
font.set_text(s, 0.0, flags=flags)
w, h = font.get_width_height()
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def flipy(self):
"""
Return true if y small numbers are top for renderer Is used
for drawing text (:mod:`matplotlib.text`) and images
(:mod:`matplotlib.image`) only
"""
return True
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return 1, 1
def get_texmanager(self):
"""
return the :class:`matplotlib.texmanager.TexManager` instance
"""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
def new_gc(self):
"""
Return an instance of a :class:`GraphicsContextBase`
"""
return GraphicsContextBase()
def points_to_pixels(self, points):
"""
Convert points to display units
*points*
a float or a numpy array of float
return points converted to pixels
You need to override this function (unless your backend
doesn't have a dpi, eg, postscript or svg). Some imaging
systems assume some value for pixels per inch::
points to pixels = points * pixels_per_inch/72.0 * dpi/72.0
"""
return points
def strip_math(self, s):
return cbook.strip_math(s)
def start_rasterizing(self):
"""
Used in MixedModeRenderer. Switch to the raster renderer.
"""
pass
def stop_rasterizing(self):
"""
Used in MixedModeRenderer. Switch back to the vector renderer
and draw the contents of the raster renderer as an image on
the vector renderer.
"""
pass
def start_filter(self):
"""
Used in AggRenderer. Switch to a temporary renderer for image
filtering effects.
"""
pass
def stop_filter(self, filter_func):
"""
Used in AggRenderer. Switch back to the original renderer.
The contents of the temporary renderer is processed with the
*filter_func* and is drawn on the original renderer as an
image.
"""
pass
class GraphicsContextBase:
    """
    An abstract base class that provides color, line styles, etc...
    """
    # a mapping from dash styles to suggested offset, dash pairs
    dashd = {
        'solid': (None, None),
        'dashed': (0, (6.0, 6.0)),
        'dashdot': (0, (3.0, 5.0, 1.0, 5.0)),
        'dotted': (0, (1.0, 3.0)),
    }
    def __init__(self):
        self._alpha = 1.0
        self._forced_alpha = False  # if True, _alpha overrides A from RGBA
        self._antialiased = 1  # use 0,1 not True, False for extension code
        self._capstyle = 'butt'
        self._cliprect = None
        self._clippath = None
        self._dashes = None, None
        self._joinstyle = 'round'
        self._linestyle = 'solid'
        self._linewidth = 1
        self._rgb = (0.0, 0.0, 0.0, 1.0)
        # the color as originally passed to set_foreground, kept so that
        # set_alpha can recompute the RGBA when forcing is toggled
        self._orig_color = (0.0, 0.0, 0.0, 1.0)
        self._hatch = None
        self._url = None
        self._gid = None
        self._snap = None
        self._sketch = None
    def copy_properties(self, gc):
        'Copy properties from gc to self'
        self._alpha = gc._alpha
        self._forced_alpha = gc._forced_alpha
        self._antialiased = gc._antialiased
        self._capstyle = gc._capstyle
        self._cliprect = gc._cliprect
        self._clippath = gc._clippath
        self._dashes = gc._dashes
        self._joinstyle = gc._joinstyle
        self._linestyle = gc._linestyle
        self._linewidth = gc._linewidth
        self._rgb = gc._rgb
        self._orig_color = gc._orig_color
        self._hatch = gc._hatch
        self._url = gc._url
        self._gid = gc._gid
        self._snap = gc._snap
        self._sketch = gc._sketch
    def restore(self):
        """
        Restore the graphics context from the stack - needed only
        for backends that save graphics contexts on a stack
        """
        pass
    def get_alpha(self):
        """
        Return the alpha value used for blending - not supported on
        all backends
        """
        return self._alpha
    def get_antialiased(self):
        "Return true if the object should try to do antialiased rendering"
        return self._antialiased
    def get_capstyle(self):
        """
        Return the capstyle as a string in ('butt', 'round', 'projecting')
        """
        return self._capstyle
    def get_clip_rectangle(self):
        """
        Return the clip rectangle as a :class:`~matplotlib.transforms.Bbox`
        instance
        """
        return self._cliprect
    def get_clip_path(self):
        """
        Return the clip path in the form (path, transform), where path
        is a :class:`~matplotlib.path.Path` instance, and transform is
        an affine transform to apply to the path before clipping.
        """
        if self._clippath is not None:
            return self._clippath.get_transformed_path_and_affine()
        return None, None
    def get_dashes(self):
        """
        Return the dash information as an offset dashlist tuple.
        The dash list is a even size list that gives the ink on, ink
        off in pixels.
        See p107 of to PostScript `BLUEBOOK
        <http://www-cdf.fnal.gov/offline/PostScript/BLUEBOOK.PDF>`_
        for more info.
        Default value is None
        """
        return self._dashes
    def get_forced_alpha(self):
        """
        Return whether the value given by get_alpha() should be used to
        override any other alpha-channel values.
        """
        return self._forced_alpha
    def get_joinstyle(self):
        """
        Return the line join style as one of ('miter', 'round', 'bevel')
        """
        return self._joinstyle
    def get_linestyle(self, style=None):
        """
        Return the linestyle: one of ('solid', 'dashed', 'dashdot',
        'dotted').
        The *style* argument is ignored; it is kept (now optional) only
        for backward compatibility with callers that passed it.
        """
        return self._linestyle
    def get_linewidth(self):
        """
        Return the line width in points as a scalar
        """
        return self._linewidth
    def get_rgb(self):
        """
        returns a tuple of three or four floats from 0-1.
        """
        return self._rgb
    def get_url(self):
        """
        returns a url if one is set, None otherwise
        """
        return self._url
    def get_gid(self):
        """
        Return the object identifier if one is set, None otherwise.
        """
        return self._gid
    def get_snap(self):
        """
        returns the snap setting which may be:
        * True: snap vertices to the nearest pixel center
        * False: leave vertices as-is
        * None: (auto) If the path contains only rectilinear line
          segments, round to the nearest pixel center
        """
        return self._snap
    def set_alpha(self, alpha):
        """
        Set the alpha value used for blending - not supported on all backends.
        If ``alpha=None`` (the default), the alpha components of the
        foreground and fill colors will be used to set their respective
        transparencies (where applicable); otherwise, ``alpha`` will override
        them.
        """
        if alpha is not None:
            self._alpha = alpha
            self._forced_alpha = True
        else:
            self._alpha = 1.0
            self._forced_alpha = False
            # re-derive the RGBA from the original color now that forcing
            # is off
            self.set_foreground(self._orig_color)
    def set_antialiased(self, b):
        """
        True if object should be drawn with antialiased rendering
        """
        # use 0, 1 to make life easier on extension code trying to read the gc
        if b:
            self._antialiased = 1
        else:
            self._antialiased = 0
    def set_capstyle(self, cs):
        """
        Set the capstyle as a string in ('butt', 'round', 'projecting')
        """
        if cs in ('butt', 'round', 'projecting'):
            self._capstyle = cs
        else:
            raise ValueError('Unrecognized cap style. Found %s' % cs)
    def set_clip_rectangle(self, rectangle):
        """
        Set the clip rectangle with sequence (left, bottom, width, height)
        """
        self._cliprect = rectangle
    def set_clip_path(self, path):
        """
        Set the clip path and transformation. Path should be a
        :class:`~matplotlib.transforms.TransformedPath` instance.
        """
        # NOTE: assert-based validation is stripped under ``python -O``;
        # kept as-is so the raised exception type does not change.
        assert path is None or isinstance(path, transforms.TransformedPath)
        self._clippath = path
    def set_dashes(self, dash_offset, dash_list):
        """
        Set the dash style for the gc.
        *dash_offset*
            is the offset (usually 0).
        *dash_list*
            specifies the on-off sequence as points.
            ``(None, None)`` specifies a solid line
        Raises ValueError if any entry in *dash_list* is not positive.
        """
        if dash_list is not None:
            dl = np.asarray(dash_list)
            if np.any(dl <= 0.0):
                raise ValueError("All values in the dash list must be positive")
        self._dashes = dash_offset, dash_list
    def set_foreground(self, fg, isRGBA=False):
        """
        Set the foreground color. fg can be a MATLAB format string, a
        html hex color string, an rgb or rgba unit tuple, or a float between 0
        and 1. In the latter case, grayscale is used.
        If you know fg is rgba, set ``isRGBA=True`` for efficiency.
        """
        self._orig_color = fg
        if self._forced_alpha:
            self._rgb = colors.colorConverter.to_rgba(fg, self._alpha)
        elif isRGBA:
            self._rgb = fg
        else:
            self._rgb = colors.colorConverter.to_rgba(fg)
    def set_graylevel(self, frac):
        """
        Set the foreground color to be a gray level with *frac*
        """
        self._orig_color = frac
        self._rgb = (frac, frac, frac, self._alpha)
    def set_joinstyle(self, js):
        """
        Set the join style to be one of ('miter', 'round', 'bevel')
        """
        if js in ('miter', 'round', 'bevel'):
            self._joinstyle = js
        else:
            raise ValueError('Unrecognized join style. Found %s' % js)
    def set_linewidth(self, w):
        """
        Set the linewidth in points
        """
        self._linewidth = w
    def set_linestyle(self, style):
        """
        Set the linestyle to be one of ('solid', 'dashed', 'dashdot',
        'dotted'). One may specify customized dash styles by providing
        a tuple of (offset, dash pairs). For example, the predefined
        linestyles have following values.:
        'dashed'  : (0, (6.0, 6.0)),
        'dashdot' : (0, (3.0, 5.0, 1.0, 5.0)),
        'dotted'  : (0, (1.0, 3.0)),
        """
        if style in self.dashd:
            offset, dashes = self.dashd[style]
        elif isinstance(style, tuple):
            offset, dashes = style
        else:
            raise ValueError('Unrecognized linestyle: %s' % str(style))
        self._linestyle = style
        self.set_dashes(offset, dashes)
    def set_url(self, url):
        """
        Sets the url for links in compatible backends
        """
        self._url = url
    def set_gid(self, id):
        """
        Sets the id.
        """
        self._gid = id
    def set_snap(self, snap):
        """
        Sets the snap setting which may be:
        * True: snap vertices to the nearest pixel center
        * False: leave vertices as-is
        * None: (auto) If the path contains only rectilinear line
          segments, round to the nearest pixel center
        """
        self._snap = snap
    def set_hatch(self, hatch):
        """
        Sets the hatch style for filling
        """
        self._hatch = hatch
    def get_hatch(self):
        """
        Gets the current hatch style
        """
        return self._hatch
    def get_hatch_path(self, density=6.0):
        """
        Returns a Path for the current hatch, or None if no hatch is set.
        """
        if self._hatch is None:
            return None
        return Path.hatch(self._hatch, density)
    def get_sketch_params(self):
        """
        Returns the sketch parameters for the artist.
        Returns
        -------
        sketch_params : tuple or `None`
            A 3-tuple with the following elements:
            * `scale`: The amplitude of the wiggle perpendicular to the
              source line.
            * `length`: The length of the wiggle along the line.
            * `randomness`: The scale factor by which the length is
              shrunken or expanded.
            May return `None` if no sketch parameters were set.
        """
        return self._sketch
    def set_sketch_params(self, scale=None, length=None, randomness=None):
        """
        Sets the sketch parameters.
        Parameters
        ----------
        scale : float, optional
            The amplitude of the wiggle perpendicular to the source
            line, in pixels. If scale is `None`, or not provided, no
            sketch filter will be provided.
        length : float, optional
            The length of the wiggle along the line, in pixels
            (default 128.0)
        randomness : float, optional
            The scale factor by which the length is shrunken or
            expanded (default 16.0)
        """
        if scale is None:
            self._sketch = None
        else:
            self._sketch = (scale, length or 128.0, randomness or 16.0)
class TimerBase(object):
    '''
    A base class for providing timer events, useful for things animations.
    Backends need to implement a few specific methods in order to use their
    own timing mechanisms so that the timer events are integrated into their
    event loops.
    Mandatory functions that must be implemented:
        * `_timer_start`: Contains backend-specific code for starting
          the timer
        * `_timer_stop`: Contains backend-specific code for stopping
          the timer
    Optional overrides:
        * `_timer_set_single_shot`: Code for setting the timer to
          single shot operating mode, if supported by the timer
          object. If not, the `Timer` class itself will store the flag
          and the `_on_timer` method should be overridden to support
          such behavior.
        * `_timer_set_interval`: Code for setting the interval on the
          timer, if there is a method for doing so on the timer
          object.
        * `_on_timer`: This is the internal function that any timer
          object should call, which will handle the task of running
          all callbacks that have been set.
    Attributes:
        * `interval`: The time between timer events in
          milliseconds. Default is 1000 ms.
        * `single_shot`: Boolean flag indicating whether this timer
          should operate as single shot (run once and then
          stop). Defaults to `False`.
        * `callbacks`: Stores list of (func, args) tuples that will be
          called upon timer events. This list can be manipulated
          directly, or the functions `add_callback` and
          `remove_callback` can be used.
    '''
    def __init__(self, interval=None, callbacks=None):
        #Initialize empty callbacks list and setup default settings if necessary
        if callbacks is None:
            self.callbacks = []
        else:
            self.callbacks = callbacks[:]  # Create a copy
        if interval is None:
            self._interval = 1000
        else:
            self._interval = interval
        self._single = False
        # Default attribute for holding the GUI-specific timer object
        self._timer = None
    def __del__(self):
        'Need to stop timer and possibly disconnect timer.'
        self._timer_stop()
    def start(self, interval=None):
        '''
        Start the timer object. `interval` is optional and will be used
        to reset the timer interval first if provided.
        '''
        if interval is not None:
            self._set_interval(interval)
        self._timer_start()
    def stop(self):
        '''
        Stop the timer.
        '''
        self._timer_stop()
    def _timer_start(self):
        pass
    def _timer_stop(self):
        pass
    def _get_interval(self):
        return self._interval
    def _set_interval(self, interval):
        # Force to int since none of the backends actually support fractional
        # milliseconds, and some error or give warnings.
        interval = int(interval)
        self._interval = interval
        self._timer_set_interval()
    interval = property(_get_interval, _set_interval)
    def _get_single_shot(self):
        return self._single
    def _set_single_shot(self, ss=True):
        self._single = ss
        self._timer_set_single_shot()
    single_shot = property(_get_single_shot, _set_single_shot)
    def add_callback(self, func, *args, **kwargs):
        '''
        Register `func` to be called by timer when the event fires. Any
        additional arguments provided will be passed to `func`.
        '''
        self.callbacks.append((func, args, kwargs))
    def remove_callback(self, func, *args, **kwargs):
        '''
        Remove `func` from list of callbacks. `args` and `kwargs` are optional
        and used to distinguish between copies of the same function registered
        to be called with different arguments.
        '''
        if args or kwargs:
            self.callbacks.remove((func, args, kwargs))
        else:
            funcs = [c[0] for c in self.callbacks]
            if func in funcs:
                self.callbacks.pop(funcs.index(func))
    def _timer_set_interval(self):
        'Used to set interval on underlying timer object.'
        pass
    def _timer_set_single_shot(self):
        'Used to set single shot on underlying timer object.'
        pass
    def _on_timer(self):
        '''
        Runs all function that have been registered as callbacks. Functions
        can return False (or 0) if they should not be called any more. If there
        are no callbacks, the timer is automatically stopped.
        '''
        # Iterate over a copy: callbacks that return False are removed from
        # self.callbacks during the loop, and removing from the list being
        # iterated would silently skip the next callback.
        for func, args, kwargs in list(self.callbacks):
            ret = func(*args, **kwargs)
            # docstring above explains why we use `if ret == False` here,
            # instead of `if not ret`: 0 should also unregister, None not.
            if ret == False:
                self.callbacks.remove((func, args, kwargs))
        if len(self.callbacks) == 0:
            self.stop()
    
class Event:
    """Common base class for all matplotlib events.
    Additional attributes may be attached by the code generating the
    event, as defined in :meth:`FigureCanvasBase.mpl_connect`.  Every
    event carries:
    *name*
        the event name
    *canvas*
        the FigureCanvas instance generating the event
    *guiEvent*
        the GUI event that triggered the matplotlib event
    """
    def __init__(self, name, canvas, guiEvent=None):
        # Events are plain attribute records; just stash the inputs.
        self.guiEvent = guiEvent
        self.name = name
        self.canvas = canvas
class IdleEvent(Event):
    """
    An event triggered by the GUI backend when it is idle -- useful
    for passive animation.
    Adds no attributes beyond those of :class:`Event`.
    """
    pass
class DrawEvent(Event):
    """Event fired when the canvas has been drawn.
    Carries the standard :class:`Event` attributes plus:
    *renderer*
        the :class:`RendererBase` instance used for the draw
    """
    def __init__(self, name, canvas, renderer):
        self.renderer = renderer
        Event.__init__(self, name, canvas)
class ResizeEvent(Event):
    """Event fired when the canvas changes size.
    Carries the standard :class:`Event` attributes plus:
    *width*
        width of the canvas in pixels
    *height*
        height of the canvas in pixels
    """
    def __init__(self, name, canvas):
        Event.__init__(self, name, canvas)
        # Snapshot the canvas size at the moment the event is created.
        size = canvas.get_width_height()
        self.width, self.height = size
class CloseEvent(Event):
    """Event fired when a figure is closed.
    Carries only the standard :class:`Event` attributes.
    """
    def __init__(self, name, canvas, guiEvent=None):
        # Nothing beyond the base attributes; forward everything upward.
        Event.__init__(self, name, canvas, guiEvent)
class LocationEvent(Event):
    """
    An event that has a screen location.
    In addition to the :class:`Event` attributes, the following
    event attributes are defined:
    *x*
        x position - pixels from left of canvas
    *y*
        y position - pixels from bottom of canvas
    *inaxes*
        the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
    *xdata*
        x coord of mouse in data coords (None if not over axes)
    *ydata*
        y coord of mouse in data coords (None if not over axes)
    """
    x = None        # x position - pixels from left of canvas
    y = None        # y position - pixels from bottom of canvas
    inaxes = None   # the Axes instance if mouse is over axes
    xdata = None    # x coord of mouse in data coords
    ydata = None    # y coord of mouse in data coords
    # the last event that was triggered before this one; used by
    # _update_enter_leave to emit axes/figure enter and leave events
    lastevent = None
    def __init__(self, name, canvas, x, y, guiEvent=None):
        """
        *x*, *y* in figure coords, 0,0 = bottom, left
        """
        Event.__init__(self, name, canvas, guiEvent=guiEvent)
        self.x = x
        self.y = y
        if x is None or y is None:
            # cannot check if event was in axes if no x,y info
            self.inaxes = None
            self._update_enter_leave()
            return
        # Find all axes containing the mouse
        if self.canvas.mouse_grabber is None:
            axes_list = [a for a in self.canvas.figure.get_axes()
                         if a.in_axes(self)]
        else:
            # a widget has grabbed the mouse: route the event to it alone
            axes_list = [self.canvas.mouse_grabber]
        if len(axes_list) == 0:  # None found
            self.inaxes = None
            self._update_enter_leave()
            return
        elif (len(axes_list) > 1):  # Overlap, get the highest zorder
            axes_list.sort(key=lambda x: x.zorder)
            self.inaxes = axes_list[-1]  # Use the highest zorder
        else:  # Just found one hit
            self.inaxes = axes_list[0]
        try:
            # map display coords to data coords of the axes under the mouse
            trans = self.inaxes.transData.inverted()
            xdata, ydata = trans.transform_point((x, y))
        except ValueError:
            # the transform may not be invertible; leave data coords unset
            self.xdata = None
            self.ydata = None
        else:
            self.xdata = xdata
            self.ydata = ydata
        self._update_enter_leave()
    def _update_enter_leave(self):
        'process the figure/axes enter leave events'
        if LocationEvent.lastevent is not None:
            last = LocationEvent.lastevent
            if last.inaxes != self.inaxes:
                # process axes enter/leave events
                try:
                    if last.inaxes is not None:
                        last.canvas.callbacks.process('axes_leave_event', last)
                except:
                    pass
                    # See ticket 2901582.
                    # I think this is a valid exception to the rule
                    # against catching all exceptions; if anything goes
                    # wrong, we simply want to move on and process the
                    # current event.
                if self.inaxes is not None:
                    self.canvas.callbacks.process('axes_enter_event', self)
        else:
            # process a figure enter event
            if self.inaxes is not None:
                self.canvas.callbacks.process('axes_enter_event', self)
        LocationEvent.lastevent = self
class MouseEvent(LocationEvent):
    """
    A mouse event ('button_press_event',
                   'button_release_event',
                   'scroll_event',
                   'motion_notify_event').
    In addition to the :class:`Event` and :class:`LocationEvent`
    attributes, the following attributes are defined:
    *button*
        button pressed None, 1, 2, 3, 'up', 'down' (up and down are used
        for scroll events)
    *key*
        the key depressed when the mouse event triggered (see
        :class:`KeyEvent`)
    *step*
        number of scroll steps (positive for 'up', negative for 'down')
    Example usage::
        def on_press(event):
            print('you pressed', event.button, event.xdata, event.ydata)
        cid = fig.canvas.mpl_connect('button_press_event', on_press)
    """
    x = None         # x position - pixels from left of canvas
    y = None         # y position - pixels from bottom of canvas
    button = None    # button pressed None, 1, 2, 3
    dblclick = None  # whether or not the event is the result of a double click
    inaxes = None    # the Axes instance if mouse is over axes
    xdata = None     # x coord of mouse in data coords
    ydata = None     # y coord of mouse in data coords
    step = None      # scroll steps for scroll events
    def __init__(self, name, canvas, x, y, button=None, key=None,
                 step=0, dblclick=False, guiEvent=None):
        """
        x, y in figure coords, 0,0 = bottom, left
        button pressed None, 1, 2, 3, 'up', 'down'
        """
        LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
        self.button = button
        self.key = key
        self.step = step
        self.dblclick = dblclick
    def __str__(self):
        # *button* may be None or the strings 'up'/'down' (scroll events),
        # so it must be formatted with %s -- %d raised TypeError for those.
        return ("MPL MouseEvent: xy=(%d,%d) xydata=(%s,%s) button=%s " +
                "dblclick=%s inaxes=%s") % (self.x, self.y, self.xdata,
                                            self.ydata, self.button,
                                            self.dblclick, self.inaxes)
class PickEvent(Event):
    """
    a pick event, fired when the user picks a location on the canvas
    sufficiently close to an artist.
    Attrs: all the :class:`Event` attributes plus
    *mouseevent*
        the :class:`MouseEvent` that generated the pick
    *artist*
        the :class:`~matplotlib.artist.Artist` picked
    other
        extra class dependent attrs -- eg a
        :class:`~matplotlib.lines.Line2D` pick may define different
        extra attributes than a
        :class:`~matplotlib.collections.PatchCollection` pick event
    Example usage::
        line, = ax.plot(rand(100), 'o', picker=5)  # 5 points tolerance
        def on_pick(event):
            thisline = event.artist
            xdata, ydata = thisline.get_data()
            ind = event.ind
            print('on pick line:', zip(xdata[ind], ydata[ind]))
        cid = fig.canvas.mpl_connect('pick_event', on_pick)
    """
    def __init__(self, name, canvas, mouseevent, artist,
                 guiEvent=None, **kwargs):
        Event.__init__(self, name, canvas, guiEvent)
        self.mouseevent = mouseevent
        self.artist = artist
        # Picker-specific extras (e.g. ``ind``) are attached last so they
        # deliberately override the attributes set above if keys collide.
        self.__dict__.update(kwargs)
class KeyEvent(LocationEvent):
    """
    A key event (key press, key release).
    Attach additional attributes as defined in
    :meth:`FigureCanvasBase.mpl_connect`.
    In addition to the :class:`Event` and :class:`LocationEvent`
    attributes, the following attributes are defined:
    *key*
        the key(s) pressed. Could be **None**, a single case sensitive ascii
        character ("g", "G", "#", etc.), a special key
        ("control", "shift", "f1", "up", etc.) or a
        combination of the above (e.g., "ctrl+alt+g", "ctrl+alt+G").
    .. note::
        Modifier keys will be prefixed to the pressed key and will be in the
        order "ctrl", "alt", "super". The exception to this rule is when the
        pressed key is itself a modifier key, therefore "ctrl+alt" and
        "alt+control" can both be valid key values.
    Example usage::
        def on_key(event):
            print('you pressed', event.key, event.xdata, event.ydata)
        cid = fig.canvas.mpl_connect('key_press_event', on_key)
    """
    def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):
        # x, y default to 0 because some backends cannot report the pointer
        # position for keyboard events.
        LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
        self.key = key
class FigureCanvasBase(object):
    """
    The canvas the figure renders into.
    Public attributes
    *figure*
        A :class:`matplotlib.figure.Figure` instance
    """
    # Names of every event this canvas can emit; backends validate
    # connection requests against this list.
    events = [
        'resize_event',
        'draw_event',
        'key_press_event',
        'key_release_event',
        'button_press_event',
        'button_release_event',
        'scroll_event',
        'motion_notify_event',
        'pick_event',
        'idle_event',
        'figure_enter_event',
        'figure_leave_event',
        'axes_enter_event',
        'axes_leave_event',
        'close_event'
    ]
    # Whether the backend supports blitting (fast partial redraws).
    supports_blit = True
    def __init__(self, figure):
        # Link the figure and canvas both ways.
        figure.set_canvas(self)
        self.figure = figure
        # a dictionary from event name to a dictionary that maps cid->func
        self.callbacks = cbook.CallbackRegistry()
        self.widgetlock = widgets.LockDraw()
        self._button = None  # the button pressed
        self._key = None  # the key pressed
        self._lastx, self._lasty = None, None
        # Default pick dispatch: forward button-press and scroll events
        # to Figure.pick (see self.pick).
        self.button_pick_id = self.mpl_connect('button_press_event', self.pick)
        self.scroll_pick_id = self.mpl_connect('scroll_event', self.pick)
        self.mouse_grabber = None  # the axes currently grabbing mouse
        self.toolbar = None  # NavigationToolbar2 will set me
        self._is_saving = False
        # Dead debugging hooks, intentionally disabled (if False): enable to
        # highlight or remove artists under the cursor.
        if False:
            ## highlight the artists that are hit
            self.mpl_connect('motion_notify_event', self.onHilite)
            ## delete the artists that are clicked on
            #self.mpl_disconnect(self.button_pick_id)
            #self.mpl_connect('button_press_event',self.onRemove)
    def is_saving(self):
        """
        Returns `True` when the renderer is in the process of saving
        to a file, rather than rendering for an on-screen buffer.
        """
        # Flag is toggled by print_figure() around the actual save.
        return self._is_saving
    def onRemove(self, ev):
        """
        Mouse event processor which removes the top artist
        under the cursor. Connect this to the 'mouse_press_event'
        using::
            canvas.mpl_connect('mouse_press_event',canvas.onRemove)
        """
        def sort_artists(artists):
            # This depends on stable sort and artists returned
            # from get_children in z order.
            # NOTE(review): ties on zorder fall back to comparing the artist
            # objects themselves -- fine on Python 2, would raise on
            # Python 3; a key= sort would be safer.
            L = [(h.zorder, h) for h in artists]
            L.sort()
            return [h for zorder, h in L]
        # Find the top artist under the cursor
        under = sort_artists(self.figure.hitlist(ev))
        h = None
        if under:
            h = under[-1]
        # Try deleting that artist, or its parent if you
        # can't delete the artist
        while h:
            if h.remove():
                self.draw_idle()
                break
            parent = None
            for p in under:
                if h in p.get_children():
                    parent = p
                    break
            # Walk up to the parent; loop ends when no parent is found.
            h = parent
    def onHilite(self, ev):
        """
        Mouse event processor which highlights the artists
        under the cursor. Connect this to the 'motion_notify_event'
        using::
            canvas.mpl_connect('motion_notify_event',canvas.onHilite)
        """
        # _active maps artist -> saved original colour (or colour pair).
        if not hasattr(self, '_active'):
            self._active = dict()
        under = self.figure.hitlist(ev)
        enter = [a for a in under if a not in self._active]
        leave = [a for a in self._active if a not in under]
        #print "within:"," ".join([str(x) for x in under])
        #print "entering:",[str(a) for a in enter]
        #print "leaving:",[str(a) for a in leave]
        # On leave restore the captured colour
        for a in leave:
            if hasattr(a, 'get_color'):
                a.set_color(self._active[a])
            elif hasattr(a, 'get_edgecolor'):
                a.set_edgecolor(self._active[a][0])
                a.set_facecolor(self._active[a][1])
            del self._active[a]
        # On enter, capture the color and repaint the artist
        # with the highlight colour. Capturing colour has to
        # be done first in case the parent recolouring affects
        # the child.
        for a in enter:
            if hasattr(a, 'get_color'):
                self._active[a] = a.get_color()
            elif hasattr(a, 'get_edgecolor'):
                self._active[a] = (a.get_edgecolor(), a.get_facecolor())
            else:
                self._active[a] = None
        for a in enter:
            if hasattr(a, 'get_color'):
                a.set_color('red')
            elif hasattr(a, 'get_edgecolor'):
                a.set_edgecolor('red')
                a.set_facecolor('lightblue')
            else:
                # Redundant with the capture loop above, but harmless.
                self._active[a] = None
        self.draw_idle()
def pick(self, mouseevent):
if not self.widgetlock.locked():
self.figure.pick(mouseevent)
    def blit(self, bbox=None):
        """
        blit the canvas in bbox (default entire canvas)
        """
        # No-op here; backends that support blitting override this.
        pass
    def resize(self, w, h):
        """
        set the canvas size in pixels
        """
        # No-op here; GUI backends override this.
        pass
def draw_event(self, renderer):
"""
This method will be call all functions connected to the
'draw_event' with a :class:`DrawEvent`
"""
s = 'draw_event'
event = DrawEvent(s, self, renderer)
self.callbacks.process(s, event)
def resize_event(self):
"""
This method will be call all functions connected to the
'resize_event' with a :class:`ResizeEvent`
"""
s = 'resize_event'
event = ResizeEvent(s, self)
self.callbacks.process(s, event)
    def close_event(self, guiEvent=None):
        """
        This method will be called by all functions connected to the
        'close_event' with a :class:`CloseEvent`
        """
        s = 'close_event'
        try:
            event = CloseEvent(s, self, guiEvent=guiEvent)
            self.callbacks.process(s, event)
        except (TypeError, AttributeError):
            # Deliberately best-effort: do not let teardown races crash us.
            pass
            # Suppress the TypeError when the python session is being killed.
            # It may be that a better solution would be a mechanism to
            # disconnect all callbacks upon shutdown.
            # AttributeError occurs on OSX with qt4agg upon exiting
            # with an open window; 'callbacks' attribute no longer exists.
def key_press_event(self, key, guiEvent=None):
"""
This method will be call all functions connected to the
'key_press_event' with a :class:`KeyEvent`
"""
self._key = key
s = 'key_press_event'
event = KeyEvent(
s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
def key_release_event(self, key, guiEvent=None):
"""
This method will be call all functions connected to the
'key_release_event' with a :class:`KeyEvent`
"""
s = 'key_release_event'
event = KeyEvent(
s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._key = None
def pick_event(self, mouseevent, artist, **kwargs):
"""
This method will be called by artists who are picked and will
fire off :class:`PickEvent` callbacks registered listeners
"""
s = 'pick_event'
event = PickEvent(s, self, mouseevent, artist, **kwargs)
self.callbacks.process(s, event)
def scroll_event(self, x, y, step, guiEvent=None):
"""
Backend derived classes should call this function on any
scroll wheel event. x,y are the canvas coords: 0,0 is lower,
left. button and key are as defined in MouseEvent.
This method will be call all functions connected to the
'scroll_event' with a :class:`MouseEvent` instance.
"""
if step >= 0:
self._button = 'up'
else:
self._button = 'down'
s = 'scroll_event'
mouseevent = MouseEvent(s, self, x, y, self._button, self._key,
step=step, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_press_event(self, x, y, button, dblclick=False, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button press. x,y are the canvas coords: 0,0 is lower, left.
button and key are as defined in :class:`MouseEvent`.
This method will be call all functions connected to the
'button_press_event' with a :class:`MouseEvent` instance.
"""
self._button = button
s = 'button_press_event'
mouseevent = MouseEvent(s, self, x, y, button, self._key,
dblclick=dblclick, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_release_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button release.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
This method will be call all functions connected to the
'button_release_event' with a :class:`MouseEvent` instance.
"""
s = 'button_release_event'
event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._button = None
def motion_notify_event(self, x, y, guiEvent=None):
"""
Backend derived classes should call this function on any
motion-notify-event.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
This method will be call all functions connected to the
'motion_notify_event' with a :class:`MouseEvent` instance.
"""
self._lastx, self._lasty = x, y
s = 'motion_notify_event'
event = MouseEvent(s, self, x, y, self._button, self._key,
guiEvent=guiEvent)
self.callbacks.process(s, event)
def leave_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when leaving
canvas
*guiEvent*
the native UI event that generated the mpl event
"""
self.callbacks.process('figure_leave_event', LocationEvent.lastevent)
LocationEvent.lastevent = None
self._lastx, self._lasty = None, None
def enter_notify_event(self, guiEvent=None, xy=None):
"""
Backend derived classes should call this function when entering
canvas
*guiEvent*
the native UI event that generated the mpl event
*xy*
the coordinate location of the pointer when the canvas is
entered
"""
if xy is not None:
x, y = xy
self._lastx, self._lasty = x, y
event = Event('figure_enter_event', self, guiEvent)
self.callbacks.process('figure_enter_event', event)
def idle_event(self, guiEvent=None):
"""Called when GUI is idle."""
s = 'idle_event'
event = IdleEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
def grab_mouse(self, ax):
"""
Set the child axes which are currently grabbing the mouse events.
Usually called by the widgets themselves.
It is an error to call this if the mouse is already grabbed by
another axes.
"""
if self.mouse_grabber not in (None, ax):
raise RuntimeError('two different attempted to grab mouse input')
self.mouse_grabber = ax
    def release_mouse(self, ax):
        """
        Release the mouse grab held by the axes, ax.
        Usually called by the widgets.
        It is OK to call this even if *ax* does not currently hold the
        mouse grab.
        """
        if self.mouse_grabber is ax:
            self.mouse_grabber = None
    def draw(self, *args, **kwargs):
        """
        Render the :class:`~matplotlib.figure.Figure`
        """
        # No-op here; every concrete backend overrides this.
        pass
    def draw_idle(self, *args, **kwargs):
        """
        :meth:`draw` only if idle; defaults to draw but backends can override
        """
        self.draw(*args, **kwargs)
    def draw_cursor(self, event):
        """
        Draw a cursor in the event.axes if inaxes is not None. Use
        native GUI drawing for efficiency if possible
        """
        # No-op here; GUI backends may override.
        pass
def get_width_height(self):
"""
Return the figure width and height in points or pixels
(depending on the backend), truncated to integers
"""
return int(self.figure.bbox.width), int(self.figure.bbox.height)
filetypes = {
'eps': 'Encapsulated Postscript',
'pdf': 'Portable Document Format',
'pgf': 'LaTeX PGF Figure',
'png': 'Portable Network Graphics',
'ps': 'Postscript',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'}
# All of these print_* functions do a lazy import because
# a) otherwise we'd have cyclical imports, since all of these
# classes inherit from FigureCanvasBase
# b) so we don't import a bunch of stuff the user may never use
# TODO: these print_* throw ImportErrror when called from
# compare_images_decorator (decorators.py line 112)
# if the backend has not already been loaded earlier on. Simple trigger:
# >>> import matplotlib.tests.test_spines
# >>> list(matplotlib.tests.test_spines.test_spines_axes_positions())[0][0]()
def print_eps(self, *args, **kwargs):
from .backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_eps(*args, **kwargs)
def print_pdf(self, *args, **kwargs):
from .backends.backend_pdf import FigureCanvasPdf # lazy import
pdf = self.switch_backends(FigureCanvasPdf)
return pdf.print_pdf(*args, **kwargs)
def print_pgf(self, *args, **kwargs):
from .backends.backend_pgf import FigureCanvasPgf # lazy import
pgf = self.switch_backends(FigureCanvasPgf)
return pgf.print_pgf(*args, **kwargs)
def print_png(self, *args, **kwargs):
from .backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_png(*args, **kwargs)
def print_ps(self, *args, **kwargs):
from .backends.backend_ps import FigureCanvasPS # lazy import
ps = self.switch_backends(FigureCanvasPS)
return ps.print_ps(*args, **kwargs)
def print_raw(self, *args, **kwargs):
from .backends.backend_agg import FigureCanvasAgg # lazy import
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_raw(*args, **kwargs)
print_bmp = print_rgba = print_raw
def print_svg(self, *args, **kwargs):
from .backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svg(*args, **kwargs)
def print_svgz(self, *args, **kwargs):
from .backends.backend_svg import FigureCanvasSVG # lazy import
svg = self.switch_backends(FigureCanvasSVG)
return svg.print_svgz(*args, **kwargs)
    # JPEG and TIFF output are only available when PIL is importable;
    # both rasterise through Agg and hand the buffer to PIL for encoding.
    if _has_pil:
        filetypes['jpg'] = 'Joint Photographic Experts Group'
        filetypes['jpeg'] = filetypes['jpg']
        def print_jpg(self, filename_or_obj, *args, **kwargs):
            """
            Supported kwargs:
            *quality*: The image quality, on a scale from 1 (worst) to
                95 (best). The default is 95, if not given in the
                matplotlibrc file in the savefig.jpeg_quality parameter.
                Values above 95 should be avoided; 100 completely
                disables the JPEG quantization stage.
            *optimize*: If present, indicates that the encoder should
                make an extra pass over the image in order to select
                optimal encoder settings.
            *progressive*: If present, indicates that this image
                should be stored as a progressive JPEG file.
            """
            from .backends.backend_agg import FigureCanvasAgg  # lazy import
            agg = self.switch_backends(FigureCanvasAgg)
            buf, size = agg.print_to_buffer()
            if kwargs.pop("dryrun", False):
                # Dry run: render only (print_figure uses this to measure
                # the tight bbox), skip the PIL encode.
                return
            image = Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)
            # Only forward the PIL-recognised options.
            options = cbook.restrict_dict(kwargs, ['quality', 'optimize',
                                                   'progressive'])
            if 'quality' not in options:
                options['quality'] = rcParams['savefig.jpeg_quality']
            return image.save(filename_or_obj, format='jpeg', **options)
        print_jpeg = print_jpg
        filetypes['tif'] = filetypes['tiff'] = 'Tagged Image File Format'
        def print_tif(self, filename_or_obj, *args, **kwargs):
            from .backends.backend_agg import FigureCanvasAgg  # lazy import
            agg = self.switch_backends(FigureCanvasAgg)
            buf, size = agg.print_to_buffer()
            if kwargs.pop("dryrun", False):
                return
            image = Image.frombuffer('RGBA', size, buf, 'raw', 'RGBA', 0, 1)
            # Record the figure dpi in the TIFF metadata.
            dpi = (self.figure.dpi, self.figure.dpi)
            return image.save(filename_or_obj, format='tiff',
                              dpi=dpi)
        print_tiff = print_tif
    def get_supported_filetypes(self):
        """Return dict of savefig file formats supported by this backend"""
        # Note: returns the class-level ``filetypes`` dict itself, not a copy.
        return self.filetypes
def get_supported_filetypes_grouped(self):
"""Return a dict of savefig file formats supported by this backend,
where the keys are a file type name, such as 'Joint Photographic
Experts Group', and the values are a list of filename extensions used
for that filetype, such as ['jpg', 'jpeg']."""
groupings = {}
for ext, name in self.filetypes.items():
groupings.setdefault(name, []).append(ext)
groupings[name].sort()
return groupings
    def _get_print_method(self, format):
        """
        Return a callable that saves the figure in file format *format*.

        Registered external backends (in ``_backend_d``) take precedence
        over this class's own ``print_<format>`` methods.
        Raises ValueError if *format* is not supported.
        """
        method_name = 'print_%s' % format
        # check for registered backends
        if format in _backend_d:
            backend_class = _backend_d[format]
            # Closure defers the backend switch until save time.
            def _print_method(*args, **kwargs):
                backend = self.switch_backends(backend_class)
                print_method = getattr(backend, method_name)
                return print_method(*args, **kwargs)
            return _print_method
        formats = self.get_supported_filetypes()
        if (format not in formats or not hasattr(self, method_name)):
            formats = sorted(formats)
            raise ValueError(
                'Format "%s" is not supported.\n'
                'Supported formats: '
                '%s.' % (format, ', '.join(formats)))
        return getattr(self, method_name)
    def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
                     orientation='portrait', format=None, **kwargs):
        """
        Render the figure to hardcopy. Set the figure patch face and edge
        colors. This is useful because some of the GUIs have a gray figure
        face color background and you'll probably want to override this on
        hardcopy.
        Arguments are:
        *filename*
            can also be a file object on image backends
        *orientation*
            only currently applies to PostScript printing.
        *dpi*
            the dots per inch to save the figure in; if None, use savefig.dpi
        *facecolor*
            the facecolor of the figure
        *edgecolor*
            the edgecolor of the figure
        *orientation*
            'landscape' | 'portrait' (not supported on all backends)
        *format*
            when set, forcibly set the file format to save to
        *bbox_inches*
            Bbox in inches. Only the given portion of the figure is
            saved. If 'tight', try to figure out the tight bbox of
            the figure. If None, use savefig.bbox
        *pad_inches*
            Amount of padding around the figure when bbox_inches is
            'tight'. If None, use savefig.pad_inches
        *bbox_extra_artists*
            A list of extra artists that will be considered when the
            tight bbox is calculated.
        """
        if format is None:
            # get format from filename, or from backend's default filetype
            if cbook.is_string_like(filename):
                format = os.path.splitext(filename)[1][1:]
            if format is None or format == '':
                format = self.get_default_filetype()
                if cbook.is_string_like(filename):
                    filename = filename.rstrip('.') + '.' + format
        format = format.lower()
        print_method = self._get_print_method(format)
        if dpi is None:
            dpi = rcParams['savefig.dpi']
        # Remember figure state so it can be restored in the finally block.
        origDPI = self.figure.dpi
        origfacecolor = self.figure.get_facecolor()
        origedgecolor = self.figure.get_edgecolor()
        self.figure.dpi = dpi
        self.figure.set_facecolor(facecolor)
        self.figure.set_edgecolor(edgecolor)
        bbox_inches = kwargs.pop("bbox_inches", None)
        if bbox_inches is None:
            bbox_inches = rcParams['savefig.bbox']
        if bbox_inches:
            # call adjust_bbox to save only the given area
            if bbox_inches == "tight":
                # when bbox_inches == "tight", it saves the figure
                # twice. The first save command is just to estimate
                # the bounding box of the figure. A stringIO object is
                # used as a temporary file object, but it causes a
                # problem for some backends (ps backend with
                # usetex=True) if they expect a filename, not a
                # file-like object. As I think it is best to change
                # the backend to support file-like object, i'm going
                # to leave it as it is. However, a better solution
                # than stringIO seems to be needed. -JJL
                #result = getattr(self, method_name)
                result = print_method(
                    io.BytesIO(),
                    dpi=dpi,
                    facecolor=facecolor,
                    edgecolor=edgecolor,
                    orientation=orientation,
                    dryrun=True,
                    **kwargs)
                renderer = self.figure._cachedRenderer
                bbox_inches = self.figure.get_tightbbox(renderer)
                bbox_artists = kwargs.pop("bbox_extra_artists", None)
                if bbox_artists is None:
                    bbox_artists = self.figure.get_default_bbox_extra_artists()
                # Grow the tight bbox to cover the extra artists, honouring
                # each artist's clip box/path.
                bbox_filtered = []
                for a in bbox_artists:
                    bbox = a.get_window_extent(renderer)
                    if a.get_clip_on():
                        clip_box = a.get_clip_box()
                        if clip_box is not None:
                            bbox = Bbox.intersection(bbox, clip_box)
                        clip_path = a.get_clip_path()
                        if clip_path is not None and bbox is not None:
                            clip_path = clip_path.get_fully_transformed_path()
                            bbox = Bbox.intersection(bbox,
                                                     clip_path.get_extents())
                    if bbox is not None and (bbox.width != 0 or
                                             bbox.height != 0):
                        bbox_filtered.append(bbox)
                if bbox_filtered:
                    _bbox = Bbox.union(bbox_filtered)
                    trans = Affine2D().scale(1.0 / self.figure.dpi)
                    bbox_extra = TransformedBbox(_bbox, trans)
                    bbox_inches = Bbox.union([bbox_inches, bbox_extra])
                pad = kwargs.pop("pad_inches", None)
                if pad is None:
                    pad = rcParams['savefig.pad_inches']
                bbox_inches = bbox_inches.padded(pad)
            restore_bbox = tight_bbox.adjust_bbox(self.figure, format,
                                                  bbox_inches)
            _bbox_inches_restore = (bbox_inches, restore_bbox)
        else:
            _bbox_inches_restore = None
        self._is_saving = True
        try:
            #result = getattr(self, method_name)(
            result = print_method(
                filename,
                dpi=dpi,
                facecolor=facecolor,
                edgecolor=edgecolor,
                orientation=orientation,
                bbox_inches_restore=_bbox_inches_restore,
                **kwargs)
        finally:
            if bbox_inches and restore_bbox:
                restore_bbox()
            self.figure.dpi = origDPI
            self.figure.set_facecolor(origfacecolor)
            self.figure.set_edgecolor(origedgecolor)
            self.figure.set_canvas(self)
            self._is_saving = False
            #self.figure.canvas.draw() ## seems superfluous
        return result
    def get_default_filetype(self):
        """
        Get the default savefig file format as specified in rcParam
        ``savefig.format``. Returned string excludes period. Overridden
        in backends that only support a single file type.
        """
        return rcParams['savefig.format']
def get_window_title(self):
"""
Get the title text of the window containing the figure.
Return None if there is no window (eg, a PS backend).
"""
if hasattr(self, "manager"):
return self.manager.get_window_title()
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (eg, a PS backend).
"""
if hasattr(self, "manager"):
self.manager.set_window_title(title)
def get_default_filename(self):
"""
Return a string, which includes extension, suitable for use as
a default filename.
"""
default_filename = self.get_window_title() or 'image'
default_filename = default_filename.lower().replace(' ', '_')
return default_filename + '.' + self.get_default_filetype()
def switch_backends(self, FigureCanvasClass):
"""
Instantiate an instance of FigureCanvasClass
This is used for backend switching, eg, to instantiate a
FigureCanvasPS from a FigureCanvasGTK. Note, deep copying is
not done, so any changes to one of the instances (eg, setting
figure size or line props), will be reflected in the other
"""
newCanvas = FigureCanvasClass(self.figure)
newCanvas._is_saving = self._is_saving
return newCanvas
    def mpl_connect(self, s, func):
        """
        Connect event with string *s* to *func*. The signature of *func* is::
            def func(event)
        where event is a :class:`matplotlib.backend_bases.Event`. The
        following events are recognized
        - 'button_press_event'
        - 'button_release_event'
        - 'draw_event'
        - 'key_press_event'
        - 'key_release_event'
        - 'motion_notify_event'
        - 'pick_event'
        - 'resize_event'
        - 'scroll_event'
        - 'figure_enter_event',
        - 'figure_leave_event',
        - 'axes_enter_event',
        - 'axes_leave_event'
        - 'close_event'
        For the location events (button and key press/release), if the
        mouse is over the axes, the variable ``event.inaxes`` will be
        set to the :class:`~matplotlib.axes.Axes` the event occurs
        over, and additionally, the variables ``event.xdata`` and
        ``event.ydata`` will be defined. This is the mouse location
        in data coords. See
        :class:`~matplotlib.backend_bases.KeyEvent` and
        :class:`~matplotlib.backend_bases.MouseEvent` for more info.
        Return value is a connection id that can be used with
        :meth:`~matplotlib.backend_bases.Event.mpl_disconnect`.
        Example usage::
            def on_press(event):
                print('you pressed', event.button, event.xdata, event.ydata)
            cid = canvas.mpl_connect('button_press_event', on_press)
        """
        return self.callbacks.connect(s, func)
    def mpl_disconnect(self, cid):
        """
        Disconnect callback id cid
        Example usage::
            cid = canvas.mpl_connect('button_press_event', on_press)
            #...later
            canvas.mpl_disconnect(cid)
        """
        return self.callbacks.disconnect(cid)
    def new_timer(self, *args, **kwargs):
        """
        Creates a new backend-specific subclass of
        :class:`backend_bases.Timer`. This is useful for getting periodic
        events through the backend's native event loop. Implemented only for
        backends with GUIs.
        optional arguments:
        *interval*
            Timer interval in milliseconds
        *callbacks*
            Sequence of (func, args, kwargs) where func(*args, **kwargs) will
            be executed by the timer every *interval*.
        """
        # Base implementation returns the backend-agnostic TimerBase.
        return TimerBase(*args, **kwargs)
    def flush_events(self):
        """
        Flush the GUI events for the figure. Implemented only for
        backends with GUIs.

        Raises NotImplementedError in this base class.
        """
        raise NotImplementedError
    def start_event_loop(self, timeout):
        """
        Start an event loop. This is used to start a blocking event
        loop so that interactive functions, such as ginput and
        waitforbuttonpress, can wait for events. This should not be
        confused with the main GUI event loop, which is always running
        and has nothing to do with this.
        This is implemented only for backends with GUIs.

        Raises NotImplementedError in this base class.
        """
        raise NotImplementedError
    def stop_event_loop(self):
        """
        Stop an event loop. This is used to stop a blocking event
        loop so that interactive functions, such as ginput and
        waitforbuttonpress, can wait for events.
        This is implemented only for backends with GUIs.

        Raises NotImplementedError in this base class.
        """
        raise NotImplementedError
def start_event_loop_default(self, timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This function provides default event loop functionality based
on time.sleep that is meant to be used until event loop
functions for each of the GUI backends can be written. As
such, it throws a deprecated warning.
Call signature::
start_event_loop_default(self,timeout=0)
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
"""
str = "Using default event loop until function specific"
str += " to this GUI is implemented"
warnings.warn(str, mplDeprecation)
if timeout <= 0:
timeout = np.inf
timestep = 0.01
counter = 0
self._looping = True
while self._looping and counter * timestep < timeout:
self.flush_events()
time.sleep(timestep)
counter += 1
    def stop_event_loop_default(self):
        """
        Stop an event loop. This is used to stop a blocking event
        loop so that interactive functions, such as ginput and
        waitforbuttonpress, can wait for events.
        Call signature::
            stop_event_loop_default(self)
        """
        # start_event_loop_default() polls this flag each iteration.
        self._looping = False
def key_press_handler(event, canvas, toolbar=None):
    """
    Implement the default mpl key bindings for the canvas and toolbar
    described at :ref:`key-event-handling`
    *event*
        a :class:`KeyEvent` instance
    *canvas*
        a :class:`FigureCanvasBase` instance
    *toolbar*
        a :class:`NavigationToolbar2` instance
    """
    # these bindings happen whether you are over an axes or not
    if event.key is None:
        return
    # Load key-mappings from your matplotlibrc file.
    fullscreen_keys = rcParams['keymap.fullscreen']
    home_keys = rcParams['keymap.home']
    back_keys = rcParams['keymap.back']
    forward_keys = rcParams['keymap.forward']
    pan_keys = rcParams['keymap.pan']
    zoom_keys = rcParams['keymap.zoom']
    save_keys = rcParams['keymap.save']
    quit_keys = rcParams['keymap.quit']
    grid_keys = rcParams['keymap.grid']
    toggle_yscale_keys = rcParams['keymap.yscale']
    toggle_xscale_keys = rcParams['keymap.xscale']
    # Fix: renamed from ``all`` so the builtin is not shadowed.
    all_axes_keys = rcParams['keymap.all_axes']
    # toggle fullscreen mode (default key 'f')
    if event.key in fullscreen_keys:
        canvas.manager.full_screen_toggle()
    # quit the figure (default key 'ctrl+w')
    if event.key in quit_keys:
        Gcf.destroy_fig(canvas.figure)
    if toolbar is not None:
        # home or reset mnemonic (default key 'h', 'home' and 'r')
        if event.key in home_keys:
            toolbar.home()
        # forward / backward keys to enable left handed quick navigation
        # (default key for backward: 'left', 'backspace' and 'c')
        elif event.key in back_keys:
            toolbar.back()
        # (default key for forward: 'right' and 'v')
        elif event.key in forward_keys:
            toolbar.forward()
        # pan mnemonic (default key 'p')
        elif event.key in pan_keys:
            toolbar.pan()
        # zoom mnemonic (default key 'o')
        elif event.key in zoom_keys:
            toolbar.zoom()
        # saving current figure (default key 's')
        elif event.key in save_keys:
            toolbar.save_figure()
    if event.inaxes is None:
        return
    # these bindings require the mouse to be over an axes to trigger
    # switching on/off a grid in current axes (default key 'g')
    if event.key in grid_keys:
        event.inaxes.grid()
        canvas.draw()
    # toggle scaling of y-axes between 'log and 'linear' (default key 'l')
    elif event.key in toggle_yscale_keys:
        ax = event.inaxes
        scale = ax.get_yscale()
        if scale == 'log':
            ax.set_yscale('linear')
            ax.figure.canvas.draw()
        elif scale == 'linear':
            ax.set_yscale('log')
            ax.figure.canvas.draw()
    # toggle scaling of x-axes between 'log and 'linear' (default key 'k')
    elif event.key in toggle_xscale_keys:
        ax = event.inaxes
        scalex = ax.get_xscale()
        if scalex == 'log':
            ax.set_xscale('linear')
            ax.figure.canvas.draw()
        elif scalex == 'linear':
            ax.set_xscale('log')
            ax.figure.canvas.draw()
    elif ((event.key.isdigit() and event.key != '0') or
          event.key in all_axes_keys):
        # keys in list 'all_axes_keys' enable all axes (default key 'a'),
        # otherwise if key is a number only enable this particular axes
        # if it was the axes, where the event was raised
        if not (event.key in all_axes_keys):
            n = int(event.key) - 1
        for i, a in enumerate(canvas.figure.get_axes()):
            # consider axes, in which the event was raised
            # FIXME: Why only this axes?
            if event.x is not None and event.y is not None \
                    and a.in_axes(event):
                if event.key in all_axes_keys:
                    a.set_navigate(True)
                else:
                    a.set_navigate(i == n)
class NonGuiException(Exception):
    # Raised by FigureManagerBase.show() to signal that the backend has
    # no GUI window to show.
    pass
class FigureManagerBase:
    """
    Helper class for pyplot mode, wraps everything up into a neat bundle
    Public attributes:
    *canvas*
        A :class:`FigureCanvasBase` instance
    *num*
        The figure number
    """
    def __init__(self, canvas, num):
        self.canvas = canvas
        canvas.manager = self  # store a pointer to parent
        self.num = num
        # Install the default key bindings (see key_press_handler).
        self.key_press_handler_id = self.canvas.mpl_connect('key_press_event',
                                                            self.key_press)
        """
        The returned id from connecting the default key handler via
        :meth:`FigureCanvasBase.mpl_connnect`.
        To disable default key press handling::
            manager, canvas = figure.canvas.manager, figure.canvas
            canvas.mpl_disconnect(manager.key_press_handler_id)
        """
    def show(self):
        """
        For GUI backends, show the figure window and redraw.
        For non-GUI backends, raise an exception to be caught
        by :meth:`~matplotlib.figure.Figure.show`, for an
        optional warning.
        """
        raise NonGuiException()
    def destroy(self):
        # No-op here; GUI backends tear down their window.
        pass
    def full_screen_toggle(self):
        # No-op here; GUI backends override.
        pass
    def resize(self, w, h):
        """For gui backends, resize the window (in pixels)."""
        pass
    def key_press(self, event):
        """
        Implement the default mpl key bindings defined at
        :ref:`key-event-handling`
        """
        key_press_handler(event, self.canvas, self.canvas.toolbar)
    def show_popup(self, msg):
        """
        Display message in a popup -- GUI only
        """
        pass
    def get_window_title(self):
        """
        Get the title text of the window containing the figure.
        Return None for non-GUI backends (eg, a PS backend).
        """
        return 'image'
    def set_window_title(self, title):
        """
        Set the title text of the window containing the figure. Note that
        this has no effect for non-GUI backends (eg, a PS backend).
        """
        pass
class Cursors:
    """Simple namespace holding the integer cursor ids used by toolbars."""
    HAND = 0
    POINTER = 1
    SELECT_REGION = 2
    MOVE = 3
cursors = Cursors()
class NavigationToolbar2(object):
    """
    Base class for the navigation cursor, version 2
    backends must implement a canvas that handles connections for
    'button_press_event' and 'button_release_event'. See
    :meth:`FigureCanvasBase.mpl_connect` for more information
    They must also define
    :meth:`save_figure`
        save the current figure
    :meth:`set_cursor`
        if you want the pointer icon to change
    :meth:`_init_toolbar`
        create your toolbar widget
    :meth:`draw_rubberband` (optional)
        draw the zoom to rect "rubberband" rectangle
    :meth:`press` (optional)
        whenever a mouse button is pressed, you'll be notified with
        the event
    :meth:`release` (optional)
        whenever a mouse button is released, you'll be notified with
        the event
    :meth:`dynamic_update` (optional)
        dynamically update the window while navigating
    :meth:`set_message` (optional)
        display message
    :meth:`set_history_buttons` (optional)
        you can change the history back / forward buttons to
        indicate disabled / enabled state.
    That's it, we'll do the rest!
    """
    # list of toolitems to add to the toolbar, format is:
    # (
    #   text, # the text of the button (often not visible to users)
    #   tooltip_text, # the tooltip shown on hover (where possible)
    #   image_file, # name of the image for the button (without the extension)
    #   name_of_method, # name of the method in NavigationToolbar2 to call
    # )
    # A (None, None, None, None) entry inserts a separator.
    toolitems = (
        ('Home', 'Reset original view', 'home', 'home'),
        ('Back', 'Back to previous view', 'back', 'back'),
        ('Forward', 'Forward to next view', 'forward', 'forward'),
        (None, None, None, None),
        ('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),
        ('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'),
        (None, None, None, None),
        ('Subplots', 'Configure subplots', 'subplots', 'configure_subplots'),
        ('Save', 'Save the figure', 'filesave', 'save_figure'),
    )
def __init__(self, canvas):
self.canvas = canvas
canvas.toolbar = self
# a dict from axes index to a list of view limits
self._views = cbook.Stack()
self._positions = cbook.Stack() # stack of subplot positions
self._xypress = None # the location and axis info at the time
# of the press
self._idPress = None
self._idRelease = None
self._active = None
self._lastCursor = None
self._init_toolbar()
self._idDrag = self.canvas.mpl_connect(
'motion_notify_event', self.mouse_move)
self._ids_zoom = []
self._zoom_mode = None
self._button_pressed = None # determined by the button pressed
# at start
self.mode = '' # a mode string for the status bar
self.set_history_buttons()
def set_message(self, s):
"""Display a message on toolbar or in status bar"""
pass
def back(self, *args):
"""move back up the view lim stack"""
self._views.back()
self._positions.back()
self.set_history_buttons()
self._update_view()
def dynamic_update(self):
pass
def draw_rubberband(self, event, x0, y0, x1, y1):
"""Draw a rectangle rubberband to indicate zoom limits"""
pass
def forward(self, *args):
"""Move forward in the view lim stack"""
self._views.forward()
self._positions.forward()
self.set_history_buttons()
self._update_view()
def home(self, *args):
"""Restore the original view"""
self._views.home()
self._positions.home()
self.set_history_buttons()
self._update_view()
def _init_toolbar(self):
"""
This is where you actually build the GUI widgets (called by
__init__). The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,
``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard
across backends (there are ppm versions in CVS also).
You just need to set the callbacks
home : self.home
back : self.back
forward : self.forward
hand : self.pan
zoom_to_rect : self.zoom
filesave : self.save_figure
You only need to define the last one - the others are in the base
class implementation.
"""
raise NotImplementedError
def mouse_move(self, event):
if not event.inaxes or not self._active:
if self._lastCursor != cursors.POINTER:
self.set_cursor(cursors.POINTER)
self._lastCursor = cursors.POINTER
else:
if self._active == 'ZOOM':
if self._lastCursor != cursors.SELECT_REGION:
self.set_cursor(cursors.SELECT_REGION)
self._lastCursor = cursors.SELECT_REGION
elif (self._active == 'PAN' and
self._lastCursor != cursors.MOVE):
self.set_cursor(cursors.MOVE)
self._lastCursor = cursors.MOVE
if event.inaxes and event.inaxes.get_navigate():
try:
s = event.inaxes.format_coord(event.xdata, event.ydata)
except (ValueError, OverflowError):
pass
else:
if len(self.mode):
self.set_message('%s, %s' % (self.mode, s))
else:
self.set_message(s)
else:
self.set_message(self.mode)
def pan(self, *args):
"""Activate the pan/zoom tool. pan with left button, zoom with right"""
# set the pointer icon and button press funcs to the
# appropriate callbacks
if self._active == 'PAN':
self._active = None
else:
self._active = 'PAN'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect(
'button_press_event', self.press_pan)
self._idRelease = self.canvas.mpl_connect(
'button_release_event', self.release_pan)
self.mode = 'pan/zoom'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def press(self, event):
"""Called whenver a mouse button is pressed."""
pass
def press_pan(self, event):
"""the press mouse button in pan/zoom mode callback"""
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._button_pressed = None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty():
self.push_current()
self._xypress = []
for i, a in enumerate(self.canvas.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_pan()):
a.start_pan(x, y, event.button)
self._xypress.append((a, i))
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag = self.canvas.mpl_connect('motion_notify_event',
self.drag_pan)
self.press(event)
def press_zoom(self, event):
"""the press mouse button in zoom to rect mode callback"""
# If we're already in the middle of a zoom, pressing another
# button works to "cancel"
if self._ids_zoom != []:
for zoom_id in self._ids_zoom:
self.canvas.mpl_disconnect(zoom_id)
self.release(event)
self.draw()
self._xypress = None
self._button_pressed = None
self._ids_zoom = []
return
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._button_pressed = None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty():
self.push_current()
self._xypress = []
for i, a in enumerate(self.canvas.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_zoom()):
self._xypress.append((x, y, a, i, a.viewLim.frozen(),
a.transData.frozen()))
id1 = self.canvas.mpl_connect('motion_notify_event', self.drag_zoom)
id2 = self.canvas.mpl_connect('key_press_event',
self._switch_on_zoom_mode)
id3 = self.canvas.mpl_connect('key_release_event',
self._switch_off_zoom_mode)
self._ids_zoom = id1, id2, id3
self._zoom_mode = event.key
self.press(event)
def _switch_on_zoom_mode(self, event):
self._zoom_mode = event.key
self.mouse_move(event)
def _switch_off_zoom_mode(self, event):
self._zoom_mode = None
self.mouse_move(event)
def push_current(self):
"""push the current view limits and position onto the stack"""
lims = []
pos = []
for a in self.canvas.figure.get_axes():
xmin, xmax = a.get_xlim()
ymin, ymax = a.get_ylim()
lims.append((xmin, xmax, ymin, ymax))
# Store both the original and modified positions
pos.append((
a.get_position(True).frozen(),
a.get_position().frozen()))
self._views.push(lims)
self._positions.push(pos)
self.set_history_buttons()
def release(self, event):
"""this will be called whenever mouse button is released"""
pass
def release_pan(self, event):
"""the release mouse button callback in pan/zoom mode"""
if self._button_pressed is None:
return
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag = self.canvas.mpl_connect(
'motion_notify_event', self.mouse_move)
for a, ind in self._xypress:
a.end_pan()
if not self._xypress:
return
self._xypress = []
self._button_pressed = None
self.push_current()
self.release(event)
self.draw()
def drag_pan(self, event):
"""the drag callback in pan/zoom mode"""
for a, ind in self._xypress:
#safer to use the recorded button at the press than current button:
#multiple button can get pressed during motion...
a.drag_pan(self._button_pressed, event.key, event.x, event.y)
self.dynamic_update()
def drag_zoom(self, event):
"""the drag callback in zoom mode"""
if self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = self._xypress[0]
# adjust x, last, y, last
x1, y1, x2, y2 = a.bbox.extents
x, lastx = max(min(x, lastx), x1), min(max(x, lastx), x2)
y, lasty = max(min(y, lasty), y1), min(max(y, lasty), y2)
if self._zoom_mode == "x":
x1, y1, x2, y2 = a.bbox.extents
y, lasty = y1, y2
elif self._zoom_mode == "y":
x1, y1, x2, y2 = a.bbox.extents
x, lastx = x1, x2
self.draw_rubberband(event, x, y, lastx, lasty)
def release_zoom(self, event):
"""the release mouse button callback in zoom to rect mode"""
for zoom_id in self._ids_zoom:
self.canvas.mpl_disconnect(zoom_id)
self._ids_zoom = []
if not self._xypress:
return
last_a = []
for cur_xypress in self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = cur_xypress
# ignore singular clicks - 5 pixels is a threshold
if abs(x - lastx) < 5 or abs(y - lasty) < 5:
self._xypress = None
self.release(event)
self.draw()
return
x0, y0, x1, y1 = lim.extents
# zoom to rect
inverse = a.transData.inverted()
lastx, lasty = inverse.transform_point((lastx, lasty))
x, y = inverse.transform_point((x, y))
Xmin, Xmax = a.get_xlim()
Ymin, Ymax = a.get_ylim()
# detect twinx,y axes and avoid double zooming
twinx, twiny = False, False
if last_a:
for la in last_a:
if a.get_shared_x_axes().joined(a, la):
twinx = True
if a.get_shared_y_axes().joined(a, la):
twiny = True
last_a.append(a)
if twinx:
x0, x1 = Xmin, Xmax
else:
if Xmin < Xmax:
if x < lastx:
x0, x1 = x, lastx
else:
x0, x1 = lastx, x
if x0 < Xmin:
x0 = Xmin
if x1 > Xmax:
x1 = Xmax
else:
if x > lastx:
x0, x1 = x, lastx
else:
x0, x1 = lastx, x
if x0 > Xmin:
x0 = Xmin
if x1 < Xmax:
x1 = Xmax
if twiny:
y0, y1 = Ymin, Ymax
else:
if Ymin < Ymax:
if y < lasty:
y0, y1 = y, lasty
else:
y0, y1 = lasty, y
if y0 < Ymin:
y0 = Ymin
if y1 > Ymax:
y1 = Ymax
else:
if y > lasty:
y0, y1 = y, lasty
else:
y0, y1 = lasty, y
if y0 > Ymin:
y0 = Ymin
if y1 < Ymax:
y1 = Ymax
if self._button_pressed == 1:
if self._zoom_mode == "x":
a.set_xlim((x0, x1))
elif self._zoom_mode == "y":
a.set_ylim((y0, y1))
else:
a.set_xlim((x0, x1))
a.set_ylim((y0, y1))
elif self._button_pressed == 3:
if a.get_xscale() == 'log':
alpha = np.log(Xmax / Xmin) / np.log(x1 / x0)
rx1 = pow(Xmin / x0, alpha) * Xmin
rx2 = pow(Xmax / x0, alpha) * Xmin
else:
alpha = (Xmax - Xmin) / (x1 - x0)
rx1 = alpha * (Xmin - x0) + Xmin
rx2 = alpha * (Xmax - x0) + Xmin
if a.get_yscale() == 'log':
alpha = np.log(Ymax / Ymin) / np.log(y1 / y0)
ry1 = pow(Ymin / y0, alpha) * Ymin
ry2 = pow(Ymax / y0, alpha) * Ymin
else:
alpha = (Ymax - Ymin) / (y1 - y0)
ry1 = alpha * (Ymin - y0) + Ymin
ry2 = alpha * (Ymax - y0) + Ymin
if self._zoom_mode == "x":
a.set_xlim((rx1, rx2))
elif self._zoom_mode == "y":
a.set_ylim((ry1, ry2))
else:
a.set_xlim((rx1, rx2))
a.set_ylim((ry1, ry2))
self.draw()
self._xypress = None
self._button_pressed = None
self._zoom_mode = None
self.push_current()
self.release(event)
def draw(self):
"""Redraw the canvases, update the locators"""
for a in self.canvas.figure.get_axes():
xaxis = getattr(a, 'xaxis', None)
yaxis = getattr(a, 'yaxis', None)
locators = []
if xaxis is not None:
locators.append(xaxis.get_major_locator())
locators.append(xaxis.get_minor_locator())
if yaxis is not None:
locators.append(yaxis.get_major_locator())
locators.append(yaxis.get_minor_locator())
for loc in locators:
loc.refresh()
self.canvas.draw_idle()
def _update_view(self):
"""Update the viewlim and position from the view and
position stack for each axes
"""
lims = self._views()
if lims is None:
return
pos = self._positions()
if pos is None:
return
for i, a in enumerate(self.canvas.figure.get_axes()):
xmin, xmax, ymin, ymax = lims[i]
a.set_xlim((xmin, xmax))
a.set_ylim((ymin, ymax))
# Restore both the original and modified positions
a.set_position(pos[i][0], 'original')
a.set_position(pos[i][1], 'active')
self.canvas.draw_idle()
def save_figure(self, *args):
"""Save the current figure"""
raise NotImplementedError
def set_cursor(self, cursor):
"""
Set the current cursor to one of the :class:`Cursors`
enums values
"""
pass
def update(self):
"""Reset the axes stack"""
self._views.clear()
self._positions.clear()
self.set_history_buttons()
def zoom(self, *args):
"""Activate zoom to rect mode"""
if self._active == 'ZOOM':
self._active = None
else:
self._active = 'ZOOM'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect('button_press_event',
self.press_zoom)
self._idRelease = self.canvas.mpl_connect('button_release_event',
self.release_zoom)
self.mode = 'zoom rect'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def set_history_buttons(self):
"""Enable or disable back/forward button"""
pass
| gpl-3.0 |
Sentient07/scikit-learn | examples/calibration/plot_calibration_curve.py | 113 | 5904 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100.000 samples (1.000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian
naive Bayes: the calibration curve has a sigmoid curve, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.model_selection import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
                                    n_informative=2, n_redundant=10,
                                    random_state=42)
# Only 1% of the samples are used for fitting; the remaining 99% form a
# large held-out set so calibration curves can be estimated reliably.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
                                                    random_state=42)
def plot_calibration_curve(est, name, fig_index):
    """Plot calibration curve for est w/o and with calibration. """
    # Calibrated with isotonic calibration
    isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
    # Calibrated with sigmoid calibration
    sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
    # Logistic regression with no calibration as baseline
    lr = LogisticRegression(C=1., solver='lbfgs')
    fig = plt.figure(fig_index, figsize=(10, 10))
    # top panel (2/3 height): reliability curves; bottom panel: histograms
    ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
    ax2 = plt.subplot2grid((3, 1), (2, 0))
    ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
    for clf, name in [(lr, 'Logistic'),
                      (est, name),
                      (isotonic, name + ' + Isotonic'),
                      (sigmoid, name + ' + Sigmoid')]:
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)
        if hasattr(clf, "predict_proba"):
            prob_pos = clf.predict_proba(X_test)[:, 1]
        else:  # use decision function
            # min-max scale decision values into [0, 1] so they can be
            # treated as (uncalibrated) probabilities
            prob_pos = clf.decision_function(X_test)
            prob_pos = \
                (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
        clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
        print("%s:" % name)
        print("\tBrier: %1.3f" % (clf_score))
        print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
        print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
        print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
        fraction_of_positives, mean_predicted_value = \
            calibration_curve(y_test, prob_pos, n_bins=10)
        ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
                 label="%s (%1.3f)" % (name, clf_score))
        ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
                 histtype="step", lw=2)
    ax1.set_ylabel("Fraction of positives")
    ax1.set_ylim([-0.05, 1.05])
    ax1.legend(loc="lower right")
    ax1.set_title('Calibration plots  (reliability curve)')
    ax2.set_xlabel("Mean predicted value")
    ax2.set_ylabel("Count")
    ax2.legend(loc="upper center", ncol=2)
    plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
# show() blocks until all figure windows are closed
plt.show()
MadsJensen/biomeg_class | classification.py | 1 | 1551 | import xgboost as xgb
# Grid-search an XGBoost and an AdaBoost classifier on flattened MEG
# epochs for one subject and pickle the best estimator of each.
# NOTE: `xgb` (xgboost) is imported at the very top of this file.
import numpy as np
import mne
from sklearn.cross_validation import StratifiedShuffleSplit, cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.externals import joblib
from my_settings import *
subject = 1
epochs = mne.read_epochs(data_folder + "sub_%s-epo.fif" % subject)
data_shape = epochs.get_data().shape
# flatten each epoch (channels x times) into a single feature vector;
# only the first 239 epochs are used
X = epochs[:239].get_data().reshape([239, data_shape[1] * data_shape[2]])
# binary target: event code 4 vs. everything else
y = (epochs[:239].events[:, 2] != 4).astype("int")
cv = StratifiedShuffleSplit(y, test_size=0.1)
cv_params = {"learning_rate": np.arange(0.1, 1.1, 0.2),
             "max_depth": [1, 3, 5, 7],
             "n_estimators": np.arange(100, 1100, 100)}
grid = GridSearchCV(xgb.XGBClassifier(),
                    cv_params,
                    scoring='roc_auc',
                    cv=cv,
                    n_jobs=-1,
                    verbose=1)
grid.fit(X, y)
xgb_cv = grid.best_estimator_
joblib.dump(xgb_cv, class_data + "sub_%s-xgb_model.pkl" % subject)
cv_params = {"learning_rate": np.arange(0.1, 1.1, 0.1),
             'n_estimators': np.arange(1, 2000, 200)}
grid_ada = GridSearchCV(AdaBoostClassifier(),
                        cv_params,
                        scoring='roc_auc',
                        cv=cv,
                        n_jobs=-1,
                        verbose=1)
grid_ada.fit(X, y)
ada_cv = grid_ada.best_estimator_
# extra cross-validated scores for the best AdaBoost model
# NOTE(review): `scores` is computed but never saved or printed
scores = cross_val_score(ada_cv, X, y, cv=cv)
joblib.dump(ada_cv, class_data + "sub_%s-ada_model.pkl" % subject)
arvidfm/masters-thesis | src/utils.py | 1 | 23811 | # Copyright (C) 2016 Arvid Fahlström Myrman
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import collections
import concurrent.futures
import itertools
import math
import os
import pathlib
import pickle
import random
import numpy as np
import scipy.io.wavfile
import scipy.misc
import scipy.spatial.distance
import scipy.stats
import numba
import sklearn.metrics
import click
import buckeye
import dataset
import features
from _train import *
# Top-level click command group; all subcommands below attach to this via
# @main.command().  (Kept docstring-free so the CLI help text is unchanged.)
@click.group()
def main():
    pass
# Dump the raw samples of one dataset section to a RIFF/WAV file.
@main.command()
@click.argument('hdf5file', type=dataset.HDF5TYPE)
@click.option('-i', '--inset', required=True)
@click.option('-o', '--outfile', type=click.File('wb'), required=True)
@click.option('--sample-rate', type=int, required=True)
def to_wav(hdf5file, inset, outfile, sample_rate):
    # [:] forces the HDF5-backed data into memory as a numpy array
    data = hdf5file[inset].data[:]
    scipy.io.wavfile.write(outfile, sample_rate, data)
# Print the name of every top-level section of a dataset.
@main.command()
@click.argument('hdf5file', type=dataset.HDF5TYPE)
@click.option('-i', '--inset', required=True)
def list_sections(hdf5file, inset):
    for section in hdf5file[inset]:
        print(section.name)
# Split a raw PLP feature file into one file per voice-activity segment.
@main.command()
@click.argument('infile', type=click.File('rb'))
@click.option('-D', required=True, type=int)
@click.option('--vad', required=True, type=click.File('r'))
@click.option('--window-shift', default=10)
@click.option('-o', '--output-dir', required=True,
              type=click.Path(file_okay=False))
def split_plp(infile, d, vad, window_shift, output_dir):
    # Features are stored as a flat little-endian float32 stream; reshape
    # into (frames, d) feature vectors.
    data = np.fromfile(infile, dtype='f4').reshape(-1, d)
    vad = buckeye.parse_vad(vad)
    outpath = pathlib.Path(output_dir)
    # Use pathlib to extract the file name instead of splitting on "/",
    # so the command also works with platform-specific path separators.
    name, ext = pathlib.Path(infile.name).name.split(".", 1)
    window_shift /= 1000  # ms -> seconds
    outpath.mkdir(parents=True, exist_ok=True)
    for i, (start, end) in enumerate(vad[name]):
        # convert segment times to frame indices
        fstart, fend = int(start / window_shift), int(end / window_shift)
        outname = "{}.{}".format(name, i)
        print(name, outname, start, end)
        data[fstart:fend].tofile(str(outpath / (outname + "." + ext)))
def sec_to_frame(start, stop, window_length, window_shift):
    """Convert a (start, stop) time span in seconds to frame indices.

    A frame's timestamp is taken to be the midpoint of its analysis
    window (frame n lies at n * window_shift + window_length / 2).  The
    first index is the earliest frame whose midpoint is at or after
    `start`, clamped at 0; the second is the latest frame whose midpoint
    is at or before `stop` (not clamped).
    """
    half_window = window_length / 2
    first = math.ceil((start - half_window) / window_shift)
    last = math.floor((stop - half_window) / window_shift)
    return max(0, first), last
# Copy only the voiced frames (per a VAD segmentation) of each recording
# into a new dataset, one sub-section per VAD segment.
@main.command()
@click.argument('hdf5file', type=dataset.HDF5TYPE)
@click.option('-i', '--inset', required=True)
@click.option('-o', '--outset', required=True)
@click.option('--vad', required=True, type=click.File('r'))
@click.option('--window-shift', required=True, type=int)
@click.option('--window-length', required=True, type=int)
def extract_vad(hdf5file, inset, outset, vad, window_shift, window_length):
    datset = hdf5file[inset]
    outset = hdf5file.create_dataset(outset, datset.dims, datset.data.dtype,
                                     overwrite=True)
    vad = buckeye.parse_vad(vad)
    window_shift /= 1000   # ms -> seconds
    window_length /= 1000
    for speaker in datset:
        print(speaker.name)
        speaker_sec = outset.create_section(speaker.name)
        for recording in speaker:
            recording_sec = speaker_sec.create_section(recording.name)
            data = recording.data
            for start, stop in vad[recording.name]:
                # get frames whose middle points are within (start, stop)
                # from n * window_shift + window_length/2 = t
                fstart, fstop = sec_to_frame(start, stop, window_length, window_shift)
                secdat = data[fstart:fstop]
                # remember the original time span as section metadata
                recording_sec.create_section(data=secdat,
                                             metadata=(start, stop))
@numba.jit(nopython=True, nogil=True)
def numba_sum(a):
    # Row-wise sum of a 2-D array, returned as an (n, 1) column vector.
    # Written as an explicit loop (rather than a.sum(axis=1)) so it
    # compiles under numba's nopython mode -- presumably axis-keyword
    # reductions were unsupported at the time of writing.
    x = np.zeros((a.shape[0], 1))
    for i in range(a.shape[0]):
        x[i] = a[i].sum()
    return x
@numba.jit(nopython=True, nogil=True)
def numba_reverse(l):
    # Reverse a sequence in place via pairwise swaps; used instead of
    # list.reverse() so it can be called from other nopython-compiled
    # functions (see align()).
    for i in range(len(l) // 2):
        l[i], l[-i-1] = l[-i-1], l[i]
@numba.jit(nopython=True, nogil=True)
def kullback_leibler(a, b, eps=np.finfo(np.dtype('f4')).eps):
    # Pairwise KL divergence (in bits) between every row of `a` and every
    # row of `b`: result[i, j] = KL(a[i] || b[j]).
    # `eps` (float32 machine epsilon) smooths away zeros before the rows
    # are renormalised to proper distributions, avoiding log(0).
    a = a + eps
    b = b + eps
    a /= numba_sum(a)
    b /= numba_sum(b)
    # KL(p||q) = sum p*log2(p) - sum p*log2(q): (n, 1) entropy column
    # broadcast against the (n, m) cross-entropy matrix.
    ent = numba_sum(a * np.log2(a))
    cross = a @ np.log2(b.T)
    return ent - cross
def jensen_shannon(a, b, eps=np.finfo(np.dtype('f4')).eps):
    """Pairwise Jensen-Shannon divergence (in bits) between rows.

    Returns a (len(a), len(b)) matrix whose (i, j) entry is the JS
    divergence between row a[i] and row b[j].  Rows are smoothed by
    `eps` and renormalised before the divergence is computed.
    """
    p = a[:, np.newaxis] + eps
    q = b[np.newaxis, :] + eps
    p = p / p.sum(axis=-1, keepdims=True)
    q = q / q.sum(axis=-1, keepdims=True)
    mid = (p + q) / 2
    # JS(p, q) = (KL(p||m) + KL(q||m)) / 2 with m the midpoint distribution
    kl_p = (p * np.log2(p / mid)).sum(axis=-1)
    kl_q = (q * np.log2(q / mid)).sum(axis=-1)
    return (kl_p + kl_q) / 2
@numba.jit(nopython=True, nogil=True)
def align(dist):
    # Dynamic-time-warping alignment over a pairwise distance matrix.
    # Returns (indices1, indices2, cost): the minimal-cost monotonic path
    # through `dist` as two aligned index lists, plus the total cost.
    # D[i, j]: cheapest accumulated cost of aligning length-i and length-j
    # prefixes; B stores the chosen predecessor cell for backtracking.
    D = np.zeros((dist.shape[0] + 1, dist.shape[1] + 1))
    B = np.zeros((dist.shape[0] + 1, dist.shape[1] + 1, 2), dtype=np.int32)
    D[:,0] = D[0,:] = np.inf
    D[0,0] = 0
    for i in range(dist.shape[0]):
        for j in range(dist.shape[1]):
            D[i+1,j+1] = min(D[i,j], D[i,j+1], D[i+1,j]) + dist[i,j]
            # prefer the diagonal move on strict wins, otherwise the
            # cheaper of up/left (ties go left)
            B[i+1,j+1] = ([i,j] if D[i,j] < D[i,j+1] and D[i,j] < D[i+1,j]
                          else ([i,j+1] if D[i,j+1] < D[i+1,j] else [i+1,j]))
    indices1 = []
    indices2 = []
    i = np.array(dist.shape, dtype=np.int32)
    # Walk the backpointers from the bottom-right corner towards (0, 0),
    # collecting aligned frame indices (D/B are offset by one row/column).
    while i[0] != 0 and i[1] != 0:
        indices1.append(i[0] - 1)
        indices2.append(i[1] - 1)
        i = B[i[0],i[1]]
    # the path was collected end-to-start; reverse it in place
    # (numba-compatible helper instead of list.reverse())
    numba_reverse(indices1)
    numba_reverse(indices2)
    return indices1, indices2, D[-1,-1]
def binary_search(transcription, point):
    """Find the entry of a sorted interval list containing `point`.

    `transcription` is a sequence of tuples whose first two elements are
    (start, stop), sorted and non-overlapping; an entry matches when
    start <= point < stop.  Raises RuntimeError when no entry matches.
    """
    lo, hi = 0, len(transcription) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        entry = transcription[mid]
        if point < entry[0]:
            hi = mid - 1
        elif point >= entry[1]:
            lo = mid + 1
        else:
            return entry
    raise RuntimeError("Couldn't find corresponding transcription")
# Accumulate per-phoneme sums of (optionally model-transformed) frames for
# every speaker and pickle the result to --save-averages.
@main.command()
@click.argument('hdf5file', type=dataset.Hdf5Type('r'))
@click.option('-i', '--inset', required=True)
@click.option('--window-shift', type=int, required=True)
@click.option('--window-length', type=int, required=True)
@click.option('--model', type=click.File('rb'))
@click.option('--save-averages', type=click.File('wb'), required=True)
def label_frames(hdf5file, inset, window_shift, window_length, model, save_averages):
    datset = hdf5file[inset]
    window_shift, window_length = window_shift / 1000, window_length / 1000
    if model is not None:
        # the pickled model holds a layer whose weight matrix W transforms
        # frames; columns summing to zero (dead units) are dropped
        _, _, layer, _ = pickle.load(model)
        W = layer._W.get_value()[0]
        W = W[:,W.sum(axis=0) > 0]
    speakers = {}
    for speaker in datset:
        print(speaker.name)
        # NOTE(review): despite the name, `avgs` holds per-phoneme *sums*
        # of frames, not averages -- presumably normalised downstream.
        avgs = {}
        for recording in speaker:
            # recording.metadata is an iterable of (start, stop, phoneme)
            transcription = iter(recording.metadata)
            start, stop, phoneme = next(transcription)
            framewise = []  # NOTE(review): never used
            frames = recording.data[:]
            if model is not None:
                frames = frames @ W
            for i in range(frames.shape[0]):
                # midpoint time of frame i
                t = window_shift * i + window_length / 2
                try:
                    # advance to the transcription entry covering time t
                    while t > stop:
                        start, stop, phoneme = next(transcription)
                except StopIteration:
                    # just use the last value
                    pass
                if phoneme not in avgs:
                    avgs[phoneme] = np.zeros(frames.shape[-1])
                avgs[phoneme] += frames[i]
        speakers[speaker.name] = avgs
    pickle.dump(speakers, save_averages)
def parse_clusters(clusterfile, recording_speaker_mapping=None, return_clusters=False):
    """Parse a cluster file into clusters or randomly-ordered same pairs.

    The file consists of "Class ..." headers, each followed by lines of
    the form "recording start stop", separated by blank lines.  Each
    fragment becomes ``(recording, start, stop)``, or
    ``(speaker, recording, start, stop)`` when a
    recording -> speaker mapping is supplied.

    With ``return_clusters=True`` the list of clusters is returned;
    otherwise every within-cluster fragment pair is returned as
    ``(cluster_index, cluster_index, word1, word2)`` with the two words
    in random order.
    """
    clusters = []
    cur_cluster = None
    for line in clusterfile:
        if line.startswith("Class"):
            if cur_cluster is not None:
                clusters.append(cur_cluster)
            cur_cluster = []
        elif not line.strip():
            # skip blank/whitespace-only lines (the original only matched
            # exactly "\n", crashing on trailing spaces or a final line
            # without a newline)
            pass
        else:
            recording, start, stop = line.split()
            if recording_speaker_mapping is None:
                this = recording, float(start), float(stop)
            else:
                this = (recording_speaker_mapping[recording], recording,
                        float(start), float(stop))
            cur_cluster.append(this)
    # only flush the last cluster if the file contained any "Class" header
    # (previously an empty file produced [None])
    if cur_cluster is not None:
        clusters.append(cur_cluster)
    if return_clusters:
        return clusters
    same_pairs = []
    for i, cluster in enumerate(clusters, 1):
        for this, other in itertools.combinations(cluster, r=2):
            # randomise pair order so downstream consumers see no bias
            word1, word2 = (this, other) if random.random() < 0.5 else (other, this)
            same_pairs.append((i, i, word1, word2))
    return same_pairs
def samespeaker_ratio(same_pairs):
    """Fraction of pairs whose two fragments share a speaker.

    Each pair is (cluster1, cluster2, word1, word2); a word's first
    element identifies its speaker.
    """
    same = 0
    for _, _, first, second in same_pairs:
        if first[0] == second[0]:
            same += 1
    return same / len(same_pairs)
def sample_mismatches(same_pairs, ratio=None, mismatch_ratio=1, sample_triples=False):
    # Build negative (different-cluster) pairs from the positives, keeping
    # the proportion of same-speaker pairs close to `ratio` (defaults to
    # the ratio observed in `same_pairs`).
    # NOTE(review): `ratio or ...` also overrides an explicitly passed
    # ratio of 0 -- confirm that is intended.
    ratio = ratio or samespeaker_ratio(same_pairs)
    diff_pairs = []
    # flatten positives to (cluster_index, word) entries, one per word
    same_pairs_flattened = [(cluster, word)
                            for cluster, _, w1, w2 in same_pairs
                            for word in (w1, w2)]
    for i in range(int(len(same_pairs) * mismatch_ratio)):
        # decide up front whether this negative pair should share a speaker
        same_speaker = random.random() < ratio
        # repeat until successfully sampled
        while True:
            try:
                if sample_triples:
                    # anchor on the i-th positive pair's first word so each
                    # positive pair gets a matching negative (a "triple")
                    word1 = same_pairs[i][0], same_pairs[i][2]
                else:
                    word1 = random.choice(same_pairs_flattened)
                # random.choice raises IndexError when the candidate list is
                # empty, which retries with a fresh word1 / speaker draw
                word2 = random.choice([
                    (cluster, word) for cluster, word in same_pairs_flattened
                    if cluster != word1[0] and same_speaker == (word[0] == word1[1][0])])
            except IndexError:
                # NOTE(review): if no candidate can ever satisfy the
                # constraint this loops forever -- assumed not to happen
                # with realistic data.
                pass
            else:
                break
        diff_pairs.append((word1[0], word2[0], word1[1], word2[1]))
    return diff_pairs
@numba.jit(nogil=True)
def calculate_similarities(similarities, fragments, i,
                           symmetrize=False, js=False, cos=False):
    # Fill row i of the pairwise DTW-cost matrix between fragments.
    # For symmetric frame distances (symmetrised KL, JS, cosine) only
    # columns j >= i are computed and then mirrored into column i; plain
    # KL is asymmetric, so the whole row is computed.
    if symmetrize or js or cos:
        start = i
    else:
        start = 0
    for j in range(start, len(fragments)):
        if js:
            # sqrt of the JS divergence; the 1e-7 guards sqrt against
            # tiny negative values from rounding
            dist = np.sqrt(jensen_shannon(fragments[i], fragments[j]) + 1e-7)
        elif cos:
            dist = scipy.spatial.distance.cdist(fragments[i], fragments[j], metric='cosine')
        elif symmetrize:
            # average KL in both directions to obtain a symmetric distance
            dist = (kullback_leibler(fragments[i], fragments[j]) +
                    kullback_leibler(fragments[j], fragments[i]).T) / 2
        else:
            dist = kullback_leibler(fragments[i], fragments[j])
        # total DTW alignment cost is the last element returned by align()
        similarities[i,j] = align(dist)[-1]
    if symmetrize or js or cos:
        similarities[i:,i] = similarities[i,i:]
    return i
# Score a clustering of word fragments: compute all pairwise DTW distances
# between fragments and report the silhouette score of the cluster labels.
@main.command()
@click.argument('hdf5file', type=dataset.Hdf5Type('r'))
@click.option('-i', '--inset', required=True)
@click.option('--clusters', type=click.File('r'), required=True)
@click.option('--num-clusters', type=int, default=1000, show_default=True)
@click.option('--distance', type=click.Choice(['kl', 'kl-sym', 'js', 'cos']), required=True)
@click.option('--window-shift', type=int, required=True)
@click.option('--window-length', type=int, required=True)
@click.option('--model', type=click.File('rb'))
@click.option('--save-score', type=click.File('w'))
@click.option('--num-threads', type=int, default=12, show_default=True)
def evaluate(hdf5file, inset, clusters, distance, window_shift, window_length,
             save_score, num_clusters, model, num_threads):
    datset = hdf5file[inset]
    # dataset metadata maps recording names to speakers
    mapping = datset.metadata
    clusters = parse_clusters(clusters, mapping, return_clusters=True)[:num_clusters]
    window_length, window_shift = window_length / 1000, window_shift / 1000
    def get_data(x):
        # fetch the frames of one (speaker, recording, start, stop) fragment
        s, rec, start, stop = x
        start, stop = sec_to_frame(start, stop, window_length, window_shift)
        return datset[s][rec].data[start:stop]
    fragments = [get_data(fragment) for cluster in clusters for fragment in cluster]
    # all data is in memory now; the file is no longer needed
    hdf5file.close()
    if model is not None:
        import train
        get_output, _, _ = train.open_model(model)
        fragments = [get_output(fragment) for fragment in fragments]
    similarities = np.zeros((len(fragments), len(fragments)))
    indices = list(range(len(fragments)))
    # one task per row; the nogil-jitted worker lets threads run in parallel
    threads = concurrent.futures.ThreadPoolExecutor(num_threads)
    result = threads.map(lambda i: calculate_similarities(
        similarities, fragments, i,
        symmetrize=distance == 'kl-sym', js=distance == 'js',
        cos=distance == 'cos'), indices)
    print("Running threads...")
    # consuming the lazy map() result drives the computation
    for i, _ in enumerate(result, 1):
        print("{}/{}".format(i, len(fragments)))
    print("Calculating silhouette score...")
    labels = np.array([i for i, cluster in enumerate(clusters) for _ in cluster])
    score = sklearn.metrics.silhouette_score(similarities, labels, metric='precomputed')
    print("Silhouette score: {}".format(score))
    if save_score is not None:
        save_score.write(str(score) + '\n')
# Build a training/validation dataset of frame pairs from a cluster file:
# "same" pairs are DTW-aligned fragments of the same cluster, "diff" pairs
# are sampled cross-cluster mismatches truncated to equal length.
@main.command()
@click.argument('hdf5file', type=dataset.HDF5TYPE)
@click.option('-i', '--inset', required=True)
@click.option('-o', '--outset', required=True)
@click.option('--clusters', type=click.File('r'), required=True)
@click.option('--window-shift', type=int)
@click.option('--window-length', type=int)
@click.option('--sample-rate', type=int)
@click.option('--sample-triples', is_flag=True)
@click.option('--validation-ratio', default=0.3, show_default=True)
@click.option('--mismatch-ratio', default=1, show_default=True)
def from_matchlist(hdf5file, inset, outset, clusters, window_shift, window_length,
                   sample_rate, validation_ratio, sample_triples, mismatch_ratio):
    datset = hdf5file[inset]
    data = datset.data
    mapping = datset.metadata
    # time spans are converted either via frame parameters or, for raw
    # waveforms, via the sample rate (see get_time below)
    if sample_rate is None and (window_shift is None or window_length is None):
        raise click.UsageError("Must supply either --sample-rate or "
                               "--window-shift and --window-length.")
    if window_shift is not None and window_length is not None:
        window_shift /= 1000
        window_length /= 1000
    speakers = sorted(sec.name for sec in datset)
    speaker_ids = {s: i for i, s in enumerate(speakers)}
    outset_ids = hdf5file.create_dataset(
        outset + "_ids", (2,), 'i2', overwrite=True)
    outset_ids.metadata = speaker_ids
    outset = hdf5file.create_dataset(outset, (2, *data.shape[1:]),
                                     'f4', overwrite=True)
    same_pairs = parse_clusters(clusters, mapping)
    # percentage of positive examples belonging to the same speaker
    ratio = samespeaker_ratio(same_pairs)
    validation_split = int(len(same_pairs) * validation_ratio)
    random.shuffle(same_pairs)
    same_valid, same_train = same_pairs[:validation_split], same_pairs[validation_split:]
    # negatives are sampled separately per split so no fragment pairing
    # leaks between train and validation
    diff_train = sample_mismatches(same_train, ratio=ratio, mismatch_ratio=mismatch_ratio,
                                   sample_triples=sample_triples)
    diff_valid = sample_mismatches(same_valid, ratio=ratio, mismatch_ratio=mismatch_ratio,
                                   sample_triples=sample_triples)
    all_pairs = [[same_train, diff_train], [same_valid, diff_valid]]
    def get_time(start, stop):
        # seconds -> frame indices, or -> sample indices for raw audio
        if window_length is not None and window_shift is not None:
            return sec_to_frame(start, stop, window_length, window_shift)
        else:
            return int(start * sample_rate), int(stop * sample_rate)
    frame_count = 0
    # NOTE(review): the loop variable `data` shadows the dataset array
    # read above; the array is not used after this point, but renaming
    # would make that clearer.
    for sec, data in zip(('train', 'valid'), all_pairs):
        sec_ids = outset_ids.create_section(sec)
        sec_data = outset.create_section(sec)
        for subsec, subdata in zip(('same', 'diff'), data):
            subsec_ids = sec_ids.create_section(subsec)
            subsec_data = sec_data.create_section(subsec)
            for i, (w1, w2, (s1, rec1, start1, stop1), \
                    (s2, rec2, start2, stop2)) in enumerate(subdata, 1):
                print("Generating {}-{} pair {}/{}...".format(sec, subsec, i, len(subdata)))
                start1, stop1 = get_time(start1, stop1)
                start2, stop2 = get_time(start2, stop2)
                feat1 = datset[s1][rec1].data[start1:stop1]
                feat2 = datset[s2][rec2].data[start2:stop2]
                if subsec == 'same':
                    # warp matching fragments onto a common time axis so
                    # corresponding frames line up
                    dist = scipy.spatial.distance.cdist(feat1, feat2, metric='euclidean')
                    indices1, indices2, _ = align(dist)
                    feat1 = feat1[indices1]
                    feat2 = feat2[indices2]
                else:
                    # mismatched fragments are simply truncated to the
                    # shorter of the two
                    length = min(feat1.shape[0], feat2.shape[0])
                    feat1, feat2 = feat1[:length], feat2[:length]
                # per-frame speaker ids, stored parallel to the frame pairs
                subsec_ids.create_section(
                    data=np.stack((np.repeat(speaker_ids[s1], feat1.shape[0]),
                                   np.repeat(speaker_ids[s2], feat2.shape[0])),
                                  axis=1))
                subsec_data.create_section(data=np.stack((feat1, feat2), axis=1))
                frame_count += feat1.shape[0]
    print("Wrote a total of {} frames.".format(frame_count))
    print("Same-speaker ratio:", ratio)
    #with open('frame_matches.pkl', 'wb') as f:
    #    pickle.dump(frame_matches, f)
@main.command()
@click.option('--dedups', type=click.File('r'), required=True)
@click.option('--nodes', type=click.File('r'), required=True)
@click.option('--splitlist', type=click.File('r'))
def matchlist_to_clusters(dedups, nodes, splitlist):
    """Print deduplicated match-list clusters as numbered classes.

    Each line of ``nodes`` describes one match fragment; ``dedups`` groups
    node indices (1-based) into clusters.  If ``splitlist`` is given, the
    fragment times are offset into their original recordings.
    """
    fragments = None
    if splitlist is not None:
        # fragment name -> (speaker, absolute start, absolute stop)
        fragments = {}
        for line in splitlist:
            speaker, fragment, start, stop = line.split()
            fragments[fragment] = (speaker, float(start), float(stop))
    nodelist = []
    for node in nodes:
        fragment, match_start, match_stop, _, _, _ = node.split()
        # Node times are stored in centiseconds.
        match_start = int(match_start) / 100
        match_stop = int(match_stop) / 100
        if fragments is None:
            nodelist.append((fragment, match_start, match_stop))
        else:
            name, frag_start, frag_stop = fragments[fragment]
            nodelist.append((name, frag_start + match_start, frag_start + match_stop))
    for i, cluster in enumerate(dedups, 1):
        print("Class", i)
        for node in cluster.split():
            # Node references in the dedup list are 1-based.
            name, start, stop = nodelist[int(node) - 1]
            print("{} {:.2f} {:.2f}".format(name, start, stop))
        print()
@main.command()
@click.option('--corpuspath', type=click.Path(exists=True), required=True)
@click.option('--clusters', type=click.File('w'), required=True)
@click.option('--nodes', type=click.File('w'), required=True)
@click.option('--files', type=click.File('r'), required=True)
def generate_gold_clusters(corpuspath, clusters, nodes, files):
    """Generate gold-standard word clusters from Buckeye transcriptions.

    Collects word occurrences from the ``.words`` transcription files of the
    recordings listed in ``files`` and samples clusters of repeated words,
    writing them in the nodes/clusters match-list format.
    """
    # Keep only the basename (no extension) of each requested recording.
    files = {f.split(".")[0] for f in files}
    # NOTE(review): `ratio` is never used below — presumably kept for
    # reference from a measurement on real discovery output.
    ratio = 0.7318448883666275
    transcriptions = {}
    corpus = pathlib.Path(corpuspath)
    for speaker in corpus.iterdir():
        for recording in speaker.iterdir():
            if recording.name in files:
                transcription = recording / (recording.name + ".words")
                transcriptions[recording.name] = buckeye.parse_esps(transcription)[0]
    words = collections.defaultdict(lambda: [])
    for recording, transcription in transcriptions.items():
        for start, stop, word in transcription:
            # Skip non-speech markers (entries starting with '{' or '<').
            if word[0] not in ('{', '<'):
                words[word].append((recording, start, stop))
    # A word can only form a cluster if it occurs at least twice.
    words = {word: cluster for word, cluster in words.items() if len(cluster) >= 2}
    for cluster in words.values():
        random.shuffle(cluster)
    final_clusters = []
    while len(final_clusters) < 4800:
        word, cluster = random.choice(list(words.items()))
        if len(cluster) == 2:
            cluster_len = 2
        else:
            # Sample a cluster size (>= 2) biased towards small clusters.
            p = min(1, 2 * np.log2(len(cluster)) / len(cluster)**1.4)
            cluster_len = max(2, np.random.binomial(len(cluster), p))
        extract, leave = cluster[:cluster_len], cluster[cluster_len:]
        # Drop the word entirely once fewer than two occurrences remain.
        if len(leave) <= 1:
            del words[word]
        else:
            words[word] = leave
        final_clusters.append(extract)
    # Write 1-based node ids; node times are written in centiseconds.
    i = 1
    for cluster in final_clusters:
        clusters.write(" ".join(str(x) for x in range(i, i + len(cluster))) + "\n")
        for recording, start, stop in cluster:
            nodes.write("{}\t{}\t{}\t1.0\t0.0\t0.0\n".format(recording, int(start*100), int(stop*100)))
        i += len(cluster)
def predict_proba_uniform(gmm, X):
    """Posterior probabilities of ``X`` under ``gmm`` with a uniform prior.

    Unlike ``gmm.predict_proba``, the mixture weights are ignored: the raw
    per-component (log-)likelihoods are normalized directly, which amounts
    to assuming an equal prior for every component.

    Parameters
    ----------
    gmm : object
        Fitted mixture model; only its ``_estimate_log_prob(X)`` method is
        used, expected to return per-sample, per-component log likelihoods.
    X : ndarray of shape (n_samples, n_features)
        Data to evaluate.

    Returns
    -------
    ndarray of shape (n_samples, n_components)
        Posterior probabilities; each row sums to one.
    """
    # scipy.misc.logsumexp was deprecated and later removed from SciPy;
    # scipy.special.logsumexp is the supported location.
    from scipy.special import logsumexp
    likelihoods = gmm._estimate_log_prob(X)
    norm = logsumexp(likelihoods, axis=1)
    return np.exp(likelihoods - norm[:, np.newaxis])
@main.command()
@click.argument('hdf5file', type=dataset.HDF5TYPE)
@click.option('-i', '--inset', required=True)
@click.option('-o', '--outset', required=True)
@click.option('--gmm-set')
@click.option('--universal-gmm', type=click.File('rb'))
@click.option('--uniform-prior', is_flag=True)
def extract_posteriors(hdf5file, inset, outset, gmm_set, universal_gmm, uniform_prior):
    """Extract GMM posterior probabilities for every frame of ``inset``.

    Uses either a single pickled GMM (``--universal-gmm``) or per-section
    GMMs stored in the metadata of ``--gmm-set`` (defaulting to the input
    set itself).  With ``--uniform-prior`` the mixture weights are ignored.
    """
    if universal_gmm is not None:
        gmm = pickle.load(universal_gmm)
    else:
        # Filled in lazily by update_gmm() as sections are visited.
        gmm = None
    datset = hdf5file[inset]
    if gmm_set is None:
        gmm_set = datset
    else:
        gmm_set = hdf5file[gmm_set]

    @features.feature_extractor()
    def extract_gmm(**kwargs):
        # Output dimensionality equals the number of mixture components.
        if universal_gmm is None:
            dims = gmm_set[0].metadata.n_components
        else:
            dims = gmm.n_components

        def extractor(data):
            if uniform_prior:
                return predict_proba_uniform(gmm, data)
            else:
                return gmm.predict_proba(data)
        return extractor, (dims,)

    def update_gmm(sec, level):
        # Traversal callback: when a new top-level section starts, swap in
        # that section's GMM (unless a universal GMM is in use).
        nonlocal gmm
        if level == 1:
            print(sec.name)
        if universal_gmm is None and level == 1:
            gmm = gmm_set[sec.name].metadata

    extractor, dims = extract_gmm()
    outset = hdf5file.create_dataset(outset, dims, 'f4', overwrite=True)
    features.transform_dataset(extractor, datset, outset, callback=update_gmm)
@main.command()
@click.argument('hdf5file', type=dataset.HDF5TYPE)
@click.option('-i', '--inset', required=True)
@click.option('-c', '--clusters', type=int, default=128)
@click.option('--universal', type=click.Path())
@click.option('--covariance', type=click.Choice(['full', 'diag']), required=True)
def cluster(hdf5file, inset, clusters, universal, covariance):
    """Fit Gaussian mixture models to the features in ``inset``.

    Without ``--universal``, one GMM is fitted per top-level section and
    stored in that section's metadata.  With ``--universal``, a single GMM
    is fitted on all data and pickled to the given path.
    """
    import pickle
    import sklearn.mixture

    def fit_gmm(samples, **overrides):
        # Start from shared defaults and let callers override them.
        params = {"init_params": 'random'}
        params.update(overrides)
        print("Clustering with parameters", params)
        model = sklearn.mixture.GaussianMixture(
            n_components=clusters, verbose=2,
            covariance_type=covariance,
            **params)
        model.fit(samples)
        return model

    dset = hdf5file[inset]
    if universal is not None:
        data = dset.data[:]
        print("Clustering all data of size {}".format(data.shape))
        gmm = fit_gmm(data, max_iter=200, tol=0.0001)
        with open(universal, 'wb') as f:
            pickle.dump(gmm, f)
    else:
        for section in dset:
            data = section.data[:]
            print("Clustering section {} of size {}".format(section.name, data.shape))
            section.metadata = fit_gmm(data)
# Entry point: dispatch to one of the click subcommands defined above.
if __name__ == '__main__':
    main()
| gpl-2.0 |
jor-/scipy | scipy/ndimage/fourier.py | 15 | 11266 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import numpy
from . import _ni_support
from . import _nd_image
__all__ = ['fourier_gaussian', 'fourier_uniform', 'fourier_ellipsoid',
'fourier_shift']
def _get_output_fourier(output, input):
    """Allocate or validate the output array for a Fourier filter.

    ``output`` may be None (allocate, preserving a complex64/complex128/
    float32 input dtype and falling back to float64 otherwise), a dtype
    class (allocate with that dtype, restricted to the supported set), or
    a pre-allocated array whose shape must match ``input``.
    """
    if output is None:
        preserved = (numpy.complex64, numpy.complex128, numpy.float32)
        dtype = input.dtype if input.dtype.type in preserved else numpy.float64
        return numpy.zeros(input.shape, dtype=dtype)
    if type(output) is type:
        allowed = (numpy.complex64, numpy.complex128,
                   numpy.float32, numpy.float64)
        if output not in allowed:
            raise RuntimeError("output type not supported")
        return numpy.zeros(input.shape, dtype=output)
    if output.shape != input.shape:
        raise RuntimeError("output shape not correct")
    return output
def _get_output_fourier_complex(output, input):
    """Allocate or validate a complex output array for ``fourier_shift``.

    Same contract as ``_get_output_fourier`` but only complex64 and
    complex128 results are permitted.
    """
    if output is None:
        if input.dtype.type in (numpy.complex64, numpy.complex128):
            return numpy.zeros(input.shape, dtype=input.dtype)
        return numpy.zeros(input.shape, dtype=numpy.complex128)
    if type(output) is type:
        if output not in (numpy.complex64, numpy.complex128):
            raise RuntimeError("output type not supported")
        return numpy.zeros(input.shape, dtype=output)
    if output.shape != input.shape:
        raise RuntimeError("output shape not correct")
    return output
def fourier_gaussian(input, sigma, n=-1, axis=-1, output=None):
    """
    Multi-dimensional Gaussian fourier filter.

    The array is multiplied with the fourier transform of a Gaussian
    kernel.

    Parameters
    ----------
    input : array_like
        The input array.
    sigma : float or sequence
        The sigma of the Gaussian kernel. If a float, `sigma` is the same for
        all axes. If a sequence, `sigma` has to contain one value for each
        axis.
    n : int, optional
        If `n` is negative (default), then the input is assumed to be the
        result of a complex fft.
        If `n` is larger than or equal to zero, the input is assumed to be the
        result of a real fft, and `n` gives the length of the array before
        transformation along the real transform direction.
    axis : int, optional
        The axis of the real transform.
    output : ndarray, optional
        If given, the result of filtering the input is placed in this array.
        None is returned in this case.

    Returns
    -------
    fourier_gaussian : ndarray
        The filtered input.

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import numpy.fft
    >>> import matplotlib.pyplot as plt
    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ascent = misc.ascent()
    >>> input_ = numpy.fft.fft2(ascent)
    >>> result = ndimage.fourier_gaussian(input_, sigma=4)
    >>> result = numpy.fft.ifft2(result)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
    >>> plt.show()
    """
    input = numpy.asarray(input)
    output = _get_output_fourier(output, input)
    axis = _ni_support._check_axis(axis, input.ndim)
    # The C routine needs a contiguous float64 vector of per-axis sigmas.
    sigmas = numpy.ascontiguousarray(
        _ni_support._normalize_sequence(sigma, input.ndim),
        dtype=numpy.float64)
    # Final argument selects the filter type (0: Gaussian; cf. the sibling
    # functions which pass 1 and 2).
    _nd_image.fourier_filter(input, sigmas, n, axis, output, 0)
    return output
def fourier_uniform(input, size, n=-1, axis=-1, output=None):
    """
    Multi-dimensional uniform fourier filter.

    The array is multiplied with the fourier transform of a box of given
    size.

    Parameters
    ----------
    input : array_like
        The input array.
    size : float or sequence
        The size of the box used for filtering.
        If a float, `size` is the same for all axes. If a sequence, `size` has
        to contain one value for each axis.
    n : int, optional
        If `n` is negative (default), then the input is assumed to be the
        result of a complex fft.
        If `n` is larger than or equal to zero, the input is assumed to be the
        result of a real fft, and `n` gives the length of the array before
        transformation along the real transform direction.
    axis : int, optional
        The axis of the real transform.
    output : ndarray, optional
        If given, the result of filtering the input is placed in this array.
        None is returned in this case.

    Returns
    -------
    fourier_uniform : ndarray
        The filtered input.

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import numpy.fft
    >>> import matplotlib.pyplot as plt
    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ascent = misc.ascent()
    >>> input_ = numpy.fft.fft2(ascent)
    >>> result = ndimage.fourier_uniform(input_, size=20)
    >>> result = numpy.fft.ifft2(result)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
    >>> plt.show()
    """
    input = numpy.asarray(input)
    output = _get_output_fourier(output, input)
    axis = _ni_support._check_axis(axis, input.ndim)
    # The C routine needs a contiguous float64 vector of per-axis sizes.
    sizes = numpy.ascontiguousarray(
        _ni_support._normalize_sequence(size, input.ndim),
        dtype=numpy.float64)
    # Final argument selects the filter type (1: uniform box).
    _nd_image.fourier_filter(input, sizes, n, axis, output, 1)
    return output
def fourier_ellipsoid(input, size, n=-1, axis=-1, output=None):
    """
    Multi-dimensional ellipsoid fourier filter.

    The array is multiplied with the fourier transform of a ellipsoid of
    given sizes.

    Parameters
    ----------
    input : array_like
        The input array.
    size : float or sequence
        The size of the box used for filtering.
        If a float, `size` is the same for all axes. If a sequence, `size` has
        to contain one value for each axis.
    n : int, optional
        If `n` is negative (default), then the input is assumed to be the
        result of a complex fft.
        If `n` is larger than or equal to zero, the input is assumed to be the
        result of a real fft, and `n` gives the length of the array before
        transformation along the real transform direction.
    axis : int, optional
        The axis of the real transform.
    output : ndarray, optional
        If given, the result of filtering the input is placed in this array.
        None is returned in this case.

    Returns
    -------
    fourier_ellipsoid : ndarray
        The filtered input.

    Notes
    -----
    This function is implemented for arrays of rank 1, 2, or 3.

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import numpy.fft
    >>> import matplotlib.pyplot as plt
    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ascent = misc.ascent()
    >>> input_ = numpy.fft.fft2(ascent)
    >>> result = ndimage.fourier_ellipsoid(input_, size=20)
    >>> result = numpy.fft.ifft2(result)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
    >>> plt.show()
    """
    input = numpy.asarray(input)
    output = _get_output_fourier(output, input)
    axis = _ni_support._check_axis(axis, input.ndim)
    # The C routine needs a contiguous float64 vector of per-axis sizes.
    sizes = numpy.ascontiguousarray(
        _ni_support._normalize_sequence(size, input.ndim),
        dtype=numpy.float64)
    # Final argument selects the filter type (2: ellipsoid).
    _nd_image.fourier_filter(input, sizes, n, axis, output, 2)
    return output
def fourier_shift(input, shift, n=-1, axis=-1, output=None):
    """
    Multi-dimensional fourier shift filter.

    The array is multiplied with the fourier transform of a shift operation.

    Parameters
    ----------
    input : array_like
        The input array.
    shift : float or sequence
        The size of the box used for filtering.
        If a float, `shift` is the same for all axes. If a sequence, `shift`
        has to contain one value for each axis.
    n : int, optional
        If `n` is negative (default), then the input is assumed to be the
        result of a complex fft.
        If `n` is larger than or equal to zero, the input is assumed to be the
        result of a real fft, and `n` gives the length of the array before
        transformation along the real transform direction.
    axis : int, optional
        The axis of the real transform.
    output : ndarray, optional
        If given, the result of shifting the input is placed in this array.
        None is returned in this case.

    Returns
    -------
    fourier_shift : ndarray
        The shifted input.

    Examples
    --------
    >>> from scipy import ndimage, misc
    >>> import matplotlib.pyplot as plt
    >>> import numpy.fft
    >>> fig, (ax1, ax2) = plt.subplots(1, 2)
    >>> plt.gray()  # show the filtered result in grayscale
    >>> ascent = misc.ascent()
    >>> input_ = numpy.fft.fft2(ascent)
    >>> result = ndimage.fourier_shift(input_, shift=200)
    >>> result = numpy.fft.ifft2(result)
    >>> ax1.imshow(ascent)
    >>> ax2.imshow(result.real)  # the imaginary part is an artifact
    >>> plt.show()
    """
    input = numpy.asarray(input)
    # Shifting always produces a complex result.
    output = _get_output_fourier_complex(output, input)
    axis = _ni_support._check_axis(axis, input.ndim)
    # The C routine needs a contiguous float64 vector of per-axis shifts.
    shifts = numpy.ascontiguousarray(
        _ni_support._normalize_sequence(shift, input.ndim),
        dtype=numpy.float64)
    _nd_image.fourier_shift(input, shifts, n, axis, output)
    return output
| bsd-3-clause |
josephmfaulkner/stoqs | stoqs/loaders/CANON/realtime/Contour.py | 1 | 39846 | #!/usr/bin/env python
__author__ = 'D.Cline'
__version__ = '$Revision: $'.split()[1]
__date__ = '$Date: $'.split()[1]
__copyright__ = '2011'
__license__ = 'GPL v3'
__contact__ = 'dcline at mbari.org'
__doc__ = '''
Creates still and animated contour and dot plots plots from MBARI LRAUV data
D Cline
MBARI 25 September 2015
@var __date__: Date of last svn commit
@undocumented: __doc__ parser
@status: production
@license: GPL
'''
import os
import sys
os.environ['DJANGO_SETTINGS_MODULE']='settings'
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../")) # settings.py is one dir up
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../../"))
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from django.contrib.gis.geos import LineString, MultiLineString, Point
import numpy as np
import re
import time
import pytz
import logging
import signal
import datetime
import ephem
import bisect
import tempfile
import shutil
from django.contrib.gis.geos import fromstr, MultiPoint
from django.db.models import Max, Min
from collections import OrderedDict
from collections import defaultdict
from django.db import connections
from datetime import datetime, timedelta, tzinfo
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.dates import DateFormatter
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
#from matplotlib.mlab import griddata
from numpy import arange
from scipy.interpolate import griddata
from scipy.spatial import ckdtree
from scipy.interpolate import Rbf
from mpl_toolkits.basemap import Basemap
from stoqs.models import Activity, ActivityParameter, ParameterResource, Platform, SimpleDepthTime, MeasuredParameter, Measurement, Parameter
from utils.utils import percentile
from matplotlib.transforms import Bbox, TransformedBbox, blended_transform_factory
from matplotlib import dates
from mpl_toolkits.axes_grid1.inset_locator import BboxPatch, BboxConnector, BboxConnectorPatch
# Set up global variables for logging output to STDOUT
logger = logging.getLogger('monitorTethysHotSpotLogger')
fh = logging.StreamHandler()
# Include level, UTC timestamp, and code location in every log record.
f = logging.Formatter("%(levelname)s %(asctime)sZ %(filename)s %(funcName)s():%(lineno)d %(message)s")
fh.setFormatter(f)
logger.addHandler(fh)
logger.setLevel(logging.DEBUG)
class NoPPDataException(Exception):
    """Raised when no parameter data is available for a requested plot."""
    pass
class Contour(object):
'''
Create plots for visualizing data from LRAUV vehicles
'''
    def __init__(self, startDatetime, endDatetime, database, platformName, plotGroup, title, outFilename, autoscale, plotDotParmName, booleanPlotGroup, animate=False, zoom=6, overlap=3):
        '''
        Store the plot configuration.

        @param startDatetime, endDatetime: time window to plot
        @param database: STOQS database alias to query
        @param platformName: list of platform names
        @param plotGroup: list of comma-separated parameter-name groups
        @param title: figure title
        @param outFilename: output image file name
        @param autoscale: if True, scale color ranges from data percentiles
        @param plotDotParmName: parameter used for the dot (position) plot
        @param booleanPlotGroup: parameters treated as boolean step plots
        @param animate: if True, produce an animation over sliding windows
        @param zoom, overlap: animation window length and overlap (hours)
        '''
        self.startDatetime = startDatetime
        self.endDatetime = endDatetime
        self.platformName = platformName
        self.plotGroup = plotGroup
        # Subset of plotGroup for which data was actually found.
        self.plotGroupValid = []
        self.title = title
        self.animate = animate
        self.outFilename = outFilename
        self.database = database
        self.autoscale = autoscale
        self.plotDotParmName = plotDotParmName
        self.booleanPlotGroup = booleanPlotGroup
        self.zoom = zoom
        self.overlap = overlap
        self.dirpath = []
    def getActivityExtent(self,startDatetime, endDatetime):
        '''
        Get spatial temporal extent for a platform.

        Sets self.activityStartTime / self.activityEndTime as a side effect
        and returns the geographic extent of the activities' map tracks.
        '''
        qs = Activity.objects.using(self.database).filter(platform__name__in=self.platformName)
        qs = qs.filter(startdate__gte=startDatetime)
        qs = qs.filter(enddate__lte=endDatetime)
        # Overall time span across all matching activities.
        seaQS = qs.aggregate(Min('startdate'), Max('enddate'))
        self.activityStartTime = seaQS['startdate__min']
        self.activityEndTime = seaQS['enddate__max']
        dataExtent = qs.extent(field_name='maptrack')
        return dataExtent
def getAxisInfo(self, parm):
'''
Return appropriate min and max values and units for a parameter name
'''
# Get the 1 & 99 percentiles of the data for setting limits on the scatter plot
apQS = ActivityParameter.objects.using(self.database).filter(activity__platform__name=self.platformName)
pQS = apQS.filter(parameter__name=parm).aggregate(Min('p010'), Max('p990'))
min, max = (pQS['p010__min'], pQS['p990__max'])
# Get units for each parameter
prQS = ParameterResource.objects.using(self.database).filter(resource__name='units').values_list('resource__value')
try:
units = prQS.filter(parameter__name=parm)[0][0]
except IndexError as e:
raise Exception("Unable to get units for parameter name %s from platform %s" % (parm, self.platformName))
sys.exit(-1)
return min, max, units
    def getTimeSeriesData(self, startDatetime, endDatetime):
        '''
        Return time series of a list of Parameters from a Platform

        @return: (data_dict, data_start_dt, data_end_dt) where data_dict maps
                 platform+parameter keys to dicts of parallel lists (newest
                 sample first) plus units and percentile bounds, and the two
                 datetimes bound the data actually found (falling back to the
                 requested window when nothing was loaded).
        '''
        data_dict = defaultdict(lambda: {'datetime': [], 'lon': [], 'lat': [], 'depth': [], 'datavalue':[], 'units':'', 'p010':'', 'p990':''})
        start_dt= []
        end_dt = []
        if not self.plotGroup :
            raise Exception('Must specify list plotGroup')
        for pln in self.platformName:
            for g in self.plotGroup:
                parameters = [x.strip() for x in g.split(',')]
                parameters_valid = []
                try:
                    for pname in parameters:
                        # Percentile bounds and units for this parameter.
                        apQS = ActivityParameter.objects.using(self.database)
                        apQS = apQS.filter(activity__platform__name=pln)
                        apQS = apQS.filter(parameter__name=pname)
                        pQS = apQS.aggregate(Min('p010'), Max('p990'))
                        # NOTE(review): shadows the min()/max() builtins; the
                        # values are unused after the next two assignments.
                        min, max = (pQS['p010__min'], pQS['p990__max'])
                        data_dict[pln+pname]['p010'] = pQS['p010__min']
                        data_dict[pln+pname]['p990'] = pQS['p990__max']
                        units=apQS.values('parameter__units')
                        data_dict[pln+pname]['units'] = units[0]['parameter__units']
                        # Measured values within the requested time window.
                        qs = MeasuredParameter.objects.using(self.database)
                        qs = qs.filter(measurement__instantpoint__timevalue__gte=startDatetime)
                        qs = qs.filter(measurement__instantpoint__timevalue__lte=endDatetime)
                        qs = qs.filter(parameter__name=pname)
                        qs = qs.filter(measurement__instantpoint__activity__platform__name=pln)
                        sdt_count = qs.values_list('measurement__instantpoint__simpledepthtime__depth').count()
                        qs = qs.values('measurement__instantpoint__timevalue', 'measurement__depth', 'measurement__geom', 'datavalue').order_by('measurement__instantpoint__timevalue')
                        data_dict[pln+pname]['sdt_count'] = sdt_count
                        # only plot data with more than one point
                        if len(qs) > 0:
                            for rs in qs:
                                geom = rs['measurement__geom']
                                lat = geom.y
                                lon = geom.x
                                # insert(0, ...) reverses the query order, so
                                # the lists end up newest-first.
                                data_dict[pln+pname]['lat'].insert(0, lat)
                                data_dict[pln+pname]['lon'].insert(0, lon)
                                data_dict[pln+pname]['datetime'].insert(0, rs['measurement__instantpoint__timevalue'])
                                data_dict[pln+pname]['depth'].insert(0, rs['measurement__depth'])
                                data_dict[pln+pname]['datavalue'].insert(0, rs['datavalue'])
                            # for salinity, throw out anything less than 20 and do the percentiles manually
                            if pname.find('salinity') != -1 :
                                numpvar = np.array(data_dict[pln+pname]['datavalue'])
                                numpvar_filtered = numpvar[numpvar>20.0]
                                numpvar_filtered.sort()
                                listvar = list(numpvar_filtered)
                                p010 = percentile(listvar, 0.010)
                                p990 = percentile(listvar, 0.990)
                                data_dict[pln+pname]['p010'] = p010
                                data_dict[pln+pname]['p990'] = p990
                            # dates are in reverse order - newest first
                            start_dt.append(data_dict[pln+pname]['datetime'][-1])
                            end_dt.append(data_dict[pln+pname]['datetime'][0])
                            logger.debug('Loaded data for parameter %s' % pname)
                            parameters_valid.append(pname)
                except Exception, e:
                    logger.error('%s not available in database for the dates %s %s' %(pname, startDatetime, endDatetime))
                    continue
                # NOTE(review): this second except clause duplicates the one
                # above and can never be reached.
                except Exception, e:
                    logger.error('%s not available in database for the dates %s %s' %(pname, startDatetime, endDatetime))
                    continue
                if len(parameters_valid) > 0:
                    self.plotGroupValid.append(','.join(parameters_valid))
        # get the ranges of the data
        if start_dt and end_dt:
            data_start_dt = sorted(start_dt)[0]
            data_end_dt = sorted(end_dt)[-1]
        else:
            #otherwise default to requested dates
            data_start_dt = startDatetime
            data_end_dt = endDatetime
        return data_dict, data_start_dt, data_end_dt
    def getMeasuredPPData(self, startDatetime, endDatetime, platform, parm):
        '''
        Return measured values, their positions, and activity map tracks for
        one parameter on one platform within the given time window.

        @return: (data, points, maptracks) — parallel lists of float values
                 and Point positions, plus one maptrack geometry per distinct
                 activity encountered
        '''
        points = []
        data = []
        activity_names = []
        maptracks = []
        try:
            qs = MeasuredParameter.objects.using(self.database)
            qs = qs.filter(measurement__instantpoint__timevalue__gte=startDatetime)
            qs = qs.filter(measurement__instantpoint__timevalue__lte=endDatetime)
            qs = qs.filter(parameter__name=parm)
            qs = qs.filter(measurement__instantpoint__activity__platform__name=platform)
            qs = qs.values('measurement__instantpoint__timevalue', 'measurement__geom', 'parameter', 'datavalue', 'measurement__instantpoint__activity__maptrack', 'measurement__instantpoint__activity__name').order_by('measurement__instantpoint__timevalue')
            for rs in qs:
                geom = rs['measurement__geom']
                lon = geom.x
                lat = geom.y
                pt = Point(float(lon),float(lat))
                points.append(pt)
                value = rs['datavalue']
                data.append(float(value))
                geom = rs['measurement__instantpoint__activity__maptrack']
                activity_name = rs['measurement__instantpoint__activity__name']
                # only keep maptracks from new activities
                if not any(activity_name in s for s in activity_names):
                    activity_names.append(activity_name)
                    maptracks.append(geom)
        except Exception, e:
            # Best-effort: log and return whatever was collected so far.
            logger.error('%s not available in database for the dates %s %s' %(parm, startDatetime, endDatetime))
        return data, points, maptracks
def loadData(self, startDatetime, endDatetime):
try:
self.data, dataStart, dataEnd = self.getTimeSeriesData(startDatetime, endDatetime)
return dataStart, dataEnd
except Exception, e:
logger.warn(e)
raise e
return startDatetime, endDatetime
    class DateFormatter(mpl.ticker.Formatter):
        '''
        Tick formatter that renders scaled epoch-second tick values as
        local (America/Los_Angeles) "YYYY-MM-DD HH:MM" strings.

        NOTE(review): this nested class shadows the DateFormatter imported
        from matplotlib.dates at module level.
        '''
        def __init__(self, scale_factor=1):
            # Multiplier applied to tick values to recover epoch seconds.
            self.scale_factor = scale_factor
        def __call__(self, x, pos=None):
            # Interpret the tick as UTC, then convert to local time.
            d = time.gmtime(x*self.scale_factor)
            utc = datetime(*d[:6])
            local_tz = pytz.timezone('America/Los_Angeles')
            utc_tz = pytz.timezone('UTC')
            utc = utc.replace(tzinfo=utc_tz)
            pst = utc.astimezone(local_tz)
            return pst.strftime('%Y-%m-%d %H:%M')
def readCLT(self, fileName):
'''
Read the color lookup table from disk and return a python list of rgb tuples.
'''
cltList = []
for rgb in open(fileName, 'r'):
(r, g, b) = rgb.split(' ')[1:]
cltList.append([float(r), float(g), float(b)])
return cltList
    def shadeNight(self,ax,xdates,miny,maxy):
        '''
        Shades plots during local nighttime hours

        @param ax: matplotlib axes to draw on
        @param xdates: epoch-second x values of the plotted data
        @param miny, maxy: vertical extent of the shaded band
        '''
        utc_zone = pytz.utc
        if len(xdates) < 50:
            logger.debug("skipping day/night shading - too few points")
            return
        datetimes = []
        for xdt in xdates:
            dt = datetime.fromtimestamp(xdt)
            datetimes.append(dt.replace(tzinfo=utc_zone))
        # Fixed observer location for sunrise/sunset computation.
        loc = ephem.Observer()
        loc.lat = '36.7087' # Monterey Bay region
        loc.lon = '-121.0000'
        loc.elev = 0
        sun = ephem.Sun(loc)
        mint=min(datetimes)
        maxt=max(datetimes)
        numdays = (maxt - mint).days
        # One sunrise/sunset per day covered by the data.
        d = [mint + timedelta(days=dt) for dt in xrange(numdays+1)]
        d.sort()
        sunrise = map(lambda x:dates.date2num(loc.next_rising(sun,start=x).datetime()),d)
        sunset = map(lambda x:dates.date2num(loc.next_setting(sun,start=x).datetime()),d)
        result = []
        for st in datetimes:
            # A point is in night when it falls between a sunset and the
            # following sunrise (bisect positions differ).
            result.append(bisect.bisect(sunrise, dates.date2num(st)) != bisect.bisect(sunset, dates.date2num(st)))
        if self.scale_factor:
            scale_xdates = [x/self.scale_factor for x in xdates]
        else:
            scale_xdates = xdates
        ax.fill_between(scale_xdates, miny, maxy, where=result, facecolor='#C8C8C8', edgecolor='none', alpha=0.3)
def createPlot(self, startDatetime, endDatetime):
if len(self.data) == 0:
logger.debug('no data found to plot')
raise Exception('no data found to plot')
# GridSpecs for plots
outer_gs = gridspec.GridSpec(nrows=2, ncols=1, height_ratios=[1,3])
# tighten up space between plots
outer_gs.update(left=0.10, right=0.90, hspace=0.05)
map_gs = gridspec.GridSpecFromSubplotSpec(nrows=1, ncols=1, subplot_spec=outer_gs[0])
lower_gs = gridspec.GridSpecFromSubplotSpec(nrows=len(self.plotGroupValid), ncols=1, subplot_spec=outer_gs[1])
STATIC_ROOT = '/var/www/html/stoqs/static' # Warning: Hard-coded
clt = self.readCLT(os.path.join(STATIC_ROOT, 'colormaps', 'jetplus.txt'))
self.cm_jetplus = mpl.colors.ListedColormap(np.array(clt))
# start a new figure - size is in inches
fig = plt.figure(figsize=(8, 10))
fig.suptitle(self.title+'\n'+self.subtitle1+'\n'+self.subtitle2, fontsize=8)
pn = self.platformName[0]
# bound the depth to cover max of all parameter group depths
# and flag removal of scatter plot if have more than 2000 points in any parameter
maxy = 0
for group in self.plotGroupValid:
parm = [x.strip() for x in group.split(',')]
for name in parm:
y = max(self.data[pn+name]['depth'])
sz = len(self.data[pn+name]['datavalue'])
if y > maxy:
maxy = y
# pad the depth by 20 meters to make room for parameter name to be displayed at bottom
rangey = [0.0, int(maxy) + 20]
i = 0
# add contour plots for each parameter group
for group in self.plotGroupValid:
parm = [x.strip() for x in group.split(',')]
plot_step = sum([self.data[pn+p]['units'].count('bool') for p in parm]) # counter the number of boolean plots in the groups
plot_scatter = len(parm) - plot_step # otherwise all other plots are scatter plots
#plot_dense = sum([val for val in len(self.data[pn+name]['datavalue']) > 2000]) # if more than 2000 points, skip the scatter plot
# choose the right type of gridspec to display the data
if plot_scatter:
# one row for scatter and one for contour
plot_gs = gridspec.GridSpecFromSubplotSpec(nrows=len(parm)*2, ncols=2, subplot_spec=lower_gs[i], width_ratios=[30,1], wspace=0.05)
else:
# one row for step plots/or contour plots
plot_gs = gridspec.GridSpecFromSubplotSpec(nrows=len(parm), ncols=2, subplot_spec=lower_gs[i], width_ratios=[30,1], wspace=0.05)
j = 0
i += 1
for name in parm:
title = name
x = [time.mktime(xe.timetuple()) for xe in self.data[pn+name]['datetime']]
y = self.data[pn+name]['depth']
z = self.data[pn+name]['datavalue']
sdt_count = self.data[pn+name]['sdt_count']
units = '(' + self.data[pn+p]['units'] + ')'
if len(z):
if self.autoscale:
rangez = [self.data[pn+p]['p010'],self.data[pn+p]['p990']]
else:
rangez = [min(z), max(z)]
else:
rangez = [0, 0]
if name.find('chlorophyll') != -1 :
if not self.autoscale:
rangez = [0.0, 10.0]
if name.find('salinity') != -1 :
if not self.autoscale:
rangez = [33.3, 34.9]
units = ''
if name.find('temperature') != -1 :
if not self.autoscale:
rangez = [10.0, 14.0]
units = ' ($^\circ$C)'
logger.debug('getting subplot ax0')
gs = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=plot_gs[j])
ax0_plot = plt.Subplot(fig, gs[:])
fig.add_subplot(ax0_plot)
gs = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=plot_gs[j+1])
ax0_colorbar = plt.Subplot(fig, gs[:])
fig.add_subplot(ax0_colorbar)
if plot_scatter:
logger.debug('getting subplot ax1')
gs = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=plot_gs[j + 2])
ax1_plot = plt.Subplot(fig, gs[:])
fig.add_subplot(ax1_plot)
gs = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=plot_gs[j + 3])
ax1_colorbar = plt.Subplot(fig, gs[:])
fig.add_subplot(ax1_colorbar)
# if no data found add in some fake data and plot a placeholder time/depth plot
if not x:
tmin = time.mktime(startDatetime.timetuple())
tmax = time.mktime(endDatetime.timetuple())
x.append(tmin)
x.append(tmax)
y.append(np.NaN)
y.append(np.NaN)
z.append(np.NaN)
z.append(np.NaN)
if plot_scatter:
cs0, zi = self.createContourPlot(title + pn,ax0_plot,x,y,z,rangey,rangez,startDatetime,endDatetime,sdt_count)
cs1 = self.createScatterPlot(title + pn,ax1_plot,x,y,z,rangey,rangez,startDatetime,endDatetime)
else:
if plot_step:
cs0 = self.createStepPlot(title + pn,title,ax0_plot,x,z,rangez,startDatetime,endDatetime)
else:
cs0, zi = self.createContourPlot(title + pn,ax0_plot,x,y,z,rangey,rangez,startDatetime,endDatetime,sdt_count)
if plot_scatter:
ax1_plot.text(0.95,0.02, name, verticalalignment='bottom',
horizontalalignment='right',transform=ax1_plot.transAxes,color='black',fontsize=8)
else:
ax0_plot.text(0.95,0.02, name, verticalalignment='bottom',
horizontalalignment='right',transform=ax0_plot.transAxes,color='black',fontsize=8)
self.shadeNight(ax0_plot,sorted(x),rangey[0], rangey[1])
if plot_scatter:
self.shadeNight(ax1_plot,sorted(x),rangey[0], rangey[1])
logger.debug('plotting colorbars')
if plot_scatter:
cbFormatter = FormatStrFormatter('%.2f')
cb = plt.colorbar(cs0, cax=ax0_colorbar, ticks=[min(rangez), max(rangez)], format=cbFormatter, orientation='vertical')
cb.set_label(units,fontsize=8)#,labelpad=5)
cb.ax.xaxis.set_ticks_position('top')
for t in cb.ax.yaxis.get_ticklabels():
t.set_fontsize(8)
cb = plt.colorbar(cs1, cax=ax1_colorbar, ticks=[min(rangez), max(rangez)], format=cbFormatter, orientation='vertical')
cb.set_label(units,fontsize=8)#,labelpad=5)
cb.ax.xaxis.set_ticks_position('top')
for t in cb.ax.yaxis.get_ticklabels():
t.set_fontsize(8)
else:
if plot_step:
xaxis = ax0_colorbar.xaxis
ax0_colorbar.xaxis.set_major_locator(plt.NullLocator())
ax0_colorbar.yaxis.set_ticks_position('right')
for t in ax0_colorbar.yaxis.get_ticklabels():
t.set_fontsize(8)
else:
cbFormatter = FormatStrFormatter('%.2f')
cb = plt.colorbar(cs0, cax=ax0_colorbar, ticks=[min(rangez), max(rangez)], format=cbFormatter, orientation='vertical')
cb.set_label(units,fontsize=8)#,labelpad=5)
cb.ax.xaxis.set_ticks_position('top')
for t in cb.ax.yaxis.get_ticklabels():
t.set_fontsize(8)
# Rotate and show the date with date formatter in the last plot of all the groups
logger.debug('rotate and show date with date formatter')
if name is parm[-1] and group is self.plotGroupValid[-1]:
x_fmt = self.DateFormatter(self.scale_factor)
if plot_scatter:
ax = ax1_plot
# Don't show on the upper contour plot
ax0_plot.xaxis.set_ticks([])
else:
ax = ax0_plot
ax.xaxis.set_major_formatter(x_fmt)
# Rotate date labels
for label in ax.xaxis.get_ticklabels():
label.set_rotation(10)
else:
ax0_plot.xaxis.set_ticks([])
if plot_scatter:
ax1_plot.xaxis.set_ticks([])
if plot_scatter:
j+=4
else:
j+=2
# plot tracks
ax = plt.Subplot(fig, map_gs[:])
fig.add_subplot(ax, aspect='equal')
logger.debug('computing activity extents')
z = []
maptracks = None
z, points, maptracks = self.getMeasuredPPData(startDatetime, endDatetime, pn, self.plotDotParmName)
# get the percentile ranges for this to autoscale
pointsnp = np.array(points)
lon = pointsnp[:,0]
lat = pointsnp[:,1]
if self.extent:
ltmin = self.extent[1]
ltmax = self.extent[3]
lnmin = self.extent[0]
lnmax = self.extent[2]
lndiff = abs(lnmax - lnmin)
ltdiff = abs(ltmax - ltmin)
logger.debug("lon diff %f lat diff %f" %(lndiff, ltdiff))
mindeg = .02
paddeg = .01
if lndiff < mindeg :
lnmin -= mindeg
lnmax += mindeg
if ltdiff < mindeg:
ltmin -= mindeg
ltmax += mindeg
e = (lnmin - paddeg, ltmin - paddeg, lnmax + paddeg, ltmax + paddeg)
else:
# default map to Monterey Bay region
ltmin = 36.61
ltmax = 36.97
lnmin = -122.21
lnmax = -121.73
e = (lnmin, ltmin, lnmax, ltmax)
logger.debug('Extent found %f,%f,%f,%f)' % (e[0], e[1],e[2],e[3]))
# retry up to 5 times to get the basemap
for i in range(0, 5):
mp = Basemap(llcrnrlon=e[0], llcrnrlat=e[1], urcrnrlon=e[2], urcrnrlat=e[3], projection='cyl', resolution='l', ax=ax)
try:
##mp.wmsimage('http://www.gebco.net/data_and_products/gebco_web_services/web_map_service/mapserv?', layers=['GEBCO_08_Grid']) # Works, but coarse
mp.arcgisimage(server='http://services.arcgisonline.com/ArcGIS', service='Ocean_Basemap')
mp.drawparallels(np.linspace(e[1],e[3],num=3), labels=[True,False,False,False], fontsize=8, linewidth=0)
mp.drawmeridians(np.linspace(e[0],e[2],num=3), labels=[False,False,False,True], fontsize=8, linewidth=0)
except Exception, e:
logger.error('Could not download ocean basemap ')
mp = None
if mp is not None :
break
if mp is None :
logger.debug('Error - cannot cannot fetch basemap')
return
try:
logger.debug('plotting tracks')
if self.animate:
try:
track = LineString(points).simplify(tolerance=.001)
if track is not None:
ln,lt = zip(*track)
mp.plot(ln,lt,'-',c='k',alpha=0.5,linewidth=2, zorder=1)
except TypeError, e:
logger.warn("%s\nCannot plot map track path to None", e)
else:
for track in maptracks:
if track is not None:
ln,lt = zip(*track)
mp.plot(ln,lt,'-',c='k',alpha=0.5,linewidth=2, zorder=1)
# if have a valid series, then plot the dots
if self.plotDotParmName is not None and len(z) > 0:
if len(z) > 2000:
sz = len(z)
stride = int(sz/200)
z_stride = z[0:sz:stride]
lon_stride = lon[0:sz:stride]
lat_stride = lat[0:sz:stride]
mp.scatter(lon_stride,lat_stride,c=z_stride,marker='.',lw=0,alpha=1.0,cmap=self.cm_jetplus,zorder=2)
if stride > 1:
ax.text(0.70,0.1, ('%s (every %d points)' % (self.plotDotParmName, stride)), verticalalignment='bottom',
horizontalalignment='center',transform=ax.transAxes,color='black',fontsize=8)
else:
ax.text(0.70,0.1, ('%s (every point)' % (self.plotDotParmName)), verticalalignment='bottom',
horizontalalignment='center',transform=ax.transAxes,color='black',fontsize=8)
else:
mp.scatter(lon,lat,c=z,marker='.',lw=0,alpha=1.0,cmap=self.cm_jetplus,zorder=2)
ax.text(0.70,0.1, ('%s (every point)' % (self.plotDotParmName)), verticalalignment='bottom',
horizontalalignment='center',transform=ax.transAxes,color='black',fontsize=8)
# plot the binary markers
markers = ['o','x','d','D','8','1','2','3','4']
i = 1
parm = [x.strip() for x in self.booleanPlotGroup.split(',')]
for name in parm:
logger.debug('Plotting boolean plot group paramter %s' % name)
z, points, maptracks = self.getMeasuredPPData(startDatetime, endDatetime, self.platformName[0], name)
pointsnp = np.array(points)
lon = pointsnp[:,0]
lat = pointsnp[:,1]
# scale up the size of point
s = [20*val for val in z]
if len(z) > 0:
mp.scatter(lon,lat,s=s,marker=markers[i],c='black',label=name,zorder=3)
i = i + 1
# plot the legend outside the plot in the upper left corner
l = ax.legend(loc='upper left', bbox_to_anchor=(1,1), prop={'size':8}, scatterpoints=1)# only plot legend symbol once
l.set_zorder(4) # put the legend on top
except Exception, e:
logger.warn(e)
#mp.fillcontinents()
#mp.drawcoastlines()
if self.animate:
# append frames output as pngs with an indexed frame number before the gif extension
fname = '%s/frame_%02d.png' % (self.dirpath, self.frame)
else:
fname = self.outFilename
logger.debug('Saving figure %s ' % fname)
fig.savefig(fname,dpi=120)#,transparent=True)
plt.close()
self.frame += 1
logger.debug('Done with contourPlot')
# Register a handler for the gridding timeout (SIGALRM)
def handler(self,signum, frame):
    """SIGALRM handler: abort a gridding run that exceeds its time budget.

    Installed via signal.signal(signal.SIGALRM, self.handler) before the
    gridding calls; raising here unwinds out of the long-running
    interpolation so the caller can fall back or give up.
    """
    logger.debug("Exceeded maximum time allowed for gridding!")
    raise Exception("end of time")
def gridData(self, x, y, z, xi, yi):
    """Grid scattered (x, y, z) samples onto the regular xi/yi mesh.

    Parameters
    ----------
    x, y : sequences of floats
        Sample coordinates (time, depth).
    z : sequence of floats
        Data values at each (x, y) sample.
    xi, yi : 1-D numpy arrays
        Output grid axes; the result is evaluated on their outer product.

    Returns
    -------
    2-D numpy array of nearest-neighbour interpolated values.

    Raises
    ------
    KeyError
        Logged and re-raised so the caller can fall back to RBF gridding.
    """
    try:
        logger.debug('Gridding')
        # The None-indexing broadcasts the two 1-D axes into a 2-D mesh
        # (yi down the rows, xi across the columns).
        zi = griddata((x, y), np.array(z), (xi[None,:], yi[:,None]), method='nearest')
        logger.debug('Done gridding')
    except KeyError:
        logger.warn('Got KeyError. Could not grid the data')
        # Bare raise preserves the original traceback for the caller
        # (raise(e) re-raised from here and lost the original frame).
        raise
    return zi
def gridDataRbf(self, tmin, tmax, dmin, dmax, x, y, z):
    """Grid scattered (x, y, z) samples with a radial basis function.

    Builds a 1000x100 (time x depth) evaluation mesh over the given
    bounds and interpolates with scipy's multiquadric RBF.

    Returns
    -------
    (xi, yi, zi) : mesh coordinate arrays and interpolated values.
        On failure zi is None (and xi/yi may be empty lists).
    """
    from scipy.interpolate import Rbf
    # Pre-initialize every returned name: the original only set xi, so a
    # failure inside np.mgrid raised NameError on yi/zi at the return.
    xi = []
    yi = []
    zi = None
    try:
        # 1000 samples along time, 100 along depth
        xi, yi = np.mgrid[tmin:tmax:1000j, dmin:dmax:100j]
        # use RBF
        rbf = Rbf(x, y, z, epsilon=2)
        zi = rbf(xi, yi)
    except Exception as e:
        logger.warn('Could not grid the data' + str(e))
        zi = None
    return xi,yi,zi
def createContourPlot(self,title,ax,x,y,z,rangey,rangez,startTime,endTime,sdt_count):
    """Draw a filled time/depth contour plot of z on axes ax.

    Falls back to a scatter plot when the data cannot be gridded.
    Gridding is bounded by a 90 s SIGALRM timeout (see self.handler).
    Side effect: sets self.scale_factor, used by shadeNight/DateFormatter.

    Returns (cs, zi): the matplotlib contour/scatter artist and the
    gridded array (empty list when no gridding was done).
    """
    tmin = time.mktime(startTime.timetuple())
    tmax = time.mktime(endTime.timetuple())
    tgrid_max = 1000  # Reasonable maximum width for time-depth-flot plot is about 1000 pixels
    dgrid_max = 200  # Height of time-depth-flot plot area is 200 pixels
    dinc = 0.5  # Average vertical resolution of AUV Dorado
    nlevels = 255  # Number of color filled contour levels
    zmin = rangez[0]
    zmax = rangez[1]
    dmin = rangey[0]
    dmax = rangey[1]
    scale_factor = 1
    # 2 points define a line, take half the number of simpledepthtime points
    sdt_count = int(max(sdt_count, 2) / 2)
    if sdt_count > tgrid_max:
        sdt_count = tgrid_max
    xi = np.linspace(tmin, tmax, sdt_count)
    #print 'xi = %s' % xi
    # Make depth spacing dinc m, limit to time-depth-flot resolution (dgrid_max)
    y_count = int((dmax - dmin) / dinc )
    if y_count > dgrid_max:
        y_count = dgrid_max
    yi = np.linspace(dmin, dmax, y_count)
    #print 'yi = %s' %yi
    # Scale the time axis so time and depth are comparable for gridding;
    # a zero depth range means nothing to contour (scatter still works).
    try:
        scale_factor = float(tmax -tmin) / (dmax - dmin)
    except ZeroDivisionError, e:
        logger.warn('Not setting scale_factor. Scatter plots will still work.')
        contour_flag = False
        scale_factor = 1
    else:
        logger.warn('self.scale_factor = %f' % scale_factor)
        xi = xi / scale_factor
        xg = [xe/scale_factor for xe in x]
        contour_flag = True
    zi = []
    cs = None
    # Register the signal function handler
    signal.signal(signal.SIGALRM, self.handler)
    # Define a timeout of 90 seconds for gridding functions
    signal.alarm(90)
    if not self.data:
        logger.warn('no data found to plot')
        signal.alarm(0)  # cancel the pending alarm before bailing out
        raise Exception('no data')
    if contour_flag:
        try:
            logger.warn('Gridding data with sdt_count = %d, and y_count = %d' %(sdt_count, y_count))
            zi = self.gridData(xg, y, z, xi, yi)
            signal.alarm(0)
        except KeyError, e:
            # Nearest-neighbour gridding failed; retry with radial basis functions
            logger.warn('Got KeyError. Could not grid the data')
            contour_flag = False
            scale_factor = 1
            try:
                # use RBF
                logger.warn('Trying radial basis function')
                xi,yi,zi = self.gridDataRbf(tmin, tmax, dmin, dmax, xg, y, z)
                contour_flag = True
                signal.alarm(0)
            except Exception, e:
                logger.warn('Could not grid the data' + str(e))
        except Exception, e:
            # Any other failure (including the alarm's Exception): same RBF fallback
            logger.warn('Could not grid the data' + str(e))
            contour_flag = False
            try:
                # use RBF
                logger.warn('Trying radial basis function')
                xi,yi,zi = self.gridDataRbf(tmin, tmax, dmin, dmax, xg, y, z)
                contour_flag = True
                signal.alarm(0)
            except Exception, e:
                logger.warn('Could not grid the data' + str(e))
    try:
        if scale_factor > 1 and contour_flag:
            # x data were divided by scale_factor above; match the axis limits
            ax.set_xlim(tmin / scale_factor, tmax / scale_factor)
        else:
            ax.set_xlim(tmin, tmax)
        self.scale_factor = scale_factor
        # Inverted y axis: depth increases downward
        ax.set_ylim([dmax,dmin])
        ax.set_ylabel('depth (m)',fontsize=8)
        ax.tick_params(axis='both',which='major',labelsize=8)
        ax.tick_params(axis='both',which='minor',labelsize=8)
        if contour_flag:
            logger.debug('Contouring the data')
            cs = ax.contourf(xi, yi, zi, levels=np.linspace(zmin,zmax, nlevels), cmap=self.cm_jetplus, extend='both')
            # this will show the points where the contouring occurs
            #ax.scatter(x,y,marker='.',s=2,c='k',lw=0)
        else:
            # Gridding failed entirely: fall back to a raw scatter plot
            logger.debug('Plotting the data')
            cs = ax.scatter(x,y,c=z,s=20,marker='.',vmin=zmin,vmax=zmax,lw=0,alpha=1.0,cmap=self.cm_jetplus)
        # limit the number of ticks
        max_yticks = 5
        yloc = plt.MaxNLocator(max_yticks)
        ax.yaxis.set_major_locator(yloc)
    except Exception,e:
        logger.error(e)
    return cs, zi
def createScatterPlot(self,title,ax,x,y,z,rangey,rangez,startTime,endTime):
    """Draw a time/depth scatter plot of z on axes ax.

    Colors points by z over [rangez[0], rangez[1]] with the jet-plus
    colormap; inverts the y axis so depth increases downward.
    Side effect: resets self.scale_factor to 1 (scatter x is unscaled).

    Returns the matplotlib PathCollection, or None if plotting failed.
    """
    tmin = time.mktime(startTime.timetuple())
    tmax = time.mktime(endTime.timetuple())
    nlevels = 255  # Number of color filled contour levels (unused here; kept for parity with createContourPlot)
    zmin = rangez[0]
    zmax = rangez[1]
    dmin = rangey[0]
    dmax = rangey[1]
    # Initialize so the return below cannot raise NameError when the
    # try block fails before ax.scatter() assigns cs.
    cs = None
    try:
        ax.set_xlim(tmin, tmax)
        self.scale_factor = 1
        ax.set_ylim([dmax,dmin])
        ax.set_ylabel('depth (m)',fontsize=8)
        ax.tick_params(axis='both',which='major',labelsize=8)
        ax.tick_params(axis='both',which='minor',labelsize=8)
        logger.debug('Plotting the data')
        cs = ax.scatter(x,y,c=z,s=20,marker='.',vmin=zmin,vmax=zmax,lw=0,alpha=1.0,cmap=self.cm_jetplus)
        # limit the number of ticks
        max_yticks = 5
        yloc = plt.MaxNLocator(max_yticks)
        ax.yaxis.set_major_locator(yloc)
    except Exception as e:
        logger.error(e)
    return cs
def createStepPlot(self,title,label,ax,x,y,rangey,startTime,endTime):
    """Draw a boolean step plot of y versus time on axes ax.

    Used for parameters whose units are 'bool'. Note dmin/dmax are taken
    from rangey in the opposite order to the other plot methods, so the
    y axis here is not inverted.
    Side effect: resets self.scale_factor to 1.

    Returns the list of Line2D objects from ax.step(), or None on failure.
    """
    tmin = time.mktime(startTime.timetuple())
    tmax = time.mktime(endTime.timetuple())
    dmin = rangey[1]
    dmax = rangey[0]
    # Initialize so the return below cannot raise NameError when the
    # try block fails before ax.step() assigns cs.
    cs = None
    try:
        ax.set_xlim(tmin, tmax)
        self.scale_factor = 1
        ax.set_ylim([dmax,dmin])
        ax.set_ylabel('%s (bool)' % label,fontsize=8)
        ax.tick_params(axis='both',which='major',labelsize=8)
        ax.tick_params(axis='both',which='minor',labelsize=8)
        logger.debug('Plotting the step data')
        # NOTE(review): ax.step's label= is normally a single legend
        # string; a per-point True/False list is passed here — confirm
        # this is intentional before relying on the legend output.
        labels = []
        for val in y:
            if not val:
                labels.append('False')
            else:
                labels.append('True')
        cs = ax.step(x,y,lw=1,alpha=0.8,c='black',label=labels)
        # limit the number of ticks
        max_yticks = 5
        yloc = plt.MaxNLocator(max_yticks)
        ax.yaxis.set_major_locator(yloc)
    except Exception as e:
        logger.error(e)
    return cs
def run(self):
    """Top-level driver: load data for the configured window and plot.

    When self.animate is set, renders overlapping zoomed sections to
    numbered PNG frames in a temp directory and assembles them into an
    animated GIF with ImageMagick's convert; otherwise renders a single
    plot covering the full data extent.
    """
    self.frame = 0
    # NOTE(review): these two locals were computed but never used; the
    # per-section local-time subtitles are built inside the loops below.
    endDatetimeLocal = self.endDatetime.astimezone(pytz.timezone('America/Los_Angeles'))
    startDatetimeLocal = self.startDatetime.astimezone(pytz.timezone('America/Los_Angeles'))
    self.extent = self.getActivityExtent(self.startDatetime, self.endDatetime)
    logger.debug('Loading data')
    dataStart, dataEnd = self.loadData(self.startDatetime, self.endDatetime)
    if not self.data:
        logger.debug('No valid data to plot')
        return
    # need to fix the scale over all the plots if animating
    if self.animate:
        self.autoscale = True
    # Normalize naive datetimes to UTC so the window arithmetic below is consistent
    if dataStart.tzinfo is None:
        dataStart = pytz.utc.localize(dataStart)
    if dataEnd.tzinfo is None:
        dataEnd = pytz.utc.localize(dataEnd)
    if self.animate:
        self.dirpath = tempfile.mkdtemp()
        # (duplicate zoomWindow assignment removed)
        zoomWindow = timedelta(hours=self.zoom)
        overlapWindow = timedelta(hours=self.overlap)
        endDatetime = dataStart + zoomWindow
        startDatetime = dataStart
        try:
            # Loop through sections of the data with temporal constraints based on the window and step command line parameters
            while endDatetime <= dataEnd :
                dataEndDatetimeLocal = endDatetime.astimezone(pytz.timezone('America/Los_Angeles'))
                dataStartDatetimeLocal = startDatetime.astimezone(pytz.timezone('America/Los_Angeles'))
                logger.debug('Plotting data')
                self.subtitle1 = '%s to %s PDT' % (dataStartDatetimeLocal.strftime('%Y-%m-%d %H:%M'), dataEndDatetimeLocal.strftime('%Y-%m-%d %H:%M'))
                self.subtitle2 = '%s to %s UTC' % (startDatetime.strftime('%Y-%m-%d %H:%M'), endDatetime.strftime('%Y-%m-%d %H:%M'))
                self.createPlot(startDatetime, endDatetime)
                # Slide the window forward, keeping overlapWindow of context
                startDatetime = endDatetime - overlapWindow
                endDatetime = startDatetime + zoomWindow
            cmd = "convert -loop 1 -delay 250 %s/frame*.png %s" % (self.dirpath,self.outFilename)
            logger.debug(cmd)
            os.system(cmd)
            shutil.rmtree(self.dirpath)
        except Exception as e:
            logger.error(e)
    else :
        try:
            if self.data is not None:
                dataEndDatetimeLocal = dataEnd.astimezone(pytz.timezone('America/Los_Angeles'))
                dataStartDatetimeLocal = dataStart.astimezone(pytz.timezone('America/Los_Angeles'))
                logger.debug('Plotting data')
                self.subtitle1 = '%s to %s PDT' % (dataStartDatetimeLocal.strftime('%Y-%m-%d %H:%M'), dataEndDatetimeLocal.strftime('%Y-%m-%d %H:%M'))
                self.subtitle2 = '%s to %s UTC' % (dataStart.strftime('%Y-%m-%d %H:%M'), dataEnd.strftime('%Y-%m-%d %H:%M'))
                self.createPlot(dataStart, dataEnd)
        except Exception as e:
            logger.error(e)
| gpl-3.0 |
bthirion/scikit-learn | examples/gaussian_process/plot_gpr_noisy.py | 104 | 3778 | """
=============================================================
Gaussian process regression (GPR) with noise-level estimation
=============================================================
This example illustrates that GPR with a sum-kernel including a WhiteKernel can
estimate the noise level of data. An illustration of the
log-marginal-likelihood (LML) landscape shows that there exist two local
maxima of LML. The first corresponds to a model with a high noise level and a
large length scale, which explains all variations in the data by noise. The
second one has a smaller noise level and shorter length scale, which explains
most of the variation by the noise-free functional relationship. The second
model has a higher likelihood; however, depending on the initial value for the
hyperparameters, the gradient-based optimization might also converge to the
high-noise solution. It is thus important to repeat the optimization several
times for different initializations.
"""
print(__doc__)

# Authors: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
#
# License: BSD 3 clause

import numpy as np

from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm

from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel

# Synthetic data: 20 noisy samples of 0.5*sin(3x) on [0, 5]
rng = np.random.RandomState(0)
X = rng.uniform(0, 5, 20)[:, np.newaxis]
y = 0.5 * np.sin(3 * X[:, 0]) + rng.normal(0, 0.5, X.shape[0])

# First run: start from a very long length scale; the optimizer converges
# to the high-noise / large-length-scale local maximum of the LML.
plt.figure(0)
kernel = 1.0 * RBF(length_scale=100.0, length_scale_bounds=(1e-2, 1e3)) \
    + WhiteKernel(noise_level=1, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
                              alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
# Shade +/- one standard deviation of the predictive distribution
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
                 y_mean + np.sqrt(np.diag(y_cov)),
                 alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
          % (kernel, gp.kernel_,
             gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()

# Second run: start from a short length scale and tiny noise; the optimizer
# finds the better (lower-noise) local maximum of the LML.
plt.figure(1)
kernel = 1.0 * RBF(length_scale=1.0, length_scale_bounds=(1e-2, 1e3)) \
    + WhiteKernel(noise_level=1e-5, noise_level_bounds=(1e-10, 1e+1))
gp = GaussianProcessRegressor(kernel=kernel,
                              alpha=0.0).fit(X, y)
X_ = np.linspace(0, 5, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
plt.plot(X_, y_mean, 'k', lw=3, zorder=9)
plt.fill_between(X_, y_mean - np.sqrt(np.diag(y_cov)),
                 y_mean + np.sqrt(np.diag(y_cov)),
                 alpha=0.5, color='k')
plt.plot(X_, 0.5*np.sin(3*X_), 'r', lw=3, zorder=9)
plt.scatter(X[:, 0], y, c='r', s=50, zorder=10)
plt.title("Initial: %s\nOptimum: %s\nLog-Marginal-Likelihood: %s"
          % (kernel, gp.kernel_,
             gp.log_marginal_likelihood(gp.kernel_.theta)))
plt.tight_layout()

# Plot LML landscape over (length-scale, noise-level), holding the
# RBF amplitude fixed at 0.36 (theta is log-transformed by the API).
plt.figure(2)
theta0 = np.logspace(-2, 3, 49)
theta1 = np.logspace(-2, 0, 50)
Theta0, Theta1 = np.meshgrid(theta0, theta1)
LML = [[gp.log_marginal_likelihood(np.log([0.36, Theta0[i, j], Theta1[i, j]]))
        for i in range(Theta0.shape[0])] for j in range(Theta0.shape[1])]
LML = np.array(LML).T
vmin, vmax = (-LML).min(), (-LML).max()
vmax = 50
plt.contour(Theta0, Theta1, -LML,
            levels=np.logspace(np.log10(vmin), np.log10(vmax), 50),
            norm=LogNorm(vmin=vmin, vmax=vmax))
plt.colorbar()
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Length-scale")
plt.ylabel("Noise-level")
plt.title("Log-marginal-likelihood")
plt.tight_layout()
plt.show()
| bsd-3-clause |
m4rx9/rna-pdb-tools | rna_tools/tools/clarna_play/ClaRNAlib/doublet-params.py | 2 | 14757 | #!/usr/bin/env python
#
import re
import sys
import itertools
import multiprocessing
import random
from optparse import OptionParser
import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import scipy as scipy
import numpy as np
import scipy.cluster.hierarchy as sch
from utils import *
from distances import rmsd_distance, doublet_params_dict, normalize_points, Doublet, Residue
from progressbar import ProgressBar, Percentage, Bar, ETA
def parse_args():
"""setup program options parsing"""
parser = OptionParser(description="""some experiments with classifier generation""")
parser.add_option("--doublet-id", dest="doublet_id",
help="show params for doublet", metavar="PDBID:R1:R2")
parser.add_option("--input-pdb", dest="input_pdb",
help="read doublets from PDB file", metavar="FILE")
parser.add_option("--input-json", dest="input_json",
help="read doublets from json file", metavar="FILE")
parser.add_option("--only-keys", dest="only_keys",
help="process only following keys from input file", metavar="KEYS")
parser.add_option("--input-group-info", dest="input_group_info",
help="read doublets from group info file", metavar="FILE")
parser.add_option("--show-histograms", dest="show_histograms", action='store_true',
help="show histograms", default=False)
parser.add_option("--show-2d-histograms", dest="show_2d_histograms", action='store_true',
help="show 2D histograms", default=False)
parser.add_option("--data-dir", dest="data_dir",
help="directory with data", metavar="DIR", default="gc-data")
parser.add_option("--params-type", dest="params_type",
help="parameters type", metavar="T", default="bp")
parser.add_option("--eval-for", dest="eval_for",
help="process only keys for given DESC", metavar="DESC")
parser.add_option("--gen-pdb-for", dest="gen_pdb_for",
help="gen pdb file")
parser.add_option("-o","--output", dest="output",
help="save output to file", metavar="FILE")
parser.add_option("--limit", dest="limit",
help="limit number of elements", metavar="N")
(options, args) = parser.parse_args()
if options.gen_pdb_for:
if not options.input_group_info:
prefix = "unk"
if "BPh" in options.gen_pdb_for:
prefix = "base-phosphate"
elif "BR" in options.gen_pdb_for:
prefix = "base-ribose"
elif "<" in options.gen_pdb_for or ">" in options.gen_pdb_for:
prefix = "stacking"
else:
prefix = "bp"
options.input_group_info = os.path.join(options.data_dir,"cl",prefix+"_"+options.gen_pdb_for.replace("/","_")+".json.gz")
options.n_type = options.gen_pdb_for.split("/")[1]
elif options.eval_for:
if not options.only_keys:
options.only_keys = ",".join(["evaluation/"+k+"/"+options.eval_for for k in ('ref-ok','ref-ok-fuzzy','ref-undetected','ref-diff','prev-undetected','ref-all')])
if not options.input_json:
options.input_json = FileNamesObject.eval_fn(setname="training",t="eval1")
if options.limit is not None:
options.limit = int(options.limit)
return (parser, options, args)
def compute_params(doublet_lists, labels, options):
assert len(doublet_lists) == len(labels)
if options.limit:
for label,x in zip(labels,doublet_lists):
if len(x)>options.limit:
print "reducing number of elements of set %s from %d to %d" % (label,len(x),options.limit)
doublet_lists = [x[0:options.limit] for x in doublet_lists]
l_count = len(doublet_lists)
all_ids = set(sum(doublet_lists,[]))
r_atoms = ['P',"C1'","O2'","O3'","O4'"]
r_atoms += ["OP1","OP2","O5'","NEXT:O3'"]
# base atoms
r_atoms += ['N9', 'C4', 'N3', 'N1', 'C6', 'N6', 'C8', 'C5', 'C2', 'N7']
r_atoms += ['N1', 'C2', 'O2', 'N3', 'C4', 'N4', 'C6', 'C5']
r_atoms += ['N1', 'C2', 'O2', 'N3', 'C4', 'O4', 'C6', 'C5']
r_atoms += ['N9', 'C4', 'N3', 'N1', 'C6', 'O6', 'C8', 'C5', 'C2', 'N7', 'N2']
r_atoms = set(r_atoms)
if options.params_type in ['base-ribose']:
r_atoms += ["C4'","C3'","C2'","C1'"]
dd = DoubletsDict(options.data_dir, reduced_atoms=r_atoms)
dd.load_pdb_files(all_ids, verbose=True)
res = {}
stats = [{} for i in xrange(l_count)]
total = [0 for i in xrange(l_count)]
for i,d_list in enumerate(doublet_lists):
for d_id in d_list:
print d_id
d = dd.get(d_id)
if d is None or d[0] is None or d[1] is None:
print "INVALID doublet! %s" % d_id
continue
n_type = dd.get_n_type(d_id)
# p = doublet_params_dict(d, n_type, options.params_type)
p = Doublet(d_id,Residue("A1",n_type[0],d[0]),Residue("B1",n_type[1],d[1])).get_all_params()
# p['dist_sum']=p['min_dist']*4+p['dist']
print i, labels[i], d_id, n_type, [(k,p[k]) for k in sorted(p.keys())]
for k,v in p.items():
if not res.has_key(k):
res[k] = [[] for j in xrange(l_count)]
res[k][i].append(v)
total[i] += 1
key = " ".join([k+"="+str(v) for k,v in sorted(p.items()) if re.match('^i_',k) or k=='oxygens_count' or re.match('^strand_orient',k)])
if not stats[i].has_key(key):
stats[i][key] = []
stats[i][key].append(d_id)
for i in xrange(l_count):
for k in sorted(stats[i].keys(), key=lambda x: len(stats[i][x]), reverse=True):
print "STATS[%s] %s count=%d pr=%.2f%% samples=%s" % (labels[i], k,len(stats[i][k]), float(100*len(stats[i][k]))/total[i], stats[i][k][0:10])
if options.show_2d_histograms:
for k1,k2 in [('nn_ang_norm','n12cc_ang'),('rot_ang','min_dist'),('dist','min_dist'),('n2_z','nn_ang_norm')]:
l = len(res[k1])
for i in xrange(l):
data_x = []
data_y = []
for j in xrange(len(res[k1][i])):
x = res[k1][i][j]
y = res[k2][i][j]
if x is not None and y is not None:
data_x.append(x)
data_y.append(y)
print labels[i], x, y
H, xedges, yedges = np.histogram2d(data_x, data_y, bins=(20,20), range=[[min(data_x),max(data_x)], [min(data_y),max(data_y)]], normed=False)
# czy tu sa dobre wartosci?
plt.pcolor(xedges, yedges, H, cmap=matplotlib.cm.get_cmap('OrRd'))
# extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
# plt.imshow(H.transpose(), extent=extent, interpolation='nearest', origin="lower", cmap=matplotlib.cm.get_cmap('OrRd'))
plt.colorbar()
plt.xlabel(k1)
plt.ylabel(k2)
plt.title("(%s,%s) - %s" % (k1,k2,labels[i]))
plt.show()
# draw all series on single plot
if l>1:
fig = plt.figure()
ax = fig.add_subplot(111)
colors = ('r', 'g', 'b', 'k')
for i in xrange(l):
data_x = []
data_y = []
for j in xrange(len(res[k1][i])):
x = res[k1][i][j]
y = res[k2][i][j]
if x is not None and y is not None:
data_x.append(x)
data_y.append(y)
ax.scatter(data_x, data_y, marker='o', c=colors[i%len(colors)], label=labels[i])
plt.legend()
ax.set_xlabel(k1)
ax.set_ylabel(k2)
plt.title("(%s,%s)" % (k1,k2))
plt.show()
if options.show_histograms:
for k in sorted(res.keys()):
data = [[x for x in row if x is not None] for row in res[k]]
sum_data = sum(data,[])
if len(sum_data)==0:
print "empty data set"
continue
if k in ['oxygens','br_info','ph_info']:
continue
min_v = min(sum_data)
max_v = max(sum_data)
print "k", k, "min_v", min_v, "max_v", max_v, "data"
fig = plt.figure()
n, bins, patches = plt.hist(data, 25, range=(min_v,max_v), normed=0, histtype='bar',
label=labels)
plt.legend()
plt.title(k)
plt.show()
def points2model(model_num,points,n_type):
    """Build a Bio.PDB Model from per-residue atom-coordinate dicts.

    Parameters
    ----------
    model_num : int
        Model serial number for the new PDB model.
    points : sequence of dicts
        One dict per residue mapping atom name -> coordinate vector.
    n_type : sequence of str
        Residue names, parallel to points (e.g. the doublet's nucleotides).

    Returns the assembled PDB.Model.Model; each residue is placed in its
    own chain (A, B, ...) and atoms get sequential serial numbers.
    """
    num = 0  # running serial number shared by residues and atoms
    model = PDB.Model.Model(model_num)
    for i,p in enumerate(points):
        chain = PDB.Chain.Chain(chr(ord('A')+i))
        num += 1
        residue = PDB.Residue.Residue((' ',i,' '),n_type[i],num)
        # j is unused; enumerate kept only for parity with the serial counter
        for j,(k,v) in enumerate(p.items()):
            num += 1
            # Strip the NEXT: prefix used for atoms borrowed from the
            # following residue (e.g. "NEXT:O3'")
            kk = k.replace("NEXT:","")
            # First character after dropping digits is the chemical element
            # (e.g. "OP1" -> O, "C1'" -> C)
            element = k.replace("NEXT:","").replace("1","").replace("2","")[0]
            # print "serial: %s, k=%s, element=%s" % (num,k,element)
            atom = PDB.Atom.Atom(kk,v,1,1," ",fullname=kk,serial_number=num,element=element)
            residue.add(atom)
        chain.add(residue)
        model.add(chain)
    return model
def gen_pdb(doublets,doublets_unclassified,options):
from distances import NORMALIZED_BASE
from Bio.SVDSuperimposer import SVDSuperimposer
n_type0 = options.n_type[0]
SUP_ATOMS = {
'A': ['N9', 'C4', 'N3', 'N1', 'C6', 'N6', 'C8', 'C5', 'C2', 'N7'],
'C': ['N1', 'C2', 'O2', 'N3', 'C4', 'N4', 'C6', 'C5'],
'U': ['N1', 'C2', 'O2', 'N3', 'C4', 'O4', 'C6', 'C5'],
'G': ['N9', 'C4', 'N3', 'N1', 'C6', 'O6', 'C8', 'C5', 'C2', 'N7', 'N2'],
}
r_atoms = ['N1','N2','N3','N4','N6','N7','P']
r_atoms += ['C2','C4','C5','C6','C8']
r_atoms += ['O2']
r_atoms += ["O2'","O3'","O4'"]
r_atoms += ["OP1","OP2","O5'","NEXT:O3'"]
r_atoms += ['N1','C6','O6','C5','C4','N3','C2','N2','N7','C8','N9']
# 'A': ['N9', 'C4', 'N3', 'N1', 'C6', 'N6', 'C8', 'C5', 'C2', 'N7'],
# 'C': ['N1', 'C2', 'O2', 'N3', 'C4', 'N4', 'C6', 'C5'],
# 'U': ['N1', 'C2', 'O2', 'N3', 'C4', 'O4', 'C6', 'C5'],
# 'G': ['N9', 'C4', 'N3', 'N1', 'C6', 'O6', 'C8', 'C5', 'C2', 'N7', 'N2'],
dd = DoubletsDict(options.data_dir, reduced_atoms=r_atoms)
dd.load_pdb_files(doublets+doublets_unclassified, verbose=True)
for d_set,suffix in (doublets,""),(doublets_unclassified,"-uncl"):
s = PDB.Structure.Structure("superimpose")
num = 0
model = points2model(1,(NORMALIZED_BASE[n_type0],{}),options.n_type)
s.add(model)
for d_num,d_id in enumerate(d_set,start=2):
print d_num,d_id
new_p = normalize_points(dd.get(d_id), n_type0)
for k,v in new_p[0].items():
if k not in SUP_ATOMS[n_type0]:
del new_p[0][k]
for k,v in new_p[1].items():
if k not in ['OP1','OP2','P',"O5'","NEXT:O3'"]:
del new_p[1][k]
model = points2model(d_num,new_p,options.n_type)
s.add(model)
out_fn = options.output.replace(".pdb","")+suffix+".pdb"
save_pdb(out_fn,s)
########################
def main():
(parser,options,_args) = parse_args()
doublet_lists = []
labels = []
if options.doublet_id:
doublet_lists.append([options.doublet_id])
labels.append("d:%s" % options.doublet_id)
elif options.gen_pdb_for:
json = load_json(options.input_group_info)
assert isinstance(json,dict)
assert len(json.keys())==1
v = []
vu = []
for group_info in json.values():
v = group_info['all_doublets']
vu = list(set([x for row in group_info['neigh_unclassified'] for x,d in row]))
gen_pdb(v,vu,options)
elif options.input_group_info:
json = load_json(options.input_group_info)
assert isinstance(json,dict)
assert len(json.keys())==1
for group_info in json.values():
assert isinstance(group_info,dict)
for k in ('all_doublets', 'neigh_unclassified', 'neigh_other'):
v = group_info[k]
if k=='all_doublets':
doublet_lists.append(v)
labels.append('reference')
else:
vv = list(set([did for row in v for (did,dist) in row]))
doublet_lists.append(vv)
labels.append(k.split("_")[1])
elif options.input_json:
only_keys = None
if options.only_keys:
only_keys = options.only_keys.split(",")
for fn in options.input_json.split(","):
if fn=='':
continue
json = load_json(fn)
if isinstance(json,dict):
print "DICT!"
keys = json.keys()
if only_keys is not None:
if len(only_keys)==1:
regexp = re.compile('^'+only_keys[0]+'$')
keys = [k for k in json.keys() if regexp.match(k)]
else:
keys = only_keys
for k in keys:
if not json.has_key(k):
continue
v = json[k]
assert all([isinstance(did,str) for did in v])==True
print k
doublet_lists.append(v)
labels.append(k)
if only_keys is not None and len(only_keys)==1:
doublet_lists = [sum(doublet_lists,[])]
labels = [only_keys[0]]
elif isinstance(json,list):
print "LIST!"
assert all([isinstance(did,str) for did in json])==True
doublet_lists.append(json)
labels.append(os.path.basename(fn))
else:
raise Exception("Unknown format of JSON file")
elif options.input_pdb:
structure = load_pdb(options.input_pdb)
residues = [r for r in structure.get_residues()]
assert len(residues)==2
n_type = residues[0].resname.strip()+residues[1].resname.strip()
print doublet_params_dict((simplify_residue(residues[0]),simplify_residue(residues[1])),n_type,options.params_type)
compute_params(doublet_lists, labels, options)
if __name__=="__main__":
main() | mit |
dpshelio/astropy-helpers | astropy_helpers/tests/test_setup_helpers.py | 1 | 13827 | import os
import sys
import stat
import shutil
import importlib
import contextlib
import pytest
from textwrap import dedent
from setuptools import Distribution
from ..setup_helpers import get_package_info, register_commands
from ..commands import build_ext
from . import reset_setup_helpers, reset_distutils_log # noqa
from . import run_setup, cleanup_import
ASTROPY_HELPERS_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
def _extension_test_package(tmpdir, request, extension_type='c', include_numpy=False):
"""Creates a simple test package with an extension module."""
test_pkg = tmpdir.mkdir('test_pkg')
test_pkg.mkdir('apyhtest_eva').ensure('__init__.py')
# TODO: It might be later worth making this particular test package into a
# reusable fixture for other build_ext tests
if extension_type in ('c', 'both'):
# A minimal C extension for testing
test_pkg.join('apyhtest_eva', 'unit01.c').write(dedent("""\
#include <Python.h>
#ifndef PY3K
#if PY_MAJOR_VERSION >= 3
#define PY3K 1
#else
#define PY3K 0
#endif
#endif
#if PY3K
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"unit01",
NULL,
-1,
NULL
};
PyMODINIT_FUNC
PyInit_unit01(void) {
return PyModule_Create(&moduledef);
}
#else
PyMODINIT_FUNC
initunit01(void) {
Py_InitModule3("unit01", NULL, NULL);
}
#endif
"""))
if extension_type in ('pyx', 'both'):
# A minimal Cython extension for testing
test_pkg.join('apyhtest_eva', 'unit02.pyx').write(dedent("""\
print("Hello cruel angel.")
"""))
if extension_type == 'c':
extensions = ['unit01.c']
elif extension_type == 'pyx':
extensions = ['unit02.pyx']
elif extension_type == 'both':
extensions = ['unit01.c', 'unit02.pyx']
include_dirs = ['numpy'] if include_numpy else []
extensions_list = [
"Extension('apyhtest_eva.{0}', [join('apyhtest_eva', '{1}')], include_dirs={2})".format(
os.path.splitext(extension)[0], extension, include_dirs)
for extension in extensions]
test_pkg.join('apyhtest_eva', 'setup_package.py').write(dedent("""\
from setuptools import Extension
from os.path import join
def get_extensions():
return [{0}]
""".format(', '.join(extensions_list))))
test_pkg.join('setup.py').write(dedent("""\
import sys
from os.path import join
from setuptools import setup
sys.path.insert(0, r'{astropy_helpers_path}')
from astropy_helpers.setup_helpers import register_commands
from astropy_helpers.setup_helpers import get_package_info
from astropy_helpers.version_helpers import generate_version_py
if '--no-cython' in sys.argv:
from astropy_helpers.commands import build_ext
build_ext.should_build_with_cython = lambda *args: False
sys.argv.remove('--no-cython')
NAME = 'apyhtest_eva'
VERSION = '0.1'
RELEASE = True
cmdclassd = register_commands(NAME, VERSION, RELEASE)
generate_version_py(NAME, VERSION, RELEASE, False, False)
package_info = get_package_info()
setup(
name=NAME,
version=VERSION,
cmdclass=cmdclassd,
**package_info
)
""".format(astropy_helpers_path=ASTROPY_HELPERS_PATH)))
if '' in sys.path:
sys.path.remove('')
sys.path.insert(0, '')
def finalize():
cleanup_import('apyhtest_eva')
request.addfinalizer(finalize)
return test_pkg
@pytest.fixture
def extension_test_package(tmpdir, request):
    """Fixture: a test package containing both a C and a Cython extension."""
    return _extension_test_package(tmpdir, request, extension_type='both')
@pytest.fixture
def c_extension_test_package(tmpdir, request):
    """Fixture: a test package with a single plain C extension module.

    The extension is given numpy include dirs when numpy is available in
    the test environment.
    """
    # `importlib.util` is a submodule and is not guaranteed to be bound by
    # the bare `import importlib` at the top of this file; import it
    # explicitly before use to avoid a possible AttributeError.
    import importlib.util

    # Check whether numpy is installed in the test environment
    has_numpy = bool(importlib.util.find_spec('numpy'))
    return _extension_test_package(tmpdir, request, extension_type='c',
                                   include_numpy=has_numpy)
@pytest.fixture
def pyx_extension_test_package(tmpdir, request):
    """Fixture: a test package with a single Cython (.pyx) extension module."""
    return _extension_test_package(tmpdir, request, extension_type='pyx')
def test_cython_autoextensions(tmpdir):
    """
    Regression test for https://github.com/astropy/astropy-helpers/pull/19

    A Cython extension living in a nested sub-package must be discovered by
    the auto-extension machinery and built exactly once.
    """
    # Lay out a minimal package tree: test_pkg/yoda/luke/dagobah.pyx
    pkg_root = tmpdir.mkdir('test_pkg')
    pkg_root.mkdir('yoda').mkdir('luke')
    for parts in (('yoda', '__init__.py'), ('yoda', 'luke', '__init__.py')):
        pkg_root.ensure(*parts)
    pkg_root.join('yoda', 'luke', 'dagobah.pyx').write(
        """def testfunc(): pass""")

    # get_package_info currently requires register_commands to run first
    register_commands('yoda', '0.0', False, srcdir=str(pkg_root))
    info = get_package_info(str(pkg_root))

    ext_modules = info['ext_modules']
    assert len(ext_modules) == 1
    assert ext_modules[0].name == 'yoda.luke.dagobah'
def test_compiler_module(capsys, c_extension_test_package):
    """
    Test ensuring that the compiler module is built and installed for packages
    that have extension modules.
    """
    test_pkg = c_extension_test_package
    install_temp = test_pkg.mkdir('install_temp')

    with test_pkg.as_cwd():
        # This is one of the simplest ways to install just a package into a
        # test directory
        run_setup('setup.py',
                  ['install',
                   '--single-version-externally-managed',
                   '--install-lib={0}'.format(install_temp),
                   '--record={0}'.format(install_temp.join('record.txt'))])

        # The install is expected to warn about the missing git repo
        stdout, stderr = capsys.readouterr()
        assert "No git repository present at" in stderr

    with install_temp.as_cwd():
        import apyhtest_eva
        # Make sure we imported the apyhtest_eva package from the correct place
        dirname = os.path.abspath(os.path.dirname(apyhtest_eva.__file__))
        assert dirname == str(install_temp.join('apyhtest_eva'))

        import apyhtest_eva.compiler_version
        # NOTE(review): this compares a *module* object to a string, which is
        # always True -- it probably meant to check an attribute of
        # compiler_version instead; verify against the generated module.
        assert apyhtest_eva.compiler_version != 'unknown'
def test_no_cython_buildext(capsys, c_extension_test_package, monkeypatch):
    """
    Regression test for https://github.com/astropy/astropy-helpers/pull/35

    This tests the custom build_ext command installed by astropy_helpers when
    used with a project that has no Cython extensions (but does have one or
    more normal C extensions).
    """
    test_pkg = c_extension_test_package

    with test_pkg.as_cwd():
        # --no-cython forces the custom build_ext to skip cythonization
        run_setup('setup.py', ['build_ext', '--inplace', '--no-cython'])

    stdout, stderr = capsys.readouterr()
    assert "No git repository present at" in stderr

    # Import the freshly built extension from the package directory and make
    # sure sys.path is restored even if the import/assert fails.
    sys.path.insert(0, str(test_pkg))

    try:
        import apyhtest_eva.unit01
        dirname = os.path.abspath(os.path.dirname(apyhtest_eva.unit01.__file__))
        assert dirname == str(test_pkg.join('apyhtest_eva'))
    finally:
        sys.path.remove(str(test_pkg))
def test_missing_cython_c_files(capsys, pyx_extension_test_package, monkeypatch):
    """
    Regression test for https://github.com/astropy/astropy-helpers/pull/181

    Test failure mode when building a package that has Cython modules, but
    where Cython is not installed and the generated C files are missing.
    """
    test_pkg = pyx_extension_test_package

    with test_pkg.as_cwd():
        # The build is expected to abort (SystemExit) because the .pyx has
        # no pre-generated .c/.cpp counterpart and Cython is disabled.
        with pytest.raises(SystemExit):
            run_setup('setup.py', ['build_ext', '--inplace', '--no-cython'])

    stdout, stderr = capsys.readouterr()
    assert "No git repository present at" in stderr

    # The error message uses platform-native path separators
    msg = ('Could not find C/C++ file '
           '{0}.(c/cpp)'.format('apyhtest_eva/unit02'.replace('/', os.sep)))

    assert msg in stderr
@pytest.mark.parametrize('mode', ['cli', 'cli-w', 'deprecated', 'cli-l'])
def test_build_docs(capsys, tmpdir, mode):
    """
    Test for the build_docs command in its several invocation modes
    (plain, -w warning-as-error, -l clean, and the deprecated build_sphinx).
    """
    # A tiny package with enough structure for automodapi to document
    test_pkg = tmpdir.mkdir('test_pkg')
    test_pkg.mkdir('mypackage')
    test_pkg.join('mypackage').join('__init__.py').write(dedent("""\
        def test_function():
            pass

        class A():
            pass

        class B(A):
            pass
    """))
    test_pkg.mkdir('docs')
    docs_dir = test_pkg.join('docs')
    docs_dir.join('conf.py').write(dedent("""\
        import warnings
        with warnings.catch_warnings():  # ignore matplotlib warning
            warnings.simplefilter("ignore")
            from sphinx_astropy.conf import *
        exclude_patterns.append('_templates')
        suppress_warnings = ['app.add_directive', 'app.add_node', 'app.add_role']
    """))
    docs_dir.join('index.rst').write(dedent("""\
        .. automodapi:: mypackage
            :no-inheritance-diagram:
    """))

    # For this test we try out the new way of calling register_commands without
    # arguments, instead getting the information from setup.cfg.
    test_pkg.join('setup.cfg').write(dedent("""
        [metadata]
        name = mypackage
        version = 0.1
    """))

    test_pkg.join('setup.py').write(dedent("""\
        import sys
        sys.path.insert(0, r'{astropy_helpers_path}')
        from astropy_helpers.setup_helpers import setup

        setup()
    """.format(astropy_helpers_path=ASTROPY_HELPERS_PATH)))

    with test_pkg.as_cwd():
        if mode == 'cli':
            run_setup('setup.py', ['build_docs'])
        elif mode == 'cli-w':
            run_setup('setup.py', ['build_docs', '-w'])
        elif mode == 'cli-l':
            run_setup('setup.py', ['build_docs', '-l'])
        elif mode == 'deprecated':
            # The old command name should still work, but warn
            run_setup('setup.py', ['build_sphinx'])
            stdout, stderr = capsys.readouterr()
            assert 'AstropyDeprecationWarning' in stderr

    # Whatever the mode, the HTML output must have been produced
    assert os.path.exists(docs_dir.join('_build', 'html', 'index.html').strpath)
def test_command_hooks(tmpdir, capsys):
    """A basic test for pre- and post-command hooks."""
    test_pkg = tmpdir.mkdir('test_pkg')
    test_pkg.mkdir('_welltall_')
    test_pkg.join('_welltall_', '__init__.py').ensure()

    # Create a setup_package module with a couple of command hooks in it
    test_pkg.join('_welltall_', 'setup_package.py').write(dedent("""\
        def pre_build_hook(cmd_obj):
            print('Hello build!')

        def post_build_hook(cmd_obj):
            print('Goodbye build!')
    """))

    # A simple setup.py for the test package--running register_commands should
    # discover and enable the command hooks
    test_pkg.join('setup.py').write(dedent("""\
        import sys
        from os.path import join
        from setuptools import setup, Extension

        sys.path.insert(0, r'{astropy_helpers_path}')

        from astropy_helpers.setup_helpers import register_commands, get_package_info

        NAME = '_welltall_'
        VERSION = '0.1'
        RELEASE = True

        cmdclassd = register_commands(NAME, VERSION, RELEASE)

        setup(
            name=NAME,
            version=VERSION,
            cmdclass=cmdclassd
        )
    """.format(astropy_helpers_path=ASTROPY_HELPERS_PATH)))

    with test_pkg.as_cwd():
        try:
            run_setup('setup.py', ['build'])
        finally:
            # Keep later tests isolated even if the build fails
            cleanup_import('_welltall_')

    stdout, stderr = capsys.readouterr()

    # The hooks must run in order, bracketing the build command output
    want = dedent("""\
        running build
        running pre_hook from _welltall_.setup_package for build command
        Hello build!
        running post_hook from _welltall_.setup_package for build command
        Goodbye build!
    """).strip()

    # Normalize line endings before comparing across platforms
    assert want in stdout.replace('\r\n', '\n').replace('\r', '\n')
def test_invalid_package_exclusion(tmpdir, capsys):
    """Exercise the failure/deprecation paths of package exclusion.

    Two generated projects are built: one calling add_exclude_packages()
    too late (must error out), one using the deprecated ``exclude=``
    argument of get_package_info() (must warn).
    """
    module_name = 'foobar'

    # Shared header/footer for the generated setup.py files
    setup_header = dedent("""\
        import sys
        from os.path import join
        from setuptools import setup, Extension

        sys.path.insert(0, r'{astropy_helpers_path}')

        from astropy_helpers.setup_helpers import register_commands, \\
            get_package_info, add_exclude_packages

        NAME = {module_name!r}
        VERSION = '0.1'
        RELEASE = True
    """.format(module_name=module_name, astropy_helpers_path=ASTROPY_HELPERS_PATH))

    setup_footer = dedent("""\
        setup(
            name=NAME,
            version=VERSION,
            cmdclass=cmdclassd,
            **package_info
        )
    """)

    # Test error when using add_package_excludes out of order
    error_commands = dedent("""\
        cmdclassd = register_commands(NAME, VERSION, RELEASE)
        package_info = get_package_info()
        add_exclude_packages(['tests*'])
    """)

    error_pkg = tmpdir.mkdir('error_pkg')
    error_pkg.join('setup.py').write(
        setup_header + error_commands + setup_footer)

    with error_pkg.as_cwd():
        with pytest.raises(SystemExit):
            run_setup('setup.py', ['build'])

    stdout, stderr = capsys.readouterr()
    assert "RuntimeError" in stderr

    # Test warning when using deprecated exclude parameter
    warn_commands = dedent("""\
        cmdclassd = register_commands(NAME, VERSION, RELEASE)
        package_info = get_package_info(exclude=['test*'])
    """)

    warn_pkg = tmpdir.mkdir('warn_pkg')
    warn_pkg.join('setup.py').write(
        setup_header + warn_commands + setup_footer)

    with warn_pkg.as_cwd():
        run_setup('setup.py', ['build'])

    stdout, stderr = capsys.readouterr()
    assert 'AstropyDeprecationWarning' in stderr
| bsd-3-clause |
justincely/rolodex | cos_monitoring/dark/plotting.py | 1 | 15970 | """ Make darkrate plots
"""
from __future__ import absolute_import, division
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
import scipy
from scipy.ndimage.filters import convolve
import numpy as np
import math
import os
from ..utils import remove_if_there
#-------------------------------------------------------------------------------
def magnitude(x):
    """Return the decimal order of magnitude of *x*.

    Parameters
    ----------
    x : int or float
        Strictly positive number whose order of magnitude is wanted.

    Returns
    -------
    int
        ``floor(log10(x))``, e.g. ``magnitude(250) == 2``.
    """
    exponent = math.log10(x)
    return int(math.floor(exponent))
#-------------------------------------------------------------------------------
def plot_histogram(dark, outname):
    """Plot a linear and logarithmic histogram of the dark rates.

    These plots are used in determining the dark-rate values to include
    in the Exposure Time Calculator and Instrument Handbooks for COS.

    Parameters
    ----------
    dark : np.ndarray
        array of dark rates in counts/s
    outname : str
        name of the output plot
    """
    remove_if_there(outname)
    fig = plt.figure(figsize=(12, 9))

    # Fixed-width bins for the plotted histograms
    bin_size = 1e-7
    n_bins = int((dark.max()-dark.min())/bin_size)

    #--- Linear histogram
    ax = fig.add_subplot(2, 1, 1)
    ax.hist(dark, bins=n_bins, align='mid', histtype='stepfilled')

    # Coarser 100-bin histogram used only to locate the 95th/99th
    # percentiles from the cumulative distribution.
    counts, bins = np.histogram(dark, bins=100)
    cuml_dist = np.cumsum(counts)
    count_99 = abs(cuml_dist / float(cuml_dist.max()) - .99).argmin()
    count_95 = abs(cuml_dist / float(cuml_dist.max()) - .95).argmin()

    mean = dark.mean()
    med = np.median(dark)
    std = dark.std()

    # Reference lines: mean/median and 2/3-sigma plus 95%/99% levels
    mean_obj = ax.axvline(x=mean, lw=2, ls='--', color='r', label='Mean ')
    med_obj = ax.axvline(x=med, lw=2, ls='-', color='r', label='Median')
    two_sig = ax.axvline(x=med + (2*std), lw=2, ls='-', color='gold')
    three_sig = ax.axvline(x=med + (3*std), lw=2, ls='-', color='DarkOrange')
    dist_95 = ax.axvline(x=bins[count_95], lw=2, ls='-', color='LightGreen')
    dist_99 = ax.axvline(x=bins[count_99], lw=2, ls='-', color='DarkGreen')

    ax.grid(True, which='both')
    ax.set_title('Histogram of Dark Rates')
    ax.set_ylabel('Frequency')
    ax.set_xlabel('Counts/pix/sec')
    ax.set_xlim(dark.min(), dark.max())
    ax.xaxis.set_major_formatter(FormatStrFormatter('%3.2e'))

    #--- Logarithmic
    ax = fig.add_subplot(2, 1, 2)
    #log_bins = np.logspace(np.log10(dark.min()), np.log10(dark.max()), 100)
    ax.hist(dark, bins=n_bins, align='mid', log=True, histtype='stepfilled')
    ax.axvline(x=mean, lw=2, ls='--', color='r', label='Mean')
    ax.axvline(x=med, lw=2, ls='-', color='r', label='Median')
    ax.axvline(x=med+(2*std), lw=2, ls='-', color='gold')
    ax.axvline(x=med+(3*std), lw=2, ls='-', color='DarkOrange')
    ax.axvline(x=bins[count_95], lw=2, ls='-', color='LightGreen')
    ax.axvline(x=bins[count_99], lw=2, ls='-', color='DarkGreen')
    #ax.set_xscale('log')
    ax.grid(True, which='both')
    ax.set_ylabel('Log Frequency')
    ax.set_xlabel('Counts/pix/sec')
    ax.set_xlim(dark.min(), dark.max())
    ax.xaxis.set_major_formatter(FormatStrFormatter('%3.2e'))

    # One shared legend for both panels
    fig.legend([med_obj, mean_obj, two_sig, three_sig, dist_95, dist_99],
               ['Median',
                'Mean',
                r'2$\sigma$: {0:.2e}'.format(med+(2*std)),
                r'3$\sigma$: {0:.2e}'.format(med+(3*std)),
                r'95$\%$: {0:.2e}'.format(bins[count_95]),
                r'99$\%$: {0:.2e}'.format(bins[count_99])],
               shadow=True,
               numpoints=1,
               bbox_to_anchor=[0.8, 0.8])

    remove_if_there(outname)
    fig.savefig(outname, bbox_inches='tight')
    plt.close(fig)
#-------------------------------------------------------------------------------
def plot_time(detector, dark, date, temp, solar, solar_date, outname):
    """Plot the dark-rate vs time, with solar radio flux for comparison.

    Parameters
    ----------
    detector : str
        FUV or NUV
    dark : np.ndarray
        array of measured dark rates in counts/s
    date : np.ndarray
        array of measurement times (decimal years)
    temp : np.ndarray
        array of detector temperatures
    solar : np.ndarray
        array of solar 10.7 cm radio flux values
    solar_date : np.ndarray
        array of dates for the solar flux measurements
    outname : str
        name of output plot
    """
    remove_if_there(outname)
    fig = plt.figure(figsize=(20, 12))

    # Solar data may arrive unsorted; sort by date before plotting/smoothing.
    sorted_index = np.argsort(solar_date)
    solar = solar[sorted_index]
    solar_date = solar_date[sorted_index]

    if detector == 'FUV':
        # Two panels: dark rate on top, solar flux below
        dark_ax = fig.add_axes([.1, .3, .8, .6])
        sub_ax = fig.add_axes([.1, .09, .8, .19])
    else:
        # Three panels for NUV: dark rate, solar flux, temperature
        dark_ax = fig.add_axes([.1, .5, .8, .4])
        sub_ax = fig.add_axes([.1, .1, .8, .2])
        sub_ax2 = fig.add_axes([.1, .3, .8, .2])

        # Drop NUV measurements taken at low temperature
        temp_index = np.where(temp > 15)[0]
        dark = dark[temp_index]
        date = date[temp_index]
        temp = temp[temp_index]

    dark_ax.plot(date,
                 dark,
                 color='k',
                 marker='o',
                 linestyle='',
                 markersize=2,
                 label='Dark Count Rate',
                 zorder=1,
                 rasterized=True)
    #dark_ax.axvline(x=2012.326, ymin=0, ymax=1, color='b', linestyle='-',
    #                lw=2, label='COS Suspend', zorder=1, alpha=.4)
    #dark_ax.axvline(x=2012.980, ymin=0, ymax=1, color='b', linestyle='--',
    #                lw=2, label='Dec Safe', zorder=1, alpha=.4)
    #dark_ax.axvline(x=2013.126, ymin=0, ymax=1, color='b', linestyle=':',
    #                lw=2, label='Feb Safe', zorder=1, alpha=.4)

    if detector != 'NUV':
        # FUV reference level
        dark_ax.axhline(y=1.5E-6,
                        color='r',
                        linestyle='--',
                        lw=3,
                        label='1.5e-6',
                        zorder=1,
                        alpha=.6)

    dark_ax.xaxis.set_major_formatter(FormatStrFormatter('%.1f'))
    dark_ax.yaxis.set_major_formatter(FormatStrFormatter('%3.2e'))
    dark_ax.yaxis.set_ticks(np.arange(0, dark.max(),
                                      2 * 10 ** magnitude(dark.mean())))
    dark_ax.set_xticklabels(['' for item in dark_ax.get_xticklabels()])
    dark_ax.set_ylabel('Mean Dark Rate cnts/sec/pix')

    # Derive the segment label from the output file name
    if 'FUVA' in outname:
        segment = 'FUVA'
    elif 'FUVB' in outname:
        segment = 'FUVB'
    else:
        segment = 'NUV'

    dark_ax.set_title('Global Dark Rate: %s' % (segment.upper()))
    dark_ax.set_xlim(2009.5, date.max() + .1)
    dark_ax.legend(numpoints=1, shadow=True, loc='upper left')
    dark_ax.grid(True)

    if detector == 'NUV':
        sub_ax.plot(date,
                    temp,
                    color='r',
                    linestyle='',
                    markersize=8,
                    marker='o')
        sub_ax.set_xlabel('Decimal Year')
        sub_ax.set_ylabel('Temperature')
        sub_ax.xaxis.set_major_formatter(FormatStrFormatter('%.1f'))
        sub_ax.set_xlim(2009.5, date.max() + .1)
        #sub_ax.set_ylim(15, 27)
        sub_ax.grid(True)

        # 81-day boxcar smoothing of the solar flux.  np.convolve replaces
        # scipy.convolve, a deprecated top-level alias removed from SciPy.
        solar_smooth = np.convolve(solar, np.ones(81) / 81.0, mode='same')
        sub_ax2.plot(solar_date,
                     solar,
                     color='orange',
                     marker='',
                     linestyle='-',
                     label='10.7cm',
                     lw=1,
                     alpha=.9,
                     zorder=1)
        # Trim the tail where the boxcar window runs off the data
        sub_ax2.plot(solar_date[:-41],
                     solar_smooth[:-41],
                     color='red',
                     marker='',
                     linestyle='-',
                     label='10.7cm Smoothed',
                     lw=3,
                     alpha=1,
                     zorder=1)
        sub_ax2.set_xticklabels(['' for item in dark_ax.get_xticklabels()])
        sub_ax2.set_ylabel('Radio Flux')
        sub_ax2.set_ylim(50, 210)
        sub_ax2.set_xlim(2009.5, date.max() + .1)
        sub_ax2.legend(numpoints=1, shadow=True, loc='best')
        sub_ax2.grid(True)
    else:
        # 81-day boxcar smoothing of the solar flux (see note above)
        solar_smooth = np.convolve(solar, np.ones(81) / 81.0, mode='same')
        sub_ax.plot(solar_date,
                    solar,
                    color='orange',
                    marker='',
                    linestyle='-',
                    label='10.7cm',
                    lw=1,
                    alpha=.9,
                    zorder=1)
        sub_ax.plot(solar_date[:-41],
                    solar_smooth[:-41],
                    color='red',
                    marker='',
                    linestyle='-',
                    label='10.7cm Smoothed',
                    lw=3,
                    alpha=1,
                    zorder=1)
        plt.gca().xaxis.set_major_formatter(FormatStrFormatter('%.1f'))
        sub_ax.set_xlabel('Decimal_year')
        sub_ax.set_ylabel('Radio Flux')
        sub_ax.set_ylim(50, 210)
        sub_ax.set_xlim(2009.5, date.max() + .1)
        sub_ax.legend(numpoints=1, shadow=True, loc='best')
        sub_ax.grid(True)

    remove_if_there(outname)
    fig.savefig(outname, bbox_inches='tight')
    plt.close(fig)
#-------------------------------------------------------------------------------
def plot_orbital_rate(longitude, latitude, darkrate, sun_lon, sun_lat, outname):
    """Plot the dark-rate of the detector vs orbital position.

    Three stacked panels are drawn: raw lat/lon, position relative to the
    sub-solar point (SAA passages removed), and the same view after cutting
    out the low dark-rate points (FUV only).

    Parameters
    ----------
    longitude : np.ndarray
        longitude of HST
    latitude : np.ndarray
        latitude of HST
    darkrate : np.ndarray
        measured dark-rate of the detector
    sun_lon : np.ndarray
        longitude of the sub-solar point
    sun_lat : np.ndarray
        latitude of the sub-solar point
    outname : str
        name of the output plot; must contain 'FUVA', 'FUVB' or 'NUV'

    Raises
    ------
    ValueError
        If ``outname`` does not identify a detector, or if FUV data is
        expected but the low-point cut leaves nothing to plot.
    """
    pretty_plot = True
    if pretty_plot:
        pl_opt = {"fontweight": "bold",
                  "titlesize": 22,
                  "labelsize": 18,
                  "legendsize": 10,
                  "tickwidth": 2,
                  "ticksize": 13,
                  "cbarticksize": 12,
                  "ticklength": 5,
                  "markersize": 12}
    else:
        pl_opt = {"fontweight": "semibold",
                  "titlesize": 15,
                  "labelsize": 15,
                  "legendsize": 8,
                  "tickwidth": 1.5,
                  "ticksize": 10,
                  "cbarticksize": 10,
                  "ticklength": 4,
                  "markersize": 10}

    # Shared color scale: clip at median-ish + 3 sigma above the minimum
    color_min = darkrate.min()
    color_max = darkrate.min() + 3 * darkrate.std()

    remove_if_there(outname)
    fig, (ax, ax2, ax3) = plt.subplots(3, sharex=True, figsize=(20, 15))

    # Derive the detector name from the output file name
    if 'FUVA' in outname:
        detector = 'FUVA'
    elif 'FUVB' in outname:
        detector = 'FUVB'
    elif 'NUV' in outname:
        detector = 'NUV'
    else:
        # Fail with a clear message instead of a NameError on `detector` below
        raise ValueError("outname must contain 'FUVA', 'FUVB' or 'NUV': "
                         "{}".format(outname))

    fig.suptitle('Orbital Variation in Darkrate for {}'.format(detector),
                 size=pl_opt['titlesize'], fontweight=pl_opt['fontweight'],
                 family='serif')

    #-- Panel 1: raw orbital position
    colors = ax.scatter(longitude,
                        latitude,
                        c=darkrate,
                        marker='o',
                        alpha=.7,
                        edgecolors='none',
                        s=3,
                        lw=0,
                        vmin=color_min,
                        vmax=color_max,
                        rasterized=True)
    ax.set_xlim(0, 360)
    ax.set_ylim(-35, 35)
    ax.set_ylabel('Latitude', size=pl_opt['labelsize'],
                  fontweight=pl_opt['fontweight'], family='serif')
    ax.set_xlabel('Longitude', size=pl_opt['labelsize'],
                  fontweight=pl_opt['fontweight'], family='serif')

    #-- Get rid of the SAA passages
    index_keep = np.where((longitude < 250) | (latitude > 10))[0]
    darkrate = darkrate[index_keep]
    latitude = latitude[index_keep]
    longitude = longitude[index_keep]
    sun_lat = sun_lat[index_keep]
    sun_lon = sun_lon[index_keep]

    # Position relative to the sub-solar point, wrapped into [0, 360)
    lon_diff = longitude - sun_lon
    lat_diff = latitude - sun_lat
    index = np.where(lon_diff < 0)[0]
    lon_diff[index] += 360

    #-- Panel 2: position relative to the sub-solar point
    colors = ax2.scatter(lon_diff,
                         lat_diff,
                         c=darkrate,
                         marker='o',
                         alpha=.7,
                         edgecolors='none',
                         s=5,
                         lw=0,
                         vmin=color_min,
                         vmax=color_max,
                         rasterized=True)
    ax2.set_xlim(0, 360)
    ax2.set_ylabel('Lat. - Sub-Solar Pnt', size=pl_opt['labelsize'],
                   fontweight=pl_opt['fontweight'], family='serif')
    ax2.set_xlabel('Long. - Sub-Solar Pnt', size=pl_opt['labelsize'],
                   fontweight=pl_opt['fontweight'], family='serif')

    #-- Cut out the low-points (91-sample running mean + 1.5 sigma threshold)
    dark_smooth = convolve(darkrate, np.ones(91)/91, mode='mirror')
    thresh = dark_smooth + 1.5*dark_smooth.std()
    index_keep = np.where((darkrate > thresh))[0]

    # NOTE(review): if the cut leaves no FUV points, execution falls through
    # to the ValueError below with a misleading message -- verify intent.
    if len(index_keep) and detector in ['FUVA', 'FUVB']:
        darkrate = darkrate[index_keep]
        latitude = latitude[index_keep]
        longitude = longitude[index_keep]
        sun_lat = sun_lat[index_keep]
        sun_lon = sun_lon[index_keep]
    elif detector == 'NUV':
        pass
    else:
        # Typo fix: this previously referenced the undefined name `detctor`,
        # so raising this error itself crashed with a NameError.
        raise ValueError("This needs to be NUV data at this point. Found: {}".format(detector))

    lon_diff = longitude - sun_lon
    lat_diff = latitude - sun_lat
    index = np.where(lon_diff < 0)[0]
    lon_diff[index] += 360

    #-- Panel 3: same view after the low-point cut
    colors = ax3.scatter(lon_diff,
                         lat_diff,
                         c=darkrate,
                         marker='o',
                         alpha=.7,
                         edgecolors='none',
                         s=5,
                         lw=0,
                         vmin=color_min,
                         vmax=color_max,
                         rasterized=True)

    # Shared colorbar to the right of all panels
    cax = plt.axes([0.92, 0.2, 0.02, 0.6])
    cbar = fig.colorbar(colors, cax=cax)
    for ytick in cbar.ax.yaxis.get_ticklabels():
        ytick.set_weight("bold")
    cbar.ax.tick_params(axis="y", labelsize=pl_opt["cbarticksize"])
    cbar.formatter.set_powerlimits((0, 0))
    cbar.update_ticks()
    # cbar.ax.yaxis.set_major_formatter(FormatStrFormatter("%3.1e"))

    ax3.set_xlim(0, 360)
    ax3.set_ylabel('Lat. - Sub-Solar Pnt', size=pl_opt['labelsize'],
                   fontweight=pl_opt['fontweight'], family='serif')
    ax3.set_xlabel('Long. - Sub-Solar Pnt', size=pl_opt['labelsize'],
                   fontweight=pl_opt['fontweight'], family='serif')

    # Apply the common tick styling to every panel
    for cur_ax in [ax, ax2, ax3]:
        for xtick, ytick in zip(cur_ax.xaxis.get_ticklabels(), cur_ax.yaxis.get_ticklabels()):
            xtick.set_weight("bold")
            ytick.set_weight("bold")
        cur_ax.tick_params(axis="both", which="major", labelsize=pl_opt['ticksize'],
                           width=pl_opt['tickwidth'], length=pl_opt['ticklength'])

    remove_if_there(outname)
    fig.savefig(outname, bbox_inches='tight')
    plt.close(fig)
'''
gridx, gridy = np.mgrid[all_lon.min():all_lon.max():.1,
all_lat.min():all_lat.max():.1]
thing = griddata(zip(all_lon, all_lat),
darkrate,
(gridx, gridy),
method='nearest' )
image = medfilt(thing.T, (5,5))
'''
#-------------------------------------------------------------------------------
| bsd-3-clause |
luchko/latticegraph_designer | latticegraph_designer/app/mpl_pane.py | 1 | 32631 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 9 18:13:18 2017
@author: Ivan Luchko (luchko.ivan@gmail.com)
This module contains the definition of the matplotlib manipulation pane.
class Arrow3D(FancyArrowPatch):
class Annotation3D(Annotation):
class GraphEdgesEditor(object):
testing and examples:
def run_test():
"""
from __future__ import division # make python 2 use float division
import numpy as np
from mpl_toolkits.mplot3d.art3d import Line3D
from mpl_toolkits.mplot3d.proj3d import proj_transform
from matplotlib.mlab import dist_point_to_segment
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d.art3d import Line3DCollection
from matplotlib.text import Annotation
from matplotlib.colors import hex2color
from latticegraph_designer.app.core import DealXML
import xml.etree.ElementTree as ET
class Arrow3D(FancyArrowPatch):
    """A FancyArrowPatch whose endpoints live in 3D data coordinates.

    The 3D vertices are stored at construction time; at every draw the
    endpoints are projected into 2D screen space before rendering.
    """

    def __init__(self, xs, ys, zs, *args, **kwargs):
        # Initialize the 2D patch with placeholder positions; the real
        # positions are computed in draw() from the stored 3D vertices.
        FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
        self._verts3d = xs, ys, zs

    def draw(self, renderer):
        x3, y3, z3 = self._verts3d
        x2, y2, _ = proj_transform(x3, y3, z3, renderer.M)
        self.set_positions((x2[0], y2[0]), (x2[1], y2[1]))
        FancyArrowPatch.draw(self, renderer)
class Annotation3D(Annotation):
    """A text Annotation anchored to a point in 3D data coordinates.

    The anchor is stored at construction time and re-projected into 2D
    screen space on every draw.
    """

    def __init__(self, label, xyz, *args, **kwargs):
        # Anchor at a dummy 2D point; the true position is set in draw().
        Annotation.__init__(self, label, xy=(0, 0), *args, **kwargs)
        self._verts3d = xyz

    def draw(self, renderer):
        x3, y3, z3 = self._verts3d
        x2, y2, _ = proj_transform(x3, y3, z3, renderer.M)
        self.xy = (x2, y2)
        Annotation.draw(self, renderer)
class GraphEdgesEditor(object):
"""
#########################################################################
## ##
## 3D drag-and-drop graph edges editor. ##
## ##
## * In order to create a new edge pick the source vertex ##
## and drag the pointer towards the targtet vertex ##
## ##
## * One left mouse button click selects the edge ##
## ##
## * Hold righ mouse button and move pointer up/down for zooming ##
## ##
## * Usefull key-bindings: ##
## 'Ctrl+NumKey' - change the type of active edge to NumKey ##
## 'Shift+Del' - delete all edges ##
## 'Del' - delete selected active edge ##
## 't' - Switch on/off displaying manipulation actions into ##
## terminal window. ##
## 'n' - switch on/off displaying of the lattice ##
## 'm' - switch on/off displaying of the unit cell arrows ##
## ##
## * Close manipulation window in order to finish editing ##
## ##
#########################################################################
"""
buttonHold = False # the no hold button event so lets create one
isRotated = False # True when axes3D is rotated
USE_COLLECTIONS = False # use Line3DCollection for edge representation
    def __init__(self, ax, cluster, parent=None, display_report=False):
        '''
        input:
            ax: mpl_toolkits Axes3D the lattice graph is drawn on
            cluster: cluster model providing UC, edges, vertices and
                lattice geometry
            parent: optional owner widget supplying preference settings;
                when None, preferences are loaded from the default XML file
            display_report: if 'True' Docstring and all manipulation
                actions are displayed in the terminal
        '''
        self.parent = parent
        self.ax = ax
        self.ax.set_axis_off()
        self.ax.figure.tight_layout()
        self.canvas = self.ax.figure.canvas
        # create bindings
        self.cluster = cluster
        self.UC = cluster.UC
        self.edges = cluster.edges
        self.vertices = cluster.vertices
        # other elements
        self.display_report = display_report
        # initialize theme: either from the default preferences file or
        # from the parent widget's already-loaded settings
        if parent is None:
            self.prefFileName = "./latticegraph_designer/resources/preferences.xml"
            self.SETTINGS = ET.parse(self.prefFileName).getroot()
            self.CURRENT_THEME = DealXML.get_child_by_name(self.SETTINGS,"THEME","Current theme")
        else: # create bindings
            self.prefFileName = self.parent.prefFileName
            self.SETTINGS = self.parent.SETTINGS
            self.CURRENT_THEME = self.parent.CURRENT_THEME
        self.initialize_theme(self.CURRENT_THEME)
        # create artists (placeholders first so create_* can test for them)
        self.edges_lines, self.sc, self.latticeNet = None, None, None
        self.create_artists_graph()
        self.create_artists_highlight()
        self.create_artists_arrows()
        self.set_artists_properties()
        # canvas event bindings (handlers defined elsewhere in the class)
        self.canvas.mpl_connect('draw_event', self.draw_callback)
        self.canvas.mpl_connect('motion_notify_event', self.motion_notify_callback)
        self.canvas.mpl_connect('button_press_event', self.button_press_callback)
        self.canvas.mpl_connect('button_release_event', self.button_release_callback)
        self.canvas.mpl_connect('key_press_event', self.key_press_callback)
        # hotkey list used for changing edge type
        self.ctrl_list = ["ctrl+{}".format(n) for n in range (len(self.colors_e))]
    def initialize_theme(self, theme_ET):
        '''Initialize widget theme and preferences from an XML THEME element.'''
        self.CURRENT_THEME = theme_ET
        # global display toggles and colors
        self.display_arrows = theme_ET.find("VISIBLEARROWS").get("value") == "True"
        self.display_lattice = theme_ET.find("VISIBLELATTICE").get("value") == "True"
        self.color_background = theme_ET.find("COLORBACKGROUND").get("value")
        self.color_lattice = theme_ET.find("COLORLATTICE").get("value")
        self.color_active = theme_ET.find("COLORACTIVATE").get("value")
        # collect per-type visibility/color/size for edges (dic1) and
        # vertices (dic2) from their PREFERENCE child elements
        dic1, dic2 = {}, {}
        for dic, elem in zip([dic1, dic2],[theme_ET.find("EDGES"),theme_ET.find("VERTICES")]):
            dic["bool"], dic["color"] = [], []
            dic["size"] = float(elem.find("SIZE").get("size"))
            for pref in elem.findall("PREFERENCE"):
                dic["bool"].append(pref.get("bool") == "True")
                dic["color"].append(pref.get("color"))
        # repeat the per-type lists so any type index up to 20x the number
        # of defined preferences maps to a color/visibility
        self.colors_e = np.array(dic1['color']*20)
        self.visible_e = np.array(dic1['bool']*20, dtype=bool)
        self.lw = dic1["size"]*7/100
        self.lw_active = self.lw*1.7
        self.colors_v = np.array(dic2['color']*20)
        self.visible_v = np.array(dic2['bool']*20, dtype=bool)
        self.sc_size = dic2["size"]*20/100
        self.sc_size_active = self.sc_size*1.7
def adjust_scale(self):
'''hack requirired for ajusting sclale in matplotlib axes3D'''
if self.display_lattice:
L,W,H = self.cluster.size
sitesCoord = self.cluster.lattice.get_finite_lattice_sites((L+1,W+1,H+1))
lims_max = np.max(sitesCoord,axis=0)
lims_min = np.min(sitesCoord,axis=0)
else:
lims_max = np.max(self.vertices.coords,axis=0)
lims_min = np.min(self.vertices.coords,axis=0)
x0, y0, z0 = (lims_max+lims_min)/2
delta = max(lims_max-lims_min)/2
self.ax.set_xlim3d(x0-delta,x0+delta)
self.ax.set_ylim3d(y0-delta,y0+delta)
self.ax.set_zlim3d(z0-delta,z0+delta)
    def create_artists_graph(self):
        '''create and add to self.ax the main artists of the lattice graph'''
        # initialize vertices
        self.xyz = self.vertices.coords
        self.x, self.y, self.z = self.xyz.T
        self.update_XY_scr()
        # create vertices
        if self.sc is not None: # remove previous vertices points
            self.ax.collections.remove(self.sc)
        self.sc = self.ax.scatter(self.x,self.y,self.z, marker='o')
        # create edges: either one Line3DCollection per unit-cell edge type
        # or one Line3D artist per individual edge
        if self.USE_COLLECTIONS:
            if self.edges_lines is not None: # remove previous edges lines
                for key, edge_col in self.edges_lines.items():
                    self.ax.collections.remove(edge_col)
            self.edges_lines = {}
            for key, edge in self.UC.edges.items():
                segments = [self.xyz[self.edges.source_target[j],:] for j in self.edges.array_ind[key]]
                edge_col = Line3DCollection(segments)
                self.ax.add_collection3d(edge_col)
                self.edges_lines[key] = edge_col
        else:
            if self.edges_lines is not None: # remove previous lines edges
                for line in self.edges_lines:
                    self.ax.artists.remove(line)
            self.edges_lines = []
            for j in range(len(self.edges.ids)):
                st = list(self.edges.source_target[j])
                line = Line3D(self.x[st], self.y[st], self.z[st])
                self.ax.add_artist(line)
                self.edges_lines.append(line)
        # create latticeNet (dashed guide lines of the underlying lattice)
        if self.latticeNet is not None: # remove previous lattice lines
            self.ax.collections.remove(self.latticeNet)
        self.latticeNet = Line3DCollection(self.cluster.latticeLines,
                                           linestyle = '--', lw=0.2)
        self.ax.add_collection3d(self.latticeNet)
        # reset interaction state: no vertex/edge is selected or active
        self.v_source_ind, self.v_target_ind = None, None
        self.v_ind, self.v_active_ind = None, None
        self.e_ind, self.e_active_ind = None, None
        self.e_activeDist_ids = []
    def create_artists_highlight(self):
        ''' creates the animated artists used to highlight active elements'''
        # snapshot of the static scene, used for fast blitting while dragging
        self.background = self.canvas.copy_from_bbox(self.ax.bbox)
        # marker drawn over the currently active vertex
        self.sc_active = self.ax.plot([],[],[], animated=True, marker='o')[0]
        # rubber-band line shown while a new edge is being dragged out
        self.new_edge = Line3D([], [], [], color=self.color_active,
                               lw=self.lw, animated=True)
        self.ax.add_artist(self.new_edge)
def create_artists_arrows(self):
'''create and add unit cell arrows artists'''
self.arrows = []
v0 = self.cluster.arrowVec[0,:] #origin
for v, label in zip(self.cluster.arrowVec[1:,:],['a','b','c']):
# create arrow
a = Arrow3D([v0[0], v[0]], [v0[1], v[1]], [v0[2], v[2]],
mutation_scale=10, arrowstyle="simple")
self.ax.add_artist(a)
self.arrows.append(a)
# create label
tag = Annotation3D(label,xyz=v,xytext=(-4,4),textcoords='offset points',
ha='right',va='bottom')
self.ax.add_artist(tag)
self.arrows.append(tag)
    def set_artists_properties(self):
        '''Apply the current theme (colors, sizes, visibility) to all artists.

        Re-styles the background, lattice net, unit-cell arrows, vertex
        scatter, edge lines and the highlight artists, then redraws the
        canvas. Called after theme changes or artist re-creation.
        '''
        # set background, lattice net and arrows
        self.ax.patch.set_facecolor(self.color_background)
        self.ax.figure.patch.set_facecolor(self.color_background)
        self.latticeNet.set_visible(self.display_lattice)
        self.latticeNet.set_color(self.color_lattice)
        self.adjust_scale()
        self.set_visible(self.arrows,self.display_arrows)
        for elem in self.arrows:
            elem.set_color(self.color_lattice)
        # set props of the vertices; colors are RGBA lists indexed by vertex type
        colors = [list(hex2color(c))+[1] for c in self.colors_v[self.vertices.types]]
        # NOTE(review): writes mpl-private Path3DCollection attributes directly;
        # may break with matplotlib version changes -- verify on upgrade.
        self.sc._facecolor3d = colors
        self.sc._edgecolor3d = colors
        self.sc.set_sizes([self.sc_size**2]*len(self.vertices.types))
        # set props of the edge collections (or individual lines)
        if self.USE_COLLECTIONS:
            # one Line3DCollection per unit-cell edge id
            for key, edge_col in self.edges_lines.items():
                edge_col.set_color(self.colors_e[self.UC.edges[key].type])
                edge_col.set_visible(self.visible_e[self.UC.edges[key].type])
                edge_col.set_linewidth(self.lw)
        else:
            # one Line3D artist per cluster edge
            for j in range(len(self.edges.ids)):
                self.edges_lines[j].set_color(self.colors_e[self.edges.types[j]])
                self.edges_lines[j].set_visible(self.visible_e[self.edges.types[j]])
                self.edges_lines[j].set_linewidth(self.lw)
        # set activation (highlight) elements and the new-edge rubber band
        self.sc_active.set_markersize(self.sc_size_active)
        self.sc_active.set_color(self.color_active)
        self.new_edge.set_linewidth(self.lw_active)
        self.new_edge.set_color(self.color_active)
        self.canvas.draw()
    def reset_size(self, size):
        '''Resize the displayed lattice cluster and rebuild all artists.

        ``size`` is forwarded to ``self.cluster.reset_size``; the graph
        artists are then recreated, rescaled and re-themed.
        '''
        self.cluster.reset_size(size)
        self.create_artists_graph()
        self.adjust_scale()
        self.set_artists_properties()
    def update_XY_scr(self):
        '''Update the cached screen-space projection of the vertices.

        Stores the 2D data-space coordinates in ``self.xd``/``self.yd`` and
        the pixel (screen) coordinates in ``self.x_scr``/``self.y_scr``;
        returns nothing. Must be re-run whenever the 3D view changes.
        '''
        # project 3d data space to 2d data space
        self.xd, self.yd, _ = proj_transform(self.x,self.y,self.z,
                                             self.ax.get_proj())
        # convert 2d data space to screen (pixel) space
        self.x_scr, self.y_scr = self.ax.transData.transform(
            np.vstack((self.xd, self.yd)).T).T
def get_ind_under_point(self, event):
'get the index of the vertex under point if within epsilon tolerance'
d = np.sqrt((self.x_scr - event.x)**2 + (self.y_scr - event.y)**2)
indseq = np.nonzero(np.equal(d, np.amin(d)))[0]
ind = indseq[0]
if d[ind] >= self.sc_size:
ind = None
return ind
    def getMouseXYZ(self, event):
        '''Return the (x, y, z) mouse position in 3D data space.

        Parses the formatted coordinate string produced by
        ``Axes3D.format_coord`` (e.g. "x=0.1, y=0.2, z=0.3").
        NOTE(review): relies on that exact "name=value, ..." layout --
        fragile across matplotlib versions; verify on upgrade.
        '''
        s = self.ax.format_coord(event.xdata, event.ydata)
        return tuple(float(a[a.find('=')+1:]) for a in s.split(','))
    def draw_callback(self, event):
        '''Canvas draw handler: refresh projections and the blit background.

        Skips reprojection while the view is being rotated (projections are
        refreshed once on button release instead).
        '''
        if not self.isRotated:
            self.update_XY_scr()
        # store a clean background snapshot for subsequent blitting
        self.background = self.canvas.copy_from_bbox(self.ax.bbox)
    def motion_notify_callback(self, event):
        '''Mouse-move handler: hover-highlight vertices and drag new edges.

        Uses blitting: restores a cached background, draws only the animated
        highlight/rubber-band artists, then blits the axes bbox.
        '''
        if event.inaxes is None:
            return
        # dragging with no source vertex selected means the 3D view is rotating
        if self.buttonHold and (self.v_source_ind is None):
            self.isRotated = True
            return
        self.v_ind = self.get_ind_under_point(event)
        # activation/deactivation of the hovered vertex
        if self.v_ind != self.v_active_ind: # not the same or both None
            self.v_active_ind = self.v_ind
            if self.v_source_ind is None:
                self.canvas.restore_region(self.background)
                # else the background is restored during new-edge redrawing below
            if self.v_active_ind is not None: # draw the highlight marker
                self.sc_active.set_data(self.x[self.v_active_ind],
                                        self.y[self.v_active_ind])
                self.sc_active.set_3d_properties(self.z[self.v_active_ind])
                self.ax.draw_artist(self.sc_active)
            self.canvas.blit(self.ax.bbox)
        if self.v_source_ind is not None: # redraw the potential new-edge line
            xm,ym,zm = self.getMouseXYZ(event)
            self.canvas.restore_region(self.background_newEdgeDrawing)
            self.new_edge.set_data([[self.x[self.v_source_ind], xm],
                                    [self.y[self.v_source_ind], ym]])
            self.new_edge.set_3d_properties([self.z[self.v_source_ind], zm])
            self.ax.draw_artist(self.new_edge)
            if self.v_active_ind is not None:
                self.ax.draw_artist(self.sc_active)
            self.canvas.blit(self.ax.bbox)
    def button_press_callback(self, event):
        '''Mouse-press handler: start a new edge or select an existing one.

        If a vertex is hovered it becomes the source of a new edge (and view
        rotation is disabled while dragging); otherwise the click tries to
        select the edge nearest to the pointer in screen space.
        '''
        if event.inaxes is None:
            return
        self.buttonHold = True
        # source-vertex selection: begin dragging a new edge
        if self.v_active_ind is not None:
            self.v_source_ind = self.v_active_ind
            self.ax.mouse_init(rotate_btn=2) # disable mouse rotation while dragging
            self.background_newEdgeDrawing = self.canvas.copy_from_bbox(self.ax.bbox)
            return
        # edge selection: pick the first edge within lw_active pixels
        p = event.x, event.y # display (pixel) coords
        for i in range(len(self.edges.source_target)):
            (s, t) = self.edges.source_target[i]
            d = dist_point_to_segment(p, (self.x_scr[s],self.y_scr[s]),
                                      (self.x_scr[t],self.y_scr[t]))
            if d <= self.lw_active:
                self.select_edge(self.edges.ids[i])
                return
        self.e_ind = None  # clicked on empty space: nothing selected
    def button_release_callback(self, event):
        '''Mouse-release handler: finish edge drawing, rotation or selection.

        Three mutually exclusive outcomes: commit/abort a dragged new edge,
        finalize a view rotation (refresh projections), or clear the active
        edge when the click selected nothing.
        '''
        if self.v_source_ind is not None: # a new edge was being drawn
            self.ax.mouse_init(rotate_btn=1) # re-enable mouse rotation
            # check whether the drag ended on a valid (different) vertex
            if (self.v_active_ind is None) or \
               (self.v_active_ind == self.v_source_ind):
                # aborted: drop the rubber band and restore the scene
                self.v_source_ind = None
                self.canvas.restore_region(self.background)
                self.canvas.blit(self.ax.bbox)
            else:
                self.v_target_ind = self.v_active_ind
                self.add_edge()
                self.canvas.draw()
        elif self.isRotated: # Axes3D was rotated
            self.update_XY_scr()
            self.isRotated = False
        # Axes3D was not rotated:
        # deactivate the active edge if no new edge was selected by the click
        elif self.e_ind is None and self.e_active_ind is not None:
            self.select_edge(None)
        self.buttonHold = False
def set_visible(self, elements_list, boolVisible):
'''set visibility of artists in elements_list'''
for elem in elements_list:
elem.set_visible(boolVisible)
    def reset_e_color(self, ind, color, lw):
        '''Set color and linewidth of the edge with unit-cell id *ind*.

        In collection mode the id maps to one Line3DCollection; otherwise
        it maps (via ``edges.array_ind``) to several individual Line3D
        artists, one per periodic copy in the cluster.
        '''
        if self.USE_COLLECTIONS:
            self.edges_lines[ind].set_color(color)
            self.edges_lines[ind].set_linewidth(lw)
        else:
            for j in self.edges.array_ind[ind]:
                self.edges_lines[j].set_color(color)
                self.edges_lines[j].set_linewidth(lw)
    def reset_active_e_color(self, color, lw):
        '''Restyle the currently active edge, redraw, and refresh the
        cached blitting background.'''
        self.reset_e_color(self.e_active_ind, color, lw)
        self.canvas.draw()
        # the scene changed, so the stored background must be re-captured
        self.background = self.canvas.copy_from_bbox(self.ax.bbox)
    def select_edge(self, ind):
        '''Select (activate) the edge with id *ind*; ``ind=None`` deselects.

        Restores the previous active edge's normal color, highlights the
        new one, and either emits a Qt signal (when embedded in a parent
        GUI) or prints a report to the terminal.
        '''
        self.e_ind = ind
        if self.e_active_ind is not None: # deactivate the previous edge
            color = self.colors_e[self.UC.edges[self.e_active_ind].type]
            self.reset_active_e_color(color, self.lw)
        self.e_active_ind = self.e_ind # activate the new edge
        if ind is not None:
            self.reset_active_e_color(self.color_active, self.lw_active)
        # notification: a parent GUI gets a signal, otherwise print a report
        if self.parent is not None: # pass signals and notifications
            self.parent.selectedEdgeChanged.emit(self.e_active_ind)
        elif (ind is None) and self.display_report:
            print(" active edge unselected")
        elif self.display_report:
            print(" selected edge: {}".format(self.UC.edges[self.e_active_ind]))
    def select_edges(self, ids):
        '''Select (activate) every edge whose id is in the *ids* list.

        Deactivates the previously selected distance-group, highlights the
        new group, and makes the first id the single "active" edge.
        '''
        for ind in self.e_activeDist_ids: # deactivate the previous group
            color = self.colors_e[self.UC.edges[ind].type]
            self.reset_e_color(ind, color, self.lw)
        self.e_activeDist_ids = ids # remember the newly selected group
        self.e_ind = None if len(ids) == 0 else ids[0]
        self.e_active_ind = self.e_ind
        for ind in ids:
            self.reset_e_color(ind, self.color_active, self.lw_active)
        self.canvas.draw()
        # refresh the cached background after the restyle
        self.background = self.canvas.copy_from_bbox(self.ax.bbox)
    def change_active_edge_type(self, new_type):
        '''Change the type of the selected edge (or distance group) to
        *new_type*, report the change, and notify the parent GUI.

        No-op when nothing is selected.
        '''
        if self.e_active_ind is not None:
            old_type = self.UC.edges[self.e_active_ind].type
            if len(self.e_activeDist_ids) == 0:
                # single active edge
                self.edges.change_edge_type(self.e_active_ind, new_type)
                msg = ' type of the edge id={0} was changed from {1} to {2}'.format(
                    self.e_active_ind, old_type, new_type)
                #self.reset_active_e_color(self.colors_e[new_type], self.lw)
                #self.e_active_ind = None
            else:
                # whole group selected by distance search
                for ind in self.e_activeDist_ids:
                    self.edges.change_edge_type(ind, new_type)
                msg = ' type of the edges was changed from {1} to {2}'.format(
                    self.e_active_ind, old_type, new_type)
            if self.display_report:
                print(msg)
            if self.parent is not None:
                self.parent.unitCellChanged.emit()
                self.parent.selectedEdgeChanged.emit(self.e_active_ind)
    def add_edge(self):
        '''Create a new edge between the stored source and target vertices.

        ``self.edges.add_edge`` may refuse (returns ``None``) when the edge
        already exists or violates a constraint; in that case nothing is
        drawn. Otherwise the new edge's artists are created, the edge is
        made active, and the parent GUI is notified.
        '''
        newEdge_id = self.edges.add_edge(self.v_source_ind,self.v_target_ind)
        if newEdge_id is not None:
            if self.USE_COLLECTIONS:
                # one collection holding all periodic copies of the new edge
                segments = [self.xyz[self.edges.source_target[j],:] for j in self.edges.array_ind[newEdge_id]]
                new_edge_col = Line3DCollection(segments,color=self.colors_e[0],lw=self.lw)
                self.ax.add_collection3d(new_edge_col)
                self.edges_lines[newEdge_id] = new_edge_col
            else:
                # one Line3D artist per periodic copy
                for j in self.edges.array_ind[newEdge_id]:
                    edge = self.edges.source_target[j]
                    st = list(edge)
                    line = Line3D(self.x[st], self.y[st], self.z[st],
                                  color = self.colors_e[0], lw=self.lw)
                    self.ax.add_artist(line)
                    self.edges_lines.append(line)
            # deactivate the previously active edge, then activate the new one
            if self.e_active_ind is not None:
                color = self.colors_e[self.UC.edges[self.e_active_ind].type]
                self.reset_active_e_color(color, self.lw)
            self.e_active_ind = newEdge_id # activate new edge
            self.reset_active_e_color(self.color_active, self.lw_active)
            if self.display_report:
                print(' added edge: {}'.format(self.UC.edges[newEdge_id]))
            if self.parent is not None:
                self.parent.unitCellChanged.emit()
                self.parent.selectedEdgeChanged.emit(self.e_active_ind)
        # clear the drag state whether or not the edge was created
        self.v_source_ind = None
        self.v_target_ind = None
    def delete_edge_callback(self, _id):
        '''Delete the edge with id *_id* and remove its artists.

        Silently returns when the id is unknown; removes the data-only edge
        when it has no drawn copies in the current cluster.
        '''
        if self.edges.array_ind.get(_id) is None:
            return
        if len(self.edges.array_ind.get(_id)) == 0:
            # edge exists in the unit cell but has no artists in this cluster
            self.edges.remove_edge(_id)
            return
        # remove artists from the axes and from our bookkeeping list/dict
        if self.USE_COLLECTIONS:
            self.ax.collections.remove(self.edges_lines[_id])
            del self.edges_lines[_id]
        else:
            for j in self.edges.array_ind[_id]:
                self.ax.artists.remove(self.edges_lines[j])
            # assumes array_ind[_id] is a contiguous index range -- the slice
            # delete keeps later indices aligned after remove_edge reindexes
            begin = self.edges.array_ind[_id][0]
            end = self.edges.array_ind[_id][-1]
            self.edges_lines[begin:end+1] = []
        self.edges.remove_edge(_id)
    def delete_active_edge_callback(self):
        '''Delete the selected (active) edge, or the whole selected
        distance-group when one is active, then notify/report.'''
        if self.e_active_ind is not None:
            if len(self.e_activeDist_ids) == 0:
                # single active edge
                msg = " deleted edge: {}".format(self.UC.edges[self.e_active_ind])
                self.delete_edge_callback(self.e_active_ind)
            else:
                # distance-group selection: delete every edge in the group
                activeDist = self.UC.edges[self.e_activeDist_ids[0]].length
                numActDist = len(self.e_activeDist_ids)
                msg = " deleted {0} edges with length: {1}".format(numActDist, activeDist)
                # iterate over a copy: deletion mutates the underlying data
                for ind in self.e_activeDist_ids[:]:
                    self.delete_edge_callback(ind)
            if self.display_report:
                print(msg)
            self.e_activeDist_ids = []
            self.e_active_ind = None
            self.canvas.draw()
            if self.parent is not None:
                self.parent.statusBar().showMessage(msg, 2000)
                self.parent.unitCellChanged.emit()
                self.parent.selectedEdgeChanged.emit(None)
    def clearEdges_callback(self):
        '''Delete every edge in the unit cell, rebuild the graph artists,
        and notify/report.'''
        self.UC.clearEdges()
        # regenerate the cluster's edge data for the (now edge-free) cell
        self.cluster.edges.process_edges(self.cluster.size)
        self.create_artists_graph()
        self.set_artists_properties()
        msg = " all edges are deleted"
        if self.display_report:
            print(msg)
        self.e_activeDist_ids = []
        self.e_active_ind = None
        self.canvas.draw()
        if self.parent is not None:
            self.parent.statusBar().showMessage(msg, 2000)
            self.parent.unitCellChanged.emit()
            self.parent.selectedEdgeChanged.emit(None)
def searchActiveDistEdge_callback(self):
'''search for edges with the same length as selected (active)'''
if self.e_active_ind is None:
print(' For search by distance please select a sample edge')
else:
self.searchDistEdge_callback(self.e_active_ind)
self.e_active_ind = None
    def searchDistEdge_callback(self, ind):
        '''Find all edges with the same length as the edge with id *ind*,
        rebuild the artists to reflect the result, and notify/report.'''
        self.edges.search_similar_edges(self.UC.edges[ind])
        self.create_artists_graph()
        self.set_artists_properties()
        # report how many edges share this length (via the UC length index)
        dist = self.UC.edges[ind].length
        num = len(self.UC.lengthDic[dist])
        msg = ' {0} edges were found with dist={1:.3f}'.format(num,dist)
        if self.display_report:
            print(msg)
        if self.parent is not None:
            self.parent.unitCellChanged.emit()
            self.parent.selectedEdgeChanged.emit(None)
            self.parent.statusBar().showMessage(msg, 2000)
def key_press_callback(self, event):
'''create key-bindings'''
if event.key in self.ctrl_list: # change edge type
new_type = self.ctrl_list.index(event.key)
self.change_active_edge_type(new_type)
elif event.key == 'delete':
self.delete_active_edge_callback()
elif event.key == 'shift+delete':
self.clearEdges_callback()
elif event.key == 'ctrl+d':
self.searchActiveDistEdge_callback()
elif event.key == 't': # change displaying report settings
self.display_report = not self.display_report
# bind to parent
if self.parent is not None:
self.parent.TEXT_MODE = self.display_report
self.parent.radioButton_output.setChecked(self.display_report)
else:
print(" displaying actions in terminal is turned {}".format("on" if self.display_report else "off"))
elif event.key == 'n': # change displaying lattice net settings
self.display_lattice = not self.display_lattice
self.latticeNet.set_visible(self.display_lattice)
self.adjust_scale()
self.CURRENT_THEME.find("VISIBLELATTICE").set("value",str(self.display_lattice))
ET.ElementTree(self.SETTINGS).write(self.prefFileName)
# bind to parent
if self.parent is not None:
self.parent.latticeVisibleChanged.emit(self.display_lattice)
if self.display_report:
if self.display_lattice:
print(' Lattice net enabled')
else:
print(' Lattice net disabled')
self.canvas.draw()
elif event.key == 'm': # change displaying arrows settings
self.display_arrows = not self.display_arrows
self.set_visible(self.arrows,self.display_arrows)
self.canvas.draw()
self.CURRENT_THEME.find("VISIBLEARROWS").set("value",str(self.display_arrows))
ET.ElementTree(self.SETTINGS).write(self.prefFileName)
# bind to parent
if self.parent is not None:
self.parent.arrowsVisibleChanged.emit(self.display_arrows)
if self.display_report:
if self.display_arrows:
print(' Unit cell arrows enabled')
else:
print(' Unit sell arrows disabled')
#############################################################################
if __name__ == '__main__':
    def run_test():
        '''Interactive smoke test for GraphEdgesEditor.

        Builds a small two-vertex unit cell with several edges, wraps it in
        a 2x2x2 crystal cluster, and opens the editor in a blocking
        matplotlib window so the key/mouse bindings can be tried manually.
        '''
        from core import Vertex, Edge, UnitCell, Lattice, CrystalCluster
        import matplotlib.pyplot as plt
        import numpy as np
        # slightly sheared lattice so diagonal edges are distinguishable
        lattice = Lattice(basisMatrix=np.array([[1,0,0],[0.1, 1.2,0],[0.05,0.05,1]]).T)
        UC = UnitCell()
        UC.add_vertex(Vertex(0,0,[0.2,0.2,0.2]))
        UC.add_vertex(Vertex(0,0,[0.3,0.3,0.6]))
        UC.add_edge(Edge(0,1,(1,2),(0,0,0)))
        UC.add_edge(Edge(0,2,(2,1),(0,0,1)))
        UC.add_edge(Edge(0,0,(1,1),(1,0,0)))
        UC.add_edge(Edge(0,0,(1,1),(0,1,0)))
        UC.add_edge(Edge(0,0,(2,2),(1,0,0)))
        UC.add_edge(Edge(0,0,(2,2),(0,1,0)))
        cluster = CrystalCluster(UC,lattice,(2,2,2))
        fig = plt.figure('Graph edges editor', figsize=(5,5), dpi=100)
        ax = fig.gca(projection='3d') # same as ax = Axes3D(fig)
        display_report = True
        gee = GraphEdgesEditor(ax, cluster, display_report=display_report)
        print(gee.__doc__)
        if display_report:
            print('\n======================================')
            # fixed garbled message (was "# Start editting editing: ")
            print('# Start editing: \n')
            print('Initial UC:\n{}'.format(cluster.UC))
            print('Editing actions:')
        plt.show(block=True)
        if display_report:
            print('\nEdited UC:\n{}'.format(cluster.UC))
            print('======================================\n')
run_test() | mit |
siutanwong/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
    # NOTE: we put the following in a 'if __name__ == "__main__"' protected
    # block to be able to use a multi-core grid search that also works under
    # Windows, see: http://docs.python.org/library/multiprocessing.html#windows
    # The multiprocessing module is used as the backend of joblib.Parallel
    # that is used when n_jobs != 1 in GridSearchCV
    # the training data folder must be passed as first argument
    movie_reviews_data_folder = sys.argv[1]
    dataset = load_files(movie_reviews_data_folder, shuffle=False)
    print("n_samples: %d" % len(dataset.data))
    # split the dataset in training and test set:
    docs_train, docs_test, y_train, y_test = train_test_split(
        dataset.data, dataset.target, test_size=0.25, random_state=None)
    # TASK: Build a vectorizer / classifier pipeline that filters out tokens
    # that are too rare or too frequent
    # TASK: Build a grid search to find out whether unigrams or bigrams are
    # more useful.
    # Fit the pipeline on the training set using grid search for the parameters
    # TASK: print the cross-validated scores for the each parameters set
    # explored by the grid search
    # TASK: Predict the outcome on the testing set and store it in a variable
    # named y_predicted
    # NOTE: this file is an exercise skeleton -- ``y_predicted`` below is
    # intentionally undefined until the TASK above is completed.
    # Print the classification report
    print(metrics.classification_report(y_test, y_predicted,
                                        target_names=dataset.target_names))
    # Print and plot the confusion matrix
    cm = metrics.confusion_matrix(y_test, y_predicted)
    print(cm)
    # import matplotlib.pyplot as plt
    # plt.matshow(cm)
    # plt.show()
| bsd-3-clause |
psiq/gdsfactory | pp/pixelate.py | 1 | 4481 | import itertools as it
import numpy as np
from pp.geo_utils import polygon_grow
DEG2RAD = np.pi / 180
RAD2DEG = 1.0 / DEG2RAD
# from matplotlib import pyplot as plt
def pixelate_path(
    pts, pixel_size=0.55, snap_res=0.05, middle_offset=0.5, theta_start=0, theta_end=90
):
    """Add one axis-aligned square pixel per point along a path.

    Parameters
    ----------
    pts: list of (x, y) points describing the path
    pixel_size: nominal edge length of each pixel
    snap_res: grid resolution the pixel corners are snapped to
    middle_offset: currently unused (kept for backward compatibility)
    theta_start, theta_end: path direction (degrees) at the first/last point

    Returns
    -------
    List of pixels, each a list of 4 (x, y) corner points.
    """
    deg2rad = np.pi / 180.0  # local aliases keep this function self-contained
    rad2deg = 1.0 / deg2rad
    # direction of each segment, in two axis conventions, padded with the
    # user-supplied start/end angles at the endpoints
    thetas0 = [
        np.arctan2(y1 - y0, x1 - x0) for (x0, y0), (x1, y1) in zip(pts[:-1], pts[1:])
    ]
    thetas0 += [theta_end * deg2rad]
    thetas0[0] = theta_start * deg2rad
    thetas1 = [
        np.arctan2(x1 - x0, y1 - y0) for (x0, y0), (x1, y1) in zip(pts[:-1], pts[1:])
    ]
    # append before assigning index 0 so a single-point path does not crash
    thetas1 += [(theta_end - 180) * deg2rad]
    thetas1[0] = (theta_start - 180) * deg2rad
    thetas_deg0 = np.abs(np.array(thetas0) * rad2deg) % 90
    thetas_deg1 = np.abs(np.array(thetas1) * rad2deg) % 90
    # per point, keep whichever convention is closest to an axis (< 45 deg)
    thetas_deg = thetas_deg1
    near_axis = np.where(thetas_deg0 < 45)  # renamed: was shadowing builtin 'slice'
    thetas_deg[near_axis] = thetas_deg0[near_axis]
    # shrink pixels on diagonal segments (debug print of scalings removed)
    scalings = np.cos(
        np.abs(thetas_deg) * deg2rad
    )  # + middle_offset * (1 - np.cos(abs(thetas_deg) * DEG2RAD) )

    def _snap(x):
        # NOTE(review): int() truncates before the (no-op) round -- rounding
        # was presumably intended; kept as-is to preserve existing output.
        return round(int(x / snap_res), 0) * snap_res

    def _gen_pixel(p, a):
        # axis-aligned square of half-width ``a`` centred on point ``p``
        x0, y0 = p
        pix = [(x0 + a, y0 - a), (x0 + a, y0 + a), (x0 - a, y0 + a), (x0 - a, y0 - a)]
        pix = [(_snap(_x), _snap(_y)) for _x, _y in pix]
        return pix

    a = pixel_size / 2
    return [_gen_pixel(p, a * s) for p, s in zip(pts, scalings)]
def points_to_shapely(pts):
    """Convert a sequence of (x, y) points into a shapely ``Polygon``."""
    from shapely.geometry.polygon import Polygon

    return Polygon(pts)
def _snap_to_resolution(a, snap_res):
return np.round(a / snap_res, 0) * snap_res
def _pixelate(
    pts,
    N=100,
    margin=0.4,
    margin_x=None,
    margin_y=None,
    ports=None,  # fixed: was a mutable default ([]); parameter is unused
    crop_pixels_at_ports=False,
    nb_pixels_x=None,
    nb_pixels_y=None,
    min_pixel_size=0.4,
    snap_res=0.05,
):
    """
    Pixelates a shape (as 2d array) onto an NxN grid.

    Arguments:
        pts: The 2D array of (x, y) points to be pixelated
        N: The number of pixels on an edge of the grid
        margin / margin_x / margin_y: extra border around the shape bounds
        ports, crop_pixels_at_ports: currently unused (kept for API
            compatibility)
        nb_pixels_x, nb_pixels_y: override N per axis
        min_pixel_size: lower bound on pixel edge length (caps pixel count)
        snap_res: resolution the pixel grid is snapped to

    Returns:
        A list of pixel bounding boxes (west, south, east, north) that
        intersect the shape.
    """
    from shapely import geometry
    shape = points_to_shapely(pts)  # convert to shapely
    if not shape:
        return []
    if margin_x is None:
        margin_x = margin
    if margin_y is None:
        margin_y = margin
    if nb_pixels_x is None:
        nb_pixels_x = N
    if nb_pixels_y is None:
        nb_pixels_y = N
    west, south, east, north = shape.bounds
    width = east - west + 2 * margin_x
    height = north - south + 2 * margin_y
    # cap the pixel counts so no pixel is smaller than min_pixel_size
    if min_pixel_size is not None:
        max_nb_pixels_x = int(np.ceil(width / min_pixel_size))
        nb_pixels_x = min(nb_pixels_x, max_nb_pixels_x)
    if min_pixel_size is not None:
        max_nb_pixels_y = int(np.ceil(height / min_pixel_size))
        nb_pixels_y = min(nb_pixels_y, max_nb_pixels_y)
    # pixel size, slightly enlarged so snapped neighbours still overlap
    w = width / nb_pixels_x + snap_res
    h = height / nb_pixels_y + snap_res
    ax = margin_x
    ay = margin_y
    # pixel centres, snapped onto the snap_res grid
    xs = np.linspace(west + w / 2 - ax, east - w / 2 + ax, nb_pixels_x)
    ys = np.linspace(south + h / 2 - ay, north - h / 2 + ay, nb_pixels_y)
    xs = _snap_to_resolution(xs, snap_res)
    ys = _snap_to_resolution(ys, snap_res)
    grid = it.product(xs, ys)
    pixels = []
    # keep only the grid cells that actually intersect the shape
    for x, y in grid:
        _w, _s, _e, _n = x - w / 2, y - h / 2, x + w / 2, y + h / 2
        newpix = geometry.box(_w, _s, _e, _n)
        if shape.intersects(newpix):
            r = (_w, _s, _e, _n)
            pixels.append(r)
    return pixels
def rect_to_coords(r):
    """Expand a (west, south, east, north) rectangle into its 4 corners.

    Corners are returned counter-clockwise starting from the south-west.
    """
    west, south, east, north = r
    return [(west, south), (west, north), (east, north), (east, south)]
def pixelate(pts, N=100, margin=0.4, **kwargs):
    """Pixelate the shape defined by ``pts``.

    Returns rectangles [Rect1, Rect2, ...] (corner-point lists) ready to
    go into the quad tree. Extra keyword arguments are forwarded to
    ``_pixelate``.
    """
    return [
        rect_to_coords(r) for r in _pixelate(pts, N=N, margin=margin, **kwargs)
    ]
## TODO REFACTOR - THE CODE BELOW SHOULD BE IN A SEPARATE FILE
def gen_pixels_op_blocking(pts, snap_res=0.05, margin=1.0, min_pixel_size=0.4):
    '''Grow the polygon by *margin* and pixelate the grown outline.

    Presumably produces optical-proximity blocking pixels -- confirm
    against callers.
    '''
    op_block_pts = polygon_grow(pts, margin)
    return pixelate(
        op_block_pts, min_pixel_size=min_pixel_size, N=100, snap_res=snap_res
    )
def gen_op_blocking(pts, snap_res=0.05, margin=0.3):
    '''Return the polygon grown by *margin*.

    NOTE(review): ``snap_res`` is accepted but unused; kept for API
    compatibility with callers.
    '''
    return polygon_grow(pts, margin)
if __name__ == "__main__":
    # quick smoke test: pixelate a parabola sampled at 5 points
    import numpy as np  # NOTE(review): redundant -- numpy is imported above
    pts = [(x, x ** 2) for x in np.linspace(0, 1, 5)]
    c = pixelate(pts)
    print(c)
| mit |
aspera1631/TweetScore | prob_weights.py | 1 | 1505 | __author__ = 'bdeutsch'
import numpy as np
import pandas as pd
import MySQLdb
def sql_to_df(database, table):
    '''Read an entire MySQL table into a pandas DataFrame.

    Connects to the local MySQL server as root (empty password), selects
    every row of *table* in *database*, and returns the result.
    NOTE(review): the table name is interpolated directly into the SQL
    string -- only call this with trusted table names.
    '''
    con = MySQLdb.connect(host='localhost', user='root', passwd='', db=database)
    df = pd.read_sql_query("select * from %s" % table, con, index_col=None, coerce_float=True, params=None, parse_dates=None, chunksize=None)
    return df
def pickle_to_sql(filein, tableName, mode):
    '''Load a pickled DataFrame from *filein* and write it to MySQL.

    *mode* is passed as pandas' ``if_exists`` option ('fail', 'replace'
    or 'append'). Always returns True on success.
    '''
    # open file and load into a dataframe
    tweets = pd.read_pickle(filein)
    # connect to the local MySQL server (TweetScore database)
    con = MySQLdb.connect(host='localhost', user='root', passwd='', db='TweetScore') # may need to add some other options to connect
    # write the dataframe to SQL
    tweets.to_sql(con=con, name=tableName, if_exists=mode, flavor='mysql')
    return True
# load binned data from sql
df = sql_to_df('tweetscore', 'binned')
# re-index on the bin designation column
df = df.set_index("desig")
# group rows sharing the same bin
df_group = df.groupby(level=0)
# build a per-bin summary frame: feature values are constant within a bin,
# so .first() is used; 'rt' is aggregated into a probability and a weight
df2 = pd.DataFrame()
df2["emo_num"] = df_group["emo_num"].first()
df2["ht_num"] = df_group["ht_num"].first()
df2["media_num"] = df_group["media_num"].first()
df2["txt_len_basic"] = df_group["txt_len_basic"].first()
df2["url_num"] = df_group["url_num"].first()
df2["user_num"] = df_group["user_num"].first()
# presumably 'rt' is a 0/1 retweet flag, so the mean is a retweet
# probability -- confirm against the table schema
df2["rt_prob"] = df_group["rt"].mean()
# weight each bin by sqrt of its sample count
df2["weights"] = df_group["rt"].count().apply(np.sqrt)
# write to pickle file
df2.to_pickle("probabilities")
pickle_to_sql("probabilities", "probabilities", "replace") | mit |
dingocuster/scikit-learn | examples/svm/plot_rbf_parameters.py | 132 | 8096 | '''
==================
RBF SVM parameters
==================
This example illustrates the effect of the parameters ``gamma`` and ``C`` of
the Radial Basis Function (RBF) kernel SVM.
Intuitively, the ``gamma`` parameter defines how far the influence of a single
training example reaches, with low values meaning 'far' and high values meaning
'close'. The ``gamma`` parameters can be seen as the inverse of the radius of
influence of samples selected by the model as support vectors.
The ``C`` parameter trades off misclassification of training examples against
simplicity of the decision surface. A low ``C`` makes the decision surface
smooth, while a high ``C`` aims at classifying all training examples correctly
by giving the model freedom to select more samples as support vectors.
The first plot is a visualization of the decision function for a variety of
parameter values on a simplified classification problem involving only 2 input
features and 2 possible target classes (binary classification). Note that this
kind of plot is not possible to do for problems with more features or target
classes.
The second plot is a heatmap of the classifier's cross-validation accuracy as a
function of ``C`` and ``gamma``. For this example we explore a relatively large
grid for illustration purposes. In practice, a logarithmic grid from
:math:`10^{-3}` to :math:`10^3` is usually sufficient. If the best parameters
lie on the boundaries of the grid, it can be extended in that direction in a
subsequent search.
Note that the heat map plot has a special colorbar with a midpoint value close
to the score values of the best performing models so as to make it easy to tell
them apart in the blink of an eye.
The behavior of the model is very sensitive to the ``gamma`` parameter. If
``gamma`` is too large, the radius of the area of influence of the support
vectors only includes the support vector itself and no amount of
regularization with ``C`` will be able to prevent overfitting.
When ``gamma`` is very small, the model is too constrained and cannot capture
the complexity or "shape" of the data. The region of influence of any selected
support vector would include the whole training set. The resulting model will
behave similarly to a linear model with a set of hyperplanes that separate the
centers of high density of any pair of two classes.
For intermediate values, we can see on the second plot that good models can
be found on a diagonal of ``C`` and ``gamma``. Smooth models (lower ``gamma``
values) can be made more complex by selecting a larger number of support
vectors (larger ``C`` values) hence the diagonal of good performing models.
Finally one can also observe that for some intermediate values of ``gamma`` we
get equally performing models when ``C`` becomes very large: it is not
necessary to regularize by limiting the number of support vectors. The radius of
the RBF kernel alone acts as a good structural regularizer. In practice though
it might still be interesting to limit the number of support vectors with a
lower value of ``C`` so as to favor models that use less memory and that are
faster to predict.
We should also note that small differences in scores results from the random
splits of the cross-validation procedure. Those spurious variations can be
smoothed out by increasing the number of CV iterations ``n_iter`` at the
expense of compute time. Increasing the value number of ``C_range`` and
``gamma_range`` steps will increase the resolution of the hyper-parameter heat
map.
'''
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
    """Colormap normalizer that pins *midpoint* to the middle of the map.

    Values are mapped piecewise-linearly so that vmin -> 0, midpoint -> 0.5
    and vmax -> 1, stretching color contrast around the midpoint.
    """

    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # NOTE(review): the ``clip`` argument is ignored by this override.
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, x, y))
##############################################################################
# Load and prepare data set
#
# dataset for grid search
iris = load_iris()
X = iris.data
y = iris.target
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only 2 classes and
# make it a binary classification problem.
X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1  # relabel classes {1, 2} as {0, 1}
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=5, test_size=0.2, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
      % (grid.best_params_, grid.best_score_))
# Now we need to fit a classifier for all parameters in the 2d version
# (we use a smaller set of parameters here because it takes a while to train)
C_2d_range = [1e-2, 1, 1e2]
gamma_2d_range = [1e-1, 1, 1e1]
classifiers = []
for C in C_2d_range:
    for gamma in gamma_2d_range:
        clf = SVC(C=C, gamma=gamma)
        clf.fit(X_2d, y_2d)
        classifiers.append((C, gamma, clf))
##############################################################################
# visualization
#
# draw visualization of parameter effects
plt.figure(figsize=(8, 6))
xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
for (k, (C, gamma, clf)) in enumerate(classifiers):
    # evaluate decision function in a grid
    Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # visualize decision function for these parameters
    plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
    plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),
              size='medium')
    # visualize parameter's effect on decision function
    plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
    plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
    plt.xticks(())
    plt.yticks(())
    plt.axis('tight')
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [x[1] for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw heatmap of the validation accuracy as a function of gamma and C
#
# The scores are encoded as colors with the hot colormap which varies from
# dark red to bright yellow. As the most interesting scores are all located
# in the 0.92 to 0.97 range we use a custom normalizer to set the mid-point
# to 0.92 so as to make it easier to visualize the small variations of score
# values in the interesting range while not brutally collapsing all the low
# score values to the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
           norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
| bsd-3-clause |
etkirsch/scikit-learn | sklearn/utils/random.py | 234 | 10510 | # Author: Hamzeh Alsalhi <ha258@cornell.edu>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
    """
    choice(a, size=None, replace=True, p=None)

    Generates a random sample from a given 1-D array

    .. versionadded:: 1.7.0

    Parameters
    -----------
    a : 1-D array-like or int
        If an ndarray, a random sample is generated from its elements.
        If an int, the random sample is generated as if a was np.arange(n)

    size : int or tuple of ints, optional
        Output shape. Default is None, in which case a single value is
        returned.

    replace : boolean, optional
        Whether the sample is with or without replacement.

    p : 1-D array-like, optional
        The probabilities associated with each entry in a.
        If not given the sample assumes a uniform distribution over all
        entries in a.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    --------
    samples : 1-D ndarray, shape (size,)
        The generated random samples

    Raises
    -------
    ValueError
        If a is an int and less than zero, if a or p are not 1-dimensional,
        if a is an array-like of size 0, if p is not a vector of
        probabilities, if a and p have different lengths, or if
        replace=False and the sample size is greater than the population
        size

    See Also
    ---------
    randint, shuffle, permutation

    Examples
    ---------
    Generate a uniform random sample from np.arange(5) of size 3:

    >>> np.random.choice(5, 3)  # doctest: +SKIP
    array([0, 3, 4])
    >>> # This is equivalent to np.random.randint(0,5,3)

    Generate a non-uniform random sample from np.arange(5) of size 3:

    >>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0])  # doctest: +SKIP
    array([3, 3, 0])

    Generate a uniform random sample from np.arange(5) of size 3 without
    replacement:

    >>> np.random.choice(5, 3, replace=False)  # doctest: +SKIP
    array([3,1,0])
    >>> # This is equivalent to np.random.shuffle(np.arange(5))[:3]

    Generate a non-uniform random sample from np.arange(5) of size
    3 without replacement:

    >>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
    ... # doctest: +SKIP
    array([2, 3, 0])

    Any of the above can be repeated with an arbitrary array-like
    instead of just integers. For instance:

    >>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
    >>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
    ... # doctest: +SKIP
    array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
          dtype='|S11')
    """
    random_state = check_random_state(random_state)

    # Format and Verify input
    a = np.array(a, copy=False)
    if a.ndim == 0:
        try:
            # __index__ must return an integer by python rules.
            pop_size = operator.index(a.item())
        except TypeError:
            raise ValueError("a must be 1-dimensional or an integer")
        if pop_size <= 0:
            raise ValueError("a must be greater than 0")
    elif a.ndim != 1:
        raise ValueError("a must be 1-dimensional")
    else:
        pop_size = a.shape[0]
        # Compare with ==, not `is`: identity checks against int literals are
        # a CPython implementation detail and a SyntaxWarning on Python 3.8+.
        if pop_size == 0:
            raise ValueError("a must be non-empty")

    if p is not None:
        p = np.array(p, dtype=np.double, ndmin=1, copy=False)
        if p.ndim != 1:
            raise ValueError("p must be 1-dimensional")
        if p.size != pop_size:
            raise ValueError("a and p must have same size")
        if np.any(p < 0):
            raise ValueError("probabilities are not non-negative")
        if not np.allclose(p.sum(), 1):
            raise ValueError("probabilities do not sum to 1")

    shape = size
    if shape is not None:
        size = np.prod(shape, dtype=np.intp)
    else:
        size = 1

    # Actual sampling
    if replace:
        if p is not None:
            # Inverse-CDF sampling: draw uniforms and bisect into the
            # (renormalized) cumulative distribution.
            cdf = p.cumsum()
            cdf /= cdf[-1]
            uniform_samples = random_state.random_sample(shape)
            idx = cdf.searchsorted(uniform_samples, side='right')
            # searchsorted returns a scalar
            idx = np.array(idx, copy=False)
        else:
            idx = random_state.randint(0, pop_size, size=shape)
    else:
        if size > pop_size:
            raise ValueError("Cannot take a larger sample than "
                             "population when 'replace=False'")

        if p is not None:
            if np.sum(p > 0) < size:
                raise ValueError("Fewer non-zero entries in p than size")
            n_uniq = 0
            p = p.copy()
            # `np.int` was removed from numpy (1.24); the builtin is identical.
            found = np.zeros(shape, dtype=int)
            flat_found = found.ravel()
            # Rejection-free loop: zero out already-drawn entries and keep
            # sampling until `size` unique indices have been collected.
            while n_uniq < size:
                x = random_state.rand(size - n_uniq)
                if n_uniq > 0:
                    p[flat_found[0:n_uniq]] = 0
                cdf = np.cumsum(p)
                cdf /= cdf[-1]
                new = cdf.searchsorted(x, side='right')
                _, unique_indices = np.unique(new, return_index=True)
                unique_indices.sort()
                new = new.take(unique_indices)
                flat_found[n_uniq:n_uniq + new.size] = new
                n_uniq += new.size
            idx = found
        else:
            idx = random_state.permutation(pop_size)[:size]
        if shape is not None:
            idx.shape = shape

    if shape is None and isinstance(idx, np.ndarray):
        # In most cases a scalar will have been made an array
        idx = idx.item(0)

    # Use samples as indices for a if a is array-like
    if a.ndim == 0:
        return idx

    if shape is not None and idx.ndim == 0:
        # If size == () then the user requested a 0-d array as opposed to
        # a scalar object when size is None. However a[idx] is always a
        # scalar and not an array. So this makes sure the result is an
        # array, taking into account that np.array(item) may not work
        # for object arrays.
        res = np.empty((), dtype=a.dtype)
        res[()] = a[idx]
        return res

    return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
                      random_state=None):
    """Generate a sparse random matrix given column class distributions

    Parameters
    ----------
    n_samples : int,
        Number of samples to draw in each column.

    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.

    class_probability : list of size n_outputs of arrays of size (n_classes,)
        Optional (default=None). Class distribution of each column. If None the
        uniform distribution is assumed.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    random_matrix : sparse csc matrix of size (n_samples, n_outputs)
    """
    data = array.array('i')
    indices = array.array('i')
    indptr = array.array('i', [0])

    # The RNG is loop-invariant: create it once up front instead of
    # rebuilding it on every column iteration as before.
    rng = check_random_state(random_state)

    for j in range(len(classes)):
        classes[j] = np.asarray(classes[j])
        if classes[j].dtype.kind != 'i':
            raise ValueError("class dtype %s is not supported" %
                             classes[j].dtype)
        classes[j] = astype(classes[j], np.int64, copy=False)

        # use uniform distribution if no class_probability is given
        if class_probability is None:
            class_prob_j = np.empty(shape=classes[j].shape[0])
            class_prob_j.fill(1 / classes[j].shape[0])
        else:
            class_prob_j = np.asarray(class_probability[j])

        # Compare within floating-point tolerance: an exact `!= 1.0` test
        # rejects valid distributions whose sum is 1 only up to rounding.
        if not np.isclose(np.sum(class_prob_j), 1.0):
            raise ValueError("Probability array at index {0} does not sum to "
                             "one".format(j))

        if class_prob_j.shape[0] != classes[j].shape[0]:
            raise ValueError("classes[{0}] (length {1}) and "
                             "class_probability[{0}] (length {2}) have "
                             "different length.".format(j,
                                                       classes[j].shape[0],
                                                       class_prob_j.shape[0]))

        # If 0 is not present in the classes insert it with a probability 0.0
        if 0 not in classes[j]:
            classes[j] = np.insert(classes[j], 0, 0)
            class_prob_j = np.insert(class_prob_j, 0, 0.0)

        # If there are nonzero classes choose randomly using class_probability
        if classes[j].shape[0] > 1:
            # Expected fraction of nonzero entries in this column.
            p_nonzero = 1 - class_prob_j[classes[j] == 0]
            nnz = int(n_samples * p_nonzero)
            ind_sample = sample_without_replacement(n_population=n_samples,
                                                   n_samples=nnz,
                                                   random_state=random_state)
            indices.extend(ind_sample)

            # Normalize probabilities for the nonzero elements
            classes_j_nonzero = classes[j] != 0
            class_probability_nz = class_prob_j[classes_j_nonzero]
            class_probability_nz_norm = (class_probability_nz /
                                         np.sum(class_probability_nz))
            classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
                                          rng.rand(nnz))
            data.extend(classes[j][classes_j_nonzero][classes_ind])
        indptr.append(len(indices))

    return sp.csc_matrix((data, indices, indptr),
                         (n_samples, len(classes)),
                         dtype=int)
| bsd-3-clause |
rahul003/mxnet | example/gluon/dcgan.py | 7 | 8812 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
import argparse
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import autograd
import numpy as np
import logging
from datetime import datetime
import os
import time
def fill_buf(buf, i, img, shape):
    """Copy `img` into grid cell `i` of the mosaic canvas `buf`.

    `buf` is an (H, W, C) array partitioned into equal cells; `shape[0]` is
    the cell extent along x (width) and `shape[1]` along y (height).  Cells
    are filled row-major.  Mutates `buf` in place and returns None.
    """
    # Number of cells per canvas row (the original also computed an unused
    # per-column count).
    m = buf.shape[1] // shape[0]
    sx = (i % m) * shape[0]
    sy = (i // m) * shape[1]
    buf[sy:sy + shape[1], sx:sx + shape[0], :] = img
    return None
def visual(title, X, name):
    """Tile a batch of NCHW images into one mosaic and save it via matplotlib.

    The batch is rescaled to the full uint8 range, laid out on a roughly
    square grid, channel-reversed, and written to the file `name`.
    """
    assert len(X.shape) == 4
    imgs = X.transpose((0, 2, 3, 1))
    lo, hi = np.min(imgs), np.max(imgs)
    imgs = np.clip((imgs - lo) * (255.0 / (hi - lo)), 0, 255).astype(np.uint8)
    side = np.ceil(np.sqrt(imgs.shape[0]))
    canvas = np.zeros((int(side * imgs.shape[1]),
                       int(side * imgs.shape[2]),
                       int(imgs.shape[3])), dtype=np.uint8)
    for idx, tile in enumerate(imgs):
        fill_buf(canvas, idx, tile, imgs.shape[1:3])
    # Reverse the channel order (same flip the original applied).
    canvas = canvas[:, :, ::-1]
    plt.imshow(canvas)
    plt.title(title)
    plt.savefig(name)
# Command-line configuration for the DCGAN example.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='cifar10', help='dataset to use. options are cifar10 and imagenet.')
parser.add_argument('--batch-size', type=int, default=64, help='input batch size')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--nepoch', type=int, default=25, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--outf', default='./results', help='folder to output images and model checkpoints')
# NOTE(review): any non-empty string passed to --check-point is truthy once
# run through bool() below, so the flag cannot actually be disabled from the
# CLI — confirm intended behavior.
parser.add_argument('--check-point', default=True, help="save results at each epoch or not")
opt = parser.parse_args()
print(opt)

logging.basicConfig(level=logging.DEBUG)

ngpu = int(opt.ngpu)
nz = int(opt.nz)    # latent vector length
ngf = int(opt.ngf)  # generator feature-map base width
ndf = int(opt.ndf)  # discriminator feature-map base width
nc = 3              # generated image channels

# Select the compute context; a single GPU when --cuda is given.
if opt.cuda:
    ctx = mx.gpu(0)
else:
    ctx = mx.cpu()
check_point = bool(opt.check_point)
outf = opt.outf

if not os.path.exists(outf):
    os.makedirs(outf)
def transformer(data, label):
    """Preprocess one dataset image into the (3, 64, 64), [-1, 1] tensor the
    networks expect; the label passes through unchanged."""
    # resize to 64x64
    data = mx.image.imresize(data, 64, 64)
    # transpose from (64, 64, 3) to (3, 64, 64)
    data = mx.nd.transpose(data, (2,0,1))
    # normalize to [-1, 1]
    data = data.astype(np.float32)/128 - 1
    # if image is greyscale, repeat 3 times to get RGB image.
    if data.shape[0] == 1:
        data = mx.nd.tile(data, (3, 1, 1))
    return data, label
# NOTE(review): despite the --dataset flag advertising cifar10/imagenet, the
# loaders below are hard-wired to MNIST — confirm which is intended.
# `last_batch='discard'` keeps every batch exactly opt.batch_size, which the
# fixed-size label vectors and reshapes later rely on.
train_data = gluon.data.DataLoader(
    gluon.data.vision.MNIST('./data', train=True, transform=transformer),
    batch_size=opt.batch_size, shuffle=True, last_batch='discard')
val_data = gluon.data.DataLoader(
    gluon.data.vision.MNIST('./data', train=False, transform=transformer),
    batch_size=opt.batch_size, shuffle=False)
# build the generator: stride-2 transposed convolutions that upsample a
# (nz, 1, 1) latent vector to a (nc, 64, 64) image.
netG = nn.Sequential()
with netG.name_scope():
    # input is Z, going into a convolution
    netG.add(nn.Conv2DTranspose(ngf * 8, 4, 1, 0, use_bias=False))
    netG.add(nn.BatchNorm())
    netG.add(nn.Activation('relu'))
    # state size. (ngf*8) x 4 x 4
    netG.add(nn.Conv2DTranspose(ngf * 4, 4, 2, 1, use_bias=False))
    netG.add(nn.BatchNorm())
    netG.add(nn.Activation('relu'))
    # state size. (ngf*4) x 8 x 8
    netG.add(nn.Conv2DTranspose(ngf * 2, 4, 2, 1, use_bias=False))
    netG.add(nn.BatchNorm())
    netG.add(nn.Activation('relu'))
    # state size. (ngf*2) x 16 x 16
    netG.add(nn.Conv2DTranspose(ngf, 4, 2, 1, use_bias=False))
    netG.add(nn.BatchNorm())
    netG.add(nn.Activation('relu'))
    # state size. (ngf) x 32 x 32
    netG.add(nn.Conv2DTranspose(nc, 4, 2, 1, use_bias=False))
    netG.add(nn.Activation('tanh'))
    # state size. (nc) x 64 x 64

# build the discriminator: mirror of the generator, ending in a 2-way
# real/fake head (trained with softmax cross-entropy below, not sigmoid).
netD = nn.Sequential()
with netD.name_scope():
    # input is (nc) x 64 x 64
    netD.add(nn.Conv2D(ndf, 4, 2, 1, use_bias=False))
    netD.add(nn.LeakyReLU(0.2))
    # state size. (ndf) x 32 x 32
    netD.add(nn.Conv2D(ndf * 2, 4, 2, 1, use_bias=False))
    netD.add(nn.BatchNorm())
    netD.add(nn.LeakyReLU(0.2))
    # state size. (ndf*2) x 16 x 16
    netD.add(nn.Conv2D(ndf * 4, 4, 2, 1, use_bias=False))
    netD.add(nn.BatchNorm())
    netD.add(nn.LeakyReLU(0.2))
    # state size. (ndf*4) x 8 x 8
    netD.add(nn.Conv2D(ndf * 8, 4, 2, 1, use_bias=False))
    netD.add(nn.BatchNorm())
    netD.add(nn.LeakyReLU(0.2))
    # state size. (ndf*8) x 4 x 4
    netD.add(nn.Conv2D(2, 4, 1, 0, use_bias=False))

# loss
loss = gluon.loss.SoftmaxCrossEntropyLoss()

# initialize the generator and the discriminator
netG.initialize(mx.init.Normal(0.02), ctx=ctx)
netD.initialize(mx.init.Normal(0.02), ctx=ctx)

# trainer for the generator and the discriminator
trainerG = gluon.Trainer(netG.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
trainerD = gluon.Trainer(netD.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})

# ============printing==============
# Constant label vectors for the two-class real/fake objective.
real_label = mx.nd.ones((opt.batch_size,), ctx=ctx)
fake_label = mx.nd.zeros((opt.batch_size,), ctx=ctx)

metric = mx.metric.Accuracy()
print('Training... ')
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')

# NOTE: `iter` shadows the builtin of the same name for the rest of the script.
iter = 0
for epoch in range(opt.nepoch):
    tic = time.time()
    btic = time.time()
    for data, _ in train_data:
        ############################
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
        # train with real_t
        data = data.as_in_context(ctx)
        noise = mx.nd.random.normal(0, 1, shape=(opt.batch_size, nz, 1, 1), ctx=ctx)

        with autograd.record():
            output = netD(data)
            output = output.reshape((opt.batch_size, 2))
            errD_real = loss(output, real_label)
            metric.update([real_label,], [output,])

            # train with fakes; detach() stops gradients from flowing back
            # into the generator during the discriminator update.
            fake = netG(noise)
            output = netD(fake.detach())
            output = output.reshape((opt.batch_size, 2))
            errD_fake = loss(output, fake_label)
            errD = errD_real + errD_fake
            errD.backward()
            metric.update([fake_label,], [output,])

        trainerD.step(opt.batch_size)

        ############################
        # (2) Update G network: maximize log(D(G(z)))
        ###########################
        with autograd.record():
            output = netD(fake)
            output = output.reshape((-1, 2))
            # real_label on purpose: G is rewarded when D calls its samples real.
            errG = loss(output, real_label)
            errG.backward()

        trainerG.step(opt.batch_size)

        name, acc = metric.get()
        # logging.info('speed: {} samples/s'.format(opt.batch_size / (time.time() - btic)))
        logging.info('discriminator loss = %f, generator loss = %f, binary training acc = %f at iter %d epoch %d' %(mx.nd.mean(errD).asscalar(), mx.nd.mean(errG).asscalar(), acc, iter, epoch))
        # NOTE(review): `iter % 1 == 0` is always true, so images are written
        # every iteration — presumably a larger interval was intended.
        if iter % 1 == 0:
            visual('gout', fake.asnumpy(), name=os.path.join(outf,'fake_img_iter_%d.png' %iter))
            visual('data', data.asnumpy(), name=os.path.join(outf,'real_img_iter_%d.png' %iter))

        iter = iter + 1
        btic = time.time()

    name, acc = metric.get()
    metric.reset()
    logging.info('\nbinary training acc at epoch %d: %s=%f' % (epoch, name, acc))
    logging.info('time: %f' % (time.time() - tic))
    # Optionally checkpoint both networks after every epoch.
    if check_point:
        netG.save_parameters(os.path.join(outf,'generator_epoch_%d.params' %epoch))
        netD.save_parameters(os.path.join(outf,'discriminator_epoch_%d.params' % epoch))

# Final weights after all epochs.
netG.save_parameters(os.path.join(outf, 'generator.params'))
netD.save_parameters(os.path.join(outf, 'discriminator.params'))
| apache-2.0 |
trnet4334/img_colorization | landscape_colorizer/colorization_concate_pretrained_conv_layers.py | 1 | 7895 | from keras.models import Sequential, model_from_json
from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape
from keras.layers import Merge
from keras.layers.convolutional import Convolution2D, MaxPooling2D,Conv2D
from keras.utils import np_utils
from keras.layers.normalization import BatchNormalization
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import os, shutil
import theano
from PIL import Image
from numpy import *
from sklearn.utils import shuffle
from sklearn.cross_validation import train_test_split
from skimage.color import rgb2lab
import skimage.color as color
# General parameters
img_rows, img_cols = 96, 96 # Image dimensions after resizing
bin_num = 20 # For classification : Since a and b channel contains continous value from -100 to 100, we bin them to several classes
input_channels = 1 # The paper use 3 duplicated channel as input since pre-trained network has 3 channel, but we can use 1 if we are not using VGG-16
test_img_num = 40 # Use first-n files in the data folder to test the model
lab_channels = ['l', 'a', 'b']
# Cnn model parameters
era = 1000
epoch = 3
batch_size = 16
validation_split = 0.1
# Paths
img_input_path = "./combined/"
img_output_path = "./predict_output_concate_pretrained_conv_layers/"
img_reconstructed_path = "./reconstructed_input_after_bining/"
img_channels_path = "./channels_img/"
def save_img_of_channel(img_lab, channel, name="img"):
    """Visualize a single Lab channel of `img_lab`: zero the other two
    channels, convert back to RGB and save as "<name>_<channel>.jpg" under
    `img_channels_path`.  `channel` must be 'l', 'a' or 'b'."""
    img_lab_cp = img_lab.copy()
    # Delete the rest channels by setting them to 0
    if channel == 'l':
        img_lab_cp[:,:,1:] = 0
    elif channel == 'a':
        img_lab_cp[:,:,0] = 0
        img_lab_cp[:,:,2] = 0
    elif channel == 'b':
        img_lab_cp[:,:,:2] = 0
    else:
        # Invalid channel tag: warn and save nothing (Python 2 print).
        print "[ERROR!!] The channel should be 'l', 'a' or 'b' "
        return
    img_rgb_channel = color.lab2rgb(img_lab_cp)
    im = Image.fromarray((img_rgb_channel * 255).astype(uint8))
    im.save(img_channels_path + name + "_" + channel + ".jpg", "jpeg")
def save_image_by_channels(img_lab, name):
    """Save one per-channel visualization of `img_lab` for each Lab channel.

    Delegates to save_img_of_channel, which re-derives the channel from the
    full Lab image and the channel tag; the original's per-index slice was
    an unused local, so the index loop is replaced by direct iteration.
    """
    for channel in lab_channels:
        save_img_of_channel(img_lab, channel, name=name)
def reconstruct_image_by_lab_channels(img_l, img_a, img_b):
    """Stack three (H, W) Lab planes into an (H, W, 3) image, convert it to
    RGB and return the result as a PIL image (nothing is saved here)."""
    # Transpose-stack-transpose yields shape (H, W, 3) from the three planes.
    img = array([img_l.T, img_a.T, img_b.T]).T
    img_rgb_channel = color.lab2rgb(img)
    im = Image.fromarray((img_rgb_channel * 255).astype(uint8))
    return im
def get_img_ab_binned(img_lab, bin_count=None):
    """Quantize the a*/b* channels of a Lab image into integer class bins.

    Values are mapped linearly from [-100, 100] (the range noted for a/b in
    this module's configuration) onto bin indices.  `bin_count` defaults to
    the module-level `bin_num`, so existing callers are unaffected; passing
    it explicitly generalizes the helper to other bin resolutions.

    NOTE(review): a value of exactly +100 maps to index `bin_count`, one past
    the last bin — confirm inputs stay below the upper bound.
    """
    if bin_count is None:
        bin_count = bin_num
    img_a = img_lab[:, :, 1]
    img_b = img_lab[:, :, 2]
    img_a_binned = ((img_a + 100) * bin_count) / 200
    img_b_binned = ((img_b + 100) * bin_count) / 200
    return img_a_binned.astype(int), img_b_binned.astype(int)
def get_img_ab_unbinned(img_a_binned, img_b_binned, bin_count=None):
    """Map bin indices back to Lab a*/b* values (inverse of get_img_ab_binned,
    up to the quantization error introduced by binning).

    `bin_count` defaults to the module-level `bin_num`, keeping existing
    callers unchanged while allowing other bin resolutions.
    """
    if bin_count is None:
        bin_count = bin_num
    img_a_unbinned = ((img_a_binned * 200) / bin_count) - 100.0
    img_b_unbinned = ((img_b_binned * 200) / bin_count) - 100.0
    return img_a_unbinned, img_b_unbinned
def save_input_image_after_bining(img_lab, name='img'):
    # Use this function to test how bin_num affect the original input image:
    # round-trip a/b through bin/unbin, rebuild RGB with the original L
    # channel, and save the result for visual comparison of quantization loss.
    img_a_binned, img_b_binned = get_img_ab_binned(img_lab)
    img_a_unbinned, img_b_unbinned = get_img_ab_unbinned(img_a_binned, img_b_binned)
    im = reconstruct_image_by_lab_channels(img_lab[:,:,0], img_a_unbinned, img_b_unbinned)
    im.save(img_reconstructed_path + name + "_reconstructed_after_bining.jpg", "jpeg")
def get_duplicated_l_channel(img_l, channels):
    """Stack `channels` copies of the 2-D L plane into an (H, W, channels)
    array (the pre-trained networks expect the grayscale input replicated
    across the channel axis).

    Replaces the manual xrange/append/transpose dance with a single
    np.stack call; the result is numerically identical.
    """
    return np.stack([img_l] * channels, axis=-1)
''' Start Here '''
imlist = os.listdir(img_input_path)
imlist.sort()

# ''' For playing with lab images and also testing the affect of bining '''
for i in xrange(test_img_num):
    # Save image of each channel (l, a, b)
    img_rgb = array(Image.open(img_input_path + imlist[i]).resize((img_rows,img_cols)))
    img_lab = rgb2lab(img_rgb)
    save_image_by_channels(img_lab, imlist[i])
    # Test the color distortion of input image after bining
    save_input_image_after_bining(img_lab, name = imlist[i])

''' For training and testing cnn model '''
# Build training arrays: X holds the (duplicated) L channel inputs, X_l the
# raw L channel for later reconstruction, Y the one-hot binned a/b targets.
X = [] # Traning inputs
X_l = [] # Keep the l channel to reconstruct the image from lab to rgb
Y = [] # Traning labels
count = 1;
for img in imlist:
    print "loading data .... " + str(count) + "/" +str(len(imlist))
    img_rgb = array(Image.open(img_input_path + img).resize((img_rows,img_cols)))
    img_lab = rgb2lab(img_rgb)
    img_a_binned, img_b_binned = get_img_ab_binned(img_lab)
    # Flattened a-bins followed by b-bins, one-hot encoded per pixel.
    img_y = np.append(img_a_binned.flatten(), img_b_binned.flatten())
    y = np_utils.to_categorical(img_y, bin_num)
    X.append(get_duplicated_l_channel(img_lab[:,:,0], input_channels)) # The paper use 3 duplicated l channel as network input
    X_l.append(img_lab[:,:,0])
    Y.append(y)
    count += 1
X = array(X)
Y = array(Y)
X_l = array(X_l)
print X.shape
print Y.shape

# Use the trained model to do prediction
# Load model from json file (pre-trained grayscale network whose conv layers
# are copied into the towers below).
json_file = open('model(80).json', 'r')
loaded_model_json = json_file.read()
json_file.close()
l_model = model_from_json(loaded_model_json)
l_model.load_weights("weight(80).hdf5")
l_model.summary()
# Conv-Pooling Layers
# Tower 1: three conv blocks whose weights are copied from the pre-trained
# model and frozen (trainable = False).
model1 = Sequential()
model1.add(Conv2D(24, 3, input_shape=(img_rows, img_cols, 1), name='m1_c1'))
model1.add(BatchNormalization(name='m1_n1'))
model1.add(Activation('relu', name='m1_r1'))
model1.add(Dropout(0.4, name='m1_d1'))
model1.add(MaxPooling2D(pool_size=(2, 2), name='m1_m1'))
model1.add(Conv2D(48, 3, name='m1_c2'))
model1.add(BatchNormalization(name='m1_n2'))
model1.add(Activation('relu', name='m1_r2'))
model1.add(MaxPooling2D(pool_size=(2, 2), name='m1_m2'))
model1.add(Dropout(0.4, name='m1_d2'))
model1.add(Conv2D(96, 3, name='m1_c3'))
model1.add(BatchNormalization(name='m1_n3'))
model1.add(Activation('relu', name='m1_r3'))
model1.add(MaxPooling2D(pool_size=(2, 2), name='m1_m3'))
model1.add(Dropout(0.4, name='m1_d3'))
# Copy pre-trained weights layer by layer, then freeze the tower.
for i in xrange(0, len(model1.layers), 1):
    model1.layers[i].set_weights(l_model.layers[i].get_weights())
    model1.layers[i].trainable = False
model1.add(Flatten(name='m1_f1'))

# Tower 2: a shallower (two-block) copy of the same pre-trained layers,
# likewise frozen, providing features at a coarser depth.
model2 = Sequential()
model2.add(Conv2D(24, 3, input_shape=(img_rows, img_cols, 1)))
model2.add(BatchNormalization())
model2.add(Activation('relu'))
model2.add(Dropout(0.4))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Conv2D(48, 3))
model2.add(BatchNormalization())
model2.add(Activation('relu'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Dropout(0.4))
for i in xrange(0, len(model2.layers), 1):
    model2.layers[i].set_weights(l_model.layers[i].get_weights())
    model2.layers[i].trainable = False
model2.add(Flatten())
# Merge the two frozen towers and train dense layers that predict, for each
# pixel, a softmax over bin_num classes for both the a and b channels
# (img_rows * img_cols * 2 independent classification problems).
model = Sequential()
model.add(Merge([model1, model2], mode = 'concat'))
model.add(Dense(3600, name ='dd'))
model.add(BatchNormalization(name='nn'))
model.add(Activation('relu', name='rr'))
model.add(Dense(128))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dense(img_rows * img_cols * 2 * bin_num, name='den'))
model.add(Reshape((img_rows * img_cols * 2, bin_num)))
model.add(Activation('softmax', name="act"))
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=["acc"])

# Train in `era` rounds of `epoch` epochs; every 10th era, colorize the
# held-out first `test_img_num` images and checkpoint the model.
for j in xrange(era):
    hist = model.fit([X[test_img_num:], X[test_img_num:]], Y[test_img_num:], batch_size=batch_size, nb_epoch=epoch,
              verbose=1, validation_split=validation_split, shuffle=True)
    if j % 10 == 0:
        for i in xrange(0, test_img_num):
            # Both towers receive the same L-channel input.
            xx = X[i].flatten().reshape(1, img_rows, img_cols, input_channels)
            result = model.predict_classes([xx, xx])
            print result
            # First half of the predictions are a-bins, second half b-bins.
            reshaped = result.reshape(2, img_rows, img_cols)
            a, b = get_img_ab_unbinned(reshaped[0], reshaped[1])
            im = reconstruct_image_by_lab_channels(X_l[i], a, b)
            im.save(img_output_path + imlist[i] + "_predicted_" + "era_" +str(j) + ".jpg", "jpeg")
        # Checkpoint architecture + weights.
        model_json = model.to_json()
        with open("colorize_with_pretrain.json", "w") as json_file:
            json_file.write(model_json)
        model.save_weights("colorize_with_pretrain.hdf5", overwrite=True)
mclaughlin6464/pylearn2 | pylearn2/optimization/test_batch_gradient_descent.py | 44 | 6402 | from __future__ import print_function
from pylearn2.optimization.batch_gradient_descent import BatchGradientDescent
import theano.tensor as T
from pylearn2.utils import sharedX
import numpy as np
from theano.compat.six.moves import xrange
from theano import config
from theano.printing import min_informative_str
def test_batch_gradient_descent():
    """ Verify that batch gradient descent works by checking that
    it minimizes a quadratic function f(x) = x^T A x + b^T x + c
    correctly for several sampled values of A, b, and c.
    The ground truth minimizer is x = np.linalg.solve(A,-b)"""

    n = 3

    # Symbolic placeholders for the quadratic's coefficients.
    A = T.matrix(name = 'A')
    b = T.vector(name = 'b')
    c = T.scalar(name = 'c')

    x = sharedX( np.zeros((n,)) , name = 'x')

    half = np.cast[config.floatX](0.5)

    obj = half * T.dot(T.dot(x,A),x)+T.dot(b,x)+c

    minimizer = BatchGradientDescent(
                    objective = obj,
                    params = [ x],
                    inputs = [ A, b, c])

    num_samples = 3

    rng = np.random.RandomState([1,2,3])

    for i in xrange(num_samples):
        # NOTE(review): 1.5*n is a float; rng.randn accepted float dimensions
        # only in older numpy releases — newer numpy raises TypeError here.
        A = np.cast[config.floatX](rng.randn(1.5*n,n))
        # A^T A is positive semidefinite; the small identity shift makes the
        # quadratic strictly convex so the analytic minimizer is unique.
        A = np.cast[config.floatX](np.dot(A.T,A))
        A += np.cast[config.floatX](np.identity(n) * .02)
        b = np.cast[config.floatX](rng.randn(n))
        c = np.cast[config.floatX](rng.randn())
        x.set_value(np.cast[config.floatX](rng.randn(n)))

        analytical_x = np.linalg.solve(A,-b)

        actual_obj = minimizer.minimize(A,b,c)
        actual_x = x.get_value()

        # Check that the value returned by the minimize method
        # is the objective function value at the parameters
        # chosen by the minimize method
        cur_obj = minimizer.obj(A,b,c)
        assert np.allclose(actual_obj, cur_obj)

        x.set_value(analytical_x)
        analytical_obj = minimizer.obj(A,b,c)

        # make sure the objective function is accurate to first 4 digits
        condition1 = not np.allclose(analytical_obj, actual_obj)
        condition2 = np.abs(analytical_obj-actual_obj) >= 1e-4 * \
                np.abs(analytical_obj)

        if (config.floatX == 'float64' and condition1) \
                or (config.floatX == 'float32' and condition2):
            # Failure path: dump diagnostics about where the optimizer ended
            # up, then fail loudly at the end of the block.
            print('objective function value came out wrong on sample ',i)
            print('analytical obj', analytical_obj)
            print('actual obj',actual_obj)

            """
            The following section of code was used to verify that numerical
            error can make the objective function look non-convex

            print('Checking for numerically induced non-convex behavior')
            def f(x):
                return 0.5 * np.dot(x,np.dot(A,x)) + np.dot(b,x) + c

            x.set_value(actual_x)
            minimizer._compute_grad(A,b,c)
            minimizer._normalize_grad()
            d = minimizer.param_to_grad_shared[x].get_value()

            x = actual_x.copy()
            prev = f(x)
            print(prev)
            step_size = 1e-4
            x += step_size * d
            cur = f(x)
            print(cur)
            cur_sgn = np.sign(cur-prev)
            flip_cnt = 0
            for i in xrange(10000):
                x += step_size * d
                prev = cur
                cur = f(x)
                print(cur)
                prev_sgn = cur_sgn
                cur_sgn = np.sign(cur-prev)
                if cur_sgn != prev_sgn:
                    print('flip')
                    flip_cnt += 1
                    if flip_cnt > 1:
                        print("Non-convex!")

                        from matplotlib import pyplot as plt
                        y = []
                        x = actual_x.copy()
                        for j in xrange(10000):
                            y.append(f(x))
                            x += step_size * d
                        plt.plot(y)
                        plt.show()

                        assert False
            print('None found')
            """

            # print 'actual x',actual_x
            # print 'A:'
            # print A
            # print 'b:'
            # print b
            # print 'c:'
            # print c
            # Recompute the gradient at the point the optimizer stopped and
            # compare it with the closed-form gradient of the quadratic.
            x.set_value(actual_x)
            minimizer._compute_grad(A,b,c)
            x_grad = minimizer.param_to_grad_shared[x]
            actual_grad = x_grad.get_value()
            correct_grad = 0.5 * np.dot(A,x.get_value())+ 0.5 * \
                    np.dot(A.T, x.get_value()) +b
            if not np.allclose(actual_grad, correct_grad):
                print('gradient was wrong at convergence point')
                print('actual grad: ')
                print(actual_grad)
                print('correct grad: ')
                print(correct_grad)
                print('max difference: ', end='')
                np.abs(actual_grad-correct_grad).max()
                assert False

            # Exact line search along the normalized gradient direction, to
            # show how far from optimal the returned point is.
            minimizer._normalize_grad()
            d = minimizer.param_to_grad_shared[x].get_value()
            step_len = ( np.dot(b,d) + 0.5 * np.dot(d,np.dot(A,actual_x)) \
                    + 0.5 * np.dot(actual_x,np.dot(A,d)) ) \
                    / np.dot(d, np.dot(A,d))

            g = np.dot(A,actual_x)+b
            deriv = np.dot(g,d)

            print('directional deriv at actual', deriv)
            print('optimal step_len', step_len)
            optimal_x = actual_x - d * step_len
            g = np.dot(A,optimal_x) + b
            deriv = np.dot(g,d)

            print('directional deriv at optimal: ',deriv)
            x.set_value(optimal_x)
            print('obj at optimal: ',minimizer.obj(A,b,c))
            print('eigenvalue range:')
            val, vec = np.linalg.eig(A)
            print((val.min(),val.max()))
            print('condition number: ',(val.max()/val.min()))
            # Deliberate failure so the diagnostics above surface in the
            # test runner whenever the optimizer misbehaves.
            assert False
if __name__ == '__main__':
test_batch_gradient_descent()
| bsd-3-clause |
samueljackson92/tsp-solver | results/ipython_log.py | 1 | 9972 | # IPython log file
# NOTE(review): this file is an auto-recorded IPython session (%logstart).
# Bare expressions (e.g. `elite_results` below) are REPL echoes, and several
# cells appear more than once as the session was iterated on.
get_ipython().magic(u'logstart')
get_ipython().magic(u'matplotlib inline')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from tspsolver.tsp_generator import TSPGenerator
from tspsolver.ga.simulator import Simulator
from tspsolver.tuning import GeneticAlgorithmParameterEstimation

OUTPUT_TABLE_DIR = "../report/tables/"
OUTPUT_FIG_DIR = "../report/figures/"

NUM_DATASETS = 5 # number of datasets to take the mean fitness over
NUM_POINTS = 50 # number of points to use in each dataset

# Grid search over the number of elites, with every other GA parameter fixed.
tuner = GeneticAlgorithmParameterEstimation(NUM_DATASETS, NUM_POINTS)
params = {
    "num_epochs": [1000],
    "num_elites": [0, 1, 2],
    "generator": ["SimplePopulationGenerator"],
    "generator_population_size": [40],
    "selector": ["TournamentSelection"],
    "selector_tournament_size": [10],
    "crossover": ["OrderCrossover"],
    "crossover_pcross": [0.9],
    "mutator": ["InversionMutation"],
    "mutator_pmutate": [0.2]
}
elite_results = tuner.perform_grid_search(params)
elite_results
gen = TSPGenerator(NUM_POINTS)
data = gen.generate()
all_fitness = []
for i, row in elite.iterrows():
params = row.to_dict()
sim = Simulator(**params)
sim.evolve(data)
all_fitness.append(sim.get_min_fitness()[::10])
df = pd.DataFrame(np.array(all_fitness))
gen = TSPGenerator(NUM_POINTS)
data = gen.generate()
all_fitness = []
for i, row in elite_results.iterrows():
params = row.to_dict()
sim = Simulator(**params)
sim.evolve(data)
all_fitness.append(sim.get_min_fitness()[::10])
df = pd.DataFrame(np.array(all_fitness))
df
df.T.plot()
ax = df.T.plot()
ax.set_xlabel("Epoch")
ax.set_ylabel("Fitness")
ax = df.T.plot()
ax.set_xlabel("Epoch")
ax.set_ylabel("Fitness")
plt.savefig(OUTPUT_FIG_DIR + 'elite_convergence.png', bbox_inches='tight')
elite_results[['num_elites', 'fitness']].to_latex(OUTPUT_TABLE_DIR + "selection_vs_pop_size2.tex")
elite_results[['num_elites', 'fitness']].to_latex(OUTPUT_TABLE_DIR + "elite_fitness.tex")
params = {
"num_epochs": [1000],
"num_elites": [0, 1, 2],
"generator": ["SimplePopulationGenerator"],
"generator_population_size": [40],
"selector": ["TournamentSelection"],
"selector_tournament_size": [10],
"crossover": ["OrderCrossover"],
"crossover_pcross": [0.9],
"mutator": ["InversionMutation"],
"mutator_pmutate": [0.2]
}
elite_results = tuner.perform_grid_search(params)
elite_results
elite_results[['num_elites', 'fitness']].to_latex(OUTPUT_TABLE_DIR + "elite_fitness.tex")
params = {
"num_epochs": [1000],
"num_elites": [1],
"generator": ["SimplePopulationGenerator", "KNNPopulationGenerator"],
"generator_population_size": [40],
"selector": ["TournamentSelection"],
"selector_tournament_size": [10],
"crossover": ["OrderCrossover"],
"crossover_pcross": [0.9],
"mutator": ["InversionMutation"],
"mutator_pmutate": [0.2]
}
elite_results = tuner.perform_grid_search(params)
knn_results = tuner.perform_grid_search(params)
knn_results
params = {
"num_epochs": [1000],
"num_elites": [1],
"generator": ["SimplePopulationGenerator", "KNNPopulationGenerator"],
"generator_random_proportion": [0.3, 0.5, 0.6],
"generator_population_size": [40],
"selector": ["TournamentSelection"],
"selector_tournament_size": [10],
"crossover": ["OrderCrossover"],
"crossover_pcross": [0.9],
"mutator": ["InversionMutation"],
"mutator_pmutate": [0.2]
}
knn_results = tuner.perform_grid_search(params)
knn_results
knn_results.loc[0]
knn_results.iloc[0]
knn_results.iloc[[0,3,4,5]]
t = knn_results.iloc[[0,3,4,5]]
t = knn_results.iloc[[0,3,4,5]].T
t = knn_results.iloc[[0,3,4,5]].T
t = knn_results.iloc[[0,3,4,5]]
t = knn_results.iloc[[0,3,4,5]]
t
t = knn_results.iloc[[0,3,4,5]]
t[['generator', 'generator_radnom_proportion', 'fitness']]
t = knn_results.iloc[[0,3,4,5]]
t[['generator', 'generator_random_proportion', 'fitness']]
t = knn_results.iloc[[0,3,4,5]]
t[['generator', 'generator_random_proportion', 'fitness']]
t.iloc[0]['generator_random_proportion'] = np.NaN
t = knn_results.iloc[[0,3,4,5]]
t[['generator', 'generator_random_proportion', 'fitness']]
t = .iloc[0]['generator_random_proportion'] = np.NaN
t
t = knn_results.iloc[[0,3,4,5]]
t[['generator', 'generator_random_proportion', 'fitness']]
t = t.iloc[0]['generator_random_proportion'] = np.NaN
t
t = knn_results.iloc[[0,3,4,5]]
t[['generator', 'generator_random_proportion', 'fitness']]
t.iloc[0] = t.iloc[0]['generator_random_proportion'] = np.NaN
t
t = knn_results.iloc[[0,3,4,5]]
t[['generator', 'generator_random_proportion', 'fitness']]
t.iloc[0]['generator_random_proportion'] = np.NaN
t
t = knn_results.iloc[[0,3,4,5]]
t.iloc[0]['generator_random_proportion'] = np.NaN
t[['generator', 'generator_random_proportion', 'fitness']]
t = knn_results.iloc[[0,3,4,5]]
t.iloc[0]['generator_random_proportion'] = np.NaN
t[['generator', 'generator_random_proportion', 'fitness']]
t = knn_results.iloc[[0,3,4,5]]
t.iloc[0]['generator_random_proportion'] = np.NaN
t[['generator', 'generator_random_proportion', 'fitness']]
t = knn_results.iloc[[0,3,4,5]]
t['generator_random_proportion', :0] = np.NaN
t[['generator', 'generator_random_proportion', 'fitness']]
t = knn_results.iloc[[0,3,4,5]]
t[0, 'generator_random_proportion'] = np.NaN
t[['generator', 'generator_random_proportion', 'fitness']]
t = knn_results.iloc[[0,3,4,5]]
t[0, 2] = np.NaN
t[['generator', 'generator_random_proportion', 'fitness']]
t = knn_results.iloc[[0,3,4,5]]
t[0, 1] = np.NaN
t[['generator', 'generator_random_proportion', 'fitness']]
t = knn_results.iloc[[0,3,4,5]]
t['generator_random_proportion'][0] = np.NaN
t[['generator', 'generator_random_proportion', 'fitness']]
t = knn_results.iloc[[0,3,4,5]]
t['generator_random_proportion'][0] = np.NaN
t[['generator', 'generator_random_proportion', 'fitness']]
t = knn_results.iloc[[0,3,4,5]]
t['generator_random_proportion'][0] = np.NaN
t.index = np.arange(4)
t[['generator', 'generator_random_proportion', 'fitness']]
t[['generator', 'generator_random_proportion', 'fitness']].to_latex(OUTPUT_TABLE_DIR + "knn_fitness.tex")
gen = TSPGenerator(NUM_POINTS)
data = gen.generate()
all_fitness = []
for i, row in knn_results.iterrows():
params = row.to_dict()
sim = Simulator(**params)
sim.evolve(data)
all_fitness.append(sim.get_min_fitness()[::10])
df = pd.DataFrame(np.array(all_fitness))
ax = df.T.plot()
ax.set_xlabel("Epoch")
ax.set_ylabel("Fitness")
# plt.savefig(OUTPUT_FIG_DIR + 'elite_convergence.png', bbox_inches='tight')
gen = TSPGenerator(NUM_POINTS)
data = gen.generate()
p = knn_results.iloc[[0,3,4,5]]
all_fitness = []
for i, row in p.iterrows():
params = row.to_dict()
sim = Simulator(**params)
sim.evolve(data)
all_fitness.append(sim.get_min_fitness()[::10])
df = pd.DataFrame(np.array(all_fitness))
ax = df.T.plot()
ax.set_xlabel("Epoch")
ax.set_ylabel("Fitness")
# plt.savefig(OUTPUT_FIG_DIR + 'elite_convergence.png', bbox_inches='tight')
params = {
"num_epochs": [1000],
"num_elites": [1],
"generator": ["SimplePopulationGenerator", "KNNPopulationGenerator"],
"generator_random_proportion": [0.3, 0.5, 0.6],
"generator_population_size": [40],
"selector": ["TournamentSelection"],
"selector_tournament_size": [10],
"crossover": ["OrderCrossover"],
"crossover_pcross": [0.9],
"mutator": ["InversionMutation"],
"mutator_pmutate": [0.2]
}
NUM_DATASETS = 5 # number of datasets to take the mean fitness over
NUM_POINTS = 100 # number of points to use in each dataset
tuner = GeneticAlgorithmParameterEstimation(NUM_DATASETS, NUM_POINTS)
knn_results = tuner.perform_grid_search(params)
t = knn_results.iloc[[0,3,4,5]]
t['generator_random_proportion'][0] = np.NaN
t.index = np.arange(4)
t[['generator', 'generator_random_proportion', 'fitness']]
t[['generator', 'generator_random_proportion', 'fitness']].to_latex(OUTPUT_TABLE_DIR + "knn_fitness.tex")
gen = TSPGenerator(NUM_POINTS)
data = gen.generate()
p = knn_results.iloc[[0,3,4,5]]
all_fitness = []
for i, row in p.iterrows():
params = row.to_dict()
sim = Simulator(**params)
sim.evolve(data)
all_fitness.append(sim.get_min_fitness()[::10])
df = pd.DataFrame(np.array(all_fitness))
ax = df.T.plot()
ax.set_xlabel("Epoch")
ax.set_ylabel("Fitness")
plt.savefig(OUTPUT_FIG_DIR + 'knn_convergence.png', bbox_inches='tight')
df
ax = df.T.plot()
ax.set_xlabel("Epoch")
ax.set_ylabel("Fitness")
plt.savefig(OUTPUT_FIG_DIR + 'knn_convergence.png', bbox_inches='tight')
df
t
df['generator-name'] = selection_results[['generator', 'generator_random_proportion']].apply(lambda x: '-'.join([x[0], str(x[1])]), axis=1)
t['generator-name'] = t[['generator', 'generator_random_proportion']].apply(lambda x: '-'.join([x[0], str(x[1])]), axis=1)
t['generator-name'] = t[['generator', 'generator_random_proportion']].apply(lambda x: '-'.join([x[0], str(x[1])]), axis=1)
t
t['generator-name'] = t[['generator', 'generator_random_proportion']].apply(lambda x: '-'.join([x[0], str(x[1])), axis=1)
t
t['generator-name'] = t[['generator', 'generator_random_proportion']].apply(lambda x: '-'.join([x[0], str(x[1])]), axis=1)
t['generator-name'] = t[['generator', 'generator_random_proportion']].apply(lambda x: '-'.join([x[0], str(x[1])]), axis=1)
t
t['generator-name'] = t[['generator', 'generator_random_proportion']].apply(lambda x: '-'.join([x[0], str(x[1])]), axis=1)
t['generator-name'][0] = t['generator']
t['generator-name'] = t[['generator', 'generator_random_proportion']].apply(lambda x: '-'.join([x[0], str(x[1])]), axis=1)
t['generator-name'][0] = t['generator']
t
t['generator-name'] = t[['generator', 'generator_random_proportion']].apply(lambda x: '-'.join([x[0], str(x[1])]), axis=1)
t['generator-name'][0] = t['generator'][0]
t
df.index = t['generator-name']
ax = df.T.plot()
ax.set_xlabel("Epoch")
ax.set_ylabel("Fitness")
plt.savefig(OUTPUT_FIG_DIR + 'knn_convergence.png', bbox_inches='tight')
| mit |
perryjohnson/biplaneblade | biplane_blade_lib/layer_plane_angles_stn18.py | 1 | 9219 | """Determine the layer plane angle of all the elements in a grid.
Author: Perry Roth-Johnson
Last modified: April 30, 2014
Usage:
1. Look through the mesh_stnXX.abq file and find all the element set names.
(Find all the lines that start with "*ELSET".)
2. Enter each of the element set names in one of the four lists below:
(1) list_of_LE_elementsets
(2) list_of_TE_elementsets
(3) list_of_lower_elementsets
(4) list_of_upper_elementsets
3. Run this script. Visually inspect the plot to make sure each of the element
sets are in the correct list. (The blue edge should be facing outward,
relative to the airfoil centerpoint. The magenta edge should be facing
inward.) If you find an element that is oriented incorrectly, note the
   element number, and look up its element set name from the printout in the
IPython terminal. Then, move that element set name to a different list in
this script.
4. Repeat step 3 until your visual inspection suggests that all the edges (and
layer plane angles) are being assigned correctly.
References:
http://stackoverflow.com/questions/3365171/calculating-the-angle-between-two-lines-without-having-to-calculate-the-slope/3366569#3366569
http://stackoverflow.com/questions/19295725/angle-less-than-180-between-two-segments-lines
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import lib.grid as gr
reload(gr)
import lib.abaqus_utils2 as au
reload(au)
import lib.vabs_utils as vu
reload(vu)
import lib.blade as bl
from shapely.geometry import Polygon, LineString
from descartes import PolygonPatch
# -----------------------------------------------
# update these parameters!
station_num = 18
skip_num = 25 # plot every 'skip_num' elements (larger values plot faster)
IS4_resin_u2_tri_elem_num = 2896 # num of tri elem in int surf 3 resin upper 2
IS4_resin_l2_tri_elem_num = 2883 # num of tri elem in int surf 3 resin lower 2
TE_reinf_foam_u3_tri_elem_num = 419 # num of tri elem in TE reinf foam upper 3
TE_reinf_foam_l3_tri_elem_num = 395 # num of tri elem in TE reinf foam lower 3
# -----------------------------------------------
stn_str = 'stn{0:02d}'.format(station_num)
plt.close('all')
# load the biplane blade
b1 = bl.BiplaneBlade(
'biplane blade, flapwise symmetric, no stagger, rj/R=0.452, g/c=1.25',
'biplane_blade')
# pre-process the station dimensions
station = b1.list_of_stations[station_num-1]
st = station.structure
af = station.airfoil
af.create_polygon()
st.create_all_layers()
st.save_all_layer_edges()
st.write_all_part_polygons()
x3_off = af.lower_chord * af.gap_to_chord_ratio * af.gap_fraction
# plot the parts
station.plot_parts_offset(airfoil_to_plot='lower', x3_offset=x3_off)
# create a figure
ax = plt.gcf().gca()
# element sets on the leading edge
# outer_edge_node_nums=[1,4], inner_edge_node_nums=[2,3]
list_of_LE_elementsets = [
'lepanel',
'rbtrile',
'esgelle',
'estrile',
'is1rsle',
'is1trile',
'sw1biaxl',
'sw1foam',
'sw1biaxr',
'is1rlsw1',
'is1tlsw1',
'is2rrsw1',
'is2trsw1'
]
# element sets on the trailing edge
# outer_edge_node_nums=[3,2], inner_edge_node_nums=[4,1]
list_of_TE_elementsets = [
'is3reste',
'is3trite',
'sw2biaxl',
'sw2foam',
'sw2biaxr',
'sw3biaxl',
'sw3foam',
'sw3biaxr',
'is2rlsw2',
'is2tlsw2',
'is3rrsw2',
'is3trsw2',
'is3rlsw3',
'is3tlsw3',
'is4rrsw3',
'is4trsw3'
]
# element sets on the lower surface
# outer_edge_node_nums=[2,1], inner_edge_node_nums=[3,4]
list_of_lower_elementsets = [
'tefoaml1',
'tefoaml2',
'tefoaml3',
'teunil1',
'teunil2',
'teunil3',
'teunil4',
'ap1lower',
'ap2lower',
'esgllap1',
'estrlap1',
'rbtrlap1',
'is3rlap1',
'is3tlap1',
'esgllap2',
'estrlap2',
'is4rlap2',
'is4tlap2',
'sclower',
'rbtriscl',
'rbtrtel1',
'rbtrtel2',
'rbtrtel3',
'rbtrtel4',
'estrtel1',
'estrtel2',
'estrtel3',
'estrtel4',
'esgltel1',
'esgltel2',
'esgltel3',
'esgltel4',
'esgelscl',
'estriscl',
'is2rsscl',
'is2trscl',
'is4rtel1',
'is4rtel2',
'is4ttel1',
'is4ttel2',
'rbtrbsw1',
'esglbsw1',
'estrbsw1',
'esglbsw2',
'estrbsw2',
'rbtrbsw2',
'estrbsw3',
'esglbsw3'
]
# element sets on the upper surface
# outer_edge_node_nums=[4,3], inner_edge_node_nums=[1,2]
list_of_upper_elementsets = [
'tefoamu1',
'tefoamu2',
'tefoamu3',
'teuniu1',
'teuniu2',
'teuniu3',
'teuniu4',
'ap1upper',
'ap2upper',
'esgluap1',
'estruap1',
'rbtruap1',
'is3ruap1',
'is3tuap1',
'esgluap2',
'estruap2',
'is4ruap2',
'is4tuap2',
'is4rteu1',
'is4rteu2',
'is4tteu1',
'is4tteu2',
'esglasw1',
'estrasw1',
'rbtrasw1',
'rbtrteu1',
'rbtrteu2',
'rbtrteu3',
'rbtrteu4',
'estrteu1',
'estrteu2',
'estrteu3',
'estrteu4',
'esglteu1',
'esglteu2',
'esglteu3',
'esglteu4',
'esglasw2',
'estrasw2',
'rbtrasw2',
'esglasw3',
'estrasw3',
'scupper',
'rbtriscu',
'estriscu',
'is2rsscu',
'esgelscu',
'is2trscu'
]
# element sets of triangular elements on the lower surface
# outer_edge_node_nums=[2,1]
list_of_tri_lower_elementsets = [
'is4rtel2_tri',
'tefoaml3_tri'
]
# element sets of triangular elements on the upper surface
# outer_edge_node_nums=[3,2]
list_of_tri_upper_elementsets = [
'is4rteu2_tri',
'tefoamu3_tri'
]
# import the initial grid object
fmt_grid = 'biplane_blade/' + stn_str + '/mesh_' + stn_str + '.abq'
g = au.AbaqusGrid(fmt_grid, debug_flag=True)
# manually assign two triangular elements into new element sets
g.list_of_elements[IS4_resin_u2_tri_elem_num-1].element_set = 'is4rteu2_tri'
g.list_of_elements[IS4_resin_l2_tri_elem_num-1].element_set = 'is4rtel2_tri'
g.list_of_elements[TE_reinf_foam_u3_tri_elem_num-1].element_set = 'tefoamu3_tri'
g.list_of_elements[TE_reinf_foam_l3_tri_elem_num-1].element_set = 'tefoaml3_tri'
# update the grid object with all the layer plane angles
for elem in g.list_of_elements:
if elem.element_set in list_of_LE_elementsets:
elem.calculate_layer_plane_angle(outer_edge_node_nums=[1,4],
inner_edge_node_nums=[2,3])
elif elem.element_set in list_of_TE_elementsets:
elem.calculate_layer_plane_angle(outer_edge_node_nums=[3,2],
inner_edge_node_nums=[4,1])
elif elem.element_set in list_of_lower_elementsets:
elem.calculate_layer_plane_angle(outer_edge_node_nums=[2,1],
inner_edge_node_nums=[3,4])
elif elem.element_set in list_of_upper_elementsets:
elem.calculate_layer_plane_angle(outer_edge_node_nums=[4,3],
inner_edge_node_nums=[1,2])
elif elem.element_set in list_of_tri_lower_elementsets:
elem.calculate_layer_plane_angle(outer_edge_node_nums=[2,1])
elif elem.element_set in list_of_tri_upper_elementsets:
elem.calculate_layer_plane_angle(outer_edge_node_nums=[3,2])
else:
raise Warning("Element #{0} has no element set!".format(elem.elem_num))
# plot a small selection of elements to check the results
for elem in g.list_of_elements[::skip_num]:
elem.plot(label_nodes=False)
print elem.elem_num, elem.element_set, elem.theta1
g.list_of_elements[TE_reinf_foam_u3_tri_elem_num-1].plot()
g.list_of_elements[TE_reinf_foam_u3_tri_elem_num-2].plot()
g.list_of_elements[TE_reinf_foam_l3_tri_elem_num-1].plot()
g.list_of_elements[TE_reinf_foam_l3_tri_elem_num-2].plot()
g.list_of_elements[IS4_resin_u2_tri_elem_num-1].plot()
g.list_of_elements[IS4_resin_u2_tri_elem_num-2].plot()
g.list_of_elements[IS4_resin_l2_tri_elem_num-1].plot()
g.list_of_elements[IS4_resin_l2_tri_elem_num-2].plot()
# show the plot
plt.xlim([-3,5])
plt.ylim([-3,3])
ax.set_aspect('equal')
print ' ------------------------'
print ' LEGEND'
print ' magenta : inner edge'
print ' blue : outer edge'
print ' ------------------------'
plt.show()
# -----------------------------------------------------------------------------
# read layers.csv to determine the number of layers
layer_file = pd.read_csv('biplane_blade/layers.csv', index_col=0)
number_of_layers = len(layer_file)
# write the updated grid object to a VABS input file
fmt_vabs = 'biplane_blade/' + stn_str + '/mesh_' + stn_str + '.vabs'
f = vu.VabsInputFile(
vabs_filename=fmt_vabs,
grid=g,
material_filename='biplane_blade/materials.csv',
layer_filename='biplane_blade/layers.csv',
debug_flag=True,
flags={
'format' : 1,
'Timoshenko' : 1,
'recover' : 0,
'thermal' : 0,
'curve' : 0,
'oblique' : 0,
'trapeze' : 0,
'Vlasov' : 0
})
| gpl-3.0 |
JPalmerio/GRB_population_code | grbpop/basic_example.py | 1 | 1760 | from cosmology import init_cosmology
from GRB_population import create_GRB_population_from
import io_grb_pop as io
import numpy as np
import logging
import sys
log = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format='%(asctime)s.%(msecs)03d [%(levelname)s] %(message)s',
datefmt='%H:%M:%S')
logging.getLogger('matplotlib').setLevel(logging.WARNING)
# Define the paths used by the code
paths_to_dir, paths_to_files = io.generate_paths(conf_fname='config_basic_example.yml',
param_fname='parameters_simple_example.yml',
init_dir=None)
# Read the input files
config, params, instruments, samples, obs_constraints = io.read_init_files(paths_to_files)
# Code calculates which samples, instruments, and constraints to include
incl_samples, incl_instruments, incl_constraints = io.create_config(config,
samples,
instruments,
obs_constraints)
# Initialize the cosmology
cosmo = init_cosmology(paths_to_dir['cosmo'])
# Generate the GRB population
np.random.seed(0)
gp = create_GRB_population_from(Nb_GRBs=config['Nb_GRBs'],
cosmo=cosmo,
params=params,
incl_samples=incl_samples,
incl_instruments=incl_instruments,
incl_constraints=incl_constraints,
output_dir=paths_to_dir['output'])
| gpl-3.0 |
facom/AstrodynTools | tides/animation-test.py | 1 | 1276 | """
See this interesting tutorial:
http://jakevdp.github.io/blog/2012/08/18/matplotlib-animation-tutorial
More examples at:
http://matplotlib.org/1.3.1/examples/animation/index.html
"""
from util import *
import numpy as np
import matplotlib.animation as animation
from matplotlib.pyplot import *
#########################################
#INITIALIZE FIGURE
#########################################
fig=figure()
ax=axes(xlim=(0,2*pi),ylim=(-1,1))
line1,=ax.plot([],[])
line2,=ax.plot([],[])
#########################################
#INITIALIZATION FUNCTION
#########################################
def init():
    # Clear both line artists so the animation starts from an empty frame.
    # FuncAnimation calls this once before the first frame; returning the
    # artists is required because blit=True is used below.
    line1.set_data([],[])
    line2.set_data([],[])
    return line1,line2
#########################################
#ANIMATION FUNCTION
#########################################
x=linspace(0,2*pi,100)
def animate(iframe):
    # Phase-shift the sine and cosine curves by 0.1 rad per frame so the
    # waves appear to travel; `x` is the module-level linspace grid.
    y1=sin(x+0.1*iframe)
    y2=cos(x+0.1*iframe)
    line1.set_data(x,y1)
    line2.set_data(x,y2)
    # Return the updated artists for blitting.
    return line1,line2
#########################################
#PLOT
#########################################
anim=animation.FuncAnimation(fig,animate,init_func=init,frames=100,interval=100,blit=True)
#########################################
#SAVE IMAGE
#########################################
anim.save("animation.mp4",fps=20)
| gpl-2.0 |
q1ang/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 157 | 13799 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
    """Check mean_variance_axis(axis=0) against dense np.mean/np.var.

    Covers CSR and CSC inputs in both float64 and float32, and verifies
    that LIL input is rejected with a TypeError.
    """
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    X[0, 0] = 0
    X[2, 1] = 0
    X[4, 3] = 0
    X_lil = sp.lil_matrix(X)
    X_lil[1, 0] = 0
    X[1, 0] = 0
    X_csr = sp.csr_matrix(X_lil)
    X_means, X_vars = mean_variance_axis(X_csr, axis=0)
    assert_array_almost_equal(X_means, np.mean(X, axis=0))
    assert_array_almost_equal(X_vars, np.var(X, axis=0))
    X_csc = sp.csc_matrix(X_lil)
    X_means, X_vars = mean_variance_axis(X_csc, axis=0)
    assert_array_almost_equal(X_means, np.mean(X, axis=0))
    assert_array_almost_equal(X_vars, np.var(X, axis=0))
    assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
    # Repeat the same checks in single precision.
    X = X.astype(np.float32)
    X_csr = X_csr.astype(np.float32)
    # BUG FIX: this line previously read ``X_csc = X_csr.astype(np.float32)``,
    # which rebound X_csc to a CSR matrix and silently re-tested the CSR path
    # instead of the CSC one.
    X_csc = X_csc.astype(np.float32)
    X_means, X_vars = mean_variance_axis(X_csr, axis=0)
    assert_array_almost_equal(X_means, np.mean(X, axis=0))
    assert_array_almost_equal(X_vars, np.var(X, axis=0))
    X_means, X_vars = mean_variance_axis(X_csc, axis=0)
    assert_array_almost_equal(X_means, np.mean(X, axis=0))
    assert_array_almost_equal(X_vars, np.var(X, axis=0))
    assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
    """mean_variance_axis must raise ValueError for unsupported axes."""
    data, _ = make_classification(5, 4, random_state=0)
    # Introduce a few explicit zeros so the matrix is genuinely sparse.
    for row, col in ((0, 0), (2, 1), (4, 3)):
        data[row, col] = 0
    sparse_data = sp.csr_matrix(data)
    for bad_axis in (-3, 2, -1):
        assert_raises(ValueError, mean_variance_axis, sparse_data,
                      axis=bad_axis)
def test_mean_variance_axis1():
    """Check mean_variance_axis(axis=1) against dense np.mean/np.var.

    Covers CSR and CSC inputs in both float64 and float32, and verifies
    that LIL input is rejected with a TypeError.
    """
    X, _ = make_classification(5, 4, random_state=0)
    # Sparsify the array a little bit
    X[0, 0] = 0
    X[2, 1] = 0
    X[4, 3] = 0
    X_lil = sp.lil_matrix(X)
    X_lil[1, 0] = 0
    X[1, 0] = 0
    X_csr = sp.csr_matrix(X_lil)
    X_means, X_vars = mean_variance_axis(X_csr, axis=1)
    assert_array_almost_equal(X_means, np.mean(X, axis=1))
    assert_array_almost_equal(X_vars, np.var(X, axis=1))
    X_csc = sp.csc_matrix(X_lil)
    X_means, X_vars = mean_variance_axis(X_csc, axis=1)
    assert_array_almost_equal(X_means, np.mean(X, axis=1))
    assert_array_almost_equal(X_vars, np.var(X, axis=1))
    assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
    # Repeat the same checks in single precision.
    X = X.astype(np.float32)
    X_csr = X_csr.astype(np.float32)
    # BUG FIX: this line previously read ``X_csc = X_csr.astype(np.float32)``,
    # which rebound X_csc to a CSR matrix and silently re-tested the CSR path
    # instead of the CSC one.
    X_csc = X_csc.astype(np.float32)
    X_means, X_vars = mean_variance_axis(X_csr, axis=1)
    assert_array_almost_equal(X_means, np.mean(X, axis=1))
    assert_array_almost_equal(X_vars, np.var(X, axis=1))
    X_means, X_vars = mean_variance_axis(X_csc, axis=1)
    assert_array_almost_equal(X_means, np.mean(X, axis=1))
    assert_array_almost_equal(X_vars, np.var(X, axis=1))
    assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
    """assign_rows_csr must copy the selected CSR rows into a dense target."""
    dense = [[0, 3, 0],
             [2, 4, 0],
             [0, 0, 0],
             [9, 8, 7],
             [4, 0, 5]]
    X = sp.csr_matrix(dense, dtype=np.float64)
    src_rows = np.array([0, 2, 3], dtype=np.intp)
    dst_rows = np.array([1, 3, 4], dtype=np.intp)
    target = np.ones((6, X.shape[1]), dtype=np.float64)
    # Reference result: untouched rows stay all-ones, the destination rows
    # mirror the corresponding source rows of X.
    reference = np.ones_like(target)
    reference[dst_rows] = X[src_rows, :].toarray()
    assign_rows_csr(X, src_rows, dst_rows, target)
    assert_array_equal(target, reference)
def test_inplace_column_scale():
    """Check inplace_column_scale against a dense column-wise multiply.

    Covers CSR and CSC inputs in both float64 and float32, and verifies
    that LIL input is rejected with a TypeError.
    """
    rng = np.random.RandomState(0)
    X = sp.rand(100, 200, 0.05)
    Xr = X.tocsr()
    Xc = X.tocsc()
    XA = X.toarray()
    scale = rng.rand(200)
    # Dense reference: broadcasting scales each column by its factor.
    XA *= scale
    inplace_column_scale(Xc, scale)
    inplace_column_scale(Xr, scale)
    # CSR and CSC must agree with each other and with the dense reference.
    assert_array_almost_equal(Xr.toarray(), Xc.toarray())
    assert_array_almost_equal(XA, Xc.toarray())
    assert_array_almost_equal(XA, Xr.toarray())
    assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
    # Repeat the same checks in single precision.
    X = X.astype(np.float32)
    scale = scale.astype(np.float32)
    Xr = X.tocsr()
    Xc = X.tocsc()
    XA = X.toarray()
    XA *= scale
    inplace_column_scale(Xc, scale)
    inplace_column_scale(Xr, scale)
    assert_array_almost_equal(Xr.toarray(), Xc.toarray())
    assert_array_almost_equal(XA, Xc.toarray())
    assert_array_almost_equal(XA, Xr.toarray())
    assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
    """Check inplace_row_scale against a dense row-wise multiply.

    Covers CSR and CSC inputs in both float64 and float32, and verifies
    that LIL input is rejected with a TypeError.
    """
    rng = np.random.RandomState(0)
    X = sp.rand(100, 200, 0.05)
    Xr = X.tocsr()
    Xc = X.tocsc()
    XA = X.toarray()
    scale = rng.rand(100)
    # Dense reference: scale is a column vector so it multiplies rows.
    XA *= scale.reshape(-1, 1)
    inplace_row_scale(Xc, scale)
    inplace_row_scale(Xr, scale)
    assert_array_almost_equal(Xr.toarray(), Xc.toarray())
    assert_array_almost_equal(XA, Xc.toarray())
    assert_array_almost_equal(XA, Xr.toarray())
    # BUG FIX: the original asserted on ``inplace_column_scale`` here, so this
    # row-scaling test never verified that inplace_row_scale rejects LIL input.
    assert_raises(TypeError, inplace_row_scale, X.tolil(), scale)
    # Repeat the same checks in single precision.
    X = X.astype(np.float32)
    scale = scale.astype(np.float32)
    Xr = X.tocsr()
    Xc = X.tocsc()
    XA = X.toarray()
    XA *= scale.reshape(-1, 1)
    inplace_row_scale(Xc, scale)
    inplace_row_scale(Xr, scale)
    assert_array_almost_equal(Xr.toarray(), Xc.toarray())
    assert_array_almost_equal(XA, Xc.toarray())
    assert_array_almost_equal(XA, Xr.toarray())
    # BUG FIX: same copy-paste error as above — test the row-scale function.
    assert_raises(TypeError, inplace_row_scale, X.tolil(), scale)
def test_inplace_swap_row():
    """inplace_swap_row must mirror a dense (BLAS) row swap on CSR and CSC.

    Covers float64 and float32, and verifies that LIL input is rejected.
    """
    X = np.array([[0, 3, 0],
                  [2, 4, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float64)
    X_csr = sp.csr_matrix(X)
    X_csc = sp.csc_matrix(X)
    # Use the BLAS swap routine to produce the dense reference result.
    swap = linalg.get_blas_funcs(('swap',), (X,))
    swap = swap[0]
    X[0], X[-1] = swap(X[0], X[-1])
    inplace_swap_row(X_csr, 0, -1)
    inplace_swap_row(X_csc, 0, -1)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    X[2], X[3] = swap(X[2], X[3])
    inplace_swap_row(X_csr, 2, 3)
    inplace_swap_row(X_csc, 2, 3)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    # BUG FIX: the original call omitted the row indices, so the TypeError
    # came from bad arity rather than from the LIL format check.  Pass valid
    # indices so the unsupported-format check itself is exercised.
    assert_raises(TypeError, inplace_swap_row, X_csr.tolil(), 0, 1)
    # Repeat the same checks in single precision.
    X = np.array([[0, 3, 0],
                  [2, 4, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float32)
    X_csr = sp.csr_matrix(X)
    X_csc = sp.csc_matrix(X)
    swap = linalg.get_blas_funcs(('swap',), (X,))
    swap = swap[0]
    X[0], X[-1] = swap(X[0], X[-1])
    inplace_swap_row(X_csr, 0, -1)
    inplace_swap_row(X_csc, 0, -1)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    X[2], X[3] = swap(X[2], X[3])
    inplace_swap_row(X_csr, 2, 3)
    inplace_swap_row(X_csc, 2, 3)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    # BUG FIX: same vacuous assert as above — pass valid indices.
    assert_raises(TypeError, inplace_swap_row, X_csr.tolil(), 0, 1)
def test_inplace_swap_column():
    """inplace_swap_column must mirror a dense (BLAS) column swap on CSR/CSC.

    Covers float64 and float32, and verifies that LIL input is rejected.
    """
    X = np.array([[0, 3, 0],
                  [2, 4, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float64)
    X_csr = sp.csr_matrix(X)
    X_csc = sp.csc_matrix(X)
    # Use the BLAS swap routine to produce the dense reference result.
    swap = linalg.get_blas_funcs(('swap',), (X,))
    swap = swap[0]
    X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
    inplace_swap_column(X_csr, 0, -1)
    inplace_swap_column(X_csc, 0, -1)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
    inplace_swap_column(X_csr, 0, 1)
    inplace_swap_column(X_csc, 0, 1)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    # BUG FIX: the original call omitted the column indices, so the TypeError
    # came from bad arity rather than from the LIL format check.  Pass valid
    # indices so the unsupported-format check itself is exercised.
    assert_raises(TypeError, inplace_swap_column, X_csr.tolil(), 0, 1)
    # Repeat the same checks in single precision.
    X = np.array([[0, 3, 0],
                  [2, 4, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float32)
    X_csr = sp.csr_matrix(X)
    X_csc = sp.csc_matrix(X)
    swap = linalg.get_blas_funcs(('swap',), (X,))
    swap = swap[0]
    X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
    inplace_swap_column(X_csr, 0, -1)
    inplace_swap_column(X_csc, 0, -1)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
    inplace_swap_column(X_csr, 0, 1)
    inplace_swap_column(X_csc, 0, 1)
    assert_array_equal(X_csr.toarray(), X_csc.toarray())
    assert_array_equal(X, X_csc.toarray())
    assert_array_equal(X, X_csr.toarray())
    # BUG FIX: same vacuous assert as above — pass valid indices.
    assert_raises(TypeError, inplace_swap_column, X_csr.tolil(), 0, 1)
def test_min_max_axis0():
    """min_max_axis(axis=0) must match dense column-wise min/max."""
    base = np.array([[0, 3, 0],
                     [2, -1, 0],
                     [0, 0, 0],
                     [9, 8, 7],
                     [4, 0, 5]], dtype=np.float64)
    # Exercise both supported dtypes and both supported sparse formats.
    for dtype in (np.float64, np.float32):
        dense = base.astype(dtype)
        for to_sparse in (sp.csr_matrix, sp.csc_matrix):
            mins, maxs = min_max_axis(to_sparse(dense), axis=0)
            assert_array_equal(mins, dense.min(axis=0))
            assert_array_equal(maxs, dense.max(axis=0))
def test_min_max_axis1():
    """min_max_axis(axis=1) must match dense row-wise min/max."""
    base = np.array([[0, 3, 0],
                     [2, -1, 0],
                     [0, 0, 0],
                     [9, 8, 7],
                     [4, 0, 5]], dtype=np.float64)
    # Exercise both supported dtypes and both supported sparse formats.
    for dtype in (np.float64, np.float32):
        dense = base.astype(dtype)
        for to_sparse in (sp.csr_matrix, sp.csc_matrix):
            mins, maxs = min_max_axis(to_sparse(dense), axis=1)
            assert_array_equal(mins, dense.min(axis=1))
            assert_array_equal(maxs, dense.max(axis=1))
def test_min_max_axis_errors():
    """min_max_axis must reject LIL input and out-of-range axes."""
    data = np.array([[0, 3, 0],
                     [2, -1, 0],
                     [0, 0, 0],
                     [9, 8, 7],
                     [4, 0, 5]], dtype=np.float64)
    # LIL matrices are not supported at all.
    assert_raises(TypeError, min_max_axis, sp.csr_matrix(data).tolil(),
                  axis=0)
    # Axis must identify a row or column reduction.
    assert_raises(ValueError, min_max_axis, sp.csr_matrix(data), axis=2)
    assert_raises(ValueError, min_max_axis, sp.csc_matrix(data), axis=-3)
def test_count_nonzero():
    """Check count_nonzero on CSR input against dense boolean sums.

    Exercised with and without sample weights over every supported axis
    (0, 1, -1, -2 and None); CSC input and out-of-range axes must raise.
    """
    X = np.array([[0, 3, 0],
                  [2, -1, 0],
                  [0, 0, 0],
                  [9, 8, 7],
                  [4, 0, 5]], dtype=np.float64)
    X_csr = sp.csr_matrix(X)
    X_csc = sp.csc_matrix(X)
    # Dense reference: boolean mask of the non-zero entries.
    X_nonzero = X != 0
    sample_weight = [.5, .2, .3, .1, .1]
    # Weighted reference: each row's mask scaled by its sample weight.
    X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
    for axis in [0, 1, -1, -2, None]:
        assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
                                  X_nonzero.sum(axis=axis))
        assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
                                                sample_weight=sample_weight),
                                  X_nonzero_weighted.sum(axis=axis))
    # Only CSR input is accepted; CSC raises TypeError.
    assert_raises(TypeError, count_nonzero, X_csc)
    assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
    """Check csc_median_axis_0 against np.median on equivalent dense data."""
    # Test csc_row_median actually calculates the median.
    # Test that it gives the same output when X is dense.
    rng = np.random.RandomState(0)
    X = rng.rand(100, 50)
    dense_median = np.median(X, axis=0)
    csc = sp.csc_matrix(X)
    sparse_median = csc_median_axis_0(csc)
    assert_array_equal(sparse_median, dense_median)
    # Test that it gives the same output when X is sparse
    X = rng.rand(51, 100)
    X[X < 0.7] = 0.0
    # Negate a few rows so the median must also handle negative entries.
    ind = rng.randint(0, 50, 10)
    X[ind] = -X[ind]
    csc = sp.csc_matrix(X)
    dense_median = np.median(X, axis=0)
    sparse_median = csc_median_axis_0(csc)
    assert_array_equal(sparse_median, dense_median)
    # Test for toy data.
    X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
    csc = sp.csc_matrix(X)
    assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
    X = [[0, -2], [-1, -5], [1, -3]]
    csc = sp.csc_matrix(X)
    assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
    # Test that it raises an Error for non-csc matrices.
    assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
| bsd-3-clause |
RomainBrault/scikit-learn | examples/linear_model/plot_logistic.py | 73 | 1568 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic function
=========================================================
Shown in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logistic curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt

from sklearn import linear_model

# Synthetic 1-D test set: the label is the sign of a Gaussian sample,
# with positives stretched and Gaussian noise added so the classes overlap.
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)  # fixed seed so the figure is reproducible
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)  # binary labels: 1 where the raw sample was positive
X[X > 0] *= 4  # spread the positive class out along x
X += .3 * np.random.normal(size=n_samples)  # jitter both classes
X = X[:, np.newaxis]  # sklearn expects a 2-D (n_samples, n_features) array

# Fit the classifier; C=1e5 makes the L2 regularization effectively negligible.
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)

# Scatter the raw (x, label) points; X_test extends beyond the training
# range so the fitted curves are drawn over [-5, 10].
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
    """Logistic sigmoid: map a real-valued score (scalar or array) to (0, 1)."""
    neg_exp = np.exp(-x)
    return 1 / (1 + neg_exp)
# Logistic curve: sigmoid of the fitted linear score over the test grid.
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='red', linewidth=3)

# Ordinary least squares fit of the same (X, y) data, for comparison.
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')  # the 0.5 decision threshold

plt.ylabel('y')
plt.xlabel('X')
plt.xticks(range(-5, 10))
plt.yticks([0, 0.5, 1])
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.legend(('Logistic Regression Model', 'Linear Regression Model'),
           loc="lower right", fontsize='small')
plt.show()
| bsd-3-clause |
antiface/mne-python | mne/io/edf/tests/test_edf.py | 6 | 10037 | """Data Equivalence Tests"""
from __future__ import print_function
# Authors: Teon Brooks <teon.brooks@gmail.com>
# Martin Billinger <martin.billinger@tugraz.at>
# Alan Leggitt <alan.leggitt@ucsf.edu>
# Alexandre Barachant <alexandre.barachant@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import inspect
import warnings
from nose.tools import assert_equal, assert_true
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_raises, assert_allclose)
from scipy import io
import numpy as np
from mne import pick_types, concatenate_raws
from mne.externals.six import iterbytes
from mne.utils import _TempDir, run_tests_if_main, requires_pandas
from mne.io import Raw, read_raw_edf, RawArray
from mne.io.tests.test_raw import _test_concat
import mne.io.edf.edf as edfmodule
from mne.event import find_events
warnings.simplefilter('always')
FILE = inspect.getfile(inspect.currentframe())
data_dir = op.join(op.dirname(op.abspath(FILE)), 'data')
montage_path = op.join(data_dir, 'biosemi.hpts')
bdf_path = op.join(data_dir, 'test.bdf')
edf_path = op.join(data_dir, 'test.edf')
edf_uneven_path = op.join(data_dir, 'test_uneven_samp.edf')
bdf_eeglab_path = op.join(data_dir, 'test_bdf_eeglab.mat')
edf_eeglab_path = op.join(data_dir, 'test_edf_eeglab.mat')
edf_uneven_eeglab_path = op.join(data_dir, 'test_uneven_samp.mat')
edf_stim_channel_path = op.join(data_dir, 'test_edf_stim_channel.edf')
edf_txt_stim_channel_path = op.join(data_dir, 'test_edf_stim_channel.txt')
eog = ['REOG', 'LEOG', 'IEOG']
misc = ['EXG1', 'EXG5', 'EXG8', 'M1', 'M2']
def test_concat():
    """Test EDF concatenation"""
    # _test_concat exercises the generic raw-concatenation contract
    # (various preload combinations, data equality) using the EDF reader.
    _test_concat(read_raw_edf, bdf_path)
def test_bdf_data():
    """Test reading raw bdf files"""
    raw_py = read_raw_edf(bdf_path, montage=montage_path, eog=eog,
                          misc=misc, preload=True)
    assert_true('RawEDF' in repr(raw_py))
    picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
    data_py, _ = raw_py[picks]

    # Reference data: this .mat was generated using the EEG Lab Biosemi
    # Reader, so the two readers should agree on the same file.
    raw_eeglab = io.loadmat(bdf_eeglab_path)
    raw_eeglab = raw_eeglab['data'] * 1e-6  # data are stored in microvolts
    data_eeglab = raw_eeglab[picks]

    # bdf saved as a single, resolution to seven decimal points in matlab
    assert_array_almost_equal(data_py, data_eeglab, 8)

    # Manually check that float electrode coordinates were imported
    # (spot-check first/middle/last EEG channels).
    assert_true((raw_py.info['chs'][0]['eeg_loc']).any())
    assert_true((raw_py.info['chs'][25]['eeg_loc']).any())
    assert_true((raw_py.info['chs'][63]['eeg_loc']).any())

    # Make sure concatenation works: two copies -> double the samples.
    raw_concat = concatenate_raws([raw_py.copy(), raw_py])
    assert_equal(raw_concat.n_times, 2 * raw_py.n_times)
def test_edf_data():
    """Test reading raw edf files"""
    # misc=range(-4, 0) marks the last four channels as misc;
    # stim_channel=139 is the trigger channel index in this test file.
    raw_py = read_raw_edf(edf_path, misc=range(-4, 0), stim_channel=139,
                          preload=True)
    picks = pick_types(raw_py.info, meg=False, eeg=True,
                       exclude=['EDF Annotations'])
    data_py, _ = raw_py[picks]

    print(raw_py)  # to test repr
    print(raw_py.info)  # to test Info repr

    # Reference data: this .mat was generated using the EEG Lab Biosemi Reader
    raw_eeglab = io.loadmat(edf_eeglab_path)
    raw_eeglab = raw_eeglab['data'] * 1e-6  # data are stored in microvolts
    data_eeglab = raw_eeglab[picks]

    assert_array_almost_equal(data_py, data_eeglab, 10)

    # Make sure concatenation works
    raw_concat = concatenate_raws([raw_py.copy(), raw_py])
    assert_equal(raw_concat.n_times, 2 * raw_py.n_times)

    # Test a file with unevenly sampled channels
    raw_py = read_raw_edf(edf_uneven_path, stim_channel=None)
    data_py, _ = raw_py[0]
    # this .mat was generated using the EEG Lab Biosemi Reader
    raw_eeglab = io.loadmat(edf_uneven_eeglab_path)
    raw_eeglab = raw_eeglab['data']
    data_eeglab = raw_eeglab[0]
    # Match EEGLAB's upsampling by repeating each sample; the ratio is
    # assumed to be an integer for this test file.
    upsample = len(data_eeglab) / len(raw_py)
    data_py = np.repeat(data_py, repeats=upsample)
    assert_array_equal(data_py, data_eeglab)
def test_read_segment():
    """Test writing raw edf files when preload is False"""
    tempdir = _TempDir()

    # Round-trip: read EDF lazily, save to FIF, re-read, and compare.
    raw1 = read_raw_edf(edf_path, stim_channel=None, preload=False)
    raw1_file = op.join(tempdir, 'test1-raw.fif')
    raw1.save(raw1_file, overwrite=True, buffer_size_sec=1)
    raw11 = Raw(raw1_file, preload=True)
    data1, times1 = raw1[:139, :]
    data11, times11 = raw11[:139, :]
    assert_allclose(data1, data11, rtol=1e-6)
    assert_array_almost_equal(times1, times11)
    assert_equal(sorted(raw1.info.keys()), sorted(raw11.info.keys()))

    # Single-channel, single-sample slicing on the lazy reader.
    data2, times2 = raw1[0, 0:1]
    assert_array_equal(data2[0], data1[0, 0:1])
    assert_array_equal(times2, times1[0:1])

    # Saving must give identical results for all preload modes:
    # memmapped-to-file, fully loaded, and delayed loading.
    buffer_fname = op.join(tempdir, 'buffer')
    for preload in (buffer_fname, True, False):  # false here means "delayed"
        raw2 = read_raw_edf(edf_path, stim_channel=None, preload=preload)
        if preload is False:
            raw2.preload_data()
        raw2_file = op.join(tempdir, 'test2-raw.fif')
        raw2.save(raw2_file, overwrite=True)
        data2, times2 = raw2[:139, :]
        assert_allclose(data1, data2, rtol=1e-6)
        assert_array_equal(times1, times2)

    raw1 = Raw(raw1_file, preload=True)
    raw2 = Raw(raw2_file, preload=True)
    assert_array_equal(raw1._data, raw2._data)

    # test the _read_segment function by only loading some of the data
    raw1 = read_raw_edf(edf_path, stim_channel=None, preload=False)
    raw2 = read_raw_edf(edf_path, stim_channel=None, preload=True)

    # select some random range of data to compare
    data1, times1 = raw1[:, 345:417]
    data2, times2 = raw2[:, 345:417]
    assert_array_equal(data1, data2)
    assert_array_equal(times1, times2)
def test_append():
    """Test appending raw edf objects using Raw.append"""
    # Exercise both code paths: appending preloaded and non-preloaded data.
    # (The original loop hard-coded preload=False, so the loop variable was
    # never used and the preloaded path went untested.)
    for preload in (True, False):
        raw = read_raw_edf(bdf_path, preload=preload)
        raw0 = raw.copy()
        raw1 = raw.copy()
        raw0.append(raw1)
        # Appending a copy of itself doubles the length, and the data must
        # equal the original tiled twice along the time axis.
        assert_true(2 * len(raw) == len(raw0))
        assert_allclose(np.tile(raw[:, :][0], (1, 2)), raw0[:, :][0])

    # Different Raw types can't be combined.
    raw = read_raw_edf(bdf_path, preload=True)
    raw2 = RawArray(raw[:, :][0], raw.info)
    assert_raises(ValueError, raw.append, raw2)
def test_parse_annotation():
    """Test parsing the tal channel"""
    # Raw TAL (time-stamped annotation list) bytes: each record is
    # '+onset[\x15duration]\x14text\x14...\x00' per the EDF+ spec, padded
    # with NUL bytes.
    annot = (b'+180\x14Lights off\x14Close door\x14\x00\x00\x00\x00\x00'
             b'+180\x14Lights off\x14\x00\x00\x00\x00\x00\x00\x00\x00'
             b'+180\x14Close door\x14\x00\x00\x00\x00\x00\x00\x00\x00'
             b'+3.14\x1504.20\x14nothing\x14\x00\x00\x00\x00'
             b'+1800.2\x1525.5\x14Apnea\x14\x00\x00\x00\x00\x00\x00\x00'
             b'+123\x14\x14\x00\x00\x00\x00\x00\x00\x00')

    # Re-pack consecutive byte pairs as little-endian 16-bit values
    # (lo + hi * 256), mimicking how the annotation channel is stored
    # as sample data in the file.
    annot = [a for a in iterbytes(annot)]
    annot[1::2] = [a * 256 for a in annot[1::2]]
    tal_channel = map(sum, zip(annot[0::2], annot[1::2]))

    # The parser should emit [onset, duration, text] triples; a record with
    # empty text (the '+123' one) is dropped.
    events = edfmodule._parse_tal_channel(tal_channel)
    assert_equal(events, [[180.0, 0, 'Lights off'],
                          [180.0, 0, 'Close door'],
                          [180.0, 0, 'Lights off'],
                          [180.0, 0, 'Close door'],
                          [3.14, 4.2, 'nothing'],
                          [1800.2, 25.5, 'Apnea']])
def test_edf_annotations():
    """Test if events are detected correctly in a typical MNE workflow."""
    # test an actual file
    raw = read_raw_edf(edf_path, preload=True)
    edf_events = find_events(raw, output='step', shortest_event=0,
                             stim_channel='STI 014')

    # Expected events as [onset, duration, id] in seconds.
    events = [[0.1344, 0.2560, 2],
              [0.3904, 1.0000, 2],
              [2.0000, 0.0000, 3],
              [2.5000, 2.5000, 2]]
    events = np.array(events)
    events[:, :2] *= 512  # convert time to samples
    events = np.array(events, dtype=int)
    # Column 1 becomes the offset sample: duration - 1, clamped to at
    # least one sample, then shifted by the onset.
    events[:, 1] -= 1
    events[events[:, 1] <= 0, 1] = 1
    events[:, 1] += events[:, 0]

    # Interleave onsets and offsets into the step-function event format
    # produced by find_events(output='step').
    onsets = events[:, [0, 2]]
    offsets = events[:, [1, 2]]

    events = np.zeros((2 * events.shape[0], 3), dtype=int)
    events[0::2, [0, 2]] = onsets
    events[1::2, [0, 1]] = offsets

    assert_array_equal(edf_events, events)
def test_write_annotations():
    """Test writing raw files when annotations were parsed."""
    tempdir = _TempDir()

    # Round-trip through FIF and compare data, times, and info keys.
    raw1 = read_raw_edf(edf_path, preload=True)
    raw1_file = op.join(tempdir, 'test1-raw.fif')
    raw1.save(raw1_file, overwrite=True, buffer_size_sec=1)
    raw11 = Raw(raw1_file, preload=True)
    data1, times1 = raw1[:, :]
    data11, times11 = raw11[:, :]

    assert_array_almost_equal(data1, data11)
    assert_array_almost_equal(times1, times11)
    assert_equal(sorted(raw1.info.keys()), sorted(raw11.info.keys()))

    # Reading this annotated file without preloading must fail
    # (annotations apparently require the data to be loaded -- see the
    # reader's error path).
    assert_raises(RuntimeError, read_raw_edf, edf_path, preload=False)
def test_edf_stim_channel():
    """Test stim channel for edf file"""
    # stim_channel=-1 designates the last channel as the trigger channel.
    raw = read_raw_edf(edf_stim_channel_path, preload=True,
                       stim_channel=-1)

    true_data = np.loadtxt(edf_txt_stim_channel_path).T

    # The EDF writer pads the data when the file is too small, so only
    # compare the first `ns` samples that exist in the text reference.
    _, ns = true_data.shape
    edf_data = raw._data[:, :ns]

    # The stim channels must match exactly.
    assert_array_equal(true_data[-1], edf_data[-1])

    # The remaining channels must match after converting uV -> V.
    assert_array_almost_equal(true_data[0:-1] * 1e-6, edf_data[0:-1])
@requires_pandas
def test_to_data_frame():
    """Test edf Raw Pandas exporter"""
    for path in [edf_path, bdf_path]:
        raw = read_raw_edf(path, stim_channel=None, preload=True)
        _, times = raw[0, :10]
        df = raw.to_data_frame()
        # Columns are the channel names; the default index is time in ms.
        assert_true((df.columns == raw.ch_names).all())
        assert_array_equal(np.round(times * 1e3), df.index.values[:10])
        # With index=None, 'time' becomes part of a MultiIndex and the
        # requested scaling is applied to the EEG values.
        df = raw.to_data_frame(index=None, scalings={'eeg': 1e13})
        assert_true('time' in df.index.names)
        assert_array_equal(df.values[:, 0], raw._data[0] * 1e13)
run_tests_if_main()
| bsd-3-clause |
pnedunuri/scikit-learn | examples/mixture/plot_gmm_selection.py | 248 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)

import itertools

import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl

from sklearn import mixture

# Number of samples per component
n_samples = 500

# Generate a random two-component sample: one sheared Gaussian blob and
# one isotropic blob shifted to (-6, 3).
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
          .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]

# Grid-search over covariance type x number of components, keeping the
# model with the lowest BIC (lower is better).
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
    for n_components in n_components_range:
        # Fit a mixture of Gaussians with EM
        gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
        gmm.fit(X)
        bic.append(gmm.bic(X))
        if bic[-1] < lowest_bic:
            lowest_bic = bic[-1]
            best_gmm = gmm

bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []

# Top subplot: grouped bar chart of the BIC scores, one bar group per
# component count, one color per covariance type.
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
    xpos = np.array(n_components_range) + .2 * (i - 2)
    bars.append(plt.bar(xpos, bic[i * len(n_components_range):
                                  (i + 1) * len(n_components_range)],
                        width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
# Place a '*' above the bar of the overall BIC winner.
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
    .2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)

# Bottom subplot: scatter of the data colored by the winning model's
# cluster assignments, with a covariance ellipse per component.
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
                                             color_iter)):
    v, w = linalg.eigh(covar)
    if not np.any(Y_ == i):
        # Skip components that claimed no points.
        continue
    plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)

    # Plot an ellipse to show the Gaussian component: eigenvectors give
    # the orientation, eigenvalues (scaled) give the axis lengths.
    angle = np.arctan2(w[0][1], w[0][0])
    angle = 180 * angle / np.pi  # convert to degrees
    v *= 4
    ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
    ell.set_clip_box(splot.bbox)
    ell.set_alpha(.5)
    splot.add_artist(ell)

plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
qiu997018209/MachineLearning | CSDN唐宇迪培训项目实战/机器学习之泰坦尼克号获救预测项目实战/机器学习之泰坦尼克号获救预测项目实战.py | 1 | 8971 |
# coding: utf-8
# In[5]:
import pandas

titanic = pandas.read_csv("D:\\test\\titanic_train.csv")
# Quick descriptive statistics; "std" is the standard deviation, and the
# lower count for "Age" reveals missing values.
print titanic.describe()

# In[6]:

# Preprocessing.  Most algorithms are matrix computations and cannot
# handle missing values, so fill the missing ages with the median age.
titanic["Age"] = titanic["Age"].fillna(titanic["Age"].median())
print titanic.describe()

# In[7]:

# "Sex" is a string and cannot be used in numeric computations; encode it
# as a number: 0 for male, 1 for female.
print titanic["Sex"].unique()
titanic.loc[titanic["Sex"]=="male","Sex"] = 0
titanic.loc[titanic["Sex"]=="female","Sex"] = 1

# In[8]:

# The port of embarkation is also a string; fill the missing values with
# the most common port ('S') and map each port to a number.
print titanic["Embarked"].unique()
titanic["Embarked"] = titanic["Embarked"].fillna('S')
# .loc selects rows via a boolean mask and assigns into "Embarked".
titanic.loc[titanic["Embarked"]=="S","Embarked"] = 0
titanic.loc[titanic["Embarked"]=="C","Embarked"] = 1
titanic.loc[titanic["Embarked"]=="Q","Embarked"] = 2
# In[9]:
# Predict survival with a regression algorithm (binary classification).
# First attempt: plain linear regression.
from sklearn.linear_model import LinearRegression
# Cross-validation: split the training set into 3 folds; e.g. train on
# folds 1 and 2 and validate on fold 3, then average the results.
from sklearn.cross_validation import KFold

# Select the feature columns to train on.
predictors = ["Pclass","Sex","Age","SibSp","Parch","Fare","Embarked"]
alg = LinearRegression()
# n_folds=3 gives three-fold cross-validation; titanic.shape[0] is the
# number of samples.
kf = KFold(titanic.shape[0],n_folds=3,random_state=1)
predictions = []
for train,test in kf:
    # .iloc selects rows by position (the fold's row indices).
    train_predictors = titanic[predictors].iloc[train,:]
    # The corresponding label values.
    train_target = titanic["Survived"].iloc[train]
    # Fit on the training folds.
    alg.fit(train_predictors,train_target)
    # Predict on the held-out fold.
    test_predictors = alg.predict(titanic[predictors].iloc[test,:])
    # Collect the per-fold predictions.
    predictions.append(test_predictors)
# In[10]:
import numpy as np
predictions = np.concatenate(predictions,axis=0)
#将0到1之间的区间值,变成具体的是否被获救,1代表被获救
predictions[predictions>.5] = 1
predictions[predictions<=.5]= 0
accuracy = sum(predictions[predictions == titanic["Survived"]])/len(predictions)
print accuracy
# In[11]:
# Logistic regression: nominally a regression algorithm, but commonly
# used for classification.
from sklearn import cross_validation
from sklearn.linear_model import LogisticRegression

alg = LogisticRegression(random_state=1)
scores = cross_validation.cross_val_score(alg,titanic[predictors],titanic["Survived"],cv=3)
# Note: logistic and linear regression produce different outputs --
# logistic regression yields probabilities, linear regression yields
# values in the [0, 1] range.
print (scores.mean())

# In[12]:

# Neither linear nor logistic regression scored very well, so try a
# random forest:
# 1. samples are drawn randomly with replacement (bagging)
# 2. feature selection at each split is also random, limiting overfitting
# 3. many decision trees are averaged
from sklearn import cross_validation
from sklearn.ensemble import RandomForestClassifier

# Select the feature columns to train on.
predictors = ["Pclass","Sex","Age","SibSp","Parch","Fare","Embarked"]
# random_state=1 makes repeated runs reproducible.  n_estimators is the
# number of trees.  Splitting stops when a node has fewer than
# min_samples_split samples; min_samples_leaf bounds the leaf size.
alg = RandomForestClassifier(random_state=1,n_estimators=10,min_samples_split=2,min_samples_leaf=1)
# Run three-fold cross-validation.
kf = cross_validation.KFold(titanic.shape[0],n_folds=3,random_state=1)
scores = cross_validation.cross_val_score(alg,titanic[predictors],titanic["Survived"],cv=kf)
print (scores.mean())

# In[13]:

# Ten trees is still not great; raise the count to 50 and relax the
# split/leaf constraints so each tree is a little shallower.
alg = RandomForestClassifier(random_state=1,n_estimators=50,min_samples_split=4,min_samples_leaf=2)
# Run three-fold cross-validation.
kf = cross_validation.KFold(titanic.shape[0],n_folds=3,random_state=1)
scores = cross_validation.cross_val_score(alg,titanic[predictors],titanic["Survived"],cv=kf)
# The accuracy improves further with this configuration.
print (scores.mean())
# In[14]:
# Feature extraction is a key part of data mining.  So far only columns
# already present in the data were used; real projects often require
# deriving new features.

# Derived feature: family size = siblings/spouses + parents/children.
titanic["FamilySize"] = titanic["SibSp"] + titanic["Parch"]
# Name length (reputedly, wealthy families tended to use longer names).
titanic["NameLength"] = titanic["Name"].apply(lambda x:len(x))
# In[24]:
import re
def get_title(name):
    """Extract the honorific title from a passenger name.

    Titles in the Titanic data appear as a run of letters followed by a
    period, e.g. "Braund, Mr. Owen Harris" -> "Mr".  Returns "" when no
    such pattern is present.
    """
    match = re.search(r'([A-Za-z]+)\.', name)
    # group(1) is the captured letter run, without the trailing period.
    return match.group(1) if match else ""
# Pull the honorific title out of every passenger name.
titles = titanic["Name"].apply(get_title)
print (pandas.value_counts(titles))
print "......................."
# Different social classes used different honorifics; map each title to
# an integer code (similar titles share a code).
title_mapping = {"Mr":1,"Miss":2,"Mrs":3,"Master":4,"Dr":5,"Rev":6,"Major":7,"Col":7,"Mlle":8,"Mme":8,"Don":9,
                 "Lady":10,"Countess":10,"Jonkheer":10,"Sir":9,"Capt":7,"Ms":2}
for k,v in title_mapping.items():
    # Replace each title string with its numeric code.
    titles[titles==k]=v
print (pandas.value_counts(titles))
print "......................."
titanic["Title"] = titles
print titanic["Title"]
# In[25]:
# Feature-importance analysis: measure how much each feature affects the
# final result.  (The idea described in the original notes -- compare the
# error rate before and after replacing one column with noise -- is
# permutation importance; the code below actually uses univariate
# F-test p-values via SelectKBest instead.)
import numpy as np
from sklearn.feature_selection import SelectKBest,f_classif
import matplotlib.pyplot as plt

# Candidate features, including the engineered ones.
predictors = ["Pclass","Sex","Age","SibSp","Parch","Fare","Embarked","FamilySize","Title","NameLength"]

# Score each feature against the target with the ANOVA F-test.
seletor = SelectKBest(f_classif,k=5)
seletor.fit(titanic[predictors],titanic["Survived"])

# -log10(p-value): larger means more informative.
scores = -np.log10(seletor.pvalues_)

# Bar chart of the per-feature scores.
plt.bar(range(len(predictors)),scores)
plt.xticks(range(len(predictors)),predictors,rotation="vertical")
plt.show()

# In[27]:

# Keep the four most important features and re-run the random forest.
predictors = ["Pclass","Sex","Fare","Title"]

alg = RandomForestClassifier(random_state=1,n_estimators=50,min_samples_split=4,min_samples_leaf=2)
# Run three-fold cross-validation.
kf = cross_validation.KFold(titanic.shape[0],n_folds=3,random_state=1)
scores = cross_validation.cross_val_score(alg,titanic[predictors],titanic["Survived"],cv=kf)
# The score does not actually improve here; the point is to practice
# feature selection, which matters in real data-mining work.
print (scores.mean())
# In[34]:
# A common competition trick: ensemble several algorithms and average
# their predictions to reduce overfitting.
from sklearn.ensemble import GradientBoostingClassifier
import numpy as np

# GradientBoostingClassifier is another tree ensemble: it combines many
# weak learners into a strong classifier.  Each entry pairs a model with
# the feature set it trains on.
algorithms = [
    [GradientBoostingClassifier(random_state=1,n_estimators=25,max_depth=3),["Pclass","Sex","Age","Fare","Embarked","FamilySize","Title"]],
    [LogisticRegression(random_state=1),["Pclass","Sex","Fare","FamilySize","Title","Age","Embarked"]]
]

kf = KFold(titanic.shape[0],n_folds=3,random_state=1)

predictions = []
for train,test in kf:
    train_target = titanic["Survived"].iloc[train]
    full_test_predictions = []
    for alg,predictors in algorithms:
        # Fit each model on the training folds and collect its class-1
        # probabilities on the held-out fold.
        alg.fit(titanic[predictors].iloc[train,:],train_target)
        test_predictions = alg.predict_proba(titanic[predictors].iloc[test,:].astype(float))[:,1]
        full_test_predictions.append(test_predictions)
    # Average the two models' probabilities, then threshold at 0.5.
    test_predictions = (full_test_predictions[0] + full_test_predictions[1])/2
    test_predictions[test_predictions<=.5]=0
    test_predictions[test_predictions>.5] =1
    predictions.append(test_predictions)
predictions = np.concatenate(predictions,axis=0)
#发现准确率提高了一个百分点
accuracy = sum(predictions[predictions == titanic["Survived"]])/len(predictions)
print accuracy
| apache-2.0 |
dettanym/Complex-Networks-Node-Removal | ERgraph_random_removal.py | 1 | 3095 | #!/usr/bin/python
# Monte-Carlo study of random node removal on Erdos-Renyi graphs: how the
# relative size of the giant component varies with the occupation
# probability.  NOTE(review): `bisect` is imported but never used below.
import igraph, random, bisect
import matplotlib.pyplot as plt

trials = 100  # number of independent random graphs to average over
N = 1000
p = 0.01 # ( as p >= 1/(n-1), the graph will have a giant component initially)
division=100 # No. of divisions of the occupation probability scale
# Limits to vary the occupation probability within
upper_limit=division
lower_limit=0
vertices = list() # Array to store the vertices of the edges to be removed
divs = list() # Array to store the occupation probability values (independent variable).
sizes = list() # Array to store the sizes of the giant components (dependent variable).

# Setting up the X axis values-- the independent variable- obtained by scaling the loop counter 't' using the 'division' parameter.
divs = [float(x)/division for x in range(lower_limit, upper_limit,1)]
l_div=divs.__len__()

for i in range(0, trials):
    # Set up a random ER graph that's connected:
    g = igraph.Graph.Erdos_Renyi(N,p)
    size1 = max(g.components("STRONG").sizes())  # initial giant-component size
    # Running over the occupation probability percentage figures from the lower_limit, to the upper_limit, as per the divs array.
    for t in range(0, l_div, 1):
        y = g.copy() # Re-generating a new copy of the random graph generated in this trial, as the old copy will have some nodes removed.
        for x in range(0, N): # x is also to be used as a vertex ID- which ranges from 0 to n-1, both included, in igraph.
            # If a random number is greater than the occupation probability, as from the divs list, add the vertex to the list of vertices to be removed. Else, do nothing.
            if random.random() >= divs[t]:
                vertices.append(x)
        # Delete the randomly selected vertices.
        if vertices.__len__() != 0:
            y.delete_vertices(vertices)
        # Append the relative size of the giant component wrt the initial size, for that probability of removal, to the 'sizes' list/
        current_sizes=y.components("STRONG").sizes()
        if current_sizes: # Probabilistically, the entire graph can get deleted for low occupation probabilities.
            size=float(max(current_sizes))/size1 * 100
        else:
            size=0
        # For the first trial, set up the sizes array. For all the consequtive ones, add the size values-to be normalised in the end.
        if i == 0:
            sizes.append(size)
        else:
            sizes[t]+=size
        # Need to refresh the list of vertices before re-computing for the next occupation probability value.
        del vertices[:]

length = sizes.__len__()
str1 = ""  # NOTE(review): leftover from a removed file-writing step; unused.
# Normalizes the giant component sizes over the number of trials & forms a string to write the data of threshold Q v/s giant component sizes
for x in range(0, length, 1):
    sizes[x] = float(sizes[x])/trials

print 1/float(N*p) # Theoretical Qc for ER model (Poisson approximation): 1/<k> or 1/(N*p)

# Plots the graph of relative giant component size v/s occupation probability values.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlabel('Occupation probability')
ax.set_ylabel('Relative size of giant component')
ax.set_title('Giant component size wrt occupation probability')
plt.plot(divs,sizes)
plt.show()
| mit |
mattjj/pyhawkes | examples/inference/svi_demo.py | 2 | 8788 | import numpy as np
import os
import cPickle
import gzip
# np.seterr(all='raise')
import matplotlib.pyplot as plt
from sklearn.metrics import adjusted_mutual_info_score, \
adjusted_rand_score, roc_auc_score
from pyhawkes.models import \
DiscreteTimeNetworkHawkesModelGammaMixture, \
DiscreteTimeStandardHawkesModel
from pyhawkes.plotting.plotting import plot_network
init_with_map = False
do_plot = False
def demo(seed=None):
    """Fit a weakly sparse network Hawkes model to synthetic event data
    with stochastic variational inference (SVI), then analyze the samples.

    :param seed: RNG seed; a random one is drawn and printed when None.
    :return: None -- results are printed/plotted by analyze_samples.
    """
    if seed is None:
        seed = np.random.randint(2**32)

    print "Setting seed to ", seed
    np.random.seed(seed)

    ###########################################################
    # Load some example data.
    # See data/synthetic/generate.py to create more.
    ###########################################################
    data_path = os.path.join("data", "synthetic", "synthetic_K4_C1_T1000.pkl.gz")
    with gzip.open(data_path, 'r') as f:
        S, true_model = cPickle.load(f)

    # Mirror the generating model's dimensions and discretization.
    T = S.shape[0]
    K = true_model.K
    B = true_model.B
    dt = true_model.dt
    dt_max = true_model.dt_max

    ###########################################################
    # Initialize with MAP estimation on a standard Hawkes model
    ###########################################################
    if init_with_map:
        init_len = T
        print "Initializing with BFGS on first ", init_len, " time bins."
        init_model = DiscreteTimeStandardHawkesModel(K=K, dt=dt, dt_max=dt_max, B=B,
                                                     alpha=1.0, beta=1.0)
        init_model.add_data(S[:init_len, :])
        init_model.initialize_to_background_rate()
        init_model.fit_with_bfgs()
    else:
        init_model = None

    ###########################################################
    # Create a test weak spike-and-slab model
    ###########################################################
    # Copy the network hypers.
    # Give the test model p, but not c, v, or m -- those must be inferred.
    network_hypers = true_model.network_hypers.copy()
    network_hypers['c'] = None
    network_hypers['v'] = None
    network_hypers['m'] = None
    test_model = DiscreteTimeNetworkHawkesModelGammaMixture(K=K, dt=dt, dt_max=dt_max, B=B,
                                                            basis_hypers=true_model.basis_hypers,
                                                            bkgd_hypers=true_model.bkgd_hypers,
                                                            impulse_hypers=true_model.impulse_hypers,
                                                            weight_hypers=true_model.weight_hypers,
                                                            network_hypers=network_hypers)
    test_model.add_data(S)
    # F_test = test_model.basis.convolve_with_basis(S_test)

    # Initialize with the standard model parameters
    if init_model is not None:
        test_model.initialize_with_standard_model(init_model)

    # Initialize plots
    if do_plot:
        ln, im_net, im_clus = initialize_plots(true_model, test_model, S)

    ###########################################################
    # Fit the test model with stochastic variational inference
    ###########################################################
    N_iters = 500
    minibatchsize = 100
    delay = 1.0
    forgetting_rate = 0.5
    # Polynomially decaying step-size schedule: (itr + delay)^(-forgetting_rate).
    stepsize = (np.arange(N_iters) + delay)**(-forgetting_rate)
    samples = []
    for itr in xrange(N_iters):
        print "SVI Iter: ", itr, "\tStepsize: ", stepsize[itr]
        test_model.sgd_step(minibatchsize=minibatchsize, stepsize=stepsize[itr])
        # Draw a sample from the variational posterior and keep a copy.
        test_model.resample_from_mf()
        samples.append(test_model.copy_sample())

        # Update plot (every iteration, since itr % 1 == 0 always holds)
        if itr % 1 == 0 and do_plot:
            update_plots(itr, test_model, S, ln, im_clus, im_net)

    ###########################################################
    # Analyze the samples
    ###########################################################
    analyze_samples(true_model, init_model, samples)
def initialize_plots(true_model, test_model, S):
    """Set up the live figures: true network, firing rate of the first
    process, and block affiliations.  Returns the artists (rate line,
    network image, cluster image) that update_plots refreshes."""
    K = true_model.K
    C = true_model.C
    R = true_model.compute_rate(S=S)
    T = S.shape[0]

    # Plot the true network
    plt.ion()
    plot_network(true_model.weight_model.A,
                 true_model.weight_model.W)
    plt.pause(0.001)

    # Plot the true (black) and inferred (red) firing rate of process 0.
    plt.figure(2)
    plt.plot(np.arange(T), R[:,0], '-k', lw=2)
    plt.ion()
    ln = plt.plot(np.arange(T), test_model.compute_rate()[:,0], '-r')[0]
    plt.show()

    # Plot the block affiliations as a K x C one-hot indicator image.
    plt.figure(3)
    KC = np.zeros((K,C))
    KC[np.arange(K), test_model.network.c] = 1.0
    im_clus = plt.imshow(KC,
                         interpolation="none", cmap="Greys",
                         aspect=float(C)/K)

    # Inferred effective weights, drawn over a fully-connected mask.
    im_net = plot_network(np.ones((K,K)), test_model.weight_model.W_effective, vmax=0.5)
    plt.pause(0.001)
    plt.show()
    plt.pause(0.001)

    return ln, im_net, im_clus
def update_plots(itr, test_model, S, ln, im_clus, im_net):
    """Refresh the live figures with the test model's current state at
    iteration `itr` (rate line, cluster indicators, effective weights)."""
    K = test_model.K
    C = test_model.C
    T = S.shape[0]
    plt.figure(2)
    ln.set_data(np.arange(T), test_model.compute_rate()[:,0])
    plt.title("\lambda_{%d}. Iteration %d" % (0, itr))
    plt.pause(0.001)

    plt.figure(3)
    # Rebuild the K x C one-hot cluster-assignment image.
    KC = np.zeros((K,C))
    KC[np.arange(K), test_model.network.c] = 1.0
    im_clus.set_data(KC)
    plt.title("KxC: Iteration %d" % itr)
    plt.pause(0.001)

    plt.figure(4)
    plt.title("W: Iteration %d" % itr)
    im_net.set_data(test_model.weight_model.W_effective)
    plt.pause(0.001)
def analyze_samples(true_model, init_model, samples):
    """Summarize posterior samples: print parameter means, plot link
    prediction AUC over iterations, and plot clustering scores against
    the true network's block assignments."""
    N_samples = len(samples)

    # Stack each parameter across samples (first axis = sample index).
    A_samples = np.array([s.weight_model.A for s in samples])
    W_samples = np.array([s.weight_model.W for s in samples])
    g_samples = np.array([s.impulse_model.g for s in samples])
    lambda0_samples = np.array([s.bias_model.lambda0 for s in samples])
    c_samples = np.array([s.network.c for s in samples])
    p_samples = np.array([s.network.p for s in samples])
    v_samples = np.array([s.network.v for s in samples])

    # Compute sample statistics over the second half of the samples only,
    # discarding the first half as burn-in.
    offset = N_samples // 2
    A_mean = A_samples[offset:, ...].mean(axis=0)
    W_mean = W_samples[offset:, ...].mean(axis=0)
    g_mean = g_samples[offset:, ...].mean(axis=0)
    lambda0_mean = lambda0_samples[offset:, ...].mean(axis=0)
    p_mean = p_samples[offset:, ...].mean(axis=0)
    v_mean = v_samples[offset:, ...].mean(axis=0)

    print "A true: ", true_model.weight_model.A
    print "W true: ", true_model.weight_model.W
    print "g true: ", true_model.impulse_model.g
    print "lambda0 true: ", true_model.bias_model.lambda0
    print ""
    print "A mean: ", A_mean
    print "W mean: ", W_mean
    print "g mean: ", g_mean
    print "lambda0 mean: ", lambda0_mean
    print "v mean: ", v_mean
    print "p mean: ", p_mean

    # # Predictive log likelihood
    # pll_init = init_model.heldout_log_likelihood(S_test)
    # plt.figure()
    # plt.plot(np.arange(N_samples), pll_init * np.ones(N_samples), 'k')
    # plt.plot(np.arange(N_samples), plls, 'r')
    # plt.xlabel("Iteration")
    # plt.ylabel("Predictive log probability")
    # plt.show()

    # Compute the link prediction accuracy curves: can the inferred A/W
    # recover the true adjacency matrix?  auc_init uses the MAP-initialized
    # standard model's weights as a baseline when available.
    if init_model is not None:
        auc_init = roc_auc_score(true_model.weight_model.A.ravel(),
                                 init_model.W.ravel())
    else:
        auc_init = 0.0

    auc_A_mean = roc_auc_score(true_model.weight_model.A.ravel(),
                               A_mean.ravel())
    auc_W_mean = roc_auc_score(true_model.weight_model.A.ravel(),
                               W_mean.ravel())

    aucs = []
    for A in A_samples:
        aucs.append(roc_auc_score(true_model.weight_model.A.ravel(), A.ravel()))

    plt.figure()
    plt.plot(aucs, '-r')
    plt.plot(auc_A_mean * np.ones_like(aucs), '--r')
    plt.plot(auc_W_mean * np.ones_like(aucs), '--b')
    plt.plot(auc_init * np.ones_like(aucs), '--k')
    plt.xlabel("Iteration")
    plt.ylabel("Link prediction AUC")
    plt.ylim(-0.1, 1.1)

    # Compute the adjusted mutual info / Rand scores of each sampled
    # clustering against the true block assignments.
    amis = []
    arss = []
    for c in c_samples:
        amis.append(adjusted_mutual_info_score(true_model.network.c, c))
        arss.append(adjusted_rand_score(true_model.network.c, c))

    plt.figure()
    plt.plot(np.arange(N_samples), amis, '-r')
    plt.plot(np.arange(N_samples), arss, '-b')
    plt.xlabel("Iteration")
    plt.ylabel("Clustering score")

    plt.ioff()
    plt.show()
# demo(2203329564)
# demo(2728679796)
demo(11223344)
| mit |
neeraj-kumar/nkpylib | nkplotutils.py | 1 | 4169 | """Lots of useful matplotlib utilities, written by Neeraj Kumar.
Licensed under the 3-clause BSD License:
Copyright (c) 2010, Neeraj Kumar (neerajkumar.org)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL NEERAJ KUMAR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from nkutils import *
import matplotlib
try:
matplotlib.use('Agg')
except UserWarning: pass
from matplotlib import rc
from pylab import *
from matplotlib.ticker import *
from PIL import Image
def centerel(elsize, contsize):
    """Centers an element of the given size in the container of the given size.

    Both arguments are (width, height) pairs.  Returns the (x, y)
    coordinates of the element's top-left corner relative to the
    container, using floor division so integer inputs give integer
    results."""
    el_w, el_h = elsize
    cont_w, cont_h = contsize
    return ((cont_w - el_w)//2, (cont_h - el_h)//2)
def axisfontsize(ax, size):
    """Sets the fontsize for the axis tick labels on both the x and y axes."""
    for tick_labels in (ax.get_xticklabels(), ax.get_yticklabels()):
        for lbl in tick_labels:
            lbl.set_size(size)
def plotresults(t, rocvals, colors, outname, type, figsize=None, fontfac=1.0):
    """Plots results as an ROC curve.

    Sets up figure 3049 with unit-square axes, 0.1-spaced ticks, and a
    lower-right legend, scaling all font sizes by `fontfac`.

    NOTE(review): the parameters t, rocvals, colors, outname and type are
    never referenced in this body -- only the axes are configured here, no
    curve is drawn; confirm whether plotting was meant to happen elsewhere.
    `type` also shadows the builtin of the same name.
    """
    font = {'fontname': 'Arial', 'fontsize': 22*fontfac}
    if not figsize:
        figsize = (10,10)
    # fixed figure number so repeated calls reuse the same window
    figure(3049, figsize=figsize)
    clf()
    xlabel('False Positive Rate', font)
    ylabel('True Positive (Detection) Rate', font)
    font['fontsize'] = 24*fontfac
    ax = gca()
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    axisfontsize(ax, 18*fontfac)
    # 11 evenly spaced ticks -> gridlines every 0.1
    ax.xaxis.set_major_locator(LinearLocator(11))
    ax.yaxis.set_major_locator(LinearLocator(11))
    # leave extra bottom margin when fonts are shrunk
    if fontfac < 1:
        ax.set_position((0.08, 0.12, 0.90, 0.85))
    else:
        ax.set_position((0.08, 0.06, 0.90, 0.92))
    from matplotlib.font_manager import FontProperties
    fp = FontProperties(family='sans-serif', size=16*fontfac)
    legend(loc='lower right', prop=fp)
def genNumberedImage(im, rect, num, fsize=10):
    """Generates a new image using the given rect from the image, and puts a number on it.

    Crops `rect` out of `im`, then draws `num` centered on the crop in
    blue using arial.ttf at point size `fsize`, and returns the new image.
    """
    from PIL import ImageFont, ImageDraw
    font = ImageFont.truetype('arial.ttf', fsize)
    ret = im.crop(rect)
    ret.load()
    draw = ImageDraw.Draw(ret)
    s = '%s' % num
    size = draw.textsize(s, font=font)
    # BUGFIX: this called centerEl(), which does not exist -- the helper
    # defined above is centerel(); the typo raised NameError at runtime.
    pos = centerel(size, ret.size)
    draw.text(pos, s, fill="blue", font=font)
    del draw
    return ret
def rearrangeim(im, ndivs=4):
    """Rearranges an image to make it more square sized.

    Cuts the image into `ndivs` vertical strips of equal width and stacks
    them top-to-bottom, returning a new RGB image of size
    (width//ndivs, height*ndivs).
    """
    from PIL import Image
    xincr = im.size[0]//ndivs
    yincr = im.size[1]
    size = (xincr, yincr*ndivs)
    out = Image.new('RGB', size)
    for i in range(ndivs):
        # BUGFIX: PIL crop boxes are exclusive on the right/lower edge, so
        # the old (right-1, lower-1) bounds dropped the last pixel row and
        # column of every strip, leaving one-pixel gaps in the output.
        out.paste(im.crop((xincr*i, 0, xincr*(i+1), yincr)), (0, yincr*i))
    # parenthesized form works as a statement in Py2 and a call in Py3
    print('Converted image of size %s to size %s' % (im.size, out.size))
    return out
| bsd-3-clause |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Padova_cont/padova_cont_5/fullgrid/UV1.py | 31 | 9315 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
# Locate the Cloudy output files in the working directory: three grid
# files (*1.grd, *2.grd, *3.grd) and three emission-line tables
# (*1.txt, *2.txt, *3.txt).  As with the original per-suffix scans, if
# several files share a suffix the one listed last by os.listdir wins.
for candidate in os.listdir('.'):
    if candidate.endswith("1.grd"):
        gridfile1 = candidate
    elif candidate.endswith("2.grd"):
        gridfile2 = candidate
    elif candidate.endswith("3.grd"):
        gridfile3 = candidate
    elif candidate.endswith("1.txt"):
        Elines1 = candidate
    elif candidate.endswith("2.txt"):
        Elines2 = candidate
    elif candidate.endswith("3.txt"):
        Elines3 = candidate
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
# Quadrilateral in the (log n_H, log phi_H) plane; the final (0,0) vertex
# is required by CLOSEPOLY but otherwise ignored.
verts = [
    (1., 7.97712125471966000000), # left, bottom
    (1., 9.57712125471966000000), # left, top
    (2., 10.57712125471970000000), # right, top
    (2., 8.97712125471966000000), # right, bottom
    (0., 0.), # ignored
    ]
codes = [Path.MOVETO,
    Path.LINETO,
    Path.LINETO,
    Path.LINETO,
    Path.CLOSEPOLY,
    ]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
    (2.4, 9.243038049), # left, bottom
    (2.4, 11.0211893), # left, top
    (2.6, 11.0211893), # right, top
    (2.6, 9.243038049), # right, bottom
    (0, 0.), # ignored
    ]
# NOTE(review): `path` is rebuilt here (and again below) from the same
# verts/codes -- redundant but harmless.
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
    (1., 6.86712125471966000000), # left, bottom
    (1., 10.18712125471970000000), # left, top
    (3., 12.18712125471970000000), # right, top
    (3., 8.86712125471966000000), # right, bottom
    (0., 0.), # ignored
    ]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
    """Overlay the literature comparison regions (Moy et al. in yellow,
    Kewley 01 in green, Kewley & Levesque in red) on the given axes.

    BUGFIX: the body previously drew on the module-global `ax1` and
    ignored the `ax` parameter; it now uses the parameter.  The only call
    site passes ax1, so behaviour there is unchanged.
    """
    patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
    patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
    patch = patches.PathPatch(path, facecolor='red', lw=0)
    ax.add_patch(patch3)
    ax.add_patch(patch2)
    ax.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
    """Draw one panel of the 4x4 contour grid for emission line `sub_num`.

    Relies on module globals: x, y, z (interpolation inputs), xi, yi
    (target mesh), levels/levels2 (contour level sets), max_values,
    headers, and line (the selected line indices).
    """
    numplots = 16
    plt.subplot(numplots/4.,4,sub_num)
    # radial-basis interpolation of this line's ratio onto the xi/yi mesh
    rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
    zi = rbf(xi, yi)
    contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
    contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
    # mark the maximum-ratio grid point and label it with the line name/value
    plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
    plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
    plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
    if sub_num == numplots / 2.:
        print "half the plots are complete"
    #axis limits
    yt_min = 8
    yt_max = 23
    xt_min = 0
    xt_max = 12
    plt.ylim(yt_min,yt_max)
    plt.xlim(xt_min,xt_max)
    plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
    plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
    # interior columns hide y tick labels; the left column keeps them
    if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
        plt.tick_params(labelleft = 'off')
    else:
        plt.tick_params(labelleft = 'on')
        plt.ylabel('Log ($ \phi  _{\mathrm{H}}  $)')
    # upper rows hide x tick labels; the bottom row keeps them
    if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
        plt.tick_params(labelbottom = 'off')
    else:
        plt.tick_params(labelbottom = 'on')
        plt.xlabel('Log($n _{\mathrm{H}} $)')
    # corner panels get adjusted tick ranges so shared edges don't collide
    if sub_num == 1:
        plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
    if sub_num == 13:
        plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
        plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
    if sub_num == 16 :
        plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
#this is where the grid information (phi and hdens) is read in and saved to grid.
def _read_grid_table(fname):
    """Read one tab-delimited Cloudy .grd file into an array of rows."""
    rows = []
    with open(fname, 'rb') as fh:
        for row in csv.reader(fh, delimiter='\t'):
            rows.append(row)
    return asarray(rows)

grid1 = _read_grid_table(gridfile1)
grid2 = _read_grid_table(gridfile2)
grid3 = _read_grid_table(gridfile3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
# NOTE(review): `incident` is assigned but never used below.
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
    for j in range(len(Emissionlines[0])):
        if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
            concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
        else:
            # BUGFIX: this was `concatenated_data[i,j] == 0`, a no-op
            # comparison.  The array is zero-initialised so results were
            # unaffected, but the intent is clearly an assignment.
            concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
#	for j in range(len(Emissionlines[0])):
#		if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
#			concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
#		else:
#			concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
# Assigning into a float `zeros` array converts the values to floats, so
# gridarray is numeric even if the source columns originated as strings.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines here!
# Indices into the emission-line table; the trailing comment on each entry
# is the line's wavelength in Angstroms.
line = [0, #977
    1, #991
    2, #1026
    5, #1216
    91, #1218
    6, #1239
    7, #1240
    8, #1243
    9, #1263
    10, #1304
    11,#1308
    12, #1397
    13, #1402
    14, #1406
    16, #1486
    17] #1531
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
# 10x10 target mesh spanning the sampled (hdens, phi) ranges
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty UV Lines", fontsize=14)
# ---------------------------------------------------
# NOTE(review): add_sub_plot special-cases panel numbers 1, 13 and 16,
# suggesting it expects 1..16, but range(16) passes 0..15 -- confirm
# whether the first panel is intentionally subplot 0.
for i in range(16):
    add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Dusty_UV_Lines.pdf')
plt.clf()
print "figure saved"
| gpl-2.0 |
kayarre/Tools | womersley/womersley/components.py | 1 | 39174 | import os
import numpy as np
import womersley.utils
import scipy.special as special
import matplotlib.pyplot as plt
#import womersley.components
class waveform_info(object):
    """Fourier-coefficient description of a flow waveform loaded from a
    tab-separated coefficient file (first row = imaginary parts, second
    row = real parts)."""

    def __init__(self, dir_path, name, radius, period, kind="profile", mu=0.0035, rho=1050.0):
        """create instance of converted coefficient information
        """
        self.dir_path = dir_path
        self.name = name
        self.whole_path = os.path.join(dir_path, name)
        self.read_pressure_coef()
        self.rho = rho  # density
        self.mu = mu  # viscosity
        self.radius = radius  # radius
        self.period = period
        self.kind = kind
        self.Q = 0.0  # m^3/s
        self.set_nu()
        self.set_N_coeff(20)

    def read_pressure_coef(self):
        """Load the coefficient table from disk into self.coef."""
        table = np.loadtxt(self.whole_path, delimiter="\t")
        # take complex conjugate, PAT!
        real_row, imag_row = table[1], table[0]
        self.coef = np.array([complex(real_row[k], imag_row[k])
                              for k in range(table.shape[-1])])

    def get_coef(self):
        """Return the complex coefficient array."""
        return self.coef

    def get_period(self):
        """Return the period, raising if it was never set (still zero)."""
        if self.periodiszero():
            raise NameError("period is not set")
        return self.period

    def set_period(self, new_period):
        """Set the period and refresh the derived angular frequency."""
        self.period = new_period
        self.set_womersley()

    def periodiszero(self, rel_tol=1e-09, abs_tol=0.0):
        """math.isclose-style test of whether the period is numerically zero."""
        tolerance = max(rel_tol * abs(self.period), abs_tol)
        return abs(self.period) <= tolerance

    def set_womersley(self):
        """Cache the fundamental angular frequency 2*pi/T (zero if T <= 0)."""
        self.alpha = (2.0*np.pi / self.period) if self.period > 0.0 else 0.0

    def get_womersley(self):
        return self.alpha

    def set_avg_Q(self, Q):
        self.Q_avg = Q

    def get_avg_Q(self):
        return self.Q_avg

    def set_peak_Q(self, peakflow):
        self.Q_peak_flow = peakflow

    def get_peak_Q(self):
        return self.Q_peak_flow

    def set_min_Q(self, minflow):
        self.Q_min_flow = minflow

    def get_min_Q(self):
        return self.Q_min_flow

    def set_avg_v(self, avg_v):
        self.avg_v = avg_v

    def get_avg_v(self):
        return self.avg_v

    def set_peak_v(self, peak_v):
        self.peak_v = peak_v

    def get_peak_v(self):
        return self.peak_v

    def set_min_v(self, min_v):
        self.min_v = min_v

    def get_min_v(self):
        return self.min_v

    def set_nu(self):
        # kinematic viscosity from dynamic viscosity and density
        self.nu = self.mu / self.rho

    def get_nu(self):
        return self.nu

    def set_N_coeff(self, N):
        self.N = N

    def get_N_coeff(self):
        return self.N
"""
takes in a waveform and generates information about it
"""
class Waveform_Data(object):
    """ Womersley waveform data class based on input data
    Parameters:
        dir_path (string): directory to read and write womersley files
        data (numpy nx2): array with time and flow rate information
        radius (float): the radius of the tube
        waveform_ctr_name (string): the fourier coefficient file name
        period (float): has a setter function, period of cardiac cycle in seconds
        output_path (path): path to store figure data
        scale (float): default convert millisecond data to seconds, and mm/second to m/second
        mu (float): the dynamic viscosity (Pa s)
        rho (float): the fluid density
        kind (string): the kind of waveform information ('ctr', 'peak', etc.) currently not used I guess
    attributes:
        time: the time
        shape: shape of each waveform
        radius: the inlet profile radius, meters
    """
    def __init__(self, dir_path, data, radius, waveform_ctr_name,
                 period=0.0, output_path='', scale=1000.0, mu=0.0035, rho=1050.0, kind='ctr'):
        """Build the waveform object, rescaling the raw samples.

        Column 0 of `data` is time (divided by `scale`: ms -> s by
        default), column 1 is velocity (mm/s -> m/s by default).
        """
        self.dir_path = dir_path
        self.data = data
        self.time = self.data[:,0]/scale # seconds
        self.flowvelocity = self.data[:,1]/scale # m/s
        self.shape = self.time.shape
        self.set_radius(radius)
        self.waveform_ctr_name = waveform_ctr_name
        self.Q = 0.0 # m^3/s
        self.period = period
        self.mu = mu #Pa s
        self.rho = rho #kg/m**3
        self.set_nu()
        self.set_N_coeff(20)
        self.set_kind( kind)
        # default the output directory to the input directory
        if (output_path == ''):
            self.output_path = self.dir_path
        else:
            self.output_path = output_path
    def get_kind(self):
        return self.kind
    def set_kind(self, k):
        self.kind = k
    def get_radius(self):
        return self.radius
    def set_radius(self, r):
        self.radius = r
    def set_output_path(self, output_path_str):
        self.output_path = output_path_str
    def get_period(self):
        """Return the period; raises ValueError if it is still zero."""
        test = self.periodiszero()
        #print(test)
        if(test):
            raise ValueError("period is not set")
        return self.period
    def set_period(self, new_period):
        """Set the period and refresh the derived angular frequency."""
        self.period = new_period
        self.set_womersley()
    def periodiszero(self, rel_tol=1e-09, abs_tol=0.0):
        # math.isclose-style comparison of self.period against zero
        return (abs(self.period-0.0) <= max(rel_tol * max(abs(self.period), abs(0.0)), abs_tol))
    def set_womersley(self):
        # fundamental angular frequency 2*pi/T; zero when the period is unset
        if ( self.period > 0.0):
            self.alpha = 2.0*np.pi / self.period # womersley number
        else:
            self.alpha = 0.0
    def get_womersley(self):
        return self.alpha
    def write_coefficients(self, coef_new, name):
        """Write coefficients to `name` in the output directory as two
        tab-separated rows: imaginary parts first, then real parts."""
        fp = os.path.join(self.output_path,name)
        with open(fp, 'w') as fourier_f:
            c_s = [(c.real, c.imag) for c in coef_new]
            imag_c = []
            real_c = []
            for c in c_s:
                # take complex conjugate, PAT!
                imag_c.append("{0:16.16f}".format(c[1]))
                real_c.append("{0:16.16f}".format(c[0]))
            fourier_f.write('\t'.join(imag_c) + '\n') #imaginary first and real second
            fourier_f.write('\t'.join(real_c) + '\n')
    def read_pressure_coef(self, name):
        """Read a coefficient file written by write_coefficients and
        return it as a complex numpy array."""
        fp = os.path.join(self.output_path, name)
        data = np.loadtxt(fp, delimiter="\t")
        shape = data.shape
        # take complex conjugate, PAT!
        coef = np.array([ complex(data[1][j], data[0][j]) for j in range(data.shape[-1])])
        return coef
    def save_fig(self, fig, fig_name="test.jpg"):
        """Save a matplotlib figure into the output directory."""
        fig.savefig(os.path.join(self.output_path, fig_name))
    def set_avg_Q(self, Q):
        self.Q_avg = Q
    def get_avg_Q(self):
        return self.Q_avg
    def set_peak_Q(self, peakflow):
        self.Q_peak_flow = peakflow
    def get_peak_Q(self):
        return self.Q_peak_flow
    def set_min_Q(self, minflow):
        self.Q_min_flow = minflow
    def get_min_Q(self):
        return self.Q_min_flow
    def set_mean_v(self, mean_v):
        self.mean_v = mean_v
    def get_mean_v(self):
        return self.mean_v
    def set_peak_v(self, peak_v):
        self.peak_v = peak_v
    def get_peak_v(self):
        return self.peak_v
    def set_nu(self):
        # kinematic viscosity from dynamic viscosity and density
        self.nu = self.mu / self.rho
    def get_nu(self):
        return self.nu
    def set_N_coeff(self, N):
        self.N = N
    def get_N_coeff(self):
        return self.N
"""
reads normative data from paper values
"""
class norm_waveform_data(object):
    """ Womersley waveform data class based on normative input data
    Parameters:
        dir_path (string): directory to read and write womersley files
        txt_name (string): CSV file of time (ms) and flow rate (ml/min)
        radius (float): the radius of the tube
        waveform_ctr_name (string): the fourier coefficient file name
        period (float): has a setter function, period of cardiac cycle in seconds
        output_path (path): path to store figure data
        scale (float): default convert millisecond data to seconds
        mu (float): the dynamic viscosity (Pa s)
        rho (float): the fluid density
        kind (string): the kind of waveform information ('ctr', 'peak', etc.) currently not used I guess
    attributes:
        time: the time
        shape: shape of each waveform
        radius: the inlet profile radius, meters
    """
    def __init__(self, dir_path, txt_name, radius, waveform_ctr_name,
                 period=0.0, output_path='', scale=1000.0, mu=0.0035, rho=1050.0, kind='ctr'):
        """Load the normative CSV and rescale: time ms -> s, flow rate
        ml/min -> m^3/s.
        """
        self.dir_path = dir_path
        self.txt_path = os.path.join(dir_path, txt_name)
        self.data = np.loadtxt(self.txt_path, delimiter=",")
        self.time = self.data[:,0]/scale # seconds
        self.flowrate = self.data[:,1]/(10.**6*60.) # m^3/s
        self.shape = self.time.shape
        self.set_radius(radius)
        self.waveform_ctr_name = waveform_ctr_name
        self.Q = 0.0 # m^3/s
        self.period = period
        self.mu = mu #Pa s
        self.rho = rho #kg/m**3
        self.set_nu()
        self.set_N_coeff(20)
        self.set_kind(kind)
        # default the output directory to the input directory
        if (output_path == ''):
            self.output_path = self.dir_path
        else:
            self.output_path = output_path
    def get_kind(self):
        return self.kind
    def set_kind(self, k):
        self.kind = k
    def get_radius(self):
        return self.radius
    def set_radius(self, r):
        self.radius = r
    def set_output_path(self, output_path_str):
        self.output_path = output_path_str
    def get_period(self):
        """Return the period; raises ValueError if it is still zero."""
        test = self.periodiszero()
        #print(test)
        if(test):
            raise ValueError("period is not set")
        return self.period
    def set_period(self, new_period):
        """Set the period and refresh the derived angular frequency."""
        self.period = new_period
        self.set_womersley()
    def periodiszero(self, rel_tol=1e-09, abs_tol=0.0):
        # math.isclose-style comparison of self.period against zero
        return (abs(self.period-0.0) <= max(rel_tol * max(abs(self.period), abs(0.0)), abs_tol))
    def set_womersley(self):
        # fundamental angular frequency 2*pi/T; zero when the period is unset
        if ( self.period > 0.0):
            self.alpha = 2.0*np.pi / self.period # womersley number
        else:
            self.alpha = 0.0
    def get_womersley(self):
        return self.alpha
    def write_coefficients(self, coef_new, name):
        """Write coefficients to `name` in the output directory as two
        tab-separated rows: imaginary parts first, then real parts."""
        fp = os.path.join(self.output_path,name)
        with open(fp, 'w') as fourier_f:
            c_s = [(c.real, c.imag) for c in coef_new]
            imag_c = []
            real_c = []
            for c in c_s:
                # take complex conjugate, PAT!
                imag_c.append("{0:16.16f}".format(c[1]))
                real_c.append("{0:16.16f}".format(c[0]))
            fourier_f.write('\t'.join(imag_c) + '\n') #imaginary first and real second
            fourier_f.write('\t'.join(real_c) + '\n')
    def read_pressure_coef(self, name):
        """Read a coefficient file written by write_coefficients and
        return it as a complex numpy array."""
        fp = os.path.join(self.output_path, name)
        data = np.loadtxt(fp, delimiter="\t")
        shape = data.shape
        # take complex conjugate, PAT!
        coef = np.array([ complex(data[1][j], data[0][j]) for j in range(data.shape[-1])])
        return coef
    def save_fig(self, fig, fig_name="test.jpg"):
        """Save a matplotlib figure into the output directory."""
        fig.savefig(os.path.join(self.output_path, fig_name))
    def set_avg_Q(self, Q):
        self.Q_avg = Q
    def get_avg_Q(self):
        return self.Q_avg
    def set_peak_Q(self, peakflow):
        self.Q_peak_flow = peakflow
    def get_peak_Q(self):
        return self.Q_peak_flow
    def set_min_Q(self, minflow):
        self.Q_min_flow = minflow
    def get_min_Q(self):
        return self.Q_min_flow
    def set_mean_v(self, mean_v):
        self.mean_v = mean_v
    def get_mean_v(self):
        return self.mean_v
    def set_peak_v(self, peak_v):
        self.peak_v = peak_v
    def get_peak_v(self):
        return self.peak_v
    def set_nu(self):
        # kinematic viscosity from dynamic viscosity and density
        self.nu = self.mu / self.rho
    def get_nu(self):
        return self.nu
    def set_N_coeff(self, N):
        self.N = N
    def get_N_coeff(self):
        return self.N
class coefficient_converter(object):
    """Convert a set of Womersley Fourier coefficients between the
    supported representations ('ctr', 'mean', 'press', 'shear',
    'profile').

    The input coefficients are taken to be of kind `kind`; every other
    representation is obtained by rescaling with the ratio of the
    analytic Womersley normalisation factors.
    """
    coef_types = ['ctr', 'mean', 'press', 'shear', "profile"]

    def __init__(self, waveform_obj, coef, kind="ctr"):
        """create instance of converted coefficient information

        waveform_obj supplies rho (density), mu (viscosity), radius (m)
        and period (s); coef is a 1-D complex numpy array of Fourier
        coefficients of the given kind.
        """
        self.rho = waveform_obj.rho  # density
        self.mu = waveform_obj.mu  # viscosity
        self.R = waveform_obj.radius  # radius
        self.period = waveform_obj.period
        self.init_omega()
        self.init_alpha()
        self.set_alpha()  # womersley number
        self.kind = kind
        self.N_coef = coef.shape[0]  # assume numpy array
        self.set_womersley_params()
        self.input_coef = coef
        self.coef_dict = {}
        self.set_coef()

    def set_womersley_params(self):
        """Precompute per-harmonic quantities.

        omega_n: harmonic angular frequencies; lambda_: sqrt(i^3)*alpha_n;
        J0/J1: Bessel functions of the first kind evaluated at lambda_.
        """
        self.omega_n = self.omega * np.arange(1.0, self.N_coef)
        alpha_n = self.alpha * np.sqrt(np.arange(1.0, self.N_coef))
        self.lambda_ = np.sqrt(1.0j**3) * alpha_n
        self.J1 = special.jn(1, self.lambda_)
        self.J0 = special.jn(0, self.lambda_)

    def coef_norm(self, kind):
        """Return the analytic normalisation factors for `kind`.

        Index 0 is the steady (DC) factor, indices 1: are per-harmonic.
        Raises Exception for an unknown kind.
        """
        # BUGFIX: np.complex was removed in numpy 1.24; the builtin
        # `complex` is the exact equivalent dtype spec.
        coef = np.zeros(self.input_coef.shape, dtype=complex)
        if (kind == "mean"):
            coef[0] = self.R**2 /(8.0*self.mu)
            coef[1:] = 1.0 - 2.0 / self.lambda_ * self.J1 / self.J0
        elif (kind == "ctr"):
            coef[0] = self.R**2 / (4.0*self.mu)
            coef[1:] = 1.0 - 1.0 / self.J0
        elif (kind == "shear"):
            coef[0] = self.R / 2.0
            coef[1:] = - self.mu * self.lambda_ / self.R * self.J1 / self.J0
        elif (kind == "press"):
            # NOTE(review): converter_from_norm uses +1.0 for the steady
            # pressure factor while this class uses -1.0 -- confirm which
            # sign convention is intended.
            coef[0] = -1.0
            coef[1:] = 1.0j*self.rho*self.omega_n
        elif (kind == "profile"):
            coef[0] = self.R**2 / (4.0*self.mu)
            coef[1:] = np.ones(self.omega_n.shape)
        else:
            print("unknown kind given")
            raise Exception('Unknown kind: {0}'.format(kind))
        return coef

    def convert(self, coef_in_kind, coeff_set_list):
        """Fill coef_dict for every kind in coeff_set_list by rescaling
        the input (of kind coef_in_kind) with the normalisation ratio."""
        coef_normed = self.coef_norm(coef_in_kind)
        for k in coeff_set_list:
            # gets the normed info based on kind
            self.coef_dict[k] = self.input_coef * self.coef_norm(k) / coef_normed

    def set_coef(self):
        """(Re)build coef_dict for all supported kinds."""
        coef_set_list = []
        for k in self.coef_types:
            coef_set_list.append(k)
        self.convert(self.kind, coef_set_list)

    def set_radius(self, radius):
        self.R = radius
        self.update()

    def set_viscosity(self, mu):
        self.mu = mu
        self.update()

    def set_density(self, rho):
        self.rho = rho
        self.update()

    def init_omega(self):
        self.omega = 2.0*np.pi / self.period

    def set_omega(self):
        self.omega = 2.0*np.pi / self.period
        self.update()

    def set_period(self, period):
        self.period = period
        self.set_omega()

    def set_alpha(self):
        self.alpha = np.sqrt(self.omega * self.rho /self.mu) * self.R

    def init_alpha(self):
        # BUGFIX: this previously also called self.update(), which runs
        # set_womersley_params() before N_coef exists and made __init__
        # raise AttributeError; the sibling converter_from_norm already
        # omits that call.
        self.alpha = np.sqrt(self.omega * self.rho /self.mu) * self.R

    def update(self):
        """Recompute all derived quantities after a parameter change."""
        self.set_alpha()
        self.set_womersley_params()
        self.set_coef()
class converter_from_norm(object):
    """Convert the Fourier coefficients stored on a waveform object
    (waveform_obj.coef, of kind waveform_obj.kind) into all supported
    representations ('ctr', 'mean', 'press', 'shear', 'profile')."""
    coef_types = ['ctr', 'mean', 'press', 'shear', "profile"]

    def __init__(self, waveform_obj):
        """create instance of converted coefficient information

        waveform_obj supplies rho, mu, radius, period, kind, and the
        complex coefficient array `coef`.
        """
        self.rho = waveform_obj.rho  # density
        self.mu = waveform_obj.mu  # viscosity
        self.R = waveform_obj.radius  # radius
        self.period = waveform_obj.period
        self.init_omega()
        self.init_alpha()
        self.set_alpha()  # womersley number
        self.kind = waveform_obj.kind
        self.N_coef = waveform_obj.coef.shape[0]  # assume numpy array
        self.set_womersley_params()
        self.input_coef = waveform_obj.coef
        self.coef_dict = {}
        self.set_coef()

    def set_womersley_params(self):
        """Precompute per-harmonic quantities.

        omega_n: harmonic angular frequencies; lambda_: sqrt(i^3)*alpha_n;
        J0/J1: Bessel functions of the first kind evaluated at lambda_.
        """
        self.omega_n = self.omega * np.arange(1.0, self.N_coef)
        alpha_n = self.alpha * np.sqrt(np.arange(1.0, self.N_coef))
        self.lambda_ = np.sqrt(1.0j**3) * alpha_n
        self.J1 = special.jn(1, self.lambda_)
        self.J0 = special.jn(0, self.lambda_)

    def coef_norm(self, kind):
        """Return the analytic normalisation factors for `kind`.

        Index 0 is the steady (DC) factor, indices 1: are per-harmonic.
        Raises Exception for an unknown kind.
        """
        # BUGFIX: np.complex was removed in numpy 1.24; the builtin
        # `complex` is the exact equivalent dtype spec.
        coef = np.zeros(self.input_coef.shape, dtype=complex)
        if (kind == "mean"):
            coef[0] = self.R**2 /(8.0*self.mu)
            coef[1:] = 1.0 - 2.0 / self.lambda_ * self.J1 / self.J0
        elif (kind == "ctr"):
            coef[0] = self.R**2 / (4.0*self.mu)
            coef[1:] = 1.0 - 1.0 / self.J0
        elif (kind == "shear"):
            coef[0] = self.R / 2.0
            coef[1:] = - self.mu * self.lambda_ / self.R * self.J1 / self.J0
        elif (kind == "press"):
            # NOTE(review): coefficient_converter uses -1.0 for the steady
            # pressure factor while this class uses +1.0 -- confirm which
            # sign convention is intended.
            coef[0] = 1.0
            coef[1:] = 1.0j*self.rho*self.omega_n
        elif (kind == "profile"):
            coef[0] = self.R**2 / (4.0*self.mu)
            coef[1:] = np.ones(self.omega_n.shape)
        else:
            print("unknown kind given")
            raise Exception('Unknown kind: {0}'.format(kind))
        return coef

    def convert(self, coef_in_kind, coeff_set_list):
        """Fill coef_dict for every kind in coeff_set_list by rescaling
        the input (of kind coef_in_kind) with the normalisation ratio."""
        coef_normed = self.coef_norm(coef_in_kind)
        for k in coeff_set_list:
            # gets the normed info based on kind
            self.coef_dict[k] = self.input_coef * self.coef_norm(k) / coef_normed

    def set_coef(self):
        """(Re)build coef_dict for all supported kinds."""
        coef_set_list = []
        for k in self.coef_types:
            coef_set_list.append(k)
        self.convert(self.kind, coef_set_list)

    def set_radius(self, radius):
        self.R = radius
        self.update()

    def set_viscosity(self, mu):
        self.mu = mu
        self.update()

    def set_density(self, rho):
        self.rho = rho
        self.update()

    def init_omega(self):
        self.omega = 2.0*np.pi / self.period

    def set_omega(self):
        self.omega = 2.0*np.pi / self.period
        self.update()

    def set_period(self, period):
        self.period = period
        self.set_omega()

    def set_alpha(self):
        self.alpha = np.sqrt(self.omega * self.rho /self.mu) * self.R

    def init_alpha(self):
        # deliberately does NOT call update(): N_coef and input_coef are
        # not yet set when this runs inside __init__
        self.alpha = np.sqrt(self.omega * self.rho /self.mu) * self.R

    def update(self):
        """Recompute all derived quantities after a parameter change."""
        self.set_alpha()
        self.set_womersley_params()
        self.set_coef()
# this converts the coefficients from the centerline flow velocity using the above function
# beware it would be different from using a the mean velocity
def plot_womersley(waveform_obj, t, v_mean, time=0.0, N_coefs=20, save_clk=False):
    """
    Build and plot the Womersley solution derived from a centerline
    velocity waveform: velocity profile (total/steady/unsteady), peak
    velocity, flow rate, wall shear and pressure traces, printing summary
    statistics along the way and writing the profile Fourier coefficients
    to disk.

    @param waveform obj, has all the information regarding the waveform
    @param t, the time values
    @param v, the centerline velocity coefficients
    @time t, single point in time to plot the velocity profile

    NOTE(review): `t` and `save_clk` are currently unused -- the
    coefficient file is written unconditionally (the save_clk guard is
    commented out below); confirm whether it should gate the write.
    """
    #from womersley.components import coefficient_converter
    T = waveform_obj.get_period()
    N_coef = N_coefs
    omega = (2.0*np.pi)/T
    R = waveform_obj.radius # meters
    mu = waveform_obj.mu #Pa s
    rho = waveform_obj.rho #kg/m**3
    # treat v_mean as centerline ('ctr') data and recover the mean-velocity trace
    coef_peak = womersley.utils.generate_coefficents(v_mean, 1024)
    coeff_inst_2 = coefficient_converter(waveform_obj, coef_peak, "ctr")
    flow_test, flow_time_test = womersley.utils.reconstruct(coeff_inst_2.coef_dict["mean"][:N_coef], T)
    # flow added from additional outlet mean flow for the
    # right posterior inferior cerebeller
    flow_add = flow_test + 1.12647226e-07/(np.pi*R**2)/flow_time_test[-1]
    flow_rate = np.pi*R**2*flow_test
    flow_total = np.trapz(flow_rate, flow_time_test)
    print('Flow, Area under curve: {0} $ml/cycle$'.format(flow_total.real*10**6*60))
    print('Flow average: {0} $ml/min$'.format(flow_total.real/T*10**6*60))
    # re-derive all representations from the adjusted mean flow
    coef_mean_new = womersley.utils.generate_coefficents(flow_add, 1024)
    coeff_inst = coefficient_converter(waveform_obj, coef_mean_new, "mean")
    #stuff to get coefficients
    coef_mean = coeff_inst.coef_dict["mean"][0:N_coef]
    coef_v = coeff_inst.coef_dict["ctr"][0:N_coef] #peak_2_mean(coef_v, rho, omega, mu, R)
    coef_p = coeff_inst.coef_dict["press"][0:N_coef] #peak_2_press(coef_v, rho, omega, mu, R)
    coef_shear = coeff_inst.coef_dict["shear"][0:N_coef] #peak_2_shear(coef_v, rho, omega, mu, R)
    coef_flow = coeff_inst.coef_dict["profile"][0:N_coef] #peak_2_shear(coef_v, rho, omega, mu, R)
    #print(coef_new)
    # Lets create some data to visualize
    r = np.linspace(-R, R, 2000)
    # convert velocity coefficients to pressure coefficients
    #write fourier coefficients
    #write fourier coefficients
    #if ( save_clk == True):
    #print("got here")
    waveform_obj.write_coefficients(coef_flow, waveform_obj.waveform_ctr_name)
    print("saving coefficients {0:s}".format(waveform_obj.waveform_ctr_name))
    #print(coef)
    # sample the profile across the diameter: u = total velocity,
    # ss = steady part, us = unsteady part
    u = []
    us = []
    ss = []
    for r_pt in r:
        #print(womersley_velocity(coef_p, rho, omega, mu, R, r_pt, time))
        u.append(womersley.utils.womersley_velocity(coef_flow, rho, omega, mu, R, r_pt, time).real)
        sol = womersley.utils.womersley_parts(coef_flow, rho, omega, mu, R, r_pt, time)
        ss.append(sol[0].real)
        us.append(sol[1].real)
    #print(u)
    # Lets create a plotting grid with gridspec
    # Create a 2 x 2 plotting grid object with horizontal and vertical spacing wspace and hspace
    gs = plt.GridSpec(7, 2, wspace=0.2, hspace=0.2)
    # Create a figure
    fig = plt.figure(figsize=(11, 19))
    # SUBFIGURE 1
    # Create subfigure 1 (takes over two rows (0 to 1) and column 0 of the grid)
    ax1 = fig.add_subplot(gs[0, :])
    ax1.set_title('Womersley Velocity profile', fontsize=20)
    ax1.set_xlabel('velocity (m/s)', fontsize=20)
    ax1.set_ylabel('Radius, (m)', fontsize=20)
    #ax1.plot(np.abs(u)/100., r)
    ax1.plot(u, r)
    # mark and label the centerline value
    int_mid = len(u)//2
    ax1.plot(u[int_mid], r[int_mid], 'go',
             markersize=10, label="{0:2.3e}".format(u[int_mid]))
    #ax1.plot(np.abs(u)[::-1], -1.0*r[::-1])
    # Arrange axis and labels
    ax1.legend()
    ax6 = fig.add_subplot(gs[1, :])
    ax6.set_title('Womersley steady', fontsize=20)
    ax6.set_xlabel('velocity (m/s)', fontsize=20)
    ax6.set_ylabel('Radius, (m)', fontsize=20)
    #ax1.plot(np.abs(u)/100., r)
    ax6.plot(ss, r)
    int_mid = len(ss)//2
    ax6.plot(ss[int_mid], r[int_mid], 'go',
             markersize=10, label="{0:2.3e}".format(ss[int_mid]))
    #ax1.plot(np.abs(u)[::-1], -1.0*r[::-1])
    # Arrange axis and labels
    ax6.legend()
    ax7 = fig.add_subplot(gs[2, :])
    ax7.set_title('Womersley unsteady', fontsize=20)
    ax7.set_xlabel('velocity (m/s)', fontsize=20)
    ax7.set_ylabel('Radius, (m)', fontsize=20)
    #ax1.plot(np.abs(u)/100., r)
    ax7.plot(us, r)
    int_mid = len(us)//2
    ax7.plot(us[int_mid], r[int_mid], 'go',
             markersize=10, label="{0:2.3e}".format(us[int_mid]))
    #ax1.plot(np.abs(u)[::-1], -1.0*r[::-1])
    # Arrange axis and labels
    ax7.legend()
    # peak (centerline) velocity over one cycle
    ax2 = fig.add_subplot(gs[3, :])
    ax2.set_title('Internal Carotid Artery', fontsize=20)
    #ax2.set_xlabel(r'$t$', fontsize=20)
    ax2.set_ylabel(r'Peak, $V(t)$ $m/s$', fontsize=20)
    Q = womersley.utils.reconstruct_pt(coef_v, time, T)
    Q2, t2 = womersley.utils.reconstruct(coef_v, T)
    vel_total = np.trapz(Q2,t2)
    print('peak velocity, Area under curve: {0} $m/s/cycle$'.format(vel_total.real))
    print('peak velocity average: {0} $m^3/s$'.format(vel_total.real/T))
    print('velocity peak: {0} velocity min: {1} $m^3/s$'.format(np.max(Q2.real),
                                                                np.min(Q2.real)))
    ax2.plot(t2, np.real(Q2))
    ax2.plot(time, np.real(Q), 'ro', markersize=10, label = "{0:2.3e}".format(Q.real))
    ax2.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
    ax2.legend()
    ax1.axis([-0.06, 1.0,-R,R])
    ax2.set_xlim((0, T))
    # volumetric flow rate over one cycle
    ax3 = fig.add_subplot(gs[4, :])
    #ax3.set_title('Common Carotid Artery')
    #ax3.set_xlabel(r'$t$', fontsize=20)
    ax3.set_ylabel(r'Flow Rate, $Q(t)$ $ml/min$', fontsize=20)
    mean = womersley.utils.reconstruct_pt(coef_mean, time, T)
    mean3, t3 = womersley.utils.reconstruct(coef_mean, T)
    flow_total = np.pi*R**2*np.trapz(mean3,t3)
    flow_test = np.pi*R**2*mean3
    print('Flow, Area under curve: {0} $ml/cycle$'.format(flow_total.real*10**6*60))
    print('Flow average: {0} $ml/min$'.format(flow_total.real/T*10**6*60))
    print('Flow peak: {0} Flow min: {1} $ml/min$'.format(np.max(flow_test.real)*10**6*60,
                                                         np.min(flow_test.real)*10**6*60))
    print('Pulsatility Index (Q_max - Q_min)/Q_mean: {0} '.format((np.max(flow_test.real)- np.min(flow_test.real))
                                                                  / flow_total.real/T))
    ax3.plot(t3, np.real(flow_test)*10**6*60)
    ax3.plot(time, np.real(mean)*np.pi*R**2*10**6*60, 'ro',
             markersize=10, label = "{0:2.3e}".format(mean.real*np.pi*R**2*10**6*60))
    ax3.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
    ax3.legend()
    ax3.set_xlim((0, T))
    # wall shear stress over one cycle
    ax4 = fig.add_subplot(gs[5, :])
    #ax4.set_title('Common Carotid Artery')
    ax4.set_xlabel(r'$t$', fontsize=20)
    ax4.set_ylabel(r'Shear, $\tau_{wall}$ $Pa$', fontsize=20)
    shear = womersley.utils.reconstruct_pt(coef_shear, time, T)
    shear4, t4 = womersley.utils.reconstruct(coef_shear, T)
    shear_total = np.trapz(shear4,t4)
    print('shear, Area under curve: {0} $Pa/cycle$'.format(shear_total.real))
    print('shear average: {0} $Pa/s$'.format(shear_total.real/T))
    print('shear peak: {0} shear min: {1} $Pa/s$'.format(np.max(shear4.real),
                                                         np.min(shear4.real)))
    ax4.plot(t4, np.real(shear4))
    ax4.plot(time, np.real(shear), 'ro', markersize=10, label = "{0:2.3e}".format(shear.real))
    ax4.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
    ax4.legend()
    ax4.set_xlim((0, T))
    # pressure gradient trace over one cycle
    ax5 = fig.add_subplot(gs[6, :])
    #ax4.set_title('Common Carotid Artery')
    ax5.set_xlabel(r'$t$', fontsize=20)
    ax5.set_ylabel(r'Pressure, $P$ $Pa$', fontsize=20)
    press = womersley.utils.reconstruct_pt(coef_p, time, T)
    press5, t5 = womersley.utils.reconstruct(coef_p, T)
    press_total = np.trapz(press5,t5)
    print('pressure, Area under curve: {0} $Pa/cycle$'.format(press_total.real))
    print('pressure average: {0} $Pa/s$'.format(press_total.real/T))
    print('pressure peak: {0} pressure min: {1} $Pa/s$'.format(np.max(press5.real),
                                                               np.min(press5.real)))
    ax5.plot(t5, np.real(press5))
    ax5.plot(time, np.real(press), 'ro', markersize=10, label = "{0:2.2e}".format(press.real))
    ax5.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
    ax5.legend()
    ax5.set_xlim((0, T))
    #ensure all subplots fit bounding box
    gs.tight_layout(fig)
    #plt.show()
# This converts the coefficients from the centerline flow velocity using the
# above function; beware that the result would differ from using the mean
# velocity.
def plot_womersley_test(waveform_obj, coef, time=0.0):
    """Plot the Womersley solution derived from profile coefficients.

    Draws a single figure containing: the radial velocity profile at
    *time*, plus the centerline velocity, mean velocity, wall shear and
    pressure waveforms reconstructed over one period, each with the value
    at *time* marked by a red dot.

    Parameters
    ----------
    waveform_obj : object providing get_period(), get_N_coeff() and the
        attributes radius, mu, rho (units noted inline below).
    coef : sequence of Fourier coefficients of the velocity profile.
    time : float, instant in seconds at which the point markers are drawn.
    """
    coef_p = coef
    T = waveform_obj.get_period()
    omega = (2.0 * np.pi) / T  # angular frequency of the base harmonic
    R = waveform_obj.radius  # meters
    mu = waveform_obj.mu  # Pa s
    rho = waveform_obj.rho  # kg/m**3
    N_coef = waveform_obj.get_N_coeff()
    coeff_inst = coefficient_converter(waveform_obj, coef_p, "profile")
    # Keep only the first N_coef harmonics of each derived quantity.
    coef_mean = coeff_inst.coef_dict["mean"][0:N_coef]
    coef_v = coeff_inst.coef_dict["ctr"][0:N_coef]
    coef_p = coeff_inst.coef_dict["press"][0:N_coef]
    coef_shear = coeff_inst.coef_dict["shear"][0:N_coef]
    coef_flow = coeff_inst.coef_dict["profile"][0:N_coef]
    # Radial sample points across the vessel diameter.
    r = np.linspace(-R, R, 200)
    # Velocity profile at the requested instant (comprehension replaces the
    # manual append loop; the unused time grid `t` was removed).
    u = [womersley.utils.womersley_velocity(coef_flow, rho, omega, mu, R,
                                            r_pt, time).real
         for r_pt in r]
    # 5-row plotting grid with horizontal/vertical spacing wspace/hspace.
    gs = plt.GridSpec(5, 2, wspace=0.2, hspace=0.2)
    fig = plt.figure(figsize=(11, 15))
    # SUBFIGURE 1: velocity profile across the radius.
    ax1 = fig.add_subplot(gs[0, :])
    ax1.set_title('Womersley Velocity profile', fontsize=20)
    ax1.set_xlabel('velocity (m/s)', fontsize=20)
    ax1.set_ylabel('Radius, (m)', fontsize=20)
    ax1.plot(u, r)
    ax1.plot(u[100], r[100], 'ro', markersize=10,
             label="{0:2.3e}".format(u[100]))
    ax1.legend()
    # SUBFIGURE 2: centerline (peak) velocity over one period.
    ax2 = fig.add_subplot(gs[1, :])
    ax2.set_title('Common Carotid Artery', fontsize=20)
    ax2.set_ylabel(r'Peak, $V(t)$ $m/s$', fontsize=20)
    Q = womersley.utils.reconstruct_pt(coef_v, time, T)
    Q2, t2 = womersley.utils.reconstruct2(coef_v, T)
    ax2.plot(t2, np.real(Q2))
    ax2.plot(time, np.real(Q), 'ro', markersize=10,
             label="{0:2.3e}".format(Q.real))
    ax2.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    ax2.legend()
    ax1.axis([-0.06, 1.0, -R, R])
    ax2.set_xlim((0, T))
    # SUBFIGURE 3: cross-section mean velocity.
    ax3 = fig.add_subplot(gs[2, :])
    ax3.set_ylabel(r'Mean, $\bar{V}(t)$ $m/s$', fontsize=20)
    mean = womersley.utils.reconstruct_pt(coef_mean, time, T)
    mean3, t3 = womersley.utils.reconstruct2(coef_mean, T)
    ax3.plot(t3, np.real(mean3))
    ax3.plot(time, np.real(mean), 'ro', markersize=10,
             label="{0:2.3e}".format(mean.real))
    ax3.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    ax3.legend()
    ax3.set_xlim((0, T))
    # SUBFIGURE 4: wall shear stress.
    ax4 = fig.add_subplot(gs[3, :])
    ax4.set_xlabel(r'$t$', fontsize=20)
    ax4.set_ylabel(r'Shear, $\tau_{wall}$ $Pa$', fontsize=20)
    shear = womersley.utils.reconstruct_pt(coef_shear, time, T)
    shear4, t4 = womersley.utils.reconstruct2(coef_shear, T)
    ax4.plot(t4, np.real(shear4))
    ax4.plot(time, np.real(shear), 'ro', markersize=10,
             label="{0:2.3e}".format(shear.real))
    ax4.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    ax4.legend()
    ax4.set_xlim((0, T))
    # SUBFIGURE 5: pressure waveform.
    ax5 = fig.add_subplot(gs[4, :])
    ax5.set_xlabel(r'$t$', fontsize=20)
    ax5.set_ylabel(r'Pressure, $P$ $Pa$', fontsize=20)
    press = womersley.utils.reconstruct_pt(coef_p, time, T)
    press5, t5 = womersley.utils.reconstruct2(coef_p, T)
    ax5.plot(t5, np.real(press5))
    ax5.plot(time, np.real(press), 'ro', markersize=10,
             label="{0:2.3e}".format(press.real))
    ax5.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    ax5.legend()
    ax5.set_xlim((0, T))
    # Ensure all subplots fit the bounding box.
    gs.tight_layout(fig)
# This converts the coefficients from the centerline flow velocity using the
# above function; beware that the result would differ from using the mean
# velocity.
def plot_womersley_mean(waveform_obj, coef, time=0.0):
    """Plot mean velocity, flow rate and pressure over one cardiac cycle.

    Side effects: stores the cycle-averaged, peak and minimum flow rates
    on *waveform_obj* via set_avg_Q/set_peak_Q/set_min_Q and prints a
    summary of the flow and pressure statistics.

    Parameters
    ----------
    waveform_obj : object providing get_period(), the radius attribute and
        the Q setter/getter methods used below.
    coef : sequence of Fourier coefficients of the velocity profile.
    time : float, instant in seconds at which the point markers are drawn.

    Note: unused locals of the original (omega, mu, rho, coef_peak,
    coef_v, r, t) were removed; behavior is unchanged.
    """
    coef_p = coef
    T = waveform_obj.get_period()
    R = waveform_obj.radius  # meters
    # Convert the supplied profile coefficients into the derived sets.
    coeff_inst = coefficient_converter(waveform_obj, coef_p, "profile")
    coef_mean = coeff_inst.coef_dict["mean"]
    # 3-row plotting grid.
    gs = plt.GridSpec(3, 2, wspace=0.2, hspace=0.2)
    fig = plt.figure(figsize=(11, 13))
    # SUBFIGURE 1: mean velocity waveform.
    ax3 = fig.add_subplot(gs[0, :])
    ax3.set_ylabel(r'Mean, $\bar{V}(t)$ $m/s$', fontsize=20)
    mean = womersley.utils.reconstruct_pt(coef_mean, time, T)
    mean3, t3 = womersley.utils.reconstruct2(coef_mean, T, 1000)
    # Volumetric flow rate Q = mean velocity times lumen area.
    Q_flow = np.pi * R**2 * mean3
    Q_total = np.trapz(Q_flow, t3)
    waveform_obj.set_avg_Q(Q_total.real / T)
    waveform_obj.set_peak_Q(np.max(Q_flow.real))
    waveform_obj.set_min_Q(np.min(Q_flow.real))
    print('Q, Area under curve: {0} $m^3/cycle$'.format(Q_total.real))
    print('Q average: {0} $m^3/s$'.format(waveform_obj.get_avg_Q()))
    print('Q peak, {0} Q min, {1} $m^3/s$'.format(waveform_obj.get_peak_Q(), waveform_obj.get_min_Q()))
    q_pt = np.pi * R**2 * mean
    ax3.plot(t3, np.real(mean3))
    ax3.plot(time, np.real(mean), 'ro', markersize=10,
             label="{0:2.2e}".format(mean.real))
    ax3.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    ax3.legend()
    ax3.set_xlim((0, T))
    # SUBFIGURE 2: flow rate converted to ml/min.
    ax4 = fig.add_subplot(gs[1, :])
    ax4.set_xlabel(r'$t$', fontsize=20)
    ax4.set_ylabel(r'Q, $Q(t)$ $ml/min$', fontsize=20)
    ax4.plot(t3, np.real(Q_flow)*10.**6*60.)
    ax4.plot(time, np.real(q_pt)*10.**6*60., 'ro', markersize=10,
             label="{0:2.2e}".format(q_pt.real*10.**6*60.))
    ax4.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    ax4.legend()
    ax4.set_xlim((0, T))
    # SUBFIGURE 3: pressure waveform.
    ax5 = fig.add_subplot(gs[2, :])
    ax5.set_xlabel(r'$t$', fontsize=20)
    ax5.set_ylabel(r'Pressure, $P$ $Pa$', fontsize=20)
    # NOTE(review): pressure is reconstructed from the raw input
    # coefficients (coef_p = coef), not from coeff_inst's "press" set —
    # confirm this is intended.
    press = womersley.utils.reconstruct_pt(coef_p, time, T)
    press5, t5 = womersley.utils.reconstruct2(coef_p, T)
    # Peak-to-peak pressure difference over the cycle.
    dP = np.max(press5.real) - np.min(press5.real)
    print('dP: {0} $Pa/s$'.format(dP/T))
    ax5.plot(t5, np.real(press5))
    ax5.plot(time, np.real(press), 'ro', markersize=10,
             label="{0:2.2e}".format(press.real))
    ax5.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    ax5.legend()
    ax5.set_xlim((0, T))
    # Ensure all subplots fit the bounding box.
    gs.tight_layout(fig)
# This converts the coefficients from the centerline flow velocity using the
# above function; beware that the result would differ from using the mean
# velocity.
def plot_womersley_mean_norm(waveform_obj, time=0.0):
    """Plot mean velocity, flow rate and pressure from normalized coefficients.

    Like plot_womersley_mean, but the coefficient sets come from
    converter_from_norm(waveform_obj) and the printed flow statistics are
    reported in ml/min units.  Side effects: stores the cycle-averaged,
    peak and minimum flow rates on *waveform_obj*.

    Note: unused locals of the original (omega, mu, rho, coef_peak,
    coef_v, r, t) were removed; behavior is unchanged.
    """
    T = waveform_obj.get_period()
    R = waveform_obj.radius  # meters
    # Recover dimensional coefficient sets from the normalized waveform.
    coeff_inst = converter_from_norm(waveform_obj)
    coef_mean = coeff_inst.coef_dict["mean"]
    coef_p = coeff_inst.coef_dict["press"]
    # 3-row plotting grid.
    gs = plt.GridSpec(3, 2, wspace=0.2, hspace=0.2)
    fig = plt.figure(figsize=(11, 13))
    # SUBFIGURE 1: mean velocity waveform.
    ax3 = fig.add_subplot(gs[0, :])
    ax3.set_ylabel(r'Mean, $\bar{V}(t)$ $m/s$', fontsize=20)
    mean = womersley.utils.reconstruct_pt(coef_mean, time, T)
    mean3, t3 = womersley.utils.reconstruct2(coef_mean, T, 1000)
    # Volumetric flow rate Q = mean velocity times lumen area.
    Q_flow = np.pi * R**2 * mean3
    Q_total = np.trapz(Q_flow, t3)
    waveform_obj.set_avg_Q(Q_total.real / T)
    waveform_obj.set_peak_Q(np.max(Q_flow.real))
    waveform_obj.set_min_Q(np.min(Q_flow.real))
    print('Q, Area under curve: {0} $ml/cycle$'.format(Q_total.real*10**6*60))
    print('Q average: {0} $ml/min$'.format(waveform_obj.get_avg_Q()*10**6*60))
    print('Q peak, {0} Q min, {1} $ml/min$'.format(waveform_obj.get_peak_Q()*10**6*60, waveform_obj.get_min_Q()*10**6*60))
    q_pt = np.pi * R**2 * mean
    ax3.plot(t3, np.real(mean3))
    ax3.plot(time, np.real(mean), 'ro', markersize=10,
             label="{0:2.2e}".format(mean.real))
    ax3.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    ax3.legend()
    ax3.set_xlim((0, T))
    # SUBFIGURE 2: flow rate converted to ml/min.
    ax4 = fig.add_subplot(gs[1, :])
    ax4.set_xlabel(r'$t$', fontsize=20)
    ax4.set_ylabel(r'Q, $Q(t)$ $ml/min$', fontsize=20)
    ax4.plot(t3, np.real(Q_flow)*10.**6*60.)
    ax4.plot(time, np.real(q_pt)*10.**6*60., 'ro', markersize=10,
             label="{0:2.2e}".format(q_pt.real*10.**6*60.))
    ax4.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    ax4.legend()
    ax4.set_xlim((0, T))
    # SUBFIGURE 3: pressure waveform.
    ax5 = fig.add_subplot(gs[2, :])
    ax5.set_xlabel(r'$t$', fontsize=20)
    ax5.set_ylabel(r'Pressure, $P$ $Pa$', fontsize=20)
    press = womersley.utils.reconstruct_pt(coef_p, time, T)
    press5, t5 = womersley.utils.reconstruct2(coef_p, T)
    # Peak-to-peak pressure difference over the cycle.
    dP = np.max(press5.real) - np.min(press5.real)
    print('dP: {0} $Pa/s$'.format(dP/T))
    ax5.plot(t5, np.real(press5))
    ax5.plot(time, np.real(press), 'ro', markersize=10,
             label="{0:2.2e}".format(press.real))
    ax5.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
    ax5.legend()
    ax5.set_xlim((0, T))
    # Ensure all subplots fit the bounding box.
    gs.tight_layout(fig)
| bsd-2-clause |
18padx08/PPTex | PPTexEnv_x86_64/lib/python2.7/site-packages/matplotlib/hatch.py | 10 | 7132 | """
Contains a classes for generating hatch patterns.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import numpy as np
from matplotlib.path import Path
class HatchPatternBase:
    """
    The base class for a hatch pattern.
    """
    # Marker base class: concrete patterns are expected to expose
    # num_vertices and set_vertices_and_codes(vertices, codes).
    pass
class HorizontalHatch(HatchPatternBase):
    """Horizontal line hatching for the '-' and '+' hatch specifiers."""

    def __init__(self, hatch, density):
        # One line per '-' or '+' occurrence, scaled by the density.
        count = hatch.count('-') + hatch.count('+')
        self.num_lines = int(count * density)
        self.num_vertices = 2 * self.num_lines

    def set_vertices_and_codes(self, vertices, codes):
        """Fill *vertices*/*codes* with evenly spaced horizontal strokes."""
        positions, spacing = np.linspace(0.0, 1.0, self.num_lines,
                                         endpoint=False, retstep=True)
        positions = positions + spacing / 2.  # center each line in its band
        vertices[0::2, 0] = 0.0
        vertices[1::2, 0] = 1.0
        vertices[0::2, 1] = positions
        vertices[1::2, 1] = positions
        codes[0::2] = Path.MOVETO
        codes[1::2] = Path.LINETO
class VerticalHatch(HatchPatternBase):
    """Vertical line hatching for the '|' and '+' hatch specifiers."""

    def __init__(self, hatch, density):
        # One line per '|' or '+' occurrence, scaled by the density.
        count = hatch.count('|') + hatch.count('+')
        self.num_lines = int(count * density)
        self.num_vertices = 2 * self.num_lines

    def set_vertices_and_codes(self, vertices, codes):
        """Fill *vertices*/*codes* with evenly spaced vertical strokes."""
        positions, spacing = np.linspace(0.0, 1.0, self.num_lines,
                                         endpoint=False, retstep=True)
        positions = positions + spacing / 2.  # center each line in its band
        vertices[0::2, 0] = positions
        vertices[1::2, 0] = positions
        vertices[0::2, 1] = 0.0
        vertices[1::2, 1] = 1.0
        codes[0::2] = Path.MOVETO
        codes[1::2] = Path.LINETO
class NorthEastHatch(HatchPatternBase):
    """Diagonal hatching for the '/', 'x' and 'X' hatch specifiers."""

    def __init__(self, hatch, density):
        occurrences = (hatch.count('/') + hatch.count('x') +
                       hatch.count('X'))
        self.num_lines = int(occurrences * density)
        # num_lines + 1 strokes with two endpoints each; none when empty.
        self.num_vertices = (self.num_lines + 1) * 2 if self.num_lines else 0

    def set_vertices_and_codes(self, vertices, codes):
        """Fill endpoints of parallel diagonal lines spanning the unit cell."""
        offsets = np.linspace(-0.5, 0.5, self.num_lines + 1, True)
        vertices[0::2, 0] = 0.0 + offsets
        vertices[0::2, 1] = 0.0 - offsets
        vertices[1::2, 0] = 1.0 + offsets
        vertices[1::2, 1] = 1.0 - offsets
        codes[0::2] = Path.MOVETO
        codes[1::2] = Path.LINETO
class SouthEastHatch(HatchPatternBase):
    """Diagonal hatching for the '\\', 'x' and 'X' hatch specifiers."""

    def __init__(self, hatch, density):
        self.num_lines = int((hatch.count('\\') + hatch.count('x') +
                              hatch.count('X')) * density)
        # Fix: the original unconditionally assigned num_vertices here and
        # then immediately recomputed it in the if/else below — a dead
        # store.  Dropping it makes the code consistent with NorthEastHatch
        # without changing behavior.
        if self.num_lines:
            self.num_vertices = (self.num_lines + 1) * 2
        else:
            self.num_vertices = 0

    def set_vertices_and_codes(self, vertices, codes):
        """Fill endpoints of parallel diagonal lines spanning the unit cell."""
        steps = np.linspace(-0.5, 0.5, self.num_lines + 1, True)
        vertices[0::2, 0] = 0.0 + steps
        vertices[0::2, 1] = 1.0 + steps
        vertices[1::2, 0] = 1.0 + steps
        vertices[1::2, 1] = 0.0 + steps
        codes[0::2] = Path.MOVETO
        codes[1::2] = Path.LINETO
class Shapes(HatchPatternBase):
    """Base class for hatches tiled from a repeated small shape.

    Subclasses must define ``num_rows``, ``size``, ``shape_vertices`` and
    ``shape_codes`` before this ``__init__`` runs.
    """
    # Unfilled shapes emit a second, slightly smaller reversed outline so
    # the shape renders as a ring under the even-odd fill rule.
    filled = False

    def __init__(self, hatch, density):
        if self.num_rows == 0:
            self.num_shapes = 0
            self.num_vertices = 0
        else:
            # Alternating full rows (num_rows + 1 shapes) and staggered
            # rows (num_rows shapes).
            self.num_shapes = ((self.num_rows // 2 + 1) * (self.num_rows + 1) +
                               (self.num_rows // 2) * (self.num_rows))
            # Each shape contributes its outline once (filled) or twice
            # (outer + inner outline when unfilled).
            self.num_vertices = (self.num_shapes *
                                 len(self.shape_vertices) *
                                 (self.filled and 1 or 2))

    def set_vertices_and_codes(self, vertices, codes):
        """Write every tiled shape into the pre-allocated output buffers."""
        offset = 1.0 / self.num_rows
        shape_vertices = self.shape_vertices * offset * self.size
        if not self.filled:
            # Inner outline (reversed winding) at 90% scale makes a ring.
            inner_vertices = shape_vertices[::-1] * 0.9
        shape_codes = self.shape_codes
        shape_size = len(shape_vertices)
        cursor = 0
        for row in xrange(self.num_rows + 1):
            if row % 2 == 0:
                cols = np.linspace(0.0, 1.0, self.num_rows + 1, True)
            else:
                # Staggered row, shifted by half a cell.
                cols = np.linspace(offset / 2.0, 1.0 - offset / 2.0,
                                   self.num_rows, True)
            row_pos = row * offset
            for col_pos in cols:
                vertices[cursor:cursor + shape_size] = (shape_vertices +
                                                        (col_pos, row_pos))
                codes[cursor:cursor + shape_size] = shape_codes
                cursor += shape_size
                if not self.filled:
                    vertices[cursor:cursor + shape_size] = (inner_vertices +
                                                            (col_pos, row_pos))
                    codes[cursor:cursor + shape_size] = shape_codes
                    cursor += shape_size
class Circles(Shapes):
    """Circle-shaped hatch base; subclasses supply ``size``/``num_rows``."""

    def __init__(self, hatch, density):
        circle = Path.unit_circle()
        self.shape_vertices = circle.vertices
        self.shape_codes = circle.codes
        Shapes.__init__(self, hatch, density)
class SmallCircles(Circles):
    """Small unfilled circles for the 'o' hatch specifier."""
    size = 0.2

    def __init__(self, hatch, density):
        self.num_rows = hatch.count('o') * density
        Circles.__init__(self, hatch, density)
class LargeCircles(Circles):
    """Large unfilled circles for the 'O' hatch specifier."""
    size = 0.35

    def __init__(self, hatch, density):
        self.num_rows = hatch.count('O') * density
        Circles.__init__(self, hatch, density)
class SmallFilledCircles(SmallCircles):
    """Small filled circles (dots) for the '.' hatch specifier."""
    size = 0.1
    filled = True

    def __init__(self, hatch, density):
        self.num_rows = hatch.count('.') * density
        # Deliberately call Circles.__init__ (not SmallCircles.__init__) so
        # the num_rows computed here from '.' is the one that takes effect.
        Circles.__init__(self, hatch, density)
class Stars(Shapes):
    """Filled five-pointed stars for the '*' hatch specifier."""
    size = 1.0 / 3.0
    filled = True

    def __init__(self, hatch, density):
        self.num_rows = hatch.count('*') * density
        star = Path.unit_regular_star(5)
        self.shape_vertices = star.vertices
        codes = np.ones(len(self.shape_vertices)) * Path.LINETO
        codes[0] = Path.MOVETO  # start a new subpath at the first vertex
        self.shape_codes = codes
        Shapes.__init__(self, hatch, density)
# Registry of all hatch pattern classes; get_path instantiates each one
# against the hatch specifier string.
_hatch_types = [
    HorizontalHatch,
    VerticalHatch,
    NorthEastHatch,
    SouthEastHatch,
    SmallCircles,
    LargeCircles,
    SmallFilledCircles,
    Stars
    ]
def get_path(hatchpattern, density=6):
    """
    Given a hatch specifier, *hatchpattern*, generates Path to render
    the hatch in a unit square. *density* is the number of lines per
    unit square.

    Returns an empty Path when the specifier matches no pattern.
    """
    density = int(density)

    # Instantiate every pattern; patterns not present in the specifier
    # contribute zero vertices.
    patterns = [hatch_type(hatchpattern, density)
                for hatch_type in _hatch_types]
    # Idiom fix: sum over a generator instead of building a throwaway list.
    num_vertices = sum(pattern.num_vertices for pattern in patterns)

    if num_vertices == 0:
        return Path(np.empty((0, 2)))

    vertices = np.empty((num_vertices, 2))
    codes = np.empty((num_vertices,), np.uint8)

    # Let each active pattern fill its slice of the shared buffers.
    cursor = 0
    for pattern in patterns:
        if pattern.num_vertices != 0:
            vertices_chunk = vertices[cursor:cursor + pattern.num_vertices]
            codes_chunk = codes[cursor:cursor + pattern.num_vertices]
            pattern.set_vertices_and_codes(vertices_chunk, codes_chunk)
            cursor += pattern.num_vertices

    return Path(vertices, codes)
| mit |
nrhine1/scikit-learn | sklearn/feature_extraction/text.py | 110 | 50157 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Robert Layton <robertlayton@gmail.com>
# Jochen Wersdörfer <jochen@wersdoerfer.de>
# Roman Sinayev <roman.sinayev@gmail.com>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
# Public API of this module: the vectorizer classes, the English stop-word
# set and the accent/tag stripping helper functions.
__all__ = ['CountVectorizer',
           'ENGLISH_STOP_WORDS',
           'TfidfTransformer',
           'TfidfVectorizer',
           'strip_accents_ascii',
           'strip_accents_unicode',
           'strip_tags']
def strip_accents_unicode(s):
    """Transform accentuated unicode symbols into their simple counterpart

    Warning: the python-level loop and join operations make this
    implementation 20 times slower than the strip_accents_ascii basic
    normalization.

    See also
    --------
    strip_accents_ascii
        Remove accentuated char for any unicode symbol that has a direct
        ASCII equivalent.
    """
    # Decompose each character, then drop the combining (accent) marks.
    decomposed = unicodedata.normalize('NFKD', s)
    return ''.join(ch for ch in decomposed
                   if not unicodedata.combining(ch))
def strip_accents_ascii(s):
    """Transform accentuated unicode symbols into ascii or nothing

    Warning: this solution is only suited for languages that have a direct
    transliteration to ASCII symbols.

    See also
    --------
    strip_accents_unicode
        Remove accentuated char for any unicode symbol.
    """
    # Decompose, then drop every code point with no ASCII representation.
    decomposed = unicodedata.normalize('NFKD', s)
    return decomposed.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
    """Basic regexp based HTML / XML tag stripper function

    For serious HTML/XML preprocessing you should rather use an external
    library such as lxml or BeautifulSoup.
    """
    # Replace each <...> tag with a single space so adjacent words do not
    # get glued together.
    tag_pattern = re.compile(r"<([^>]+)>", flags=re.UNICODE)
    return tag_pattern.sub(" ", s)
def _check_stop_list(stop):
    """Resolve the ``stop_words`` parameter into an actual word collection.

    Returns the built-in English list, ``None``, or a frozenset of the
    user-supplied words; raises for an unknown string name.
    """
    if stop == "english":
        return ENGLISH_STOP_WORDS
    if isinstance(stop, six.string_types):
        raise ValueError("not a built-in stop list: %s" % stop)
    if stop is None:
        return None
    # Otherwise assume it's a collection of words.
    return frozenset(stop)
class VectorizerMixin(object):
    """Provides common code for text vectorizers (tokenization logic)."""

    # Matches runs of two or more whitespace chars, used to collapse them
    # to a single space before char n-gram extraction.
    _white_spaces = re.compile(r"\s\s+")

    def decode(self, doc):
        """Decode the input into a string of unicode symbols

        The decoding strategy depends on the vectorizer parameters.
        """
        # `doc` may be a filename, an open file-like object, or the raw
        # content itself, depending on self.input.
        if self.input == 'filename':
            with open(doc, 'rb') as fh:
                doc = fh.read()
        elif self.input == 'file':
            doc = doc.read()

        if isinstance(doc, bytes):
            doc = doc.decode(self.encoding, self.decode_error)

        if doc is np.nan:
            raise ValueError("np.nan is an invalid document, expected byte or "
                             "unicode string.")

        return doc

    def _word_ngrams(self, tokens, stop_words=None):
        """Turn tokens into a sequence of n-grams after stop words filtering"""
        # handle stop words
        if stop_words is not None:
            tokens = [w for w in tokens if w not in stop_words]

        # handle token n-grams
        min_n, max_n = self.ngram_range
        if max_n != 1:
            # Build space-joined n-grams of every length in the range.
            original_tokens = tokens
            tokens = []
            n_original_tokens = len(original_tokens)
            for n in xrange(min_n,
                            min(max_n + 1, n_original_tokens + 1)):
                for i in xrange(n_original_tokens - n + 1):
                    tokens.append(" ".join(original_tokens[i: i + n]))

        return tokens

    def _char_ngrams(self, text_document):
        """Tokenize text_document into a sequence of character n-grams"""
        # normalize white spaces
        text_document = self._white_spaces.sub(" ", text_document)

        text_len = len(text_document)
        ngrams = []
        min_n, max_n = self.ngram_range
        # Slide a window of each length over the whole document.
        for n in xrange(min_n, min(max_n + 1, text_len + 1)):
            for i in xrange(text_len - n + 1):
                ngrams.append(text_document[i: i + n])
        return ngrams

    def _char_wb_ngrams(self, text_document):
        """Whitespace sensitive char-n-gram tokenization.

        Tokenize text_document into a sequence of character n-grams
        excluding any whitespace (operating only inside word boundaries)"""
        # normalize white spaces
        text_document = self._white_spaces.sub(" ", text_document)

        min_n, max_n = self.ngram_range
        ngrams = []
        for w in text_document.split():
            # Pad each word so n-grams can mark word boundaries.
            w = ' ' + w + ' '
            w_len = len(w)
            for n in xrange(min_n, max_n + 1):
                offset = 0
                ngrams.append(w[offset:offset + n])
                while offset + n < w_len:
                    offset += 1
                    ngrams.append(w[offset:offset + n])
                if offset == 0:   # count a short word (w_len < n) only once
                    break
        return ngrams

    def build_preprocessor(self):
        """Return a function to preprocess the text before tokenization"""
        if self.preprocessor is not None:
            return self.preprocessor

        # unfortunately python functools package does not have an efficient
        # `compose` function that would have allowed us to chain a dynamic
        # number of functions. However the cost of a lambda call is a few
        # hundreds of nanoseconds which is negligible when compared to the
        # cost of tokenizing a string of 1000 chars for instance.
        noop = lambda x: x

        # accent stripping
        if not self.strip_accents:
            strip_accents = noop
        elif callable(self.strip_accents):
            strip_accents = self.strip_accents
        elif self.strip_accents == 'ascii':
            strip_accents = strip_accents_ascii
        elif self.strip_accents == 'unicode':
            strip_accents = strip_accents_unicode
        else:
            raise ValueError('Invalid value for "strip_accents": %s' %
                             self.strip_accents)

        if self.lowercase:
            return lambda x: strip_accents(x.lower())
        else:
            return strip_accents

    def build_tokenizer(self):
        """Return a function that splits a string into a sequence of tokens"""
        if self.tokenizer is not None:
            return self.tokenizer
        token_pattern = re.compile(self.token_pattern)
        return lambda doc: token_pattern.findall(doc)

    def get_stop_words(self):
        """Build or fetch the effective stop words list"""
        return _check_stop_list(self.stop_words)

    def build_analyzer(self):
        """Return a callable that handles preprocessing and tokenization"""
        if callable(self.analyzer):
            return self.analyzer

        preprocess = self.build_preprocessor()

        if self.analyzer == 'char':
            return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))

        elif self.analyzer == 'char_wb':
            return lambda doc: self._char_wb_ngrams(
                preprocess(self.decode(doc)))

        elif self.analyzer == 'word':
            stop_words = self.get_stop_words()
            tokenize = self.build_tokenizer()

            return lambda doc: self._word_ngrams(
                tokenize(preprocess(self.decode(doc))), stop_words)

        else:
            raise ValueError('%s is not a valid tokenization scheme/analyzer' %
                             self.analyzer)

    def _validate_vocabulary(self):
        """Validate a user-supplied vocabulary and set fixed_vocabulary_."""
        vocabulary = self.vocabulary
        if vocabulary is not None:
            if not isinstance(vocabulary, Mapping):
                # An iterable of terms: assign indices in iteration order,
                # rejecting duplicates.
                vocab = {}
                for i, t in enumerate(vocabulary):
                    if vocab.setdefault(t, i) != i:
                        msg = "Duplicate term in vocabulary: %r" % t
                        raise ValueError(msg)
                vocabulary = vocab
            else:
                # A mapping: indices must be exactly 0..len-1 with no gaps.
                indices = set(six.itervalues(vocabulary))
                if len(indices) != len(vocabulary):
                    raise ValueError("Vocabulary contains repeated indices.")
                for i in xrange(len(vocabulary)):
                    if i not in indices:
                        msg = ("Vocabulary of size %d doesn't contain index "
                               "%d." % (len(vocabulary), i))
                        raise ValueError(msg)
            if not vocabulary:
                raise ValueError("empty vocabulary passed to fit")
            self.fixed_vocabulary_ = True
            self.vocabulary_ = dict(vocabulary)
        else:
            self.fixed_vocabulary_ = False

    def _check_vocabulary(self):
        """Check if vocabulary is empty or missing (not fit-ed)"""
        msg = "%(name)s - Vocabulary wasn't fitted."
        # NOTE(review): the trailing comma makes this a one-element tuple
        # expression; harmless, but likely unintentional.
        check_is_fitted(self, 'vocabulary_', msg=msg),

        if len(self.vocabulary_) == 0:
            raise ValueError("Vocabulary is empty")

    @property
    @deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
                "removed in 0.18. Please use `fixed_vocabulary_` instead.")
    def fixed_vocabulary(self):
        # Deprecated alias for fixed_vocabulary_.
        return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
an direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
    def _get_hasher(self):
        # Build a FeatureHasher configured from this vectorizer's parameters.
        # Constructed on demand because the transformer is stateless.
        return FeatureHasher(n_features=self.n_features,
                             input_type='string', dtype=self.dtype,
                             non_negative=self.non_negative)
def _document_frequency(X):
    """Count the number of non-zero values for each feature in sparse X."""
    if not sp.isspmatrix_csr(X):
        # In CSC layout, successive indptr differences give the number of
        # stored (non-zero) entries per column directly.
        return np.diff(sp.csc_matrix(X, copy=False).indptr)
    # CSR layout: count how often each column index occurs.
    return bincount(X.indices, minlength=X.shape[1])
class CountVectorizer(BaseEstimator, VectorizerMixin):
    """Convert a collection of text documents to a matrix of token counts

    This implementation produces a sparse representation of the counts using
    scipy.sparse.coo_matrix.

    If you do not provide an a-priori dictionary and you do not use an analyzer
    that does some kind of feature selection then the number of features will
    be equal to the vocabulary size found by analyzing the data.

    Read more in the :ref:`User Guide <text_feature_extraction>`.

    Parameters
    ----------
    input : string {'filename', 'file', 'content'}
        If 'filename', the sequence passed as an argument to fit is
        expected to be a list of filenames that need reading to fetch
        the raw content to analyze.

        If 'file', the sequence items must have a 'read' method (file-like
        object) that is called to fetch the bytes in memory.

        Otherwise the input is expected to be the sequence strings or
        bytes items are expected to be analyzed directly.

    encoding : string, 'utf-8' by default.
        If bytes or files are given to analyze, this encoding is used to
        decode.

    decode_error : {'strict', 'ignore', 'replace'}
        Instruction on what to do if a byte sequence is given to analyze that
        contains characters not of the given `encoding`. By default, it is
        'strict', meaning that a UnicodeDecodeError will be raised. Other
        values are 'ignore' and 'replace'.

    strip_accents : {'ascii', 'unicode', None}
        Remove accents during the preprocessing step.
        'ascii' is a fast method that only works on characters that have
        an direct ASCII mapping.
        'unicode' is a slightly slower method that works on any characters.
        None (default) does nothing.

    analyzer : string, {'word', 'char', 'char_wb'} or callable
        Whether the feature should be made of word or character n-grams.
        Option 'char_wb' creates character n-grams only from text inside
        word boundaries.

        If a callable is passed it is used to extract the sequence of features
        out of the raw, unprocessed input.

    preprocessor : callable or None (default)
        Override the preprocessing (string transformation) stage while
        preserving the tokenizing and n-grams generation steps.

    tokenizer : callable or None (default)
        Override the string tokenization step while preserving the
        preprocessing and n-grams generation steps.
        Only applies if ``analyzer == 'word'``.

    ngram_range : tuple (min_n, max_n)
        The lower and upper boundary of the range of n-values for different
        n-grams to be extracted. All values of n such that min_n <= n <= max_n
        will be used.

    stop_words : string {'english'}, list, or None (default)
        If 'english', a built-in stop word list for English is used.

        If a list, that list is assumed to contain stop words, all of which
        will be removed from the resulting tokens.
        Only applies if ``analyzer == 'word'``.

        If None, no stop words will be used. max_df can be set to a value
        in the range [0.7, 1.0) to automatically detect and filter stop
        words based on intra corpus document frequency of terms.

    lowercase : boolean, True by default
        Convert all characters to lowercase before tokenizing.

    token_pattern : string
        Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp select tokens of 2
        or more alphanumeric characters (punctuation is completely ignored
        and always treated as a token separator).

    max_df : float in range [0.0, 1.0] or int, default=1.0
        When building the vocabulary ignore terms that have a document
        frequency strictly higher than the given threshold (corpus-specific
        stop words).
        If float, the parameter represents a proportion of documents, integer
        absolute counts.
        This parameter is ignored if vocabulary is not None.

    min_df : float in range [0.0, 1.0] or int, default=1
        When building the vocabulary ignore terms that have a document
        frequency strictly lower than the given threshold. This value is also
        called cut-off in the literature.
        If float, the parameter represents a proportion of documents, integer
        absolute counts.
        This parameter is ignored if vocabulary is not None.

    max_features : int or None, default=None
        If not None, build a vocabulary that only consider the top
        max_features ordered by term frequency across the corpus.
        This parameter is ignored if vocabulary is not None.

    vocabulary : Mapping or iterable, optional
        Either a Mapping (e.g., a dict) where keys are terms and values are
        indices in the feature matrix, or an iterable over terms. If not
        given, a vocabulary is determined from the input documents. Indices
        in the mapping should not be repeated and should not have any gap
        between 0 and the largest index.

    binary : boolean, default=False
        If True, all non zero counts are set to 1. This is useful for discrete
        probabilistic models that model binary events rather than integer
        counts.

    dtype : type, optional
        Type of the matrix returned by fit_transform() or transform().

    Attributes
    ----------
    vocabulary_ : dict
        A mapping of terms to feature indices.

    stop_words_ : set
        Terms that were ignored because they either:

          - occurred in too many documents (`max_df`)
          - occurred in too few documents (`min_df`)
          - were cut off by feature selection (`max_features`).

        This is only available if no vocabulary was given.

    See also
    --------
    HashingVectorizer, TfidfVectorizer

    Notes
    -----
    The ``stop_words_`` attribute can get large and increase the model size
    when pickling. This attribute is provided only for introspection and can
    be safely removed using delattr or set to None before pickling.
    """

    def __init__(self, input='content', encoding='utf-8',
                 decode_error='strict', strip_accents=None,
                 lowercase=True, preprocessor=None, tokenizer=None,
                 stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
                 ngram_range=(1, 1), analyzer='word',
                 max_df=1.0, min_df=1, max_features=None,
                 vocabulary=None, binary=False, dtype=np.int64):
        self.input = input
        self.encoding = encoding
        self.decode_error = decode_error
        self.strip_accents = strip_accents
        self.preprocessor = preprocessor
        self.tokenizer = tokenizer
        self.analyzer = analyzer
        self.lowercase = lowercase
        self.token_pattern = token_pattern
        self.stop_words = stop_words
        self.max_df = max_df
        self.min_df = min_df
        # NOTE(review): validating in __init__ deviates from the sklearn
        # parameter-storage-only convention; kept unchanged.
        if max_df < 0 or min_df < 0:
            # NOTE(review): message typo -- "of" should presumably read "or";
            # left untouched because it is a runtime string.
            raise ValueError("negative value for max_df of min_df")
        self.max_features = max_features
        if max_features is not None:
            if (not isinstance(max_features, numbers.Integral) or
                    max_features <= 0):
                raise ValueError(
                    "max_features=%r, neither a positive integer nor None"
                    % max_features)
        self.ngram_range = ngram_range
        self.vocabulary = vocabulary
        self.binary = binary
        self.dtype = dtype

    def _sort_features(self, X, vocabulary):
        """Sort features by name

        Returns a reordered matrix and modifies the vocabulary in place
        """
        sorted_features = sorted(six.iteritems(vocabulary))
        map_index = np.empty(len(sorted_features), dtype=np.int32)
        for new_val, (term, old_val) in enumerate(sorted_features):
            # map_index[new column] = old column; vocabulary is updated so
            # terms now point at their alphabetically-sorted position.
            map_index[new_val] = old_val
            vocabulary[term] = new_val
        return X[:, map_index]

    def _limit_features(self, X, vocabulary, high=None, low=None,
                        limit=None):
        """Remove too rare or too common features.

        Prune features that are non zero in more samples than high or less
        documents than low, modifying the vocabulary, and restricting it to
        at most the limit most frequent.

        This does not prune samples with zero features.
        """
        if high is None and low is None and limit is None:
            return X, set()

        # Calculate a mask based on document frequencies
        dfs = _document_frequency(X)
        tfs = np.asarray(X.sum(axis=0)).ravel()
        mask = np.ones(len(dfs), dtype=bool)
        if high is not None:
            mask &= dfs <= high
        if low is not None:
            mask &= dfs >= low
        if limit is not None and mask.sum() > limit:
            # Among features that survived the df filters, keep only the
            # `limit` ones with the highest corpus-wide term counts.
            mask_inds = (-tfs[mask]).argsort()[:limit]
            new_mask = np.zeros(len(dfs), dtype=bool)
            new_mask[np.where(mask)[0][mask_inds]] = True
            mask = new_mask

        new_indices = np.cumsum(mask) - 1  # maps old indices to new
        removed_terms = set()
        # list() snapshot: the dict is mutated (del) during iteration.
        for term, old_index in list(six.iteritems(vocabulary)):
            if mask[old_index]:
                vocabulary[term] = new_indices[old_index]
            else:
                del vocabulary[term]
                removed_terms.add(term)
        kept_indices = np.where(mask)[0]
        if len(kept_indices) == 0:
            raise ValueError("After pruning, no terms remain. Try a lower"
                             " min_df or a higher max_df.")
        return X[:, kept_indices], removed_terms

    def _count_vocab(self, raw_documents, fixed_vocab):
        """Create sparse feature matrix, and vocabulary where fixed_vocab=False
        """
        if fixed_vocab:
            vocabulary = self.vocabulary_
        else:
            # Add a new value when a new vocabulary item is seen:
            # defaultdict whose factory is its own current length assigns
            # consecutive indices to first-seen terms.
            vocabulary = defaultdict()
            vocabulary.default_factory = vocabulary.__len__

        analyze = self.build_analyzer()
        j_indices = _make_int_array()
        indptr = _make_int_array()
        indptr.append(0)
        for doc in raw_documents:
            for feature in analyze(doc):
                try:
                    j_indices.append(vocabulary[feature])
                except KeyError:
                    # Ignore out-of-vocabulary items for fixed_vocab=True
                    continue
            indptr.append(len(j_indices))

        if not fixed_vocab:
            # disable defaultdict behaviour
            vocabulary = dict(vocabulary)
            if not vocabulary:
                raise ValueError("empty vocabulary; perhaps the documents only"
                                 " contain stop words")

        j_indices = frombuffer_empty(j_indices, dtype=np.intc)
        indptr = np.frombuffer(indptr, dtype=np.intc)
        values = np.ones(len(j_indices))

        # One value per stored token occurrence; sum_duplicates collapses
        # repeated (row, col) entries into true counts.
        X = sp.csr_matrix((values, j_indices, indptr),
                          shape=(len(indptr) - 1, len(vocabulary)),
                          dtype=self.dtype)
        X.sum_duplicates()
        return vocabulary, X

    def fit(self, raw_documents, y=None):
        """Learn a vocabulary dictionary of all tokens in the raw documents.

        Parameters
        ----------
        raw_documents : iterable
            An iterable which yields either str, unicode or file objects.

        Returns
        -------
        self
        """
        self.fit_transform(raw_documents)
        return self

    def fit_transform(self, raw_documents, y=None):
        """Learn the vocabulary dictionary and return term-document matrix.

        This is equivalent to fit followed by transform, but more efficiently
        implemented.

        Parameters
        ----------
        raw_documents : iterable
            An iterable which yields either str, unicode or file objects.

        Returns
        -------
        X : array, [n_samples, n_features]
            Document-term matrix.
        """
        # We intentionally don't call the transform method to make
        # fit_transform overridable without unwanted side effects in
        # TfidfVectorizer.
        self._validate_vocabulary()
        max_df = self.max_df
        min_df = self.min_df
        max_features = self.max_features

        vocabulary, X = self._count_vocab(raw_documents,
                                          self.fixed_vocabulary_)

        if self.binary:
            X.data.fill(1)

        if not self.fixed_vocabulary_:
            X = self._sort_features(X, vocabulary)

            n_doc = X.shape[0]
            # float thresholds are proportions of the corpus; ints are
            # absolute document counts.
            max_doc_count = (max_df
                             if isinstance(max_df, numbers.Integral)
                             else max_df * n_doc)
            min_doc_count = (min_df
                             if isinstance(min_df, numbers.Integral)
                             else min_df * n_doc)
            if max_doc_count < min_doc_count:
                raise ValueError(
                    "max_df corresponds to < documents than min_df")
            X, self.stop_words_ = self._limit_features(X, vocabulary,
                                                       max_doc_count,
                                                       min_doc_count,
                                                       max_features)

            self.vocabulary_ = vocabulary

        return X

    def transform(self, raw_documents):
        """Transform documents to document-term matrix.

        Extract token counts out of raw text documents using the vocabulary
        fitted with fit or the one provided to the constructor.

        Parameters
        ----------
        raw_documents : iterable
            An iterable which yields either str, unicode or file objects.

        Returns
        -------
        X : sparse matrix, [n_samples, n_features]
            Document-term matrix.
        """
        if not hasattr(self, 'vocabulary_'):
            self._validate_vocabulary()

        self._check_vocabulary()

        # use the same matrix-building strategy as fit_transform
        _, X = self._count_vocab(raw_documents, fixed_vocab=True)
        if self.binary:
            X.data.fill(1)
        return X

    def inverse_transform(self, X):
        """Return terms per document with nonzero entries in X.

        Parameters
        ----------
        X : {array, sparse matrix}, shape = [n_samples, n_features]

        Returns
        -------
        X_inv : list of arrays, len = n_samples
            List of arrays of terms.
        """
        self._check_vocabulary()

        if sp.issparse(X):
            # We need CSR format for fast row manipulations.
            X = X.tocsr()
        else:
            # We need to convert X to a matrix, so that the indexing
            # returns 2D objects
            X = np.asmatrix(X)
        n_samples = X.shape[0]

        terms = np.array(list(self.vocabulary_.keys()))
        indices = np.array(list(self.vocabulary_.values()))
        # inverse_vocabulary[i] is the term mapped to feature index i.
        inverse_vocabulary = terms[np.argsort(indices)]

        return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
                for i in range(n_samples)]

    def get_feature_names(self):
        """Array mapping from feature integer indices to feature name"""
        self._check_vocabulary()
        # Terms ordered by their feature index.
        return [t for t, i in sorted(six.iteritems(self.vocabulary_),
                                     key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
    """Transform a count matrix to a normalized tf or tf-idf representation

    Tf means term-frequency while tf-idf means term-frequency times inverse
    document-frequency. This is a common term weighting scheme in information
    retrieval, that has also found good use in document classification.

    The goal of using tf-idf instead of the raw frequencies of occurrence of a
    token in a given document is to scale down the impact of tokens that occur
    very frequently in a given corpus and that are hence empirically less
    informative than features that occur in a small fraction of the training
    corpus.

    The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
    instead of tf * idf. The effect of this is that terms with zero idf, i.e.
    that occur in all documents of a training set, will not be entirely
    ignored. The formulas used to compute tf and idf depend on parameter
    settings that correspond to the SMART notation used in IR, as follows:

    Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
    Idf is "t" when use_idf is given, "n" (none) otherwise.
    Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.

    Read more in the :ref:`User Guide <text_feature_extraction>`.

    Parameters
    ----------
    norm : 'l1', 'l2' or None, optional
        Norm used to normalize term vectors. None for no normalization.

    use_idf : boolean, default=True
        Enable inverse-document-frequency reweighting.

    smooth_idf : boolean, default=True
        Smooth idf weights by adding one to document frequencies, as if an
        extra document was seen containing every term in the collection
        exactly once. Prevents zero divisions.

    sublinear_tf : boolean, default=False
        Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).

    References
    ----------
    .. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
                   Information Retrieval. Addison Wesley, pp. 68-74.`

    .. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze  (2008).
                   Introduction to Information Retrieval. Cambridge University
                   Press, pp. 118-120.`
    """

    def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
                 sublinear_tf=False):
        self.norm = norm
        self.use_idf = use_idf
        self.smooth_idf = smooth_idf
        self.sublinear_tf = sublinear_tf

    def fit(self, X, y=None):
        """Learn the idf vector (global term weights)

        Parameters
        ----------
        X : sparse matrix, [n_samples, n_features]
            a matrix of term/token counts

        Returns
        -------
        self
        """
        if not sp.issparse(X):
            X = sp.csc_matrix(X)
        if self.use_idf:
            n_samples, n_features = X.shape
            df = _document_frequency(X)

            # perform idf smoothing if required
            df += int(self.smooth_idf)
            n_samples += int(self.smooth_idf)

            # log+1 instead of log makes sure terms with zero idf don't get
            # suppressed entirely.
            idf = np.log(float(n_samples) / df) + 1.0
            # Store as a diagonal matrix so transform is a single sparse
            # matrix product.
            self._idf_diag = sp.spdiags(idf,
                                        diags=0, m=n_features, n=n_features)

        return self

    def transform(self, X, copy=True):
        """Transform a count matrix to a tf or tf-idf representation

        Parameters
        ----------
        X : sparse matrix, [n_samples, n_features]
            a matrix of term/token counts

        copy : boolean, default True
            Whether to copy X and operate on the copy or perform in-place
            operations.

        Returns
        -------
        vectors : sparse matrix, [n_samples, n_features]
        """
        # BUG FIX: the test previously used ``np.float``, a deprecated alias
        # for the builtin ``float`` that was removed in NumPy 1.24.
        # ``np.floating`` matches the whole float family (float32, float64,
        # ...), which is the stated intent of preserving float dtypes.
        if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.floating):
            # preserve float family dtype
            X = sp.csr_matrix(X, copy=copy)
        else:
            # convert counts or binary occurrences to floats
            X = sp.csr_matrix(X, dtype=np.float64, copy=copy)

        n_samples, n_features = X.shape

        if self.sublinear_tf:
            # tf -> 1 + log(tf), applied in place on the non-zero entries.
            np.log(X.data, X.data)
            X.data += 1

        if self.use_idf:
            check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')

            expected_n_features = self._idf_diag.shape[0]
            if n_features != expected_n_features:
                raise ValueError("Input has n_features=%d while the model"
                                 " has been trained with n_features=%d" % (
                                     n_features, expected_n_features))
            # *= doesn't work
            X = X * self._idf_diag

        if self.norm:
            X = normalize(X, norm=self.norm, copy=False)

        return X

    @property
    def idf_(self):
        # Recover the idf vector from the stored diagonal matrix; None
        # until fit has been called with use_idf=True.
        if hasattr(self, "_idf_diag"):
            return np.ravel(self._idf_diag.sum(axis=0))
        else:
            return None
class TfidfVectorizer(CountVectorizer):
    """Convert a collection of raw documents to a matrix of TF-IDF features.

    Equivalent to CountVectorizer followed by TfidfTransformer.

    Read more in the :ref:`User Guide <text_feature_extraction>`.

    Parameters
    ----------
    input : string {'filename', 'file', 'content'}
        If 'filename', the sequence passed as an argument to fit is
        expected to be a list of filenames that need reading to fetch
        the raw content to analyze.

        If 'file', the sequence items must have a 'read' method (file-like
        object) that is called to fetch the bytes in memory.

        Otherwise the input is expected to be the sequence strings or
        bytes items are expected to be analyzed directly.

    encoding : string, 'utf-8' by default.
        If bytes or files are given to analyze, this encoding is used to
        decode.

    decode_error : {'strict', 'ignore', 'replace'}
        Instruction on what to do if a byte sequence is given to analyze that
        contains characters not of the given `encoding`. By default, it is
        'strict', meaning that a UnicodeDecodeError will be raised. Other
        values are 'ignore' and 'replace'.

    strip_accents : {'ascii', 'unicode', None}
        Remove accents during the preprocessing step.
        'ascii' is a fast method that only works on characters that have
        an direct ASCII mapping.
        'unicode' is a slightly slower method that works on any characters.
        None (default) does nothing.

    analyzer : string, {'word', 'char'} or callable
        Whether the feature should be made of word or character n-grams.

        If a callable is passed it is used to extract the sequence of features
        out of the raw, unprocessed input.

    preprocessor : callable or None (default)
        Override the preprocessing (string transformation) stage while
        preserving the tokenizing and n-grams generation steps.

    tokenizer : callable or None (default)
        Override the string tokenization step while preserving the
        preprocessing and n-grams generation steps.
        Only applies if ``analyzer == 'word'``.

    ngram_range : tuple (min_n, max_n)
        The lower and upper boundary of the range of n-values for different
        n-grams to be extracted. All values of n such that min_n <= n <= max_n
        will be used.

    stop_words : string {'english'}, list, or None (default)
        If a string, it is passed to _check_stop_list and the appropriate stop
        list is returned. 'english' is currently the only supported string
        value.

        If a list, that list is assumed to contain stop words, all of which
        will be removed from the resulting tokens.
        Only applies if ``analyzer == 'word'``.

        If None, no stop words will be used. max_df can be set to a value
        in the range [0.7, 1.0) to automatically detect and filter stop
        words based on intra corpus document frequency of terms.

    lowercase : boolean, default True
        Convert all characters to lowercase before tokenizing.

    token_pattern : string
        Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
        or more alphanumeric characters (punctuation is completely ignored
        and always treated as a token separator).

    max_df : float in range [0.0, 1.0] or int, default=1.0
        When building the vocabulary ignore terms that have a document
        frequency strictly higher than the given threshold (corpus-specific
        stop words).
        If float, the parameter represents a proportion of documents, integer
        absolute counts.
        This parameter is ignored if vocabulary is not None.

    min_df : float in range [0.0, 1.0] or int, default=1
        When building the vocabulary ignore terms that have a document
        frequency strictly lower than the given threshold. This value is also
        called cut-off in the literature.
        If float, the parameter represents a proportion of documents, integer
        absolute counts.
        This parameter is ignored if vocabulary is not None.

    max_features : int or None, default=None
        If not None, build a vocabulary that only consider the top
        max_features ordered by term frequency across the corpus.
        This parameter is ignored if vocabulary is not None.

    vocabulary : Mapping or iterable, optional
        Either a Mapping (e.g., a dict) where keys are terms and values are
        indices in the feature matrix, or an iterable over terms. If not
        given, a vocabulary is determined from the input documents.

    binary : boolean, default=False
        If True, all non-zero term counts are set to 1. This does not mean
        outputs will have only 0/1 values, only that the tf term in tf-idf
        is binary. (Set idf and normalization to False to get 0/1 outputs.)

    dtype : type, optional
        Type of the matrix returned by fit_transform() or transform().

    norm : 'l1', 'l2' or None, optional
        Norm used to normalize term vectors. None for no normalization.

    use_idf : boolean, default=True
        Enable inverse-document-frequency reweighting.

    smooth_idf : boolean, default=True
        Smooth idf weights by adding one to document frequencies, as if an
        extra document was seen containing every term in the collection
        exactly once. Prevents zero divisions.

    sublinear_tf : boolean, default=False
        Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).

    Attributes
    ----------
    idf_ : array, shape = [n_features], or None
        The learned idf vector (global term weights)
        when ``use_idf`` is set to True, None otherwise.

    stop_words_ : set
        Terms that were ignored because they either:

          - occurred in too many documents (`max_df`)
          - occurred in too few documents (`min_df`)
          - were cut off by feature selection (`max_features`).

        This is only available if no vocabulary was given.

    See also
    --------
    CountVectorizer
        Tokenize the documents and count the occurrences of token and return
        them as a sparse matrix

    TfidfTransformer
        Apply Term Frequency Inverse Document Frequency normalization to a
        sparse matrix of occurrence counts.

    Notes
    -----
    The ``stop_words_`` attribute can get large and increase the model size
    when pickling. This attribute is provided only for introspection and can
    be safely removed using delattr or set to None before pickling.
    """

    def __init__(self, input='content', encoding='utf-8',
                 decode_error='strict', strip_accents=None, lowercase=True,
                 preprocessor=None, tokenizer=None, analyzer='word',
                 stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
                 ngram_range=(1, 1), max_df=1.0, min_df=1,
                 max_features=None, vocabulary=None, binary=False,
                 dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
                 sublinear_tf=False):
        super(TfidfVectorizer, self).__init__(
            input=input, encoding=encoding, decode_error=decode_error,
            strip_accents=strip_accents, lowercase=lowercase,
            preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
            stop_words=stop_words, token_pattern=token_pattern,
            ngram_range=ngram_range, max_df=max_df, min_df=min_df,
            max_features=max_features, vocabulary=vocabulary, binary=binary,
            dtype=dtype)
        # Counting is delegated to the CountVectorizer base class; weighting
        # is delegated to this internal transformer.
        self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
                                       smooth_idf=smooth_idf,
                                       sublinear_tf=sublinear_tf)

    # Broadcast the TF-IDF parameters to the underlying transformer instance
    # for easy grid search and repr

    @property
    def norm(self):
        return self._tfidf.norm

    @norm.setter
    def norm(self, value):
        self._tfidf.norm = value

    @property
    def use_idf(self):
        return self._tfidf.use_idf

    @use_idf.setter
    def use_idf(self, value):
        self._tfidf.use_idf = value

    @property
    def smooth_idf(self):
        return self._tfidf.smooth_idf

    @smooth_idf.setter
    def smooth_idf(self, value):
        self._tfidf.smooth_idf = value

    @property
    def sublinear_tf(self):
        return self._tfidf.sublinear_tf

    @sublinear_tf.setter
    def sublinear_tf(self, value):
        self._tfidf.sublinear_tf = value

    @property
    def idf_(self):
        return self._tfidf.idf_

    def fit(self, raw_documents, y=None):
        """Learn vocabulary and idf from training set.

        Parameters
        ----------
        raw_documents : iterable
            an iterable which yields either str, unicode or file objects

        Returns
        -------
        self : TfidfVectorizer
        """
        X = super(TfidfVectorizer, self).fit_transform(raw_documents)
        self._tfidf.fit(X)
        return self

    def fit_transform(self, raw_documents, y=None):
        """Learn vocabulary and idf, return term-document matrix.

        This is equivalent to fit followed by transform, but more efficiently
        implemented.

        Parameters
        ----------
        raw_documents : iterable
            an iterable which yields either str, unicode or file objects

        Returns
        -------
        X : sparse matrix, [n_samples, n_features]
            Tf-idf-weighted document-term matrix.
        """
        X = super(TfidfVectorizer, self).fit_transform(raw_documents)
        self._tfidf.fit(X)
        # X is already a transformed view of raw_documents so
        # we set copy to False
        return self._tfidf.transform(X, copy=False)

    def transform(self, raw_documents, copy=True):
        """Transform documents to document-term matrix.

        Uses the vocabulary and document frequencies (df) learned by fit (or
        fit_transform).

        Parameters
        ----------
        raw_documents : iterable
            an iterable which yields either str, unicode or file objects

        copy : boolean, default True
            Whether to copy X and operate on the copy or perform in-place
            operations.

        Returns
        -------
        X : sparse matrix, [n_samples, n_features]
            Tf-idf-weighted document-term matrix.
        """
        check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')

        X = super(TfidfVectorizer, self).transform(raw_documents)
        return self._tfidf.transform(X, copy=False)
| bsd-3-clause |
greytip/data-science-utils | datascienceutils/clusteringModels.py | 1 | 3347 | from bokeh.layouts import gridplot
from sklearn import cluster
from sklearn.neighbors import kneighbors_graph
import numpy as np
import pandas as pd
import time
# Custom utils
from . import sklearnUtils as sku
from . import plotter
from . import utils
#TODO: add a way of weakening the discovered cluster structure and running again
# http://scilogs.spektrum.de/hlf/sometimes-noise-signals/
def is_cluster(dataframe, model_type='dbscan', batch_size=2):
    """Fit a clustering model on the data and return its cluster structure.

    Parameters
    ----------
    dataframe : array-like or pandas.DataFrame
        Samples to cluster.
    model_type : str, 'dbscan' or 'MiniBatchKMeans'
        Which sklearn clustering estimator to use.
    batch_size : int
        Number of clusters for MiniBatchKMeans (ignored for DBSCAN).

    Returns
    -------
    Cluster centres when the fitted model exposes ``cluster_centers_``
    (MiniBatchKMeans), otherwise the per-sample labels (DBSCAN, which has
    no centroids).

    Raises
    ------
    ValueError
        If ``model_type`` is not one of the supported names.
    """
    if model_type == 'dbscan':
        model_obj = cluster.DBSCAN(eps=.2)
    elif model_type == 'MiniBatchKMeans':
        assert batch_size, "Batch size mandatory"
        model_obj = cluster.MiniBatchKMeans(n_clusters=batch_size)
    else:
        # Previously this branch silently passed, leaving model_obj unbound
        # and crashing below with UnboundLocalError; fail fast instead.
        raise ValueError("Unsupported model_type: %r" % (model_type,))
    # Bug fix: the original called model_obj.fit(X) with an undefined
    # name ``X``; the intended input is the dataframe argument.
    model_obj.fit(dataframe)
    # DBSCAN has no cluster_centers_ attribute, so fall back to labels.
    if hasattr(model_obj, 'cluster_centers_'):
        return model_obj.cluster_centers_
    return model_obj.labels_
def cluster_analyze(dataframe):
    """Run a battery of sklearn clustering algorithms on *dataframe* and
    display a grid of scatter plots, one per algorithm, of the first two
    (normalized) feature columns.

    Side effects: renders a bokeh grid plot via plotter.show; nothing is
    returned.
    """
    clustering_names = [
        'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
        'SpectralClustering', 'Ward', 'AgglomerativeClustering',
        'DBSCAN', 'Birch']
    colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
    colors = np.hstack([colors] * 20)
    # NOTE(review): plot_num is never used below -- dead local.
    plot_num = 1
    # normalize dataset for easier parameter selection
    X = sku.feature_scale_or_normalize(dataframe, dataframe.columns)
    # estimate bandwidth for mean shift
    bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
    # connectivity matrix for structured Ward
    connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
    # make connectivity symmetric
    connectivity = 0.5 * (connectivity + connectivity.T)
    # create clustering estimators
    ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
    two_means = cluster.MiniBatchKMeans(n_clusters=2)
    ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
                                           connectivity=connectivity)
    spectral = cluster.SpectralClustering(n_clusters=2,
                                          eigen_solver='arpack',
                                          affinity="nearest_neighbors")
    dbscan = cluster.DBSCAN(eps=.2)
    affinity_propagation = cluster.AffinityPropagation(damping=.9,
                                                       preference=-200)
    average_linkage = cluster.AgglomerativeClustering(
        linkage="average", affinity="cityblock", n_clusters=2,
        connectivity=connectivity)
    birch = cluster.Birch(n_clusters=2)
    # Order must match clustering_names above (zip below pairs them).
    clustering_algorithms = [
        two_means, affinity_propagation, ms, spectral, ward, average_linkage,
        dbscan, birch]
    plots = list()
    for name, algorithm in zip(clustering_names, clustering_algorithms):
        # predict cluster memberships
        t0 = time.time()
        algorithm.fit(X)
        t1 = time.time()
        # Some estimators expose labels_ after fit; others need predict.
        if hasattr(algorithm, 'labels_'):
            y_pred = algorithm.labels_.astype(np.int)
        else:
            y_pred = algorithm.predict(X)
        # plot
        new_df = pd.DataFrame(X)
        plots.append(plotter.scatterplot(new_df, 0, 1, title='%s'%name))
        if hasattr(algorithm, 'cluster_centers_'):
            centers = algorithm.cluster_centers_
            centers_df = pd.DataFrame(centers)
            # NOTE(review): center_colors is computed but never used.
            center_colors = colors[:len(centers)]
            plotter.scatterplot(centers_df, 0, 1,fill_color="r")
    grid = gridplot(list(utils.chunks(plots,size=2)))
    plotter.show(grid)
| gpl-3.0 |
laurent-george/bokeh | bokeh/charts/builder/tests/test_timeseries_builder.py | 33 | 2825 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import datetime
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import TimeSeries
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestTimeSeries(unittest.TestCase):
    """Unit tests for the TimeSeries chart builder."""

    def test_supported_input(self):
        """TimeSeries must accept OrderedDict, DataFrame, list-of-lists and
        ndarray inputs and expose identical per-series x/y data for each."""
        now = datetime.datetime.now()
        delta = datetime.timedelta(minutes=1)
        dts = [now + delta*i for i in range(5)]
        # NOTE(review): wrapping a plain dict loses insertion order on
        # Python < 3.7; subsequent keys are inserted in order below.
        xyvalues = OrderedDict({'Date': dts})
        y_python = xyvalues['python'] = [2, 3, 7, 5, 26]
        y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126]
        y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26]
        xyvaluesdf = pd.DataFrame(xyvalues)
        groups = ['python', 'pypy', 'jython']
        # Dict-like and DataFrame inputs: series are keyed by column name.
        for i, _xy in enumerate([xyvalues, xyvaluesdf]):
            ts = create_chart(TimeSeries, _xy, index='Date')
            builder = ts._builders[0]
            self.assertEqual(builder._groups, groups)
            assert_array_equal(builder._data['x_python'], dts)
            assert_array_equal(builder._data['x_pypy'], dts)
            assert_array_equal(builder._data['x_jython'], dts)
            assert_array_equal(builder._data['y_python'], y_python)
            assert_array_equal(builder._data['y_pypy'], y_pypy)
            assert_array_equal(builder._data['y_jython'], y_jython)
        lvalues = [[2, 3, 7, 5, 26], [12, 33, 47, 15, 126], [22, 43, 10, 25, 26]]
        # Positional inputs (list / ndarray): series get numeric group names.
        for _xy in [lvalues, np.array(lvalues)]:
            hm = create_chart(TimeSeries, _xy, index=dts)
            builder = hm._builders[0]
            self.assertEqual(builder._groups, ['0', '1', '2'])
            assert_array_equal(builder._data['x_0'], dts)
            assert_array_equal(builder._data['x_1'], dts)
            assert_array_equal(builder._data['x_2'], dts)
            assert_array_equal(builder._data['y_0'], y_python)
            assert_array_equal(builder._data['y_1'], y_pypy)
            assert_array_equal(builder._data['y_2'], y_jython)
| bsd-3-clause |
bokeh/bokeh | tests/unit/bokeh/core/property/test_container.py | 1 | 9429 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2021, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# External imports
import numpy as np
# Bokeh imports
from bokeh._testing.util.api import verify_all
from bokeh.core.properties import (
Float,
Instance,
Int,
String,
)
from _util_property import _TestHasProps, _TestModel
# Module under test
import bokeh.core.property.container as bcpc # isort:skip
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
# Public names bokeh.core.property.container is expected to export; checked
# against the module's actual API by ``verify_all`` at the bottom of the file.
ALL = (
    'Array',
    'ColumnData',
    'Dict',
    'List',
    'RelativeDelta',
    'RestrictedDict',
    'Seq',
    'Tuple',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
# TODO (bev) class Test_ColumnData
# TODO (bev) class Test_RelativeDelta
class Test_Array:
    """Exercise the Array container property."""

    def test_init(self) -> None:
        # Array requires an item-type argument.
        with pytest.raises(TypeError):
            bcpc.Array()

    def test_valid(self) -> None:
        prop = bcpc.Array(Float)
        assert prop.is_valid(np.array([1, 2, 3]))

    def test_invalid(self) -> None:
        prop = bcpc.Array(Float)
        rejected = (
            None, False, True, 0, 1, 0.0, 1.0, 1.0+1.0j,
            "", (), [], {},
            _TestHasProps(), _TestModel(),
        )
        for value in rejected:
            assert not prop.is_valid(value)

    def test_has_ref(self) -> None:
        assert not bcpc.Array(Float).has_ref

    def test_str(self) -> None:
        assert str(bcpc.Array(Float)) == "Array(Float)"
class Test_Dict:
    """Exercise the Dict container property."""

    def test_init(self) -> None:
        # Dict requires key- and value-type arguments.
        with pytest.raises(TypeError):
            bcpc.Dict()

    def test_valid(self) -> None:
        prop = bcpc.Dict(String, bcpc.List(Int))
        for value in ({}, {"foo": [1,2,3]}):
            assert prop.is_valid(value)

    def test_invalid(self) -> None:
        prop = bcpc.Dict(String, bcpc.List(Int))
        rejected = (
            None, False, True, 0, 1, 0.0, 1.0, 1.0+1.0j,
            "", (), [],
            {"foo": [1,2,3.5]},   # value fails the List(Int) check
            np.array([1,2,3]),
            _TestHasProps(), _TestModel(),
        )
        for value in rejected:
            assert not prop.is_valid(value)

    def test_has_ref(self) -> None:
        assert not bcpc.Dict(String, Int).has_ref
        # A Dict whose values are model instances holds references.
        assert bcpc.Dict(String, Instance(_TestModel)).has_ref

    def test_str(self) -> None:
        assert str(bcpc.Dict(String, Int)) == "Dict(String, Int)"
class Test_List:
    """Exercise the List container property."""

    def test_init(self) -> None:
        # List requires an item-type argument.
        with pytest.raises(TypeError):
            bcpc.List()

    def test_valid(self) -> None:
        prop = bcpc.List(Int)
        for value in ([], [1,2,3]):
            assert prop.is_valid(value)

    def test_invalid(self) -> None:
        prop = bcpc.List(Int)
        rejected = (
            None, False, True, 0, 1, 0.0, 1.0, 1.0+1.0j,
            [1,2,3.5],            # item fails the Int check
            "", (), {},
            np.array([1,2,3]),    # ndarray is not a list
            _TestHasProps(), _TestModel(),
        )
        for value in rejected:
            assert not prop.is_valid(value)

    def test_has_ref(self) -> None:
        assert not bcpc.List(Int).has_ref
        # A List of model instances holds references.
        assert bcpc.List(Instance(_TestModel)).has_ref

    def test_str(self) -> None:
        assert str(bcpc.List(Int)) == "List(Int)"
class Test_Seq:
    """Exercise the Seq container property."""

    def test_init(self) -> None:
        # Seq requires an item-type argument.
        with pytest.raises(TypeError):
            bcpc.Seq()

    def test_valid(self) -> None:
        prop = bcpc.Seq(Int)
        accepted = (
            (), [], np.array([1,2,3]),
            (1, 2), [1, 2], np.array([1, 2]),
        )
        for value in accepted:
            assert prop.is_valid(value)

    def test_invalid(self) -> None:
        prop = bcpc.Seq(Int)
        rejected = (
            None, False, True, 0, 1, 0.0, 1.0, 1.0+1.0j,
            "",
            set(), {}, {1, 2}, {1: 2},   # sets/dicts are not sequences
            _TestHasProps(), _TestModel(),
        )
        for value in rejected:
            assert not prop.is_valid(value)

    def test_with_pandas_valid(self, pd) -> None:
        # pandas Index and Series objects count as sequences.
        prop = bcpc.Seq(Int)
        frame = pd.DataFrame([1, 2])
        assert prop.is_valid(frame.index)
        assert prop.is_valid(frame.iloc[0])

    def test_has_ref(self) -> None:
        assert not bcpc.Seq(Int).has_ref
        # A Seq of model instances holds references.
        assert bcpc.Seq(Instance(_TestModel)).has_ref

    def test_str(self) -> None:
        assert str(bcpc.Seq(Int)) == "Seq(Int)"
class Test_Tuple:
    """Exercise the Tuple container property."""

    def test_Tuple(self) -> None:
        # Tuple requires at least two element-type arguments.
        with pytest.raises(TypeError):
            bcpc.Tuple()
        with pytest.raises(TypeError):
            bcpc.Tuple(Int)

    def test_valid(self) -> None:
        prop = bcpc.Tuple(Int, String, bcpc.List(Int))
        assert prop.is_valid((1, "", [1, 2, 3]))

    def test_invalid(self) -> None:
        prop = bcpc.Tuple(Int, String, bcpc.List(Int))
        rejected = (
            None, False, True, 0, 1, 0.0, 1.0, 1.0+1.0j,
            "", (), [], {},
            np.array([1,2,3]),
            _TestHasProps(), _TestModel(),
            # Right arity, wrong element types:
            (1.0, "", [1, 2, 3]),
            (1, True, [1, 2, 3]),
            (1, "", (1, 2, 3)),
            (1, "", [1, 2, "xyz"]),
        )
        for value in rejected:
            assert not prop.is_valid(value)

    def test_has_ref(self) -> None:
        assert not bcpc.Tuple(Int, Int).has_ref
        # A Tuple containing a model instance slot holds references.
        assert bcpc.Tuple(Int, Instance(_TestModel)).has_ref

    def test_str(self) -> None:
        assert str(bcpc.Tuple(Int, Int)) == "Tuple(Int, Int)"
class Test_RestrictedDict:
    """Exercise the RestrictedDict container property (keys may be vetoed)."""

    def test_valid(self) -> None:
        prop = bcpc.RestrictedDict(String, bcpc.List(Int), disallow=("disallowed_key_1", "disallowed_key_2"))
        for key in ("non_disallowed_key_1", "non_disallowed_key_2"):
            assert prop.is_valid({key: [1,2,3]})

    def test_invalid(self) -> None:
        prop = bcpc.RestrictedDict(String, bcpc.List(Int), disallow=("disallowed_key_1", "disallowed_key_2"))
        for key in ("disallowed_key_1", "disallowed_key_2"):
            assert not prop.is_valid({key: [1,2,3]})
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# Auto-generated test: the module must export exactly the names in ALL.
Test___all__ = verify_all(bcpc, ALL)
| bsd-3-clause |
AnasGhrab/scikit-learn | sklearn/metrics/tests/test_common.py | 83 | 41144 | from __future__ import division, print_function
from functools import partial
from itertools import product
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
# TODO Curves are currently not covered by the invariance tests
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import roc_curve
from sklearn.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of datastructures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layout.
#
# Regression metrics: compare continuous ground truth against continuous
# predictions with a uniform metric(y_true, y_pred) call shape.
REGRESSION_METRICS = {
    "mean_absolute_error": mean_absolute_error,
    "mean_squared_error": mean_squared_error,
    "median_absolute_error": median_absolute_error,
    "explained_variance_score": explained_variance_score,
    "r2_score": r2_score,
}
# Classification metrics: compare ground-truth labels against predicted
# labels.  Keyword variants (normalize=False, average=...) are pre-bound
# with functools.partial so every entry is callable as metric(y_true, y_pred).
CLASSIFICATION_METRICS = {
    "accuracy_score": accuracy_score,
    "unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
    "confusion_matrix": confusion_matrix,
    "hamming_loss": hamming_loss,
    "jaccard_similarity_score": jaccard_similarity_score,
    "unnormalized_jaccard_similarity_score":
    partial(jaccard_similarity_score, normalize=False),
    "zero_one_loss": zero_one_loss,
    "unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
    # These are needed to test averaging
    "precision_score": precision_score,
    "recall_score": recall_score,
    "f1_score": f1_score,
    "f2_score": partial(fbeta_score, beta=2),
    "f0.5_score": partial(fbeta_score, beta=0.5),
    "matthews_corrcoef_score": matthews_corrcoef,
    "weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
    "weighted_f1_score": partial(f1_score, average="weighted"),
    "weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
    "weighted_precision_score": partial(precision_score, average="weighted"),
    "weighted_recall_score": partial(recall_score, average="weighted"),
    "micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
    "micro_f1_score": partial(f1_score, average="micro"),
    "micro_f2_score": partial(fbeta_score, average="micro", beta=2),
    "micro_precision_score": partial(precision_score, average="micro"),
    "micro_recall_score": partial(recall_score, average="micro"),
    "macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
    "macro_f1_score": partial(f1_score, average="macro"),
    "macro_f2_score": partial(fbeta_score, average="macro", beta=2),
    "macro_precision_score": partial(precision_score, average="macro"),
    "macro_recall_score": partial(recall_score, average="macro"),
    "samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
    "samples_f1_score": partial(f1_score, average="samples"),
    "samples_f2_score": partial(fbeta_score, average="samples", beta=2),
    "samples_precision_score": partial(precision_score, average="samples"),
    "samples_recall_score": partial(recall_score, average="samples"),
    "cohen_kappa_score": cohen_kappa_score,
}
# Thresholded metrics: compare ground truth against a continuous score
# (probability estimates or decision-function values), not hard labels.
THRESHOLDED_METRICS = {
    "coverage_error": coverage_error,
    "label_ranking_loss": label_ranking_loss,
    "log_loss": log_loss,
    "unnormalized_log_loss": partial(log_loss, normalize=False),
    "hinge_loss": hinge_loss,
    "brier_score_loss": brier_score_loss,
    "roc_auc_score": roc_auc_score,
    "weighted_roc_auc": partial(roc_auc_score, average="weighted"),
    "samples_roc_auc": partial(roc_auc_score, average="samples"),
    "micro_roc_auc": partial(roc_auc_score, average="micro"),
    "macro_roc_auc": partial(roc_auc_score, average="macro"),
    "average_precision_score": average_precision_score,
    "weighted_average_precision_score":
    partial(average_precision_score, average="weighted"),
    "samples_average_precision_score":
    partial(average_precision_score, average="samples"),
    "micro_average_precision_score":
    partial(average_precision_score, average="micro"),
    "macro_average_precision_score":
    partial(average_precision_score, average="macro"),
    "label_ranking_average_precision_score":
    label_ranking_average_precision_score,
}
# Union of the three dictionaries above; keyed by metric name.
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_MULTICLASS = [
    "samples_f0.5_score", "samples_f1_score", "samples_f2_score",
    "samples_precision_score", "samples_recall_score",
    # Those metrics don't support multiclass outputs
    "average_precision_score", "weighted_average_precision_score",
    "micro_average_precision_score", "macro_average_precision_score",
    "samples_average_precision_score",
    "label_ranking_average_precision_score",
    "roc_auc_score", "micro_roc_auc", "weighted_roc_auc",
    "macro_roc_auc", "samples_roc_auc",
    "coverage_error",
    "brier_score_loss",
    "label_ranking_loss",
]
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = [
    "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score"
]
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = [
    "roc_auc_score", "average_precision_score",
]
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = [
    "roc_curve",
    "brier_score_loss",
    "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
    # pos_label support deprecated; to be removed in 0.18:
    "weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
    "weighted_precision_score", "weighted_recall_score",
    "micro_f0.5_score", "micro_f1_score", "micro_f2_score",
    "micro_precision_score", "micro_recall_score",
    "macro_f0.5_score", "macro_f1_score", "macro_f2_score",
    "macro_precision_score", "macro_recall_score",
]
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that has a labels argument as well as a
# decision function argument. e.g hinge_loss
METRICS_WITH_LABELS = [
    "confusion_matrix",
    "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
    "weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
    "weighted_precision_score", "weighted_recall_score",
    "micro_f0.5_score", "micro_f1_score", "micro_f2_score",
    "micro_precision_score", "micro_recall_score",
    "macro_f0.5_score", "macro_f1_score", "macro_f2_score",
    "macro_precision_score", "macro_recall_score",
    "cohen_kappa_score",
]
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = [
    "accuracy_score",
    "jaccard_similarity_score",
    "zero_one_loss",
]
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = [
    "log_loss",
    "unnormalized_log_loss",
    "roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
    "micro_roc_auc", "macro_roc_auc",
    "average_precision_score", "weighted_average_precision_score",
    "samples_average_precision_score", "micro_average_precision_score",
    "macro_average_precision_score",
    "coverage_error", "label_ranking_loss",
]
# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = [
    "accuracy_score", "unnormalized_accuracy_score",
    "hamming_loss",
    "jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
    "zero_one_loss", "unnormalized_zero_one_loss",
    "precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
    "weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
    "weighted_precision_score", "weighted_recall_score",
    "micro_f0.5_score", "micro_f1_score", "micro_f2_score",
    "micro_precision_score", "micro_recall_score",
    "macro_f0.5_score", "macro_f1_score", "macro_f2_score",
    "macro_precision_score", "macro_recall_score",
    "samples_f0.5_score", "samples_f1_score", "samples_f2_score",
    "samples_precision_score", "samples_recall_score",
]
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = [
    "mean_absolute_error", "mean_squared_error", "r2_score",
    "explained_variance_score"
]
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = [
    "accuracy_score", "unnormalized_accuracy_score",
    "hamming_loss",
    "jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
    "zero_one_loss", "unnormalized_zero_one_loss",
    "f1_score", "weighted_f1_score", "micro_f1_score", "macro_f1_score",
    "matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
    "median_absolute_error",
    "cohen_kappa_score",
]
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = [
    "explained_variance_score",
    "r2_score",
    "confusion_matrix",
    "precision_score", "recall_score", "f2_score", "f0.5_score",
    "weighted_f0.5_score", "weighted_f2_score", "weighted_precision_score",
    "weighted_recall_score",
    "micro_f0.5_score", "micro_f2_score", "micro_precision_score",
    "micro_recall_score",
    "macro_f0.5_score", "macro_f2_score", "macro_precision_score",
    "macro_recall_score", "log_loss", "hinge_loss"
]
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = [
    "cohen_kappa_score",
    "confusion_matrix",
    "hamming_loss",
    "matthews_corrcoef_score",
    "median_absolute_error",
]
@ignore_warnings
def test_symmetry():
    """Check that each metric is (or is not) symmetric in its arguments."""
    rng = check_random_state(0)
    y_true = rng.randint(0, 2, size=(20, ))
    y_pred = rng.randint(0, 2, size=(20, ))
    # Every metric must be classified as symmetric, not symmetric,
    # thresholded, or undefined for multiclass input.
    covered = set(SYMMETRIC_METRICS).union(NOT_SYMMETRIC_METRICS,
                                           THRESHOLDED_METRICS,
                                           METRIC_UNDEFINED_MULTICLASS)
    assert_equal(covered, set(ALL_METRICS))
    # The two symmetry lists must be disjoint.
    assert_equal(
        set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)),
        set([]))
    for name in SYMMETRIC_METRICS:
        metric = ALL_METRICS[name]
        assert_almost_equal(metric(y_true, y_pred),
                            metric(y_pred, y_true),
                            err_msg="%s is not symmetric" % name)
    for name in NOT_SYMMETRIC_METRICS:
        metric = ALL_METRICS[name]
        assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)),
                    msg="%s seems to be symmetric" % name)
@ignore_warnings
def test_sample_order_invariance():
    """Shuffling samples must not change any metric value."""
    rng = check_random_state(0)
    y_true = rng.randint(0, 2, size=(20, ))
    y_pred = rng.randint(0, 2, size=(20, ))
    y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
    for name, metric in ALL_METRICS.items():
        # Skip metrics that are not defined for this input type.
        if name in METRIC_UNDEFINED_MULTICLASS:
            continue
        original = metric(y_true, y_pred)
        permuted = metric(y_true_shuffle, y_pred_shuffle)
        assert_almost_equal(original, permuted,
                            err_msg="%s is not sample order invariant"
                                    % name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
    """Sample-shuffling invariance for multilabel and multioutput targets."""
    rng = check_random_state(0)
    # 20 samples, 25 labels/outputs.
    y_true = rng.randint(0, 2, size=(20, 25))
    y_pred = rng.randint(0, 2, size=(20, 25))
    y_score = rng.normal(size=y_true.shape)
    (y_true_shuffle,
     y_pred_shuffle,
     y_score_shuffle) = shuffle(y_true, y_pred, y_score, random_state=0)
    for name in MULTILABELS_METRICS:
        metric = ALL_METRICS[name]
        assert_almost_equal(metric(y_true, y_pred),
                            metric(y_true_shuffle, y_pred_shuffle),
                            err_msg="%s is not sample order invariant"
                                    % name)
    for name in THRESHOLDED_MULTILABEL_METRICS:
        metric = ALL_METRICS[name]
        assert_almost_equal(metric(y_true, y_score),
                            metric(y_true_shuffle, y_score_shuffle),
                            err_msg="%s is not sample order invariant"
                                    % name)
    for name in MULTIOUTPUT_METRICS:
        metric = ALL_METRICS[name]
        # Regression metrics accept continuous scores as well as
        # integer-valued predictions.
        for original, permuted in [(y_score, y_score_shuffle),
                                   (y_pred, y_pred_shuffle)]:
            assert_almost_equal(metric(y_true, original),
                                metric(y_true_shuffle, permuted),
                                err_msg="%s is not sample order invariant"
                                        % name)
@ignore_warnings
def test_format_invariance_with_1d_vectors():
    """Metrics must give identical results for list, 1d-array and
    column-array representations of the same labels, and must reject
    ambiguous mixes involving row vectors."""
    random_state = check_random_state(0)
    y1 = random_state.randint(0, 2, size=(20, ))
    y2 = random_state.randint(0, 2, size=(20, ))
    # Same labels in four representations: list, 1d array, (n, 1) column,
    # and (1, n) row.
    y1_list = list(y1)
    y2_list = list(y2)
    y1_1d, y2_1d = np.array(y1), np.array(y2)
    assert_equal(y1_1d.ndim, 1)
    assert_equal(y2_1d.ndim, 1)
    y1_column = np.reshape(y1_1d, (-1, 1))
    y2_column = np.reshape(y2_1d, (-1, 1))
    y1_row = np.reshape(y1_1d, (1, -1))
    y2_row = np.reshape(y2_1d, (1, -1))
    for name, metric in ALL_METRICS.items():
        if name in METRIC_UNDEFINED_MULTICLASS:
            continue
        # Reference value computed on the original arrays.
        measure = metric(y1, y2)
        assert_almost_equal(metric(y1_list, y2_list), measure,
                            err_msg="%s is not representation invariant "
                                    "with list" % name)
        assert_almost_equal(metric(y1_1d, y2_1d), measure,
                            err_msg="%s is not representation invariant "
                                    "with np-array-1d" % name)
        assert_almost_equal(metric(y1_column, y2_column), measure,
                            err_msg="%s is not representation invariant "
                                    "with np-array-column" % name)
        # Mix format support
        assert_almost_equal(metric(y1_1d, y2_list), measure,
                            err_msg="%s is not representation invariant "
                                    "with mix np-array-1d and list" % name)
        assert_almost_equal(metric(y1_list, y2_1d), measure,
                            err_msg="%s is not representation invariant "
                                    "with mix np-array-1d and list" % name)
        assert_almost_equal(metric(y1_1d, y2_column), measure,
                            err_msg="%s is not representation invariant "
                                    "with mix np-array-1d and np-array-column"
                                    % name)
        assert_almost_equal(metric(y1_column, y2_1d), measure,
                            err_msg="%s is not representation invariant "
                                    "with mix np-array-1d and np-array-column"
                                    % name)
        assert_almost_equal(metric(y1_list, y2_column), measure,
                            err_msg="%s is not representation invariant "
                                    "with mix list and np-array-column"
                                    % name)
        assert_almost_equal(metric(y1_column, y2_list), measure,
                            err_msg="%s is not representation invariant "
                                    "with mix list and np-array-column"
                                    % name)
        # These mix representations aren't allowed
        assert_raises(ValueError, metric, y1_1d, y2_row)
        assert_raises(ValueError, metric, y1_row, y2_1d)
        assert_raises(ValueError, metric, y1_list, y2_row)
        assert_raises(ValueError, metric, y1_row, y2_list)
        assert_raises(ValueError, metric, y1_column, y2_row)
        assert_raises(ValueError, metric, y1_row, y2_column)
        # NB: We do not test for y1_row, y2_row as these may be
        # interpreted as multilabel or multioutput data.
        if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS +
                         MULTILABELS_METRICS)):
            assert_raises(ValueError, metric, y1_row, y2_row)
@ignore_warnings
def test_invariance_string_vs_numbers_labels():
    """Classification metrics must give the same result whether labels are
    integers, strings, or string objects (dtype=object)."""
    random_state = check_random_state(0)
    y1 = random_state.randint(0, 2, size=(20, ))
    y2 = random_state.randint(0, 2, size=(20, ))
    # Map 0 -> "eggs", 1 -> "spam" to build string-labelled copies.
    y1_str = np.array(["eggs", "spam"])[y1]
    y2_str = np.array(["eggs", "spam"])[y2]
    pos_label_str = "spam"
    labels_str = ["eggs", "spam"]
    for name, metric in CLASSIFICATION_METRICS.items():
        if name in METRIC_UNDEFINED_MULTICLASS:
            continue
        measure_with_number = metric(y1, y2)
        # Ugly, but handle case with a pos_label and label
        metric_str = metric
        if name in METRICS_WITH_POS_LABEL:
            metric_str = partial(metric_str, pos_label=pos_label_str)
        measure_with_str = metric_str(y1_str, y2_str)
        assert_array_equal(measure_with_number, measure_with_str,
                           err_msg="{0} failed string vs number invariance "
                                   "test".format(name))
        # Repeat with object-dtype string arrays.
        measure_with_strobj = metric_str(y1_str.astype('O'),
                                         y2_str.astype('O'))
        assert_array_equal(measure_with_number, measure_with_strobj,
                           err_msg="{0} failed string object vs number "
                                   "invariance test".format(name))
        if name in METRICS_WITH_LABELS:
            # Also pass the explicit string labels list.
            metric_str = partial(metric_str, labels=labels_str)
            measure_with_str = metric_str(y1_str, y2_str)
            assert_array_equal(measure_with_number, measure_with_str,
                               err_msg="{0} failed string vs number  "
                                       "invariance test".format(name))
            measure_with_strobj = metric_str(y1_str.astype('O'),
                                             y2_str.astype('O'))
            assert_array_equal(measure_with_number, measure_with_strobj,
                               err_msg="{0} failed string vs number  "
                                       "invariance test".format(name))
    for name, metric in THRESHOLDED_METRICS.items():
        if name in ("log_loss", "hinge_loss", "unnormalized_log_loss",
                    "brier_score_loss"):
            # Ugly, but handle case with a pos_label and label
            metric_str = metric
            if name in METRICS_WITH_POS_LABEL:
                metric_str = partial(metric_str, pos_label=pos_label_str)
            # Only y_true is string-labelled; y2 stays a numeric score.
            measure_with_number = metric(y1, y2)
            measure_with_str = metric_str(y1_str, y2)
            assert_array_equal(measure_with_number, measure_with_str,
                               err_msg="{0} failed string vs number "
                                       "invariance test".format(name))
            measure_with_strobj = metric(y1_str.astype('O'), y2)
            assert_array_equal(measure_with_number, measure_with_strobj,
                               err_msg="{0} failed string object vs number "
                                       "invariance test".format(name))
        else:
            # TODO those metrics doesn't support string label yet
            assert_raises(ValueError, metric, y1_str, y2)
            assert_raises(ValueError, metric, y1_str.astype('O'), y2)
@ignore_warnings
def check_single_sample(name):
    """Smoke-test metric *name* on every single-sample label pair.

    Non-regression check: important for leave-one-out cross validation,
    where score functions that formerly called np.squeeze turned a
    size-1 array into a 0-d array.
    """
    metric = ALL_METRICS[name]
    # Assert only that no exception is raised.
    for true_label, pred_label in product([0, 1], repeat=2):
        metric([true_label], [pred_label])
@ignore_warnings
def check_single_sample_multioutput(name):
    """Smoke-test metric *name* on single-sample, two-output arrays."""
    metric = ALL_METRICS[name]
    # Every combination of binary values over the four cells.
    for t0, t1, p0, p1 in product([0, 1], repeat=4):
        metric(np.array([[t0, t1]]), np.array([[p0, p1]]))
def test_single_sample():
    """Yield per-metric single-sample smoke tests (nose-style generator)."""
    # Thresholded metrics and those undefined for multiclass input are
    # not always defined with one sample.
    skipped = set(METRIC_UNDEFINED_MULTICLASS) | set(THRESHOLDED_METRICS)
    for name in ALL_METRICS:
        if name not in skipped:
            yield check_single_sample, name
    for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS:
        yield check_single_sample_multioutput, name
def test_multioutput_number_of_output_differ():
    """A mismatch in the number of outputs must raise ValueError."""
    y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])  # 4 outputs
    y_pred = np.array([[0, 0], [1, 0], [0, 0]])                    # 2 outputs
    for name in MULTIOUTPUT_METRICS:
        assert_raises(ValueError, ALL_METRICS[name], y_true, y_pred)
def test_multioutput_regression_invariance_to_dimension_shuffling():
    """Permuting the output columns must not change multioutput scores."""
    rng = check_random_state(0)
    y_true = rng.uniform(0, 2, size=(20, 5))
    y_pred = rng.uniform(0, 2, size=(20, 5))
    for name in MULTIOUTPUT_METRICS:
        metric = ALL_METRICS[name]
        baseline = metric(y_true, y_pred)
        # Try a few random column permutations.
        for _ in range(3):
            perm = rng.permutation(y_true.shape[1])
            assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]),
                                baseline,
                                err_msg="%s is not dimension shuffling "
                                        "invariant" % name)
@ignore_warnings
def test_multilabel_representation_invariance():
    """Multilabel metrics must agree between dense and sparse indicator
    matrices."""
    # Generate some data
    n_classes = 4
    n_samples = 50
    _, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes,
                                           random_state=0, n_samples=n_samples,
                                           allow_unlabeled=True)
    _, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes,
                                           random_state=1, n_samples=n_samples,
                                           allow_unlabeled=True)
    # To make sure at least one empty label is present
    y1 += [0]*n_classes
    y2 += [0]*n_classes
    y1_sparse_indicator = sp.coo_matrix(y1)
    y2_sparse_indicator = sp.coo_matrix(y2)
    for name in MULTILABELS_METRICS:
        metric = ALL_METRICS[name]
        # XXX cruel hack to work with partial functions
        # (partial objects lack __module__/__name__, which some error
        # reporting paths expect).
        if isinstance(metric, partial):
            metric.__module__ = 'tmp'
            metric.__name__ = name
        # Reference value computed on the dense representation.
        measure = metric(y1, y2)
        # Check representation invariance
        assert_almost_equal(metric(y1_sparse_indicator,
                                   y2_sparse_indicator),
                            measure,
                            err_msg="%s failed representation invariance  "
                                    "between dense and sparse indicator "
                                    "formats." % name)
def test_raise_value_error_multilabel_sequences():
    # make sure the multilabel-sequence format raises ValueError
    multilabel_sequences = [
        [[0, 1]],
        [[1], [2], [0, 1]],
        # NOTE(review): (2) is an int, not a 1-tuple -- presumably an
        # intentional mixed-type junk input, but confirm.
        [(), (2), (0, 1)],
        [[]],
        [()],
        np.array([[], [1, 2]], dtype='object')]
    for name in MULTILABELS_METRICS:
        metric = ALL_METRICS[name]
        for seq in multilabel_sequences:
            assert_raises(ValueError, metric, seq, seq)
def test_normalize_option_binary_classification(n_samples=20):
    """normalize=False must equal normalize=True times n_samples (binary)."""
    rng = check_random_state(0)
    y_true = rng.randint(0, 2, size=(n_samples, ))
    y_pred = rng.randint(0, 2, size=(n_samples, ))
    for name in METRICS_WITH_NORMALIZE_OPTION:
        metric = ALL_METRICS[name]
        normalized = metric(y_true, y_pred, normalize=True)
        # A zero value would make the ratio check vacuous.
        assert_greater(normalized, 0,
                       msg="We failed to test correctly the normalize option")
        unnormalized = metric(y_true, y_pred, normalize=False)
        assert_almost_equal(unnormalized / n_samples, normalized)
def test_normalize_option_multiclasss_classification():
    """normalize=False must equal normalize=True times n_samples (multiclass)."""
    # NOTE: "multiclasss" typo kept -- the public test name must not change.
    rng = check_random_state(0)
    y_true = rng.randint(0, 4, size=(20, ))
    y_pred = rng.randint(0, 4, size=(20, ))
    n_samples = y_true.shape[0]
    for name in METRICS_WITH_NORMALIZE_OPTION:
        metric = ALL_METRICS[name]
        normalized = metric(y_true, y_pred, normalize=True)
        assert_greater(normalized, 0,
                       msg="We failed to test correctly the normalize option")
        unnormalized = metric(y_true, y_pred, normalize=False)
        assert_almost_equal(unnormalized / n_samples, normalized)
def test_normalize_option_multilabel_classification():
    """normalize=False must equal normalize=True times n_samples (multilabel)."""
    n_classes = 4
    n_samples = 100
    # For both random_state 0 and 1 the generated targets contain at least
    # one unlabelled entry.
    _, y_true = make_multilabel_classification(n_features=1,
                                               n_classes=n_classes,
                                               random_state=0,
                                               allow_unlabeled=True,
                                               n_samples=n_samples)
    _, y_pred = make_multilabel_classification(n_features=1,
                                               n_classes=n_classes,
                                               random_state=1,
                                               allow_unlabeled=True,
                                               n_samples=n_samples)
    # To make sure at least one empty label is present
    y_true += [0] * n_classes
    y_pred += [0] * n_classes
    for name in METRICS_WITH_NORMALIZE_OPTION:
        metric = ALL_METRICS[name]
        normalized = metric(y_true, y_pred, normalize=True)
        assert_greater(normalized, 0,
                       msg="We failed to test correctly the normalize option")
        assert_almost_equal(metric(y_true, y_pred, normalize=False)
                            / n_samples, normalized,
                            err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
                     is_multilabel):
    """Check every ``average`` mode of ``metric`` against explicit
    per-label / per-sample computations on the binarized targets, and
    that an unrecognized ``average`` value raises ValueError.
    """
    n_samples, n_classes = y_true_binarize.shape
    # No averaging: one score per label, each recomputed on the
    # corresponding binarized column
    label_measure = metric(y_true, y_pred, average=None)
    assert_array_almost_equal(label_measure,
                              [metric(y_true_binarize[:, i],
                                      y_pred_binarize[:, i])
                               for i in range(n_classes)])
    # Micro measure: global computation over the flattened indicator matrix
    micro_measure = metric(y_true, y_pred, average="micro")
    assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(),
                                              y_pred_binarize.ravel()))
    # Macro measure: unweighted mean of the per-label scores
    macro_measure = metric(y_true, y_pred, average="macro")
    assert_almost_equal(macro_measure, np.mean(label_measure))
    # Weighted measure: per-label scores averaged by label support
    weights = np.sum(y_true_binarize, axis=0, dtype=int)
    if np.sum(weights) != 0:
        weighted_measure = metric(y_true, y_pred, average="weighted")
        assert_almost_equal(weighted_measure, np.average(label_measure,
                                                         weights=weights))
    else:
        # no positive labels at all: the weighted score degenerates to 0
        weighted_measure = metric(y_true, y_pred, average="weighted")
        assert_almost_equal(weighted_measure, 0)
    # Sample measure (only defined for multilabel targets): mean of the
    # per-sample scores computed on the binarized rows
    if is_multilabel:
        sample_measure = metric(y_true, y_pred, average="samples")
        assert_almost_equal(sample_measure,
                            np.mean([metric(y_true_binarize[i],
                                            y_pred_binarize[i])
                                     for i in range(n_samples)]))
    assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
    assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
                    y_score):
    """Dispatch the averaging checks for the metric called ``name``.

    Hard (label) metrics are checked on predictions, thresholded metrics
    on continuous scores; anything else is a registry error.
    """
    multilabel = type_of_target(y_true).startswith("multilabel")
    metric = ALL_METRICS[name]
    if name in METRICS_WITH_AVERAGING:
        predictions, binarized = y_pred, y_pred_binarize
    elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
        # thresholded metrics consume the raw scores on both sides
        predictions, binarized = y_score, y_score
    else:
        raise ValueError("Metric is not recorded as having an average option")
    _check_averaging(metric, y_true, predictions, y_true_binarize,
                     binarized, multilabel)
def test_averaging_multiclass(n_samples=50, n_classes=3):
    """Yield the averaging checks on random multiclass targets."""
    rng = check_random_state(0)
    y_true = rng.randint(0, n_classes, size=(n_samples, ))
    y_pred = rng.randint(0, n_classes, size=(n_samples, ))
    y_score = rng.uniform(size=(n_samples, n_classes))
    # binarize both target vectors with the label set seen in y_true
    binarizer = LabelBinarizer().fit(y_true)
    y_true_binarize = binarizer.transform(y_true)
    y_pred_binarize = binarizer.transform(y_pred)
    for name in METRICS_WITH_AVERAGING:
        yield (check_averaging, name, y_true, y_true_binarize, y_pred,
               y_pred_binarize, y_score)
def test_averaging_multilabel(n_classes=5, n_samples=40):
    """Yield the averaging checks on random multilabel-indicator targets."""
    _, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
                                          random_state=5, n_samples=n_samples,
                                          allow_unlabeled=False)
    # split one generated indicator matrix into "truth" and "prediction"
    y_true, y_pred = y[:20], y[20:]
    y_score = check_random_state(0).normal(size=(20, n_classes))
    # indicator matrices are already their own binarized form
    for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING:
        yield (check_averaging, name, y_true, y_true, y_pred,
               y_pred, y_score)
def test_averaging_multilabel_all_zeroes():
    """Averaging checks on degenerate all-zero multilabel targets."""
    y_true = np.zeros((20, 3))
    y_pred = np.zeros((20, 3))
    y_score = np.zeros((20, 3))
    # indicator matrices are already their own binarized form
    y_true_binarize = y_true
    y_pred_binarize = y_pred
    for name in METRICS_WITH_AVERAGING:
        yield (check_averaging, name, y_true, y_true_binarize, y_pred,
               y_pred_binarize, y_score)
    # Test _average_binary_score for weight.sum() == 0
    binary_metric = (lambda y_true, y_score, average="macro":
                     _average_binary_score(
                         precision_score, y_true, y_score, average))
    _check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
                     y_pred_binarize, is_multilabel=True)
def test_averaging_multilabel_all_ones():
    """Averaging checks when every label is present for every sample."""
    shape = (20, 3)
    y_true = np.ones(shape)
    y_pred = np.ones(shape)
    y_score = np.ones(shape)
    # indicator matrices are already their own binarized form
    for name in METRICS_WITH_AVERAGING:
        yield (check_averaging, name, y_true, y_true, y_pred,
               y_pred, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
    """Check the ``sample_weight`` contract of ``metric`` on targets y1/y2.

    Verifies that: unit weights match no weights, non-uniform weights
    change the score, list weights match array weights, integer weights
    match sample repetition, zero weights match sample removal, the score
    is invariant under weight scaling (normalized metrics only), and a
    length mismatch between weights and targets raises.
    """
    rng = np.random.RandomState(0)
    # strictly positive integer weights so they can double as repeat counts
    sample_weight = rng.randint(1, 10, size=len(y1))
    # check that unit weights gives the same score as no weight
    unweighted_score = metric(y1, y2, sample_weight=None)
    assert_almost_equal(
        unweighted_score,
        metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
        err_msg="For %s sample_weight=None is not equivalent to "
                "sample_weight=ones" % name)
    # check that the weighted and unweighted scores are unequal
    weighted_score = metric(y1, y2, sample_weight=sample_weight)
    assert_not_equal(
        unweighted_score, weighted_score,
        msg="Unweighted and weighted scores are unexpectedly "
            "equal (%f) for %s" % (weighted_score, name))
    # check that sample_weight can be a list
    weighted_score_list = metric(y1, y2,
                                 sample_weight=sample_weight.tolist())
    assert_almost_equal(
        weighted_score, weighted_score_list,
        err_msg="Weighted scores for array and list sample_weight input are "
                "not equal (%f != %f) for %s" % (
                    weighted_score, weighted_score_list, name))
    # check that integer weights is the same as repeated samples
    repeat_weighted_score = metric(
        np.repeat(y1, sample_weight, axis=0),
        np.repeat(y2, sample_weight, axis=0), sample_weight=None)
    assert_almost_equal(
        weighted_score, repeat_weighted_score,
        err_msg="Weighting %s is not equal to repeating samples" % name)
    # check that ignoring a fraction of the samples is equivalent to setting
    # the corresponding weights to zero
    sample_weight_subset = sample_weight[1::2]
    sample_weight_zeroed = np.copy(sample_weight)
    sample_weight_zeroed[::2] = 0
    y1_subset = y1[1::2]
    y2_subset = y2[1::2]
    weighted_score_subset = metric(y1_subset, y2_subset,
                                   sample_weight=sample_weight_subset)
    weighted_score_zeroed = metric(y1, y2,
                                   sample_weight=sample_weight_zeroed)
    assert_almost_equal(
        weighted_score_subset, weighted_score_zeroed,
        err_msg=("Zeroing weights does not give the same result as "
                 "removing the corresponding samples (%f != %f) for %s" %
                 (weighted_score_zeroed, weighted_score_subset, name)))
    if not name.startswith('unnormalized'):
        # check that the score is invariant under scaling of the weights by a
        # common factor
        for scaling in [2, 0.3]:
            assert_almost_equal(
                weighted_score,
                metric(y1, y2, sample_weight=sample_weight * scaling),
                err_msg="%s sample_weight is not invariant "
                        "under scaling" % name)
    # Check that if sample_weight.shape[0] != y_true.shape[0], it raises an
    # error
    assert_raises(Exception, metric, y1, y2,
                  sample_weight=np.hstack([sample_weight, sample_weight]))
def test_sample_weight_invariance(n_samples=50):
    """Yield sample-weight invariance checks for every weighted metric.

    Binary, multiclass and multilabel-indicator targets are covered;
    metrics registered as not supporting ``sample_weight`` (or undefined
    for the target type) are skipped.
    """
    # binary output
    # (the original code seeded random_state twice in a row; one call
    # is sufficient since both created RandomState(0))
    random_state = check_random_state(0)
    y_true = random_state.randint(0, 2, size=(n_samples, ))
    y_pred = random_state.randint(0, 2, size=(n_samples, ))
    y_score = random_state.random_sample(size=(n_samples,))
    for name in ALL_METRICS:
        if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
                name in METRIC_UNDEFINED_MULTICLASS):
            continue
        metric = ALL_METRICS[name]
        # thresholded metrics consume scores, the others hard predictions
        if name in THRESHOLDED_METRICS:
            yield check_sample_weight_invariance, name, metric, y_true, y_score
        else:
            yield check_sample_weight_invariance, name, metric, y_true, y_pred
    # multiclass
    random_state = check_random_state(0)
    y_true = random_state.randint(0, 5, size=(n_samples, ))
    y_pred = random_state.randint(0, 5, size=(n_samples, ))
    y_score = random_state.random_sample(size=(n_samples, 5))
    for name in ALL_METRICS:
        if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
                name in METRIC_UNDEFINED_MULTICLASS):
            continue
        metric = ALL_METRICS[name]
        if name in THRESHOLDED_METRICS:
            yield check_sample_weight_invariance, name, metric, y_true, y_score
        else:
            yield check_sample_weight_invariance, name, metric, y_true, y_pred
    # multilabel indicator
    _, ya = make_multilabel_classification(n_features=1, n_classes=20,
                                           random_state=0, n_samples=100,
                                           allow_unlabeled=False)
    _, yb = make_multilabel_classification(n_features=1, n_classes=20,
                                           random_state=1, n_samples=100,
                                           allow_unlabeled=False)
    y_true = np.vstack([ya, yb])
    y_pred = np.vstack([ya, ya])
    y_score = random_state.randint(1, 4, size=y_true.shape)
    for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS +
                 MULTIOUTPUT_METRICS):
        if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
            continue
        metric = ALL_METRICS[name]
        if name in THRESHOLDED_METRICS:
            yield (check_sample_weight_invariance, name, metric, y_true,
                   y_score)
        else:
            yield (check_sample_weight_invariance, name, metric, y_true,
                   y_pred)
def test_no_averaging_labels():
    """Check that ``labels`` only reorders the per-label scores.

    With ``average=None``, passing a permuted ``labels`` array must return
    the same scores as the default label order, permuted accordingly.
    """
    # test labels argument when not using averaging
    # in multi-class and multi-label cases
    y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
    y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
    y_true_multiclass = np.array([0, 1, 2])
    y_pred_multiclass = np.array([0, 2, 3])
    labels = np.array([3, 0, 1, 2])
    _, inverse_labels = np.unique(labels, return_inverse=True)
    for name in METRICS_WITH_AVERAGING:
        for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],
                               [y_true_multilabel, y_pred_multilabel]]:
            # skip 2-d (multilabel) inputs for metrics that only support
            # 1-d targets; the previous ``y_pred.shape[1] > 0`` test raised
            # IndexError on the 1-d multiclass arrays
            if name not in MULTILABELS_METRICS and y_pred.ndim > 1:
                continue
            metric = ALL_METRICS[name]
            score_labels = metric(y_true, y_pred, labels=labels, average=None)
            score = metric(y_true, y_pred, average=None)
            assert_array_equal(score_labels, score[inverse_labels])
| bsd-3-clause |
spbguru/repo1 | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_fltkagg.py | 69 | 20839 | """
A backend for FLTK
Copyright: Gregory Lielens, Free Field Technologies SA and
John D. Hunter 2004
This code is released under the matplotlib license
"""
from __future__ import division
import os, sys, math
import fltk as Fltk
from backend_agg import FigureCanvasAgg
import os.path
import matplotlib
from matplotlib import rcParams, verbose
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import \
RendererBase, GraphicsContextBase, FigureManagerBase, FigureCanvasBase,\
NavigationToolbar2, cursors
from matplotlib.figure import Figure
from matplotlib._pylab_helpers import Gcf
import matplotlib.windowing as windowing
from matplotlib.widgets import SubplotTool
import thread,time
Fl_running=thread.allocate_lock()
def Fltk_run_interactive():
    """Poll the FLTK event loop forever; meant to run in a background thread.

    The module-level ``Fl_running`` lock guarantees that at most one
    polling thread is ever started.
    """
    global Fl_running
    # non-blocking acquire: returns immediately if another thread holds it
    if Fl_running.acquire(0):
        while True:
            Fltk.Fl.check()
            time.sleep(0.005)
    else:
        print "fl loop already running"
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 75
# map matplotlib cursor constants onto the FLTK cursor shapes
cursord= {
    cursors.HAND: Fltk.FL_CURSOR_HAND,
    cursors.POINTER: Fltk.FL_CURSOR_ARROW,
    cursors.SELECT_REGION: Fltk.FL_CURSOR_CROSS,
    cursors.MOVE: Fltk.FL_CURSOR_MOVE
    }
# translate FLTK modifier key codes to matplotlib key names.
# The previous literal repeated the FL_Control_R/FL_Control_L entries;
# duplicate dict keys are silently collapsed, so they are dropped here.
# 65515/65516 are the left/right "windows" keys, for which this fltk
# binding exposes no symbolic constant.
special_key={
    Fltk.FL_Shift_R:'shift',
    Fltk.FL_Shift_L:'shift',
    Fltk.FL_Control_R:'control',
    Fltk.FL_Control_L:'control',
    65515:'win',
    65516:'win',
    }
def error_msg_fltk(msg, parent=None):
    # Display *msg* in a modal FLTK message box.  *parent* is accepted for
    # signature compatibility with other backends but is unused here.
    Fltk.fl_message(msg)
def draw_if_interactive():
    """Redraw the active figure, but only when in interactive mode."""
    if not matplotlib.is_interactive():
        return
    manager = Gcf.get_active()
    if manager is not None:
        manager.canvas.draw()
def ishow():
    """
    Show all the figures and enter the fltk mainloop in another thread.

    This allows one to keep a hand in an interactive python session.
    Warning: does not work under windows.
    This should be the last line of your script.
    """
    for manager in Gcf.get_all_fig_managers():
        manager.show()
    # reuse show()'s guard flag so the polling thread is only started once
    if show._needmain:
        thread.start_new_thread(Fltk_run_interactive,())
        show._needmain = False
def show():
    """
    Show all the figures and enter the fltk mainloop.

    This should be the last line of your script.
    """
    for manager in Gcf.get_all_fig_managers():
        manager.show()
    # mainloop; if an fltk program already runs one there is no need to
    # call it again (the threaded/interactive variant clears the flag too)
    if show._needmain:
        Fltk.Fl.run()
        show._needmain = False
# function attribute guarding against entering the mainloop twice
show._needmain = True
def new_figure_manager(num, *args, **kwargs):
    """Create and return a new FigureManagerFltkAgg for figure *num*."""
    figure_class = kwargs.pop('FigureClass', Figure)
    fig = figure_class(*args, **kwargs)
    # the window starts tiny; FigureManagerFltkAgg resizes it to the figure
    window = Fltk.Fl_Double_Window(10, 10, 30, 30)
    canvas = FigureCanvasFltkAgg(fig)
    window.end()
    window.show()
    window.make_current()
    manager = FigureManagerFltkAgg(canvas, num, window)
    if matplotlib.is_interactive():
        manager.show()
    return manager
class FltkCanvas(Fltk.Fl_Widget):
    """FLTK widget that displays an agg-rendered matplotlib figure.

    Forwards FLTK mouse and keyboard events to the matplotlib event
    machinery of the owning FigureCanvasFltkAgg (``source``).
    """

    def __init__(self,x,y,w,h,l,source):
        # x, y and l are accepted for Fl_Widget signature compatibility
        # only; the widget is always created at (0, 0) labelled "canvas"
        Fltk.Fl_Widget.__init__(self, 0, 0, w, h, "canvas")
        self._source=source
        self._oldsize=(None,None)
        self._draw_overlay = False
        self._button = None
        self._key = None

    def draw(self):
        """Render the figure and copy the full RGBA buffer to the screen."""
        newsize=(self.w(),self.h())
        if(self._oldsize !=newsize):
            # the widget was resized: propagate the new pixel size
            self._oldsize =newsize
            self._source.resize(newsize)
        self._source.draw()
        t1,t2,w,h = self._source.figure.bbox.bounds
        Fltk.fl_draw_image(self._source.buffer_rgba(0,0),0,0,int(w),int(h),4,0)
        self.redraw()

    def blit(self,bbox=None):
        """Copy (part of) the agg buffer to the screen.

        If *bbox* is None the whole figure region is blitted.
        """
        if bbox is None:
            t1,t2,w,h = self._source.figure.bbox.bounds
            # BUGFIX: ``wo`` (the buffer row width used for the line
            # stride below) was never assigned in this branch, raising
            # NameError; a full blit strides over the full figure width.
            wo = w
        else:
            t1o,t2o,wo,ho = self._source.figure.bbox.bounds
            t1,t2,w,h = bbox.bounds
        x,y=int(t1),int(t2)
        Fltk.fl_draw_image(self._source.buffer_rgba(x,y),x,y,int(w),int(h),4,int(wo)*4)
        #self.redraw()

    def handle(self, event):
        """Translate FLTK events into matplotlib canvas events.

        Returns 1 when the event was consumed, 0 otherwise.
        """
        x=Fltk.Fl.event_x()
        y=Fltk.Fl.event_y()
        # FLTK's y axis points down, matplotlib's points up
        yf=self._source.figure.bbox.height() - y
        if event == Fltk.FL_FOCUS or event == Fltk.FL_UNFOCUS:
            return 1
        elif event == Fltk.FL_KEYDOWN:
            ikey= Fltk.Fl.event_key()
            if(ikey<=255):
                self._key=chr(ikey)
            else:
                # was a bare ``except:``; the dict lookup can only raise
                # KeyError, and unknown keys are reported as None
                try:
                    self._key=special_key[ikey]
                except KeyError:
                    self._key=None
            FigureCanvasBase.key_press_event(self._source, self._key)
            return 1
        elif event == Fltk.FL_KEYUP:
            FigureCanvasBase.key_release_event(self._source, self._key)
            self._key=None
        elif event == Fltk.FL_PUSH:
            self.window().make_current()
            if Fltk.Fl.event_button1():
                self._button = 1
            elif Fltk.Fl.event_button2():
                self._button = 2
            elif Fltk.Fl.event_button3():
                self._button = 3
            else:
                self._button = None
            if self._draw_overlay:
                # remember the anchor of the rubber-band rectangle
                self._oldx=x
                self._oldy=y
            if Fltk.Fl.event_clicks():
                FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
                return 1
            else:
                FigureCanvasBase.button_press_event(self._source, x, yf, self._button)
                return 1
        elif event == Fltk.FL_ENTER:
            self.take_focus()
            return 1
        elif event == Fltk.FL_LEAVE:
            return 1
        elif event == Fltk.FL_MOVE:
            FigureCanvasBase.motion_notify_event(self._source, x, yf)
            return 1
        elif event == Fltk.FL_DRAG:
            self.window().make_current()
            if self._draw_overlay:
                self._dx=Fltk.Fl.event_x()-self._oldx
                self._dy=Fltk.Fl.event_y()-self._oldy
                Fltk.fl_overlay_rect(self._oldx,self._oldy,self._dx,self._dy)
            FigureCanvasBase.motion_notify_event(self._source, x, yf)
            return 1
        elif event == Fltk.FL_RELEASE:
            self.window().make_current()
            if self._draw_overlay:
                Fltk.fl_overlay_clear()
            FigureCanvasBase.button_release_event(self._source, x, yf, self._button)
            self._button = None
            return 1
        return 0
class FigureCanvasFltkAgg(FigureCanvasAgg):
    """Agg canvas wrapped in an FltkCanvas widget for on-screen display."""
    def __init__(self, figure):
        FigureCanvasAgg.__init__(self,figure)
        t1,t2,w,h = self.figure.bbox.bounds
        w, h = int(w), int(h)
        self.canvas=FltkCanvas(0, 0, w, h, "canvas",self)
        #self.draw()
    def resize(self,size):
        """Resize the figure (in inches) to match a new pixel size."""
        w, h = size
        # compute desired figure size in inches
        dpival = self.figure.dpi.get()
        winch = w/dpival
        hinch = h/dpival
        self.figure.set_size_inches(winch,hinch)
    def draw(self):
        """Render with agg, then ask the FLTK widget to repaint itself."""
        FigureCanvasAgg.draw(self)
        self.canvas.redraw()
    def blit(self,bbox):
        self.canvas.blit(bbox)
    show = draw  # showing the canvas is simply drawing it
    def widget(self):
        """Return the underlying Fl_Widget."""
        return self.canvas
    def start_event_loop(self,timeout):
        FigureCanvasBase.start_event_loop_default(self,timeout)
    start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
    def stop_event_loop(self):
        FigureCanvasBase.stop_event_loop_default(self)
    stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
def destroy_figure(ptr,figman):
    # window-close callback; *ptr* is the Fl widget that fired (unused)
    figman.window.hide()
    Gcf.destroy(figman._num)
class FigureManagerFltkAgg(FigureManagerBase):
    """
    Public attributes
    canvas : The FigureCanvas instance
    num : The Figure number
    toolbar : The fltk.Toolbar
    window : The fltk.Window
    """
    def __init__(self, canvas, num, window):
        FigureManagerBase.__init__(self, canvas, num)
        #Fltk container window
        t1,t2,w,h = canvas.figure.bbox.bounds
        w, h = int(w), int(h)
        self.window = window
        # extra 30 pixels leave room for the toolbar below the canvas
        self.window.size(w,h+30)
        self.window_title="Figure %d" % num
        self.window.label(self.window_title)
        self.window.size_range(350,200)
        self.window.callback(destroy_figure,self)
        self.canvas = canvas
        self._num = num
        # pick the toolbar implementation configured in matplotlibrc
        if matplotlib.rcParams['toolbar']=='classic':
            self.toolbar = NavigationToolbar( canvas, self )
        elif matplotlib.rcParams['toolbar']=='toolbar2':
            self.toolbar = NavigationToolbar2FltkAgg( canvas, self )
        else:
            self.toolbar = None
        self.window.add_resizable(canvas.widget())
        if self.toolbar:
            self.window.add(self.toolbar.widget())
            self.toolbar.update()
        self.window.show()
        def notify_axes_change(fig):
            'this will be called whenever the current axes is changed'
            if self.toolbar != None: self.toolbar.update()
        self.canvas.figure.add_axobserver(notify_axes_change)
    def resize(self, event):
        """Resize the toolbar to track the window width."""
        width, height = event.width, event.height
        self.toolbar.configure(width=width) # , height=height)
    def show(self):
        """Redraw the canvas and repaint the containing window."""
        # NOTE(review): presumably FocusManager restores keyboard focus
        # after drawing - confirm against matplotlib.windowing
        _focus = windowing.FocusManager()
        self.canvas.draw()
        self.window.redraw()
    def set_window_title(self, title):
        """Set (and remember) the window title."""
        self.window_title=title
        self.window.label(title)
class AxisMenu:
    """Popup menu listing the figure's axes with check-mark toggles.

    Used by the toolbars to choose which axes pan/zoom operations act on.
    """
    def __init__(self, toolbar):
        self.toolbar=toolbar
        self._naxes = toolbar.naxes
        self._mbutton = Fltk.Fl_Menu_Button(0,0,50,10,"Axes")
        self._mbutton.add("Select All",0,select_all,self,0)
        self._mbutton.add("Invert All",0,invert_all,self,Fltk.FL_MENU_DIVIDER)
        # parallel lists: menu labels and the corresponding toggle items
        self._axis_txt=[]
        self._axis_var=[]
        for i in range(self._naxes):
            self._axis_txt.append("Axis %d" % (i+1))
            self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
        for i in range(self._naxes):
            self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
            self._axis_var[i].set()
    def adjust(self, naxes):
        """Grow or shrink the menu so it has exactly *naxes* entries."""
        if self._naxes < naxes:
            for i in range(self._naxes, naxes):
                self._axis_txt.append("Axis %d" % (i+1))
                self._mbutton.add(self._axis_txt[i],0,set_active,self,Fltk.FL_MENU_TOGGLE)
            for i in range(self._naxes, naxes):
                self._axis_var.append(self._mbutton.find_item(self._axis_txt[i]))
                self._axis_var[i].set()
        elif self._naxes > naxes:
            # remove trailing entries (offset 2 skips the two fixed items)
            for i in range(self._naxes-1, naxes-1, -1):
                self._mbutton.remove(i+2)
            if(naxes):
                self._axis_var=self._axis_var[:naxes-1]
                self._axis_txt=self._axis_txt[:naxes-1]
            else:
                self._axis_var=[]
                self._axis_txt=[]
        self._naxes = naxes
        set_active(0,self)
    def widget(self):
        """Return the Fl_Menu_Button widget."""
        return self._mbutton
    def get_indices(self):
        """Return the indices of the currently checked axes."""
        a = [i for i in range(len(self._axis_var)) if self._axis_var[i].value()]
        return a
def set_active(ptr,amenu):
    # menu callback: apply the checked axes selection to the toolbar
    amenu.toolbar.set_active(amenu.get_indices())
def invert_all(ptr,amenu):
    """Menu callback for "Invert All": toggle every axis entry.

    The previous implementation only set unchecked entries and never
    cleared checked ones, so "Invert All" behaved exactly like
    "Select All"; a true inversion toggles each item.
    """
    for a in amenu._axis_var:
        if not a.value():
            a.set()
        else:
            a.clear()
    set_active(ptr,amenu)
def select_all(ptr,amenu):
    """Menu callback for "Select All": check every axis entry and apply."""
    for item in amenu._axis_var:
        item.set()
    set_active(ptr,amenu)
class FLTKButton:
    """Toolbar button wrapping one of several Fl button variants.

    *type* selects the widget flavour: "classic" (plain), "repeat"
    (auto-repeats while held), "light" (with indicator light) or
    "pushed" (toggles in/out).  The icon is loaded from matplotlib's
    image directory as a PNM file.
    """
    def __init__(self, text, file, command,argument,type="classic"):
        file = os.path.join(rcParams['datapath'], 'images', file)
        self.im = Fltk.Fl_PNM_Image(file)
        size=26
        if type=="repeat":
            self.b = Fltk.Fl_Repeat_Button(0,0,size,10)
            self.b.box(Fltk.FL_THIN_UP_BOX)
        elif type=="classic":
            self.b = Fltk.Fl_Button(0,0,size,10)
            self.b.box(Fltk.FL_THIN_UP_BOX)
        elif type=="light":
            self.b = Fltk.Fl_Light_Button(0,0,size+20,10)
            self.b.box(Fltk.FL_THIN_UP_BOX)
        elif type=="pushed":
            self.b = Fltk.Fl_Button(0,0,size,10)
            self.b.box(Fltk.FL_UP_BOX)
            self.b.down_box(Fltk.FL_DOWN_BOX)
            self.b.type(Fltk.FL_TOGGLE_BUTTON)
        # keep a reference: FLTK stores only a pointer to the tooltip text
        self.tooltiptext=text+" "
        self.b.tooltip(self.tooltiptext)
        self.b.callback(command,argument)
        self.b.image(self.im)
        self.b.deimage(self.im)
        self.type=type
    def widget(self):
        """Return the wrapped Fl button widget."""
        return self.b
class NavigationToolbar:
    """
    Classic pan/zoom toolbar.

    Public attributes
      canvas - the FigureCanvas  (FigureCanvasFltkAgg = customised fltk.Widget)
    """
    def __init__(self, canvas, figman):
        #xmin, xmax = canvas.figure.bbox.intervalx().get_bounds()
        #height, width = 50, xmax-xmin
        self.canvas = canvas
        self.figman = figman
        Fltk.Fl_File_Icon.load_system_icons()
        # file chooser reused by save_figure(); created hidden up front
        self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
        self._fc.hide()
        t1,t2,w,h = canvas.figure.bbox.bounds
        w, h = int(w), int(h)
        # horizontal Fl_Pack laid out just below the canvas
        self._group = Fltk.Fl_Pack(0,h+2,1000,26)
        self._group.type(Fltk.FL_HORIZONTAL)
        self._axes=self.canvas.figure.axes
        self.naxes = len(self._axes)
        self.omenu = AxisMenu( toolbar=self)
        # pan/zoom buttons auto-repeat while held ("repeat" type)
        self.bLeft = FLTKButton(
            text="Left", file="stock_left.ppm",
            command=pan,argument=(self,1,'x'),type="repeat")
        self.bRight = FLTKButton(
            text="Right", file="stock_right.ppm",
            command=pan,argument=(self,-1,'x'),type="repeat")
        self.bZoomInX = FLTKButton(
            text="ZoomInX",file="stock_zoom-in.ppm",
            command=zoom,argument=(self,1,'x'),type="repeat")
        self.bZoomOutX = FLTKButton(
            text="ZoomOutX", file="stock_zoom-out.ppm",
            command=zoom, argument=(self,-1,'x'),type="repeat")
        self.bUp = FLTKButton(
            text="Up", file="stock_up.ppm",
            command=pan,argument=(self,1,'y'),type="repeat")
        self.bDown = FLTKButton(
            text="Down", file="stock_down.ppm",
            command=pan,argument=(self,-1,'y'),type="repeat")
        self.bZoomInY = FLTKButton(
            text="ZoomInY", file="stock_zoom-in.ppm",
            command=zoom,argument=(self,1,'y'),type="repeat")
        self.bZoomOutY = FLTKButton(
            text="ZoomOutY",file="stock_zoom-out.ppm",
            command=zoom, argument=(self,-1,'y'),type="repeat")
        self.bSave = FLTKButton(
            text="Save", file="stock_save_as.ppm",
            command=save_figure, argument=self)
        self._group.end()
    def widget(self):
        """Return the Fl_Pack containing all toolbar widgets."""
        return self._group
    def close(self):
        """Destroy the managed figure."""
        Gcf.destroy(self.figman._num)
    def set_active(self, ind):
        """Record which axes (by index) pan/zoom operations act on."""
        self._ind = ind
        self._active = [ self._axes[i] for i in self._ind ]
    def update(self):
        """Re-read the figure's axes list and refresh the axis menu."""
        self._axes = self.canvas.figure.axes
        naxes = len(self._axes)
        self.omenu.adjust(naxes)
def pan(ptr, arg):
    """Button callback: pan every active axes one step along one axis.

    *arg* is a ``(toolbar, direction, axis_name)`` tuple; *ptr* is the
    FLTK widget that fired and is unused.
    """
    toolbar, direction, axis = arg
    for ax in toolbar._active:
        if axis == 'x':
            ax.panx(direction)
        else:
            ax.pany(direction)
    toolbar.figman.show()
def zoom(ptr, arg):
    """Button callback: zoom every active axes one step along one axis.

    *arg* is a ``(toolbar, direction, axis_name)`` tuple; *ptr* is the
    FLTK widget that fired and is unused.
    """
    toolbar, direction, axis = arg
    for ax in toolbar._active:
        if axis == 'x':
            ax.zoomx(direction)
        else:
            ax.zoomy(direction)
    toolbar.figman.show()
def save_figure(ptr,base):
    """Save-button callback: run a blocking file chooser and save the figure.

    *base* is the toolbar instance owning the reusable file chooser
    (``base._fc``); *ptr* is the FLTK widget that fired and is unused.
    """
    filetypes = base.canvas.get_supported_filetypes()
    default_filetype = base.canvas.get_default_filetype()
    sorted_filetypes = filetypes.items()
    sorted_filetypes.sort()
    selected_filter = 0
    # build the tab-separated "Name (*.ext)" filter list for the chooser
    filters = []
    for i, (ext, name) in enumerate(sorted_filetypes):
        filter = '%s (*.%s)' % (name, ext)
        filters.append(filter)
        if ext == default_filetype:
            selected_filter = i
    filters = '\t'.join(filters)
    file_chooser=base._fc
    file_chooser.filter(filters)
    file_chooser.filter_value(selected_filter)
    file_chooser.show()
    # block here, pumping FLTK events, until the dialog is dismissed
    while file_chooser.visible() :
        Fltk.Fl.wait()
    fname=None
    if(file_chooser.count() and file_chooser.value(0) != None):
        fname=""
        (status,fname)=Fltk.fl_filename_absolute(fname, 1024, file_chooser.value(0))
    if fname is None: # Cancel
        return
    #start from last directory
    lastDir = os.path.dirname(fname)
    file_chooser.directory(lastDir)
    # the output format is the extension of the chosen filter entry
    format = sorted_filetypes[file_chooser.filter_value()][0]
    try:
        base.canvas.print_figure(fname, format=format)
    except IOError, msg:
        err = '\n'.join(map(str, msg))
        msg = 'Failed to save %s: Error msg was\n\n%s' % (
            fname, err)
        error_msg_fltk(msg)
class NavigationToolbar2FltkAgg(NavigationToolbar2):
    """
    "toolbar2" style navigation toolbar (home/back/forward/pan/zoom/save).

    Public attributes
      canvas - the FigureCanvas
      figman - the Figure manager
    """
    def __init__(self, canvas, figman):
        self.canvas = canvas
        self.figman = figman
        NavigationToolbar2.__init__(self, canvas)
        # track which of the two mutually exclusive modes is engaged
        self.pan_selected=False
        self.zoom_selected=False
    def set_cursor(self, cursor):
        """Map a matplotlib cursor constant onto the FLTK cursor."""
        Fltk.fl_cursor(cursord[cursor],Fltk.FL_BLACK,Fltk.FL_WHITE)
    def dynamic_update(self):
        self.canvas.draw()
    def pan(self,*args):
        """Toggle pan mode and sync the pan/zoom button states."""
        self.pan_selected=not  self.pan_selected
        self.zoom_selected = False
        self.canvas.canvas._draw_overlay= False
        if self.pan_selected:
            self.bPan.widget().value(1)
        else:
            self.bPan.widget().value(0)
        if self.zoom_selected:
            self.bZoom.widget().value(1)
        else:
            self.bZoom.widget().value(0)
        NavigationToolbar2.pan(self,args)
    def zoom(self,*args):
        """Toggle zoom-to-rectangle mode and sync the button states."""
        self.zoom_selected=not  self.zoom_selected
        # the rubber-band overlay is only drawn while zoom mode is on
        self.canvas.canvas._draw_overlay=self.zoom_selected
        self.pan_selected = False
        if self.pan_selected:
            self.bPan.widget().value(1)
        else:
            self.bPan.widget().value(0)
        if self.zoom_selected:
            self.bZoom.widget().value(1)
        else:
            self.bZoom.widget().value(0)
        NavigationToolbar2.zoom(self,args)
    def configure_subplots(self,*args):
        """Open a small auxiliary window with the SubplotTool."""
        window = Fltk.Fl_Double_Window(100,100,480,240)
        toolfig = Figure(figsize=(6,3))
        canvas = FigureCanvasFltkAgg(toolfig)
        window.end()
        toolfig.subplots_adjust(top=0.9)
        tool = SubplotTool(self.canvas.figure, toolfig)
        window.show()
        canvas.show()
    def _init_toolbar(self):
        """Build all toolbar widgets (called by NavigationToolbar2.__init__)."""
        Fltk.Fl_File_Icon.load_system_icons()
        # file chooser reused by save_figure(); created hidden up front
        self._fc = Fltk.Fl_File_Chooser( ".", "*", Fltk.Fl_File_Chooser.CREATE, "Save Figure" )
        self._fc.hide()
        t1,t2,w,h = self.canvas.figure.bbox.bounds
        w, h = int(w), int(h)
        self._group = Fltk.Fl_Pack(0,h+2,1000,26)
        self._group.type(Fltk.FL_HORIZONTAL)
        self._axes=self.canvas.figure.axes
        self.naxes = len(self._axes)
        self.omenu = AxisMenu( toolbar=self)
        self.bHome = FLTKButton(
            text="Home", file="home.ppm",
            command=self.home,argument=self)
        self.bBack = FLTKButton(
            text="Back", file="back.ppm",
            command=self.back,argument=self)
        self.bForward = FLTKButton(
            text="Forward", file="forward.ppm",
            command=self.forward,argument=self)
        # pan and zoom are toggle ("pushed") buttons
        self.bPan = FLTKButton(
            text="Pan/Zoom",file="move.ppm",
            command=self.pan,argument=self,type="pushed")
        self.bZoom = FLTKButton(
            text="Zoom to rectangle",file="zoom_to_rect.ppm",
            command=self.zoom,argument=self,type="pushed")
        self.bsubplot = FLTKButton( text="Configure Subplots", file="subplots.ppm",
                                    command = self.configure_subplots,argument=self,type="pushed")
        self.bSave = FLTKButton(
            text="Save", file="filesave.ppm",
            command=save_figure, argument=self)
        self._group.end()
        # status line showing cursor coordinates / mode messages
        self.message = Fltk.Fl_Output(0,0,w,8)
        self._group.add_resizable(self.message)
        self.update()
    def widget(self):
        """Return the Fl_Pack containing all toolbar widgets."""
        return self._group
    def close(self):
        """Destroy the managed figure."""
        Gcf.destroy(self.figman._num)
    def set_active(self, ind):
        """Record which axes (by index) pan/zoom operations act on."""
        self._ind = ind
        self._active = [ self._axes[i] for i in self._ind ]
    def update(self):
        """Re-read the figure's axes list and refresh menu and history."""
        self._axes = self.canvas.figure.axes
        naxes = len(self._axes)
        self.omenu.adjust(naxes)
        NavigationToolbar2.update(self)
    def set_message(self, s):
        """Display *s* in the toolbar's status output."""
        self.message.value(s)
FigureManager = FigureManagerFltkAgg
| gpl-3.0 |
xuewei4d/scikit-learn | examples/datasets/plot_random_multilabel_dataset.py | 23 | 3379 | """
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the :func:`~sklearn.datasets.make_multilabel_classification`
dataset generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
===== ===== ===== ======
1 2 3 Color
===== ===== ===== ======
Y N N Red
N Y N Blue
N N Y Yellow
Y Y N Purple
Y N Y Orange
Y Y N Green
Y Y Y Brown
===== ===== ===== ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
# Color per label combination.  plot_2d indexes this array with the
# bitmask (Y * [1, 2, 4]).sum(axis=1), which ranges over 1..7, so index 0
# ('!') is only an unused placeholder.
COLORS = np.array(['!',
                   '#FF3333',  # red
                   '#0198E1',  # blue
                   '#BF5FFF',  # purple
                   '#FCD116',  # yellow
                   '#FF7216',  # orange
                   '#4DBD33',  # green
                   '#87421F'   # brown
                   ])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
    """Scatter one random multilabel dataset on *ax*.

    Samples are colored by their label combination; the expected sample
    of each class is marked with a star whose size reflects the class
    probability.  Returns the generated ``(p_c, p_w_c)`` distributions.
    """
    X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
                                   n_classes=n_classes, n_labels=n_labels,
                                   length=length, allow_unlabeled=False,
                                   return_distributions=True,
                                   random_state=RANDOM_SEED)
    # encode each row's label combination as a bitmask (1, 2, 4) that
    # indexes into COLORS
    color_idx = (Y * [1, 2, 4]).sum(axis=1)
    ax.scatter(X[:, 0], X[:, 1], color=COLORS.take(color_idx), marker='.')
    ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
               marker='*', linewidth=.5, edgecolor='black',
               s=20 + 1500 * p_c ** 2,
               color=COLORS.take([1, 2, 4]))
    ax.set_xlabel('Feature 0 count')
    return p_c, p_w_c
# Side-by-side comparison: n_labels=1 (left) vs n_labels=3 (right)
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
# Report the distributions that generated the left plot
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
    print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
| bsd-3-clause |
mne-tools/mne-python | tutorials/stats-sensor-space/50_cluster_between_time_freq.py | 18 | 4878 | """
=========================================================================
Non-parametric between conditions cluster statistic on single trial power
=========================================================================
This script shows how to compare clusters in time-frequency
power estimates between conditions. It uses a non-parametric
statistical procedure based on permutations and cluster
level statistics.
The procedure consists of:
- extracting epochs for 2 conditions
- compute single trial power estimates
- baseline line correct the power estimates (power ratios)
- compute stats to see if the power estimates are significantly different
between conditions.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
# epoch window (in seconds) around each event
tmin, tmax = -0.2, 0.5
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053']  # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
                       stim=False, include=include, exclude='bads')
ch_name = 'MEG 1332'  # restrict example to one channel
# Load condition 1
# peak-to-peak rejection thresholds passed to mne.Epochs
reject = dict(grad=4000e-13, eog=150e-6)
event_id = 1
epochs_condition_1 = mne.Epochs(raw, events, event_id, tmin, tmax,
                                picks=picks, baseline=(None, 0),
                                reject=reject, preload=True)
epochs_condition_1.pick_channels([ch_name])
# Load condition 2
event_id = 2
epochs_condition_2 = mne.Epochs(raw, events, event_id, tmin, tmax,
                                picks=picks, baseline=(None, 0),
                                reject=reject, preload=True)
epochs_condition_2.pick_channels([ch_name])
###############################################################################
# Factor to downsample the temporal dimension of the TFR computed by
# tfr_morlet. Decimation occurs after frequency decomposition and can
# be used to reduce memory usage (and possibly computational time of downstream
# operations such as nonparametric statistics) if you don't need high
# spectrotemporal resolution.
decim = 2
freqs = np.arange(7, 30, 3)  # define frequencies of interest
n_cycles = 1.5
tfr_epochs_1 = tfr_morlet(epochs_condition_1, freqs,
                          n_cycles=n_cycles, decim=decim,
                          return_itc=False, average=False)
tfr_epochs_2 = tfr_morlet(epochs_condition_2, freqs,
                          n_cycles=n_cycles, decim=decim,
                          return_itc=False, average=False)
# express power as a ratio to the pre-stimulus baseline
tfr_epochs_1.apply_baseline(mode='ratio', baseline=(None, 0))
tfr_epochs_2.apply_baseline(mode='ratio', baseline=(None, 0))
epochs_power_1 = tfr_epochs_1.data[:, 0, :, :]  # only 1 channel as 3D matrix
epochs_power_2 = tfr_epochs_2.data[:, 0, :, :]  # only 1 channel as 3D matrix
###############################################################################
# Compute statistic
# -----------------
# cluster-forming threshold on the test statistic
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
    permutation_cluster_test([epochs_power_1, epochs_power_2], out_type='mask',
                             n_permutations=100, threshold=threshold, tail=0)
###############################################################################
# View time-frequency plots
# -------------------------
times = 1e3 * epochs_condition_1.times  # change unit to ms
evoked_condition_1 = epochs_condition_1.average()
evoked_condition_2 = epochs_condition_2.average()
plt.figure()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 1, 1)
# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
    if p_val <= 0.05:
        T_obs_plot[c] = T_obs[c]
# full statistic map in gray, significant clusters overlaid in color
plt.imshow(T_obs,
           extent=[times[0], times[-1], freqs[0], freqs[-1]],
           aspect='auto', origin='lower', cmap='gray')
plt.imshow(T_obs_plot,
           extent=[times[0], times[-1], freqs[0], freqs[-1]],
           aspect='auto', origin='lower', cmap='RdBu_r')
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title('Induced power (%s)' % ch_name)
ax2 = plt.subplot(2, 1, 2)
# condition-1 minus condition-2 evoked contrast below the TFR map
evoked_contrast = mne.combine_evoked([evoked_condition_1, evoked_condition_2],
                                     weights=[1, -1])
evoked_contrast.plot(axes=ax2, time_unit='s')
plt.show()
| bsd-3-clause |
SGenheden/Scripts | Projects/Gpcr/gpcr_plot_resjointcontacts.py | 1 | 3408 | # Author: Samuel Genheden samuel.genheden@gmail.com
"""
Program to plot residue joint contact probability
Examples
--------
gpcr_plot_rescontacts.py -f r1_md3_en_fit_joint.npz -m ohburr -l oh --mol b2
"""
import os
import argparse
import sys
import numpy as np
import matplotlib
if not "DISPLAY" in os.environ or os.environ["DISPLAY"] == "" :
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pycontacts
import gpcr_lib
from sgenlib import colors
from sgenlib import pdb
def _resjointcontacts(filename, label, mat, repeats, out, mol):
    """
    Main analysis routine

    Plots the residue-residue joint contact probability (mean, standard
    error of the mean, and log-scale mean) and saves the figures as PNGs.

    Parameters
    ----------
    filename : string
      file to analyse
    label : string
      label for the group
    mat : string
      matrix identifier
    repeats : list of string or None
      replacement pattern for multiple repeats; None analyses a single file
    out : string
      output prefix
    mol : string
      protein identifier
    """
    # Load the protein template to obtain residue information
    template = gpcr_lib.load_template(mol)
    residues = np.array(template.residues)
    residues0 = np.arange(1, residues.shape[0] + 1)
    codes = np.array(template.codes)  # currently unused, kept for reference
    names = np.array(template.names)  # currently unused, kept for reference

    # Bug fix: len(repeats) used to crash for repeats=None even though the
    # None case was explicitly handled further down.
    nrepeats = 1 if repeats is None else len(repeats)

    npz = np.load(filename)
    pjoint0 = npz["joint" + mat]
    pjoint = np.zeros([nrepeats, pjoint0.shape[0], pjoint0.shape[0]])
    pjoint[0, :, :] = pjoint0
    # Do the same for multiple repeats and average over them
    if repeats is not None:
        for ri, r in enumerate(repeats[1:], 1):
            filename2 = filename.replace(repeats[0], r)
            npz = np.load(filename2)
            pjoint[ri, :, :] = npz["joint" + mat]

    # Standard error of the mean across the repeats
    pjoint_std = pjoint.std(axis=0) / np.sqrt(nrepeats)
    f2d = plt.figure(2, tight_layout=True)
    C = gpcr_lib.draw_joint2d(f2d.gca(), residues0, residues, pjoint_std)
    f2d.colorbar(C)
    f2d.savefig("%s_%s_2d_std.png" % (out, label), format="png")

    # Draw a 2D residue-residue joint probability plot
    f2d = plt.figure(1, tight_layout=True)
    C = gpcr_lib.draw_joint2d(f2d.gca(), residues0, residues, pjoint.mean(axis=0))
    f2d.colorbar(C)
    f2d.gca().text(1.04, 1.01, "p(A,B)", transform=f2d.gca().transAxes)
    f2d.savefig("%s_%s_2d.png" % (out, label), format="png")

    # Same mean on a logarithmic colour scale
    f2d = plt.figure(3, tight_layout=True)
    C = gpcr_lib.draw_joint2d(f2d.gca(), residues0, residues, pjoint.mean(axis=0), logit=True)
    f2d.colorbar(C)
    f2d.gca().text(1.02, 1.02, "ln p(A,B)", transform=f2d.gca().transAxes)
    f2d.savefig("%s_%s_2d_log.png" % (out, label), format="png")
if __name__ == '__main__' :

    # Setup a parser of the command-line arguments
    parser = argparse.ArgumentParser(description="Plotting residue contacts")
    parser.add_argument('-f','--files',nargs='+',help="a list of input files.",default=[])
    parser.add_argument('-l','--labels',nargs='+',help="a label for each input file.",default=[])
    parser.add_argument('-m','--mat',nargs='+',help="the matrix to plot for each input file.",default=[])
    parser.add_argument('-o','--out',help="the output prefix.",default="rescontacts")
    parser.add_argument('--mol',choices=["b2","a2a","b2_a","a2a_a"],help="the protein molecules, should be either 'b2' or 'a2a'",default="b2")
    parser.add_argument('--repeats',nargs="+",help="replacement pattern for multiple repeats",default=["r1_","r2_","r3_","r4_","r5_"])
    args = parser.parse_args()

    # Analyse each (file, label, matrix) triplet.  zip() silently truncates
    # to the shortest list, so --files, --labels and --mat should be equally
    # long.
    for filename,label,mat in zip(args.files,args.labels,args.mat) :
        _resjointcontacts(filename,label,mat,args.repeats,args.out,args.mol)
| mit |
crickert1234/ParamAP | ParamAP.py | 1 | 51951 | #!/usr/bin/env python3
'''
ParamAP.py (parametrization of sinoatrial myocyte action potentials)
Copyright (C) 2018 Christian Rickert <christian.rickert@ucdenver.edu>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
# imports
# runtime
import fnmatch
import functools
import gc
import math
import os
import sys
# numpy
import numpy as np
# scipy
import scipy.signal as sp_sig
import scipy.spatial as sp_spat
import scipy.stats as sp_stat
# matplotlib
import matplotlib.backends.backend_pdf as mpbp
import matplotlib.pyplot as mpp
# variables
APEXT = 0.5 # margin of ap extension (%)
FFRAME = 0.5 # time interval for filtering (ms)
POLYNOM = 2 # polynomial order used for filtering
# functions
def askboolean(dlabel="custom boolean", dval=True):
    """Asks the user a yes/no question and returns the answer as a boolean."""
    hint = "Y/n" if dval else "y/N"  # capital letter marks the default answer
    while True:
        reply = input(dlabel + " [" + hint + "]: ") or hint
        # any reply starting with y/Y counts as yes, unless an explicit
        # trailing capital 'N' vetoes it (covers the "y/N" default string)
        if reply.lower().startswith("y") and not reply.endswith("N"):
            print("True\n")
            return True
        if (reply.endswith("N") and not reply.startswith("Y")) or reply.lower().startswith("n"):
            print("False\n")
            return False
        # anything else: ask again
def askext(dlabel="custom extension", dext='atf'):
    """Returns a three-letter file extension provided by the user."""
    reserved = ["dat", "log", "pdf"]  # extensions used for program output
    while True:
        answer = str(input("Enter " + dlabel + " [" + dext + "]: ")).lower() or dext
        if len(answer) == 3 and answer not in reserved:
            print(answer + "\n")
            return answer
        print("Invalid file extension!\n")
def askunit(dlabel="custom unit", daxis='', dunit=''):
    """Returns an axis unit provided by the user."""
    while True:
        uunit = input("Enter " + dlabel + " [" + dunit + "]: ") or dunit
        if daxis in ["x", "X"]:
            if uunit in ["ms", "s"]:  # accepted time units
                print(uunit + "\n")
                return uunit
            print("Invalid unit for X-axis!\n")
        elif daxis in ["y", "Y"]:
            if uunit in ["mV", "V"]:  # accepted voltage units
                print(uunit + "\n")
                return uunit
            print("Invalid unit for Y-axis!\n")
        # unknown axis: keep prompting (matches original behavior)
def askvalue(dlabel="custom value", dval=1.0, dunit="", dtype="float"):
    """Returns a numerical value provided by the user."""
    prompt = "Enter " + dlabel + " [" + str(dval) + "]" + dunit + ": "
    while True:
        try:
            value = float(input(prompt) or dval)
        except ValueError:
            print("Non-numerical input!\n")
        else:
            break
    if dtype == "int":  # any other dtype is returned as float
        value = int(round(value))
    print(str(value) + "\n")
    return value
def getfiles(path='/home/user/', pattern='*'):
    """Yields the absolute names of all files in *path* matching *pattern*.

    Parameters
    ----------
    path : string
        directory to scan (non-recursive)
    pattern : string
        fnmatch-style pattern, matched case-sensitively on every platform

    Yields
    ------
    string
        absolute path of each matching regular file
    """
    abspath = os.path.abspath(path)
    for fileobject in os.listdir(abspath):
        filename = os.path.join(abspath, fileobject)
        if os.path.isfile(filename) and fnmatch.fnmatchcase(fileobject, pattern):
            # 'filename' is already absolute; the original re-joined it with
            # 'abspath' a second time, which was a redundant no-op
            yield filename
def getneighbors(origin_i=np.empty(0), vicinity=np.empty(0), origin_x=np.empty(0), origin_y=np.empty(0), hwidth=float("inf"), fheight=0.0, limit=None, within=float("inf"), bads=False):
    """Returns all nearest-neighbors in ascending (i.e. increasing distance) order.

    For every index in origin_i (e.g. detected maxima) the closest indices in
    vicinity (e.g. detected minima) on the left and on the right are collected,
    subject to a maximum x-distance (hwidth) and a minimum y-distance (fheight).

    Parameters
    ----------
    origin_i : ndarray of int
        indices of the reference extrema
    vicinity : ndarray of int
        indices of the candidate neighbors
    origin_x, origin_y : ndarray
        full x/y data arrays, addressed by the indices above
    hwidth : float
        maximum accepted |x(origin) - x(neighbor)|
    fheight : float
        minimum accepted |y(origin) - y(neighbor)|
    limit, within :
        forwarded to KDTree.query as k and distance_upper_bound
    bads : bool
        if True, additionally return origins whose candidates failed the criteria

    Returns
    -------
    ndarray of int, or (ndarray of int, ndarray of int) when bads is True

    NOTE(review): indentation of this block was reconstructed from context;
    confirm the attachment of the 'else' branch against the original file.
    """
    neighbors = np.zeros(0)
    badorigins = np.zeros(0)
    # 1D nearest-neighbor search realized with a 2D KDTree whose second
    # coordinate is all-zero
    vicinity_kdt = sp_spat.KDTree(list(zip(vicinity, np.zeros(vicinity.size))))  # KDTree for the nearest neighbors search
    for origin in origin_i:
        neighbor_left, neighbor_right = False, False
        for position in vicinity_kdt.query([origin, 0.0], k=limit, distance_upper_bound=within)[1]:  # return nearest neighbors in ascending order
            if not neighbor_left or not neighbor_right:  # stop examining once both sides are found
                neighbor = vicinity[position]
                if (abs(origin_x[origin]-origin_x[neighbor]) <= hwidth) and (abs(origin_y[origin]-origin_y[neighbor]) >= fheight):  # relative criteria for minima left and right of maximum
                    if not neighbor_left and (neighbor < origin):  # criteria for minimum left of maximum only
                        neighbors = np.append(neighbors, neighbor)
                        neighbor_left = True
                    elif not neighbor_right and (neighbor > origin):  # criteria for minimum right of maximum only
                        neighbors = np.append(neighbors, neighbor)
                        neighbor_right = True
                else:  # odd origins with missing neighbors
                    badorigins = np.append(badorigins, np.argwhere(origin == origin_i))
    neighbors = np.sort(np.unique(neighbors))  # unique elements only
    # NOTE(review): neighbor_left/neighbor_right below carry the state of the
    # LAST origin processed in the loop above — this pads a lone result with a
    # 0.0 placeholder on the missing side
    if neighbors.size <= 1:  # missing neighbor
        if neighbor_left:
            neighbors = np.append(neighbors, 0.0)  # append neighbor_right
        if neighbor_right:
            neighbors = np.insert(neighbors, 0, 0.0)  # insert neighbor_left
    badorigins = np.sort(np.unique(badorigins))
    return (neighbors.astype(int), badorigins.astype(int)) if bads else (neighbors.astype(int))
def getrunavg(xdata=np.empty(0), xinterval=FFRAME, porder=POLYNOM):
    """Returns the running average count based on a given time interval."""
    # number of samples covering the interval, from the sampling increment
    window = int(round(xinterval / (xdata[1] - xdata[0])))
    # the Savitzky-Golay window must exceed the polynomial order ...
    if window <= porder:
        window = porder + 1
    # ... and must be odd
    return window if window % 2 else window + 1
def getpartitions(pstart=0, pstop=100, pint=5, pmin=10):
    """Returns a partition list in percent to segment an interval.

    Every ordered pair [left, right] of tick marks between pstart and pstop
    (step pint) is kept when it spans at least pmin percent.
    """
    start, stop, step, minsize = int(pstart), int(pstop), int(pint), int(pmin)
    ticks = range(start, stop + step, step)
    # no duplicated, empty, or undersized partitions
    return [[left, right]
            for left in ticks
            for right in ticks
            if right > left and right - left >= minsize]
def getbestlinearfit(xaxis=np.empty(0), yaxis=np.empty(0), xmin=0.0, xmax=1.0, pstart=0, pstop=100, pint=1, pmin=10):
    """Returns the best linear fit from segments of an interval.

    The [xmin, xmax] window is divided into candidate partitions (see
    getpartitions); a linear regression is run on each and the partition with
    the largest r^2 wins.

    Parameters
    ----------
    xaxis, yaxis : ndarray
        full data arrays
    xmin, xmax : float
        window to analyze
    pstart, pstop, pint, pmin : int
        partitioning parameters in percent, forwarded to getpartitions

    Returns
    -------
    tuple
        (indices, x, y, slope, intercept, r) of the best-fitting segment
    """
    bst_r = 0  # best regression coefficient so far; first fit always wins (r^2 >= 0)
    seg_i = np.argwhere((xaxis >= xmin) & (xaxis <= xmax)).ravel()  # analyzing partial segment only
    # Bug fix: the partition origin used to be the global 'avgfmin_x[0]' from
    # the main script (a hidden dependency); use the window's own first x
    # instead, which equals that value at both existing call sites.
    seg_0 = xaxis[seg_i[0]]
    seg_t = xaxis[seg_i[-1]] - xaxis[seg_i[0]]  # full interval from partial segment
    seg_m, seg_n, seg_r = 0.0, 0.0, 0.0
    for partition in getpartitions(pstart, pstop, pint, pmin):
        # 'ravel()' required for 'sp_stat.linregress()'
        seg_i = np.argwhere((xaxis >= (seg_0 + (seg_t * partition[0] / 100))) & (xaxis <= (seg_0 + (seg_t * partition[1] / 100)))).ravel()
        seg_x = xaxis[seg_i]
        seg_y = yaxis[seg_i]
        seg_m, seg_n, seg_r = sp_stat.linregress(seg_x, seg_y)[0:3]  # tuple unpacking and linear regression of partial ap segment
        if math.pow(seg_r, 2.0) >= math.pow(bst_r, 2.0):
            bst_m, bst_n, bst_r = seg_m, seg_n, seg_r
            bst_i, bst_x, bst_y = seg_i, seg_x, seg_y
        # print(partition[0], " - ", partition[1], " : ", str(partition[1]-partition[0]), " ~ ", str(math.pow(bst_r, 2.0)))  # shows progress, but is slow!
    return (bst_i, bst_x, bst_y, bst_m, bst_n, bst_r)
def mpp_setup(title='Plot title', xlabel='Time [ ]', ylabel='Voltage [ ]'):
    """Decorates the current Matplotlib plot with a title and axis labels."""
    for apply_text, text in ((mpp.title, title), (mpp.xlabel, xlabel), (mpp.ylabel, ylabel)):
        apply_text(text)
def readfile(inputfile='name'):
    """Extracts the xy pairs from an ASCII raw file and stores its values into a numpy array.

    Scans up to 25 header lines for a quoted unit line (Clampfit ATF format),
    then loads the tab-separated numeric body with numpy.

    Parameters
    ----------
    inputfile : string
        path of the file to read

    Returns
    -------
    tuple
        (data array with x/y as rows, units-recognized flag, x unit, y unit)
    """
    defux = ["ms", "s"]   # accepted x-axis units
    defuy = ["mV", "V"]   # accepted y-axis units
    inpunits = False
    with open(inputfile, 'r') as datafile:
        line = 1
        inpuxy = []  # header might be missing
        while line <= 25:  # arbitrary Clampfit header limit for ATF
            headerline = datafile.readline()
            if headerline.startswith("\""):
                inpuxy = str(headerline).split()  # last line of header contains units
                skipline = line  # number of header lines to skip when loading
            if not inpuxy:
                skipline = 0  # no header seen yet: load from the first line
            line += 1
        try:
            # strip the surrounding quote/parenthesis characters, e.g. "(ms)"
            inpux = inpuxy[1][1:-2]
            inpuy = inpuxy[4][1:-2]
        except IndexError:  # missing header
            # NOTE(review): this fallback yields the literal strings
            # "'ms', 's'" / "'mV', 'V'", which never match defux/defuy — the
            # caller then prompts the user for units; confirm this is intended
            inpux, inpuy = str(defux)[1:-1], str(defuy)[1:-1]
        else:  # header found
            if inpux in defux and inpuy in defuy:
                inpunits = True
        datafile.seek(0)  # reset the file index to the first byte
        inp_xy = np.loadtxt(datafile, dtype='float64', delimiter='\t', skiprows=skipline, unpack=True)  # slower than np.genfromtxt or native python, but uses less main memory at peak
    return inp_xy, inpunits, inpux, inpuy
# main routine
AUTHOR = "Copyright (C) 2018 Christian Rickert"
SEPARBOLD = 79*'='
SEPARNORM = 79*'-'
SOFTWARE = "ParamAP"
VERSION = "version 1.1," # (2018-03-10)
WORKDIR = SOFTWARE # working directory for parameterization
print('{0:^79}'.format(SEPARBOLD) + os.linesep)
GREETER = '{0:<{w0}}{1:<{w1}}{2:<{w2}}'.format(SOFTWARE, VERSION, AUTHOR, w0=len(SOFTWARE)+1, w1=len(VERSION)+1, w2=len(AUTHOR)+1)
INTERMEDIATELINE1 = '{0:}'.format("Laboratory of Cathy Proenza")
INTERMEDIATELINE2 = '{0:}'.format("Department of Physiology & Biophysics")
INTERMEDIATELINE3 = '{0:}'.format("University of Colorado, Anschutz Medical Campus")
DISCLAIMER = "ParamAP is distributed in the hope that it will be useful, but it comes without\nany guarantee or warranty. This program is free software; you can redistribute\nit and/or modify it under the terms of the GNU General Public License:"
URL = "https://www.gnu.org/licenses/gpl-2.0.en.html"
print('{0:^79}'.format(GREETER) + os.linesep)
print('{0:^79}'.format(INTERMEDIATELINE1))
print('{0:^79}'.format(INTERMEDIATELINE2))
print('{0:^79}'.format(INTERMEDIATELINE3) + os.linesep)
print('{0:^79}'.format(DISCLAIMER) + os.linesep)
print('{0:^79}'.format(URL) + os.linesep)
print('{0:^79}'.format(SEPARBOLD) + os.linesep)
# customize use case
AUTORUN = askboolean("Use automatic mode?", False)
SERIES = askboolean("Run time series analysis?", False)
APMODE = askboolean("Analyze action potentials?", True)
print('{0:^79}'.format(SEPARNORM))
# set up working directory
WORKPATH = os.path.abspath(WORKDIR)
if not os.path.exists(WORKPATH):
os.mkdir(WORKPATH)
print("FOLDER:\t" + WORKPATH + "\n")
FILE = 0 # file
EXTENSION = askext(dlabel="custom file type", dext='atf') # file extension used to filter files in working directory
if SERIES:
AVG_FRAME = askvalue(dlabel="analysis frame time", dval=5000.0, dunit=' ms') # time interval for series analysis (ms)
ATFFILES = getfiles(path=WORKDIR, pattern=("*." + EXTENSION))
for ATFFILE in ATFFILES: # iterate through files
name = os.path.splitext(os.path.split(ATFFILE)[1])[0]
print('{0:^79}'.format(SEPARNORM))
print("FILE:\t" + str(name) + os.linesep)
ap_amp = 50.0 # minimum acceptable ap amplitude (mV)
ap_hwd = 250.0 # maximum acceptable ap half width (ms)
ap_max = 50.0 # maximum acceptable ap value (mV)
ap_min = -10.0 # minimum acceptable ap value (mV)
mdp_max = -50.0 # maximum acceptable mdp value (mV)
mdp_min = -90.0 # minimum acceptable mdp value (mV)
wm_der = 1.0 # window multiplier for derivative filtering
wm_max = 4.0 # window multiplier for maximum detection
wm_min = 16.0 # window multiplier for minimum detection
# read file raw data
sys.stdout.write(">> READING... ")
sys.stdout.flush()
RAW_XY, UNITS, UNIT_X, UNIT_Y = readfile(ATFFILE)
if not UNITS: # missing or incomplete units from header
print("\n")
UNIT_X = askunit(dlabel="X-axis unit", daxis="X", dunit=UNIT_X)
UNIT_Y = askunit(dlabel="Y-axis unit", daxis="Y", dunit=UNIT_Y)
sys.stdout.write(1*"\t")
toms = 1000.0 if UNIT_X == "s" else 1.0
RAW_XY[0] *= toms # full X-axis, UNIT_X = "ms"
raw_x = RAW_XY[0] # partial X-axis for time series analysis
tomv = 1000.0 if UNIT_Y == "V" else 1.0
RAW_XY[1] *= tomv # full Y-axis, UNIT_Y = "mV"
raw_y = RAW_XY[1] # partial Y-axis for time series analysis
runavg = getrunavg(RAW_XY[0]) # used for filtering and peak detection
ipg_t = RAW_XY[0][1]-RAW_XY[0][0] # time increment for interpolation grid
if not APMODE: # avoid noise artifacts in beat detection mode
runavg = 10.0*runavg+1
wm_max *= 1.5
wm_min = wm_max
avg_start = RAW_XY[0][0] # interval start for averaging
avg_stop = RAW_XY[0][-1] # interval stop for averaging
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
while True: # repeat data analysis for current file
startpdf = True # overwrite existing file
segment = 0.0
while True: # time series analysis
try:
# create raw data plot
sys.stdout.write(">> PLOTTING... ")
sys.stdout.flush()
mpp_setup(title="Raw data: " + name, xlabel='Time (ms)', ylabel='Voltage (mV)')
mpp.plot(raw_x, raw_y, '0.75') # raw data (grey line)
if startpdf:
pdf_file = mpbp.PdfPages(os.path.join(WORKDIR, name + ".pdf"), keep_empty=False) # multi-pdf file
startpdf = False # append existing file
mpp.tight_layout() # avoid label overlaps
if segment == 0.0:
mpp.savefig(pdf_file, format='pdf', dpi=600) # save before .show()!
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
if not AUTORUN:
mpp.show()
# set parameters for averaging
sys.stdout.write(">> SETTING... ")
sys.stdout.flush()
if not AUTORUN:
print("\n")
if segment == 0.0: # initialize values
avg_start = askvalue(dlabel="analysis start time", dval=avg_start, dunit=' ms')
avg_stop = askvalue(dlabel="analysis stop time", dval=avg_stop, dunit=' ms')
ap_max = askvalue(dlabel="upper limit for maxima", dval=ap_max, dunit=' mV')
ap_min = askvalue(dlabel="lower limit for maxima", dval=ap_min, dunit=' mV')
mdp_max = askvalue(dlabel="upper limit for minima", dval=mdp_max, dunit=' mV')
mdp_min = askvalue(dlabel="lower limit for minima", dval=mdp_min, dunit=' mV')
if APMODE:
ap_hwd = askvalue(dlabel="maximum peak half width", dval=ap_hwd, dunit=' ms')
ap_amp = askvalue(dlabel="minimum peak amplitude", dval=ap_amp, dunit=' mV')
runavg = askvalue(dlabel="running average window size", dval=runavg, dunit='', dtype='int')
wm_der = askvalue(dlabel="window multiplier for derivative", dval=wm_der, dunit='')
wm_max = askvalue(dlabel="window multiplier for maxima", dval=wm_max, dunit='')
wm_min = askvalue(dlabel="window multiplier for minima", dval=wm_min, dunit='')
mpp.clf() # clear canvas
if segment == 0.0: # set first frame
tmp_start = avg_start + (segment*AVG_FRAME if SERIES else 0.0)
tmp_stop = (tmp_start + AVG_FRAME) if SERIES else avg_stop
raw_i = np.argwhere((RAW_XY[0] >= tmp_start) & (RAW_XY[0] <= tmp_stop)).ravel()
raw_x = RAW_XY[0][raw_i[0]:raw_i[-1]+1]
raw_y = RAW_XY[1][raw_i[0]:raw_i[-1]+1]
sys.stdout.write(("" if AUTORUN else 1*"\t") + 8*"\t" + " [OK]\n")
sys.stdout.flush()
# filter noise of raw data with Savitzky-Golay
sys.stdout.write(">> FILTERING... ")
sys.stdout.flush()
rawf_y = sp_sig.savgol_filter(raw_y, runavg, POLYNOM, mode='nearest')
sys.stdout.write(7*"\t" + " [OK]\n")
sys.stdout.flush()
# detect extrema in filtered raw data
sys.stdout.write(">> SEARCHING... ")
sys.stdout.flush()
if AUTORUN: # use unrestricted dataset (slower)
# detect maxima in filtered raw data
tmpavg = int(round(wm_max*runavg)) if int(round(wm_max*runavg)) % 2 else int(round(wm_max*runavg))+1
rawfmax_iii = np.asarray(sp_sig.argrelmax(rawf_y, order=tmpavg)).ravel() # unfiltered maxima
rawfmax_x = raw_x[rawfmax_iii]
rawfmax_y = rawf_y[rawfmax_iii]
# detect minima in filtered raw data
tmpavg = int(round(wm_min*runavg)) if int(round(wm_min*runavg)) % 2 else int(round(wm_min*runavg))+1
rawfmin_iii = np.asarray(sp_sig.argrelmin(rawf_y, order=tmpavg)).ravel() # unfiltered minima
rawfmin_x = raw_x[rawfmin_iii]
rawfmin_y = rawf_y[rawfmin_iii]
sys.stdout.write(7*"\t" + " [OK]\n")
sys.stdout.flush()
else: # use restricted dataset (faster)
# detect maxima in filtered raw data
tmpmax_x = raw_x[np.intersect1d(np.argwhere(rawf_y >= ap_min), np.argwhere(rawf_y <= ap_max))]
tmpmax_y = rawf_y[np.intersect1d(np.argwhere(rawf_y >= ap_min), np.argwhere(rawf_y <= ap_max))]
tmpavg = int(round(wm_max*runavg)) if int(round(wm_max*runavg)) % 2 else int(round(wm_max*runavg))+1
rawfmax_iii = np.asarray(sp_sig.argrelmax(tmpmax_y, order=tmpavg)).ravel() # unfiltered maxima
rawfmax_ii = np.asarray(np.where(np.in1d(raw_x.ravel(), np.intersect1d(raw_x, tmpmax_x[rawfmax_iii]).ravel()).reshape(raw_x.shape))).ravel() # back to full dataset
rawfmax_x = raw_x[rawfmax_ii]
rawfmax_y = rawf_y[rawfmax_ii]
# detect minima in filtered raw data
tmpmin_x = raw_x[np.intersect1d(np.argwhere(rawf_y >= mdp_min), np.argwhere(rawf_y <= mdp_max))]
tmpmin_y = rawf_y[np.intersect1d(np.argwhere(rawf_y >= mdp_min), np.argwhere(rawf_y <= mdp_max))]
tmpavg = int(round(wm_min*runavg)) if int(round(wm_min*runavg)) % 2 else int(round(wm_min*runavg))+1
rawfmin_iii = np.asarray(sp_sig.argrelmin(tmpmin_y, order=tmpavg)).ravel() # unfiltered minima
rawfmin_ii = np.asarray(np.where(np.in1d(raw_x.ravel(), np.intersect1d(raw_x, tmpmin_x[rawfmin_iii]).ravel()).reshape(raw_x.shape))).ravel()
rawfmin_x = raw_x[rawfmin_ii]
rawfmin_y = rawf_y[rawfmin_ii]
sys.stdout.write(7*"\t" + " [OK]\n")
sys.stdout.flush()
# analyze and reduce extrema in filtered raw data
sys.stdout.write(">> REDUCING... ")
sys.stdout.flush()
rawfmax_m = np.mean(rawfmax_y) # rough estimate due to assignment errors
rawfmin_m = np.mean(rawfmin_y)
rawfmaxmin_m = (rawfmax_m + rawfmin_m) / 2.0 # center between unreduced maxima and minima within limits (may differ from average of AVGMAX and AVGMIN)
if AUTORUN: # estimate range for reduction of extrema
# reduce maxima from unrestricted dataset
rawfmax_ii = np.argwhere(rawfmax_y >= rawfmaxmin_m).ravel() # use center to discriminate between maxima and minima
rawfmax_x = rawfmax_x[rawfmax_ii]
rawfmax_y = rawfmax_y[rawfmax_ii]
rawfmax_std = np.std(rawfmax_y, ddof=1) # standard deviation from the (estimated) arithmetic mean
ap_max = np.mean(rawfmax_y) + 4.0 * rawfmax_std # 99% confidence interval
ap_min = np.mean(rawfmax_y) - 4.0 * rawfmax_std
rawfmax_ii = functools.reduce(np.intersect1d, (rawfmax_iii, np.argwhere(rawf_y >= ap_min), np.argwhere(rawf_y <= ap_max)))
rawfmax_x = raw_x[rawfmax_ii]
rawfmax_y = rawf_y[rawfmax_ii]
# reduce minima from unrestricted dataset
rawfmin_ii = np.argwhere(rawfmin_y <= rawfmaxmin_m)
rawfmin_x = rawfmin_x[rawfmin_ii].ravel()
rawfmin_y = rawfmin_y[rawfmin_ii].ravel()
rawfmin_std = np.std(rawfmin_y, ddof=1)
mdp_max = np.mean(rawfmin_y) + 4.0 * rawfmin_std
mdp_min = np.mean(rawfmin_y) - 4.0 *rawfmin_std
rawfmin_ii = functools.reduce(np.intersect1d, (rawfmin_iii, np.argwhere(rawf_y >= mdp_min), np.argwhere(rawf_y <= mdp_max)))
rawfmin_x = raw_x[rawfmin_ii]
rawfmin_y = rawf_y[rawfmin_ii]
if APMODE: # check extrema for consistency - reduce maxima
badmax_ii = np.zeros(0)
badmin_ii = np.zeros(0)
rawfmin_i, badmax_ii = getneighbors(rawfmax_ii, rawfmin_ii, raw_x, rawf_y, ap_hwd, ap_amp, bads=True)
rawfmax_i = np.delete(rawfmax_ii, badmax_ii)
rawfmin_i = rawfmin_i.astype(int) # casting required for indexing
# check extrema for boundary violations - reduce maxima and minima
while True: # rough check, assignment happens later
if rawfmax_i[0] < rawfmin_i[0]: # starts with a maximum
rawfmax_i = rawfmax_i[1:]
continue
elif rawfmin_i[1] < rawfmax_i[0]: # starts with two minima
rawfmin_i = rawfmin_i[1:]
continue
elif rawfmax_i[-1] > rawfmin_i[-1]: # ends with a maximum
rawfmax_i = rawfmax_i[0:-1]
continue
elif rawfmin_i[-2] > rawfmax_i[-1]: # ends with two minima
rawfmin_i = rawfmin_i[0:-1]
continue
else:
break
rawfmax_x = raw_x[rawfmax_i] # filtered and extracted maxima
rawfmax_y = rawf_y[rawfmax_i]
# assign minima to corresponding maxima - reduce minima
minmaxmin = np.asarray([3*[0] for i in range(rawfmax_i.size)]) # [[min_left_index, max_index, min_right_index], ...]
rawfmin_kdt = sp_spat.KDTree(list(zip(rawfmin_i, np.zeros(rawfmin_i.size))))
i = 0 # index
for max_i in rawfmax_i:
min_left, min_right = False, False
minmaxmin[i][1] = max_i
for order_i in rawfmin_kdt.query([max_i, 0.0], k=None)[1]:
min_i = rawfmin_i[order_i]
if not min_left and (min_i < max_i):
minmaxmin[i][0] = min_i
min_left = True
elif not min_right and (min_i > max_i):
minmaxmin[i][2] = min_i
min_right = True
i += 1
rawfmin_i = np.unique(minmaxmin[:, [0, 2]].ravel())
rawfmin_x = raw_x[rawfmin_i] # filtered and extracted minima
rawfmin_y = rawf_y[rawfmin_i]
# find largest distance between left minima and maxima
ipg_hwl, ipg_tmp = 0.0, 0.0
for min_l, max_c in minmaxmin[:, [0, 1]]:
ipg_tmp = raw_x[max_c] - raw_x[min_l]
if ipg_tmp > ipg_hwl:
ipg_hwl = ipg_tmp
# find largest distance between right minima and maxima
ipg_hwr, ipg_tmp = 0.0, 0.0
for max_c, min_r in minmaxmin[:, [1, 2]]:
ipg_tmp = raw_x[min_r] - raw_x[max_c]
if ipg_tmp > ipg_hwr:
ipg_hwr = ipg_tmp
else: # beating rate
rawfmax_x = raw_x[rawfmax_ii] # pre-filtered maxima
rawfmax_y = rawf_y[rawfmax_ii]
rawfmin_x = raw_x[rawfmin_ii] # pre-filtered minima
rawfmin_y = rawf_y[rawfmin_ii]
rawfmax_m = np.mean(rawfmax_y) # refined estimate due to exlusion (ap_mode)
rawfmin_m = np.mean(rawfmin_y)
if rawfmax_y.size == 0: # no APs detected
raise Warning
else: # two or more APs
frate = 60000.0*(rawfmax_y.size/(rawfmax_x[-1]-rawfmax_x[0])) if rawfmax_y.size > 1 else float('nan') # AP firing rate (FR) [1/min]
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
# create extrema plot
sys.stdout.write(">> PLOTTING... ")
sys.stdout.flush()
mpp_setup(title="Extrema: " + name, xlabel='Time (ms)', ylabel='Voltage (mV)')
mpp.plot([raw_x[0], raw_x[-1]], [0.0, 0.0], '0.85') # X-Axis (grey line)
mpp.plot([raw_x[0], raw_x[-1]], [rawfmaxmin_m, rawfmaxmin_m], 'k--') # center between unfiltered maxima and unfiltered minima, i.e. not between AVGMAX and AVGMIN (black dashed line)
mpp.plot(raw_x, raw_y, '0.50', raw_x, rawf_y, 'r') # raw data and averaged data (grey, red line)
mpp.plot([raw_x[0], raw_x[-1]], [ap_max, ap_max], 'b') # upper limit for maxima (blue dotted line)
mpp.plot([raw_x[0], raw_x[-1]], [ap_min, ap_min], 'b:') # lower limit for maxima (blue dotted line)
mpp.plot([rawfmax_x, rawfmax_x], [ap_min, ap_max], 'b') # accepted maxima (blue line)
mpp.plot([raw_x[0], raw_x[-1]], [mdp_min, mdp_min], 'g') # lower limit for minima (green line)
mpp.plot([raw_x[0], raw_x[-1]], [mdp_max, mdp_max], 'g:') # upper limit for minima (green dotted line)
mpp.plot([rawfmin_x, rawfmin_x], [mdp_min, mdp_max], 'g') # accepted minima (green line)
mpp.plot([rawfmax_x[0], rawfmax_x[-1]], [rawfmax_m, rawfmax_m], 'k') # average of maxima, time interval used for firing rate count (black line)
mpp.plot([rawfmin_x[0], rawfmin_x[-1]], [rawfmin_m, rawfmin_m], 'k') # average of minima (black line)
mpp.plot(raw_x[rawfmax_ii], rawf_y[rawfmax_ii], 'bo') # maxima (blue dots)
mpp.plot(raw_x[rawfmin_ii], rawf_y[rawfmin_ii], 'go') # minima (green dots)
mpp.figtext(0.12, 0.90, "{0:<s} {1:<.4G}".format("AVGMAX (mV):", rawfmax_m), ha='left', va='center')
mpp.figtext(0.12, 0.87, "{0:<s} {1:<.4G}".format("FR (AP/min):", frate), ha='left', va='center')
mpp.figtext(0.12, 0.84, "{0:<s} {1:<.4G}".format("AVGMIN (mV):", rawfmin_m), ha='left', va='center')
mpp.tight_layout()
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
sys.stdout.write(">> SAVING... ")
sys.stdout.flush()
mpp.savefig(pdf_file, format='pdf', dpi=600)
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
if not AUTORUN:
mpp.show()
mpp.clf()
if APMODE:
# slice raw data segments by minima and align by maxima
sys.stdout.write(">> AVERAGING... ")
sys.stdout.flush()
# align ap segments by maxima, extend and average ap segments
ipg_max = float((1.0+APEXT)*ipg_hwr)
ipg_min = -float((1.0+APEXT)*ipg_hwl)
avg_x = np.arange(ipg_min, ipg_max, ipg_t, dtype='float64') # interpolation grid
avgxsize = avg_x.size
avg_y = np.zeros(avgxsize, dtype='float64') # ap average array
mpp.subplot2grid((4, 1), (0, 0), rowspan=3) # upper subplot
timestamp = "[" + str(round(tmp_start, 2)) + "ms-" + str(round(tmp_stop, 2)) + "ms]"
mpp_setup(title='Analysis: ' + name + ' ' + timestamp, xlabel='Time (ms)', ylabel='Voltage (mV)')
mpp.plot([avg_x[0], avg_x[-1]], [0.0, 0.0], '0.85') # X-axis
n = 0 # current maximum
for min_l, max_c, min_r in minmaxmin: # slicing of ap segments, extend ap parts if possible
minext_l = int(min_l - APEXT*(max_c - min_l)) # use int for index slicing
minext_r = int(min_r + APEXT*(min_r - max_c))
# prepare ap segment
tmp_x = np.asarray(raw_x[:] - raw_x[max_c]) # align by maximum
tmp_y = np.interp(avg_x, tmp_x, raw_y[:])
# average ap segments
if n == 0: # first average
avg_y = np.copy(tmp_y)
else: # all other averages
i = 0 # array index
nw = (1.0/(n+1.0)) # new data weight
pw = (n/(n+1.0)) # previous data weight
for y in np.nditer(avg_y, op_flags=['readwrite']):
y[...] = pw*y + nw*tmp_y[i] # integrate raw data into averaged data
i += 1
n += 1
mpp.plot(avg_x, tmp_y, '0.75') # plot aligned raw data segments
sys.stdout.write("\t\t\t\t\t\t\t [OK]\n")
sys.stdout.flush()
# analyze AP parameters with given criteria
sys.stdout.write(">> ANALYZING... ")
sys.stdout.flush()
# filter noise of averaged data with Savitzky-Golay
avgf_y = sp_sig.savgol_filter(avg_y, runavg, POLYNOM, mode='nearest')
# detect "Peak potential: Maximum potential of AP" (PP) (mV)
avgfmax_i = np.argwhere(avg_x == 0.0) # data point for maximum centered
if not avgfmax_i: # data point for maximum left or right of center
tmpavg = int(round(wm_max*runavg)) if int(round(wm_max*runavg)) % 2 else int(round(wm_max*runavg))+1
avgfmax_ii = np.asarray(sp_sig.argrelmax(avgf_y, order=tmpavg)).ravel() # find all maxima
avgfmax_i = avgfmax_ii[np.argmin(np.abs(avg_x[avgfmax_ii] - 0.0))] # return the maximum closes to X = 0.0
avgfmax_x = avg_x[avgfmax_i]
avgfmax_y = avgf_y[avgfmax_i]
pp_y = float(avgfmax_y)
pp = pp_y
# detect and reduce (several) minima in filtered average data,
tmpavg = int(round(wm_min*runavg)) if int(round(wm_min*runavg)) % 2 else int(round(wm_min*runavg))+1
avgfmin_ii = np.asarray(sp_sig.argrelmin(avgf_y, order=tmpavg)).ravel() # find all minima
avgfmin_i = getneighbors(np.asarray([avgfmax_i]), avgfmin_ii, avg_x, avgf_y, ap_hwd, ap_amp)
avgfmin_x = avg_x[avgfmin_i]
avgfmin_y = avgf_y[avgfmin_i]
# determine "Maximum diastolic potential 1: Minimum potential preceding PP" (MDP1) (mV)
mdp1_i = avgfmin_i[0]
mdp1_x = avg_x[mdp1_i]
mdp1_y = avgf_y[mdp1_i]
mdp1 = mdp1_y
# determine "Maximum diastolic potential 2: Minimum potential following PP" (MDP2) (mV)
mdp2_i = avgfmin_i[-1]
mdp2_x = avg_x[mdp2_i]
mdp2_y = avgf_y[mdp2_i]
mdp2 = mdp2_y
# determine "Cycle length: Time interval MDP1-MDP2" (CL) (ms)
cl = float(mdp2_x - mdp1_x)
# determine "Action potential amplitude: Potential difference of PP minus MDP2" (APA) (mV)
apa = pp - mdp2
# determine "AP duration 50: Time interval at 50% of maximum repolarization" (APD50) (ms)
apd50_l = (pp - 0.50*apa) # threshold value
apd50_i = functools.reduce(np.intersect1d, (np.argwhere(avgf_y > apd50_l), np.argwhere(avg_x >= mdp1_x), np.argwhere(avg_x <= mdp2_x)))
apd50_x = (avg_x[apd50_i[0]-1], avg_x[apd50_i[-1]+1]) # equal or smaller than apd50_l
apd50_y = (avgf_y[apd50_i[0]-1], avgf_y[apd50_i[-1]+1])
apd50 = float(apd50_x[-1] - apd50_x[0])
# determine "AP duration 90: Time interval at 90% of maximum repolarization" (APD90) (ms)
apd90_l = pp - 0.90*apa
apd90_i = functools.reduce(np.intersect1d, (np.argwhere(avgf_y > apd90_l), np.argwhere(avg_x >= mdp1_x), np.argwhere(avg_x <= mdp2_x)))
apd90_x = (avg_x[apd90_i[0]-1], avg_x[apd90_i[-1]+1]) # equal or smaller than apd90_l
apd90_y = (avgf_y[apd90_i[0]-1], avgf_y[apd90_i[-1]+1])
apd90 = float(apd90_x[-1] - apd90_x[0])
# calculate derivative of averaged data (mV/ms)
avgfg_y = np.ediff1d(avgf_y) # dY/1, differences between values
avgfg_y = np.insert(avgfg_y, 0, avgfg_y[0]) # preserve array size
avgfg_y = avgfg_y / ipg_t # dY/dX, differences per increment
# filter derivative of averaged data
tmpavg = int(round(wm_der*runavg)) if int(round(wm_der*runavg)) % 2 else int(round(wm_der*runavg))+1
avgfgf_y = sp_sig.savgol_filter(avgfg_y, tmpavg, POLYNOM, mode='nearest')
# determine "Maximum upstroke velocity: Maximum of derivative between MDP1 and PP" (MUV) (mV/ms)
tmpavg = int(round(wm_max*runavg)) if int(round(wm_max*runavg)) % 2 else int(round(wm_max*runavg))+1
avgfgfmax_ii = functools.reduce(np.intersect1d, (sp_sig.argrelmax(avgfgf_y, order=tmpavg), np.argwhere(avg_x >= mdp1_x), np.argwhere(avg_x <= avgfmax_x)))
avgfgfmax_i = getneighbors(np.asarray([avgfmax_i]), avgfgfmax_ii, avg_x, avgfgf_y)[0] # avoid errors from large ap part extensions
avgfgfmax_x = avg_x[avgfgfmax_i]
avgfgfmax_y = avgfgf_y[avgfgfmax_i]
muv = float(avgfgfmax_y)
# determine "Maximum repolarization rate: Minimum of derivative between PP and MDP2" (MRR) (mV/ms)
tmpavg = int(round(wm_min*runavg)) if int(round(wm_min*runavg)) % 2 else int(round(wm_min*runavg))+1
avgfgfmin_ii = functools.reduce(np.intersect1d, (sp_sig.argrelmin(avgfgf_y, order=tmpavg), np.argwhere(avg_x >= avgfmax_x), np.argwhere(avg_x <= mdp2_x)))
avgfgfmin_i = getneighbors(np.asarray([apd90_i[-1]+1]), avgfgfmin_ii, avg_x, avgfgf_y)[0] # mrr or trr
avgfgfmin_i = np.append(avgfgfmin_i, getneighbors(np.asarray([avgfgfmax_i]), avgfgfmin_ii, avg_x, avgfgf_y)[1]) # trr only
if avgfgfmin_i[0] == avgfgfmin_i[1]: # no trr
trr = 0.0
else:
# determine "Transient repolarization rate: Second minimum of derivative between PP and MDP2 after PP, if distinct from MRR" (TRR) (mV/ms)
trr = float(avgfgf_y[avgfgfmin_i][1])
avgfgfmin_x = avg_x[avgfgfmin_i]
avgfgfmin_y = avgfgf_y[avgfgfmin_i]
mrr = float(avgfgf_y[avgfgfmin_i][0])
# approximate diastolic duration in filtered derivative
da_i, da_x, da_y, da_m, da_n, da_r = getbestlinearfit(avg_x, avgfgf_y, mdp1_x, apd90_x[0], 10, 90, 1, 40) # get a baseline for the derivative before exceeding the threshold
# determine "Threshold potential: Potential separating DD and APD." (THR) (mV)
thr_i = functools.reduce(np.intersect1d, (np.argwhere(avgfgf_y >= ((da_m*avg_x + da_n) + 0.5)), np.argwhere(avg_x >= avg_x[da_i[-1]]), np.argwhere(avg_x <= apd50_x[0])))[0].astype(int) # determine baseline-corrected threshold level
thr_x = avg_x[thr_i]
thr_y = avgf_y[thr_i]
thr = float(thr_y)
# determine "Early diastolic duration: Time from MDP1 to end of linear fit for DDR" (EDD) (ms)
edd_i, edd_x, edd_y, edd_m, edd_n, edd_r = getbestlinearfit(avg_x, avgf_y, mdp1_x, thr_x, 10, 50, 1, 20) # fit EDD within the threshold level determined earlier
edd = float(edd_x[-1]-mdp1_x)
# determine "Diastolic depolarization rate: Potential change rate at end of EDD" (DDR) (mV/ms)
ddr = float(edd_m) # or: np.mean(avgfgf_y[edd_i])
# determine "Diastolic duration: EDD plus LDD" (DD) (ms)
dd = float(thr_x - mdp1_x)
# determine "Late diastolic duration: Time from end of linear fit for DDR to THR" (LDD) (ms)
ldd = float(thr_x - edd_x[-1])
# determine "Action potential duration: Time between THR and MDP2" (APD) (ms)
apd = float(mdp2_x - thr_x)
sys.stdout.write("\t\t\t\t\t\t\t [OK]\n")
sys.stdout.flush()
# create analysis plot
sys.stdout.write(">> PLOTTING... ") # the X-axis and the individual segments are already plotted during averaging
sys.stdout.flush()
mpp.plot([mdp1_x, thr_x], [mdp1_y, mdp1_y], 'k-.') # DD (black dashed/dotted line)
mpp.plot([thr_x, mdp2_x], [mdp2_y, mdp2_y], 'k') # APD (black line)
mpp.plot([apd50_x[0], apd50_x[1]], [apd50_y[1], apd50_y[1]], 'k') # APD50 (black line)
mpp.plot([apd90_x[0], apd90_x[1]], [apd90_y[1], apd90_y[1]], 'k') # APD90 (black line)
mpp.plot([mdp1_x, mdp1_x], [mdp1_y, 0.0], 'k:') # MDP1 indicator (black dotted line)
mpp.plot([mdp2_x, mdp2_x], [mdp2_y, 0.0], 'k:') # MDP2 indicator (black dotted line)
mpp.plot([avgfgfmax_x, avgfgfmax_x], [mdp2_y, avgf_y[avgfgfmax_i]], 'k:') # MUV indicator (black dotted line)
mpp.plot([avgfgfmin_x[0], avgfgfmin_x[0]], [mdp2_y, avgf_y[avgfgfmin_i[0]]], 'k:') # MRR indicator (black dotted line)
if trr:
mpp.plot([avgfgfmin_x[1], avgfgfmin_x[1]], [mdp2_y, avgf_y[avgfgfmin_i[1]]], 'k:') # TRR indicator (black dotted line)
mpp.plot([edd_x[-1], edd_x[-1]], [mdp2_y, 0.0], 'k:') # EDD/LDD separator (black dashed line)
mpp.plot([thr_x, thr_x], [thr_y, 0.0], 'k:') # DD/APD upper separator (black dotted line)
mpp.plot([thr_x, thr_x], [mdp2_y, thr_y], 'k:') # DD/APD lower separator (black dotted line)
mpp.plot(avg_x, avg_y, 'k', avg_x, avgf_y, 'r') # averaged data and filtered averaged data (black, red lines)
mpp.plot(avg_x[edd_i], avgf_y[edd_i], 'g') # best linear fit segment for DDR (green line)
mpp.plot(avg_x, (edd_m*avg_x + edd_n), 'k--') # DDR (black dashed line)
mpp.plot([edd_x[-1]], [edd_y[-1]], 'ko') # EDD-LDD separator (black dot)
mpp.plot([apd50_x[1]], [apd50_y[1]], 'ko') # APD50 (black dots)
mpp.plot(apd90_x[1], apd90_y[1], 'ko') # APD90 (black dots)
mpp.plot(thr_x, avgf_y[thr_i], 'ro') # THR (red dot)
mpp.plot(avgfgfmax_x, avgf_y[avgfgfmax_i], 'wo') # MUV (white dot)
mpp.plot(avgfgfmin_x[0], avgf_y[avgfgfmin_i[0]], 'wo') # MRR (white dot)
if trr:
mpp.plot(avgfgfmin_x[1], avgf_y[avgfgfmin_i[1]], 'wo') # TRR (dot)
mpp.plot(avgfmax_x, pp_y, 'bo') # PP (blue dot)
mpp.plot(avgfmin_x, avgfmin_y, 'go') # MDP1, MDP2 (green dots)
mpp.figtext(0.12, 0.90, "{0:<s} {1:<.4G}".format("APs (#):", rawfmax_y.size), ha='left', va='center')
mpp.figtext(0.12, 0.87, "{0:<s} {1:<.4G}".format("FR (AP/min):", frate), ha='left', va='center')
mpp.figtext(0.12, 0.84, "{0:<s} {1:<.4G}".format("CL (ms):", cl), ha='left', va='center')
mpp.figtext(0.12, 0.81, "{0:<s} {1:<.4G}".format("DD (ms):", dd), ha='left', va='center')
mpp.figtext(0.12, 0.78, "{0:<s} {1:<.4G}".format("EDD (ms):", edd), ha='left', va='center')
mpp.figtext(0.12, 0.75, "{0:<s} {1:<.4G}".format("LDD (ms):", ldd), ha='left', va='center')
mpp.figtext(0.12, 0.72, "{0:<s} {1:<.4G}".format("APD (ms):", apd), ha='left', va='center')
mpp.figtext(0.12, 0.69, "{0:<s} {1:<.4G}".format("APD50 (ms):", apd50), ha='left', va='center')
mpp.figtext(0.12, 0.66, "{0:<s} {1:<.4G}".format("APD90 (ms):", apd90), ha='left', va='center')
mpp.figtext(0.12, 0.63, "{0:<s} {1:<.4G}".format("MDP1 (mV):", mdp1), ha='left', va='center')
mpp.figtext(0.12, 0.60, "{0:<s} {1:<.4G}".format("MDP2 (mV):", mdp2), ha='left', va='center')
mpp.figtext(0.12, 0.57, "{0:<s} {1:<.4G}".format("THR (mV):", thr), ha='left', va='center')
mpp.figtext(0.12, 0.54, "{0:<s} {1:<.4G}".format("PP (mV):", pp), ha='left', va='center')
mpp.figtext(0.12, 0.51, "{0:<s} {1:<.4G}".format("APA (mV):", apa), ha='left', va='center')
mpp.figtext(0.12, 0.48, "{0:<s} {1:<.4G}".format("DDR (mV/ms):", ddr), ha='left', va='center')
mpp.figtext(0.12, 0.45, "{0:<s} {1:<.4G}".format("MUV (mV/ms):", muv), ha='left', va='center')
mpp.figtext(0.12, 0.42, "{0:<s} {1:<.4G}".format("TRR (mV/ms):", trr), ha='left', va='center')
mpp.figtext(0.12, 0.39, "{0:<s} {1:<.4G}".format("MRR (mV/ms):", mrr), ha='left', va='center')
mpp.subplot2grid((4, 1), (3, 0)) # lower subplot
mpp_setup(title="", xlabel='Time (ms)', ylabel='(mV/ms)')
mpp.plot([avg_x[0], avg_x[-1]], [0.0, 0.0], '0.85') # x axis
mpp.plot([avgfgfmin_x[0], avgfgfmin_x[0]], [avgfgfmin_y[0], avgfgfmax_y], 'k:') # MRR indicator (black dotted line)
if trr:
mpp.plot([avgfgfmin_x[1], avgfgfmin_x[1]], [avgfgfmin_y[1], avgfgfmax_y], 'k:') # TRR indicator (black dotted line)
mpp.plot([thr_x, thr_x], [avgfgf_y[thr_i], avgfgfmax_y], 'k:') # THR indicator (black dotted line)
mpp.plot(avg_x, avgfg_y, 'c', avg_x, avgfgf_y, 'm') # derivative and filtered derivative
mpp.plot(avg_x[da_i], avgfgf_y[da_i], 'g') # best linear fit segment for THR (green line)
mpp.plot(avg_x, (da_m*avg_x + da_n), 'k--') # best linear fit for THR (black dashed line)
mpp.plot(thr_x, avgfgf_y[thr_i], 'ro') # THR (red dot)
mpp.plot(avgfgfmax_x, avgfgfmax_y, 'bo') # derivative maximum (blue dot)
mpp.plot(avgfgfmin_x, avgfgfmin_y, 'go') # derivative minima (green dots)
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
# data summary
sys.stdout.write(">> SAVING... ")
sys.stdout.flush()
avg_file = os.path.join(WORKDIR, name + "_" + timestamp + "_avg.dat")
uheader = "" +\
"Analysis start time: " + 4*"\t" + str(tmp_start) + " ms\n" + \
"Analysis stop time:" + 4*"\t" + str(tmp_stop) + " ms\n" + \
"Upper limit for maxima:" + 3*"\t" + str(ap_max) + " mV\n" + \
"Lower limit for maxima:" + 3*"\t" + str(ap_min) + " mV\n" + \
"Upper limit for minima:" + 3*"\t" + str(mdp_max) + " mV\n" + \
"Lower limit for minima:" + 3*"\t" + str(mdp_min) + " mV\n" + \
"Maximum peak half width:" + 3*"\t" + str(ap_hwd) + " ms\n" + \
"Minimum peak amplitude:" + 3*"\t" + str(ap_amp) + " mV\n" + \
"Running average window size:" + 2*"\t" + str(runavg) + "\n" + \
"Window multiplier for derivative:" + "\t" + str(wm_der) + "\n" + \
"Window multiplier for maxima:" + 2*"\t" + str(wm_max) + "\n" + \
"Window multiplier for minima:" + 2*"\t" + str(wm_min) + "\n" + \
"Time (ms)" + "\t" + "Averaged signal (mV)" + "\t" + "Filtered average (mV)"
np.savetxt(avg_file, np.column_stack((avg_x, avg_y, avgf_y)), fmt='%e', delimiter='\t', header=uheader)
mpp.tight_layout()
mpp.savefig(pdf_file, format='pdf', dpi=600)
sum_file = os.path.join(WORKDIR, "ParamAP.log")
newfile = not bool(os.path.exists(sum_file))
with open(sum_file, 'a') as targetfile: # append file
if newfile: # write header
targetfile.write(
"{0:s}\t{1:s}\t{2:s}\t{3:s}\t{4:s}\t{5:s}\t{6:s}\t{7:s}\t{8:s}\t{9:s}\t{10:s}\t{11:s}\t{12:s}\t{13:s}\t{14:s}\t{15:s}\t{16:s}\t{17:s}\t{18:s}\t{19:s}\t{20:s}".format(
"File ( )", "Start (ms)", "Stop (ms)", "APs (#)", "FR (AP/min)", "CL (ms)", "DD (ms)", "EDD (ms)", "LDD (ms)", "APD (ms)", "APD50 (ms)", "APD90 (ms)", "MDP1 (mV)", "MDP2 (mV)", "THR (mV)", "PP (mV)", "APA (mV)", "DDR (mV/ms)", "MUV (mV/ms)", "TRR (mV/ms)", "MRR (mV/ms)") + "\n")
targetfile.write(
"{0:s}\t{1:4G}\t{2:4G}\t{3:4G}\t{4:4G}\t{5:4G}\t{6:4G}\t{7:4G}\t{8:4G}\t{9:4G}\t{10:4G}\t{11:4G}\t{12:4G}\t{13:4G}\t{14:4G}\t{15:4G}\t{16:4G}\t{17:4G}\t{18:4G}\t{19:4G}\t{20:4G}".format(
name, tmp_start, tmp_stop, rawfmax_y.size, frate, cl, dd, edd, ldd, apd, apd50, apd90, mdp1, mdp2, thr, pp, apa, ddr, muv, trr, mrr) + "\n")
targetfile.flush()
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
if not AUTORUN:
mpp.show()
except IndexError as ierr: # check running average and window multiplier
sys.stdout.write("\n" + 9*"\t" + " [ER]")
print("\r ## Run failed. Detection of extrema or threshold failed.")
except PermissionError as perr: # file already opened or storage read-only
sys.stdout.write("\n" + 9*"\t" + " [ER]")
print("\r ## Run failed. File access denied by system.")
except Warning as werr: # increase averaging window time
sys.stdout.write("\n" + 9*"\t" + " [ER]")
print("\r ## Run failed. Identification of action potentials failed.")
except Exception as uerr: # unknown
sys.stdout.write("\n" + 9*"\t" + " [UN]")
print("\r ## Run failed. Error was: {0}".format(uerr) + ".")
except KeyboardInterrupt as kerr: # user canceled this file
sys.stdout.write("\n" + 9*"\t" + " [KO]")
print("\r ## Run skipped. Canceled by user.")
if SERIES: # check for next frame
if tmp_stop + AVG_FRAME <= avg_stop:
segment += 1.0
tmp_start = avg_start + segment*AVG_FRAME # prepare next frame for preview
tmp_stop = tmp_start + AVG_FRAME
raw_i = np.argwhere((RAW_XY[0] >= tmp_start) & (RAW_XY[0] <= tmp_stop)).ravel()
raw_x = RAW_XY[0][raw_i[0]:raw_i[-1]+1]
raw_y = RAW_XY[1][raw_i[0]:raw_i[-1]+1]
print()
print("RUN:\t" + str(int(segment + 1)) + "/" + str(math.floor((avg_stop-avg_start)/AVG_FRAME)))
print()
else: # not enough data left in file
break
else: # no time series analysis
break
if not AUTORUN: # check for next file
print()
nextfile = askboolean("Continue with next file?", True)
if nextfile:
break
else: # re-run current file
raw_x = RAW_XY[0] # recover original rawdata
raw_y = RAW_XY[1]
continue
else: # autorun
break
# housekeeping after each file
FILE += 1
sys.stdout.write(">> CLEANING... ")
sys.stdout.flush()
pdf_file.close() # close multi-pdf file and remove if empty
mpp.clf() # clear canvas
gc.collect() # start garbage collection to prevent memory fragmentation
sys.stdout.write(8*"\t" + " [OK]\n")
sys.stdout.flush()
# print summary
print('{0:^79}'.format(SEPARBOLD))
SUMMARY = "End of run: " + str(FILE) + str(" files" if FILE != 1 else " file") + " processed."
print('{0:^79}'.format(SUMMARY))
print('{0:^79}'.format(SEPARBOLD) + os.linesep)
WAIT = input("Press ENTER to end this program.")
| gpl-2.0 |
Ecotrust/cogs-priorities | util/test_scenarios.py | 3 | 6555 | from django.core.management import setup_environ
import os
import sys
sys.path.append(os.path.dirname(os.path.join('..','priorities',__file__)))
import settings
setup_environ(settings)
#==================================#
from seak.models import Scenario, ConservationFeature, PlanningUnit, Cost, PuVsCf, PuVsCost
from django.contrib.auth.models import User
from django.utils import simplejson as json
from django.conf import settings
from django.contrib.gis.geos import GEOSGeometry
import time
import random
def mean(alist):
    """Return the arithmetic mean of *alist* as a float.

    Elements may be anything accepted by ``float()`` (ints, floats,
    numeric strings).  Raises ``ValueError`` on an empty sequence
    instead of the obscure ``ZeroDivisionError`` the old code produced.
    """
    floatNums = [float(x) for x in alist]
    if not floatNums:
        raise ValueError("mean() of an empty sequence")
    return sum(floatNums) / len(floatNums)
# Run all scenarios under a fixed test user (created on first run).
user, created = User.objects.get_or_create(username='mperry')

# Accumulators filled in 'query' mode and consumed by the (commented-out)
# plotting code at the bottom of the file.
scalefactors = []
num_species = []
num_units = []

# Grid of scale factors to sweep.
factors = [2, 3, 4, 5]
#factors = [float(x)/20.0 for x in range(20,100)]
# Grid of species counts and cost counts to combine with each factor.
numspecies = [2,3,4]
numcosts = [2,1,3]
# Candidate per-species target / penalty levels sampled at random below.
targets = [0.5, 0.25]
penalties = [0.5, 0.25]

# Keep Marxan runs short for testing.
settings.MARXAN_NUMREPS = 3

# Select one of three modes: 'query' re-reads previously saved scenarios,
# 'create' generates and runs new ones, 'hardcoded' uses canned results.
#MODE = 'hardcoded'
#MODE = 'query'
MODE = 'create'

if MODE == 'query':
    wp = Scenario.objects.filter(name__startswith="Test Scale Factor")
    for w in wp:
        print "Querying", w.name, w
        scalefactors.append(w.input_scalefactor)
        r = w.results
        num_species.append(r['num_met'])
        num_units.append(r['num_units'])
        # NOTE(review): accessing .kml presumably forces KML generation as a
        # side effect; the value is discarded -- confirm against the model.
        w.kml

# Global counter incremented by create_wp() for each scenario built.
COUNT = 0
def create_wp(target_dict, penalties_dict, costs_dict, geography_list, sf):
    """Build (but do not save) a Scenario with the given inputs.

    target_dict / penalties_dict map species keys to levels, costs_dict maps
    cost slugs to weights, geography_list holds planning-unit fids, and sf is
    the scale factor.  A random two-word name is drawn from random_words.txt.
    """
    global COUNT
    COUNT += 1
    # Echo the inputs for debugging the sweep.
    print target_dict
    print penalties_dict
    print costs_dict
    print geography_list
    print sf
    # Name the scenario with two random words plus the scale factor.
    with open(os.path.join(os.path.dirname(__file__), 'random_words.txt'),'r') as fh:
        name = ' '.join([x.strip() for x in random.sample(fh.readlines(), 2)])
    name += " - %s" % sf
    # All structured inputs are stored JSON-encoded on the Scenario model.
    wp = Scenario(input_targets = json.dumps(
                        target_dict
                  ),
                  input_penalties = json.dumps(
                        penalties_dict
                  ),
                  input_relativecosts=json.dumps(
                        costs_dict
                  ),
                  input_geography=json.dumps(
                        geography_list
                  ),
                  input_scalefactor=sf,
                  name= name, user=user)
    return wp
if MODE == 'create':
    # Start from a clean slate: delete every existing scenario.
    wp = Scenario.objects.all() #filter(name__startswith="Auto Test Scale Factor")
    wp.delete()

    # Collect species keys, stripping trailing '---' level separators.
    cfs = ConservationFeature.objects.all()
    keys = []
    for c in cfs:
        a = c.level_string
        while a.endswith('---'):
            print a
            a = a[:-3]
        keys.append(a)

    # CSV log of one row per scenario run.
    fh = open("/home/mperry/results.csv", 'w+')
    fh.write('ncosts, nspecies, sumpenalties, meanpenalties, scalefactor, numspeciesmet, numplannningunits')
    fh.write('\n')
    fh.flush()

    # Random horizontal line used to restrict the geography to units below it.
    g = GEOSGeometry('POINT(-13874668 %s)' % random.randrange(5005012, 8101549))

    # Sweep every (scale factor, cost count, species count) combination.
    for f in factors:
        for nc in numcosts:
            for n in numspecies:
                for i in range(1):
                    #if random.choice([True,False]):
                    geography_list = [x.fid for x in PlanningUnit.objects.filter(geometry__strictly_below=g)]
                    #else:
                    #geography_list = [x.fid for x in PlanningUnit.objects.filter(geometry__strictly_above=g)]

                    try:
                        n = int(n)
                        target_dict = {}
                        penalty_dict = {}
                        # pick n random species
                        selected_key = random.sample(keys, n) #'blah---blah'

                        # Either one shared (target, penalty) level for all
                        # selected species, or an independent draw per species.
                        if random.choice([True,False]):
                            t = random.choice(targets)
                            p = random.choice(penalties)
                        else:
                            t = None
                            p = None

                        for key in selected_key:
                            if t and p:
                                # Use the predetermined for ALL species
                                target_dict[key] = t
                                penalty_dict[key] = p
                            else:
                                # random for each species
                                target_dict[key] = random.choice(targets)
                                penalty_dict[key] = random.choice(penalties)

                    except ValueError:
                        # n was not an integer: fall back to two fixed keys
                        # covering ALL species.
                        t = random.choice(targets)
                        p = random.choice(penalties)
                        t2 = random.choice(targets)
                        p2 = random.choice(penalties)
                        target_dict = { "coordinate":t, "uids":t2 }
                        penalty_dict = { "coordinate":p, "uids":p2 }

                    # Pick nc random costs, all weighted 1.
                    costs_dict = {}
                    for a in random.sample([c.slug for c in Cost.objects.all()], nc):
                        costs_dict[a] = 1

                    sf = f
                    wp = create_wp(target_dict, penalty_dict, costs_dict, geography_list, sf)
                    print "####################################"
                    print 'targets', wp.input_targets
                    print 'penalties', wp.input_penalties
                    print 'costs', wp.input_relativecosts
                    # Saving presumably queues the Marxan run -- poll until done.
                    wp.save()
                    #continue
                    while not wp.done:
                        time.sleep(2)
                        print " ", wp.status_html

                    # Append one summary row to the CSV log.
                    inpenalties = json.loads(wp.input_penalties)
                    r = wp.results
                    #'ncosts, nspecies, sumpenalties, meanpenalties, scalefactor, numspeciesmet, numplannningunits'
                    fh.write(','.join([str(x) for x in [
                        sum(json.loads(wp.input_relativecosts).values()),
                        len(inpenalties.values()),
                        sum(inpenalties.values()),
                        mean(inpenalties.values()),
                        wp.input_scalefactor,
                        r['num_met'],
                        r['num_units']
                    ]]))
                    fh.write('\n')
                    fh.flush()

if MODE == 'hardcoded':
    # Canned results from a previous sweep, for plotting without re-running.
    scalefactors = [0.1, 0.2, 0.25, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.25, 1.5, 2, 4, 8, 16, 32]
    num_units = [0, 3, 9, 17, 46, 57, 63, 73, 76, 79, 81, 82, 82, 83, 85, 90, 92, 93, 91]
    num_species = [0, 1, 4, 10, 27, 38, 37, 54, 57, 58, 63, 59, 62, 66, 66, 69, 71, 71, 71]

# Optional scatter plot of species goals met vs scale factor (disabled).
#import matplotlib.pyplot as plt
#fig = plt.figure()
#plt.xlabel('Scale Factor')
#plt.ylabel('Number of Species Goals Met')
#ax = fig.add_subplot(111)
#ax.scatter(scalefactors, num_species)
#ax.set_xscale('log')
#plt.show()
| bsd-3-clause |
kyleabeauchamp/HMCNotes | code/correctness/old/compare_acceptance_with_shift.py | 1 | 1130 | import lb_loader
import pandas as pd
import simtk.openmm.app as app
import numpy as np
import simtk.openmm as mm
from simtk import unit as u
from openmmtools import hmc_integrators, testsystems
# Compare HMC acceptance rates across several test systems, all run at the
# same fixed timestep regardless of the system's suggested one.
actual_timestep = 1.5 * u.femtoseconds

data = []
for sysname in ["ljbox", "switchedljbox", "shiftedljbox", "shortwater", "shortswitchedwater"]:
    system, positions, groups, temperature, timestep, langevin_timestep, testsystem = lb_loader.load(sysname)
    # Override the loader-suggested timestep with the shared one.
    timestep = actual_timestep

    # Equilibration: minimize, then run 500 HMC steps to relax positions.
    integrator = hmc_integrators.HMCIntegrator(temperature, steps_per_hmc=25, timestep=timestep)
    context = lb_loader.build(system, integrator, positions, temperature)
    mm.LocalEnergyMinimizer.minimize(context)
    integrator.step(500)
    positions = context.getState(getPositions=True).getPositions()

    # Production: fresh integrator/context from the equilibrated positions,
    # long run to estimate the acceptance rate.
    integrator = hmc_integrators.HMCIntegrator(temperature, steps_per_hmc=25, timestep=timestep)
    context = lb_loader.build(system, integrator, positions, temperature)
    integrator.step(20000)
    print(integrator.acceptance_rate)
    data.append(dict(sysname=sysname, accept=integrator.acceptance_rate))

# Tabulate acceptance rates per system.
data = pd.DataFrame(data)
| gpl-2.0 |
nitish-tripathi/Simplery | ANN/Tensorflow_Tutorial/mnist.py | 1 | 1354 |
import pylab
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# read data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# show images
"""
img_ = mnist.train.images[1].reshape(28,28)
print np.argmax(mnist.train.labels[1])
pylab.imshow(img_)
pylab.show()
"""

# Single-layer softmax regression: 784 pixel inputs -> 10 digit logits.
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.matmul(x, W) + b
# One-hot ground-truth labels.
y_ = tf.placeholder(tf.float32, [None, 10])

# Numerically stable cross-entropy on the raw logits (the commented-out
# manual formula applied log() to unnormalized outputs).
#cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

sess = tf.InteractiveSession()
tf.global_variables_initializer().run()

# 1000 SGD steps on mini-batches of 100, recording the loss per step.
costs = []
for _ in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    _, loss_val = sess.run([train_step, cross_entropy], feed_dict={x: batch_xs, y_: batch_ys})
    #print "Cost: {0}".format(loss_val)
    costs.append(loss_val)

# Accuracy on the full training set (NOTE(review): evaluated on train, not
# test, data -- kept as written).
correct_predictions = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
acc = sess.run(accuracy, feed_dict={x: mnist.train.images, y_: mnist.train.labels})
print acc
| mit |
ishank08/scikit-learn | sklearn/utils/tests/test_fixes.py | 28 | 3156 | # Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import pickle
import numpy as np
import math
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
from sklearn.utils.fixes import MaskedArray
from sklearn.utils.fixes import norm
def test_expit():
    """Numerical-stability checks for expit (the logistic sigmoid)."""
    # Extreme arguments must not overflow; compare against the two
    # analytically equivalent closed forms (one stable per sign).
    for arg, expected in [(1000., 1. / (1. + np.exp(-1000.))),
                          (-1000., np.exp(-1000.) / (1. + np.exp(-1000.)))]:
        assert_almost_equal(expit(arg), expected, decimal=16)
    # Writing into a preallocated float32 buffer yields the same values
    # as the plain call.
    values = np.arange(10)
    buffer = np.zeros_like(values, dtype=np.float32)
    assert_array_almost_equal(expit(values), expit(values, out=buffer))
def test_divide():
    """`divide` must perform true (float) division, not floor division."""
    result = divide(.6, 1)
    assert_equal(result, .600000000000)
def test_astype_copy_memory():
    """astype copies exactly when the dtype changes or copy=True."""
    a_int32 = np.ones(3, np.int32)

    # A dtype conversion returns the requested dtype and always copies,
    # even when copy=False is requested.
    b_float32 = astype(a_int32, dtype=np.float32, copy=False)
    assert_equal(b_float32.dtype, np.float32)
    assert_false(np.may_share_memory(b_float32, a_int32))

    # Matching dtype with copy=False may hand back the original object.
    c_int32 = astype(a_int32, dtype=np.int32, copy=False)
    assert_true(c_int32 is a_int32)

    # copy=True -- explicitly or as the default -- always yields a
    # distinct buffer.
    for copied in (astype(a_int32, dtype=np.int32, copy=True),
                   astype(a_int32, dtype=np.int32)):
        assert_false(np.may_share_memory(copied, a_int32))
def test_masked_array_obj_dtype_pickleable():
    """Object-dtype MaskedArrays survive a pickle round trip for any mask."""
    original = MaskedArray([1, None, 'a'], dtype=object)
    # Scalar masks (all-masked / unmasked) and an element-wise mask.
    for mask in (True, False, [0, 1, 0]):
        original.mask = mask
        restored = pickle.loads(pickle.dumps(original))
        assert_array_equal(original.data, restored.data)
        assert_array_equal(original.mask, restored.mask)
def test_norm():
    """norm supports whole-matrix, per-axis, and assorted `ord` reductions."""
    X = np.array([[-2, 4, 5],
                  [1, 3, -4],
                  [0, 0, 8],
                  [0, 0, 0]]).astype(float)

    # Default call: Frobenius norm of the whole matrix.
    assert_equal(math.sqrt(135), norm(X))

    # Column-wise Euclidean norms.
    assert_array_equal(
        np.array([math.sqrt(5), math.sqrt(25), math.sqrt(105)]),
        norm(X, axis=0)
    )

    # Other orders: 1-norm, max-abs and min-abs per column, 1-norm per row.
    assert_array_equal(np.array([3, 7, 17]), norm(X, axis=0, ord=1))
    assert_array_equal(np.array([2, 4, 8]), norm(X, axis=0, ord=np.inf))
    assert_array_equal(np.array([0, 0, 0]), norm(X, axis=0, ord=-np.inf))
    assert_array_equal(np.array([11, 8, 8, 0]), norm(X, axis=1, ord=1))

    # Result shapes: scalar, one value per column, one value per row.
    assert_equal((), norm(X).shape)
    assert_equal((3,), norm(X, axis=0).shape)
    assert_equal((4,), norm(X, axis=1).shape)
vermouthmjl/scikit-learn | benchmarks/bench_plot_approximate_neighbors.py | 244 | 6011 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, accuracy of LSHForest queries are measured for various
hyper-parameters and index sizes.
Second, speed up of LSHForest queries compared to brute force
method in exact nearest neighbors is measures for the
aforementioned settings. In general, speed up is increasing as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
    """Generate (index, query) matrices from a 100-centre blob mixture.

    The result is memoised on disk via joblib, so repeated benchmark runs
    reuse identical data.
    """
    print('Generating random blob-ish data')
    total = n_samples + n_queries
    X, _ = make_blobs(n_samples=total, n_features=n_features,
                      centers=100, shuffle=True,
                      random_state=random_state)
    # Because shuffle=True, the held-out tail rows follow the same mixture
    # distribution as the indexed head rows.
    index_vectors = X[:n_samples]
    query_vectors = X[n_samples:]
    return index_vectors, query_vectors
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
    """Run exact brute-force kNN queries and time them.

    Parameters
    ----------
    X : array of shape (n_samples, n_features) -- index data.
    queries : array of query vectors.
    n_queries : number of queries (used to average the timing).
    n_neighbors : neighbors to retrieve per query.

    Returns
    -------
    (neighbors, average_time) : the (n_queries, n_neighbors) index array
    and the wall-clock seconds per query.
    """
    print ('Building NearestNeighbors for %d samples in %d dimensions' %
           (X.shape[0], X.shape[1]))
    nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
    # Time the batched query and report the per-query average.
    # (Removed a dead `average_time = 0` that was immediately overwritten.)
    t0 = time()
    neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
                                return_distance=False)
    average_time = (time() - t0) / n_queries
    return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
                  average_time_exact, **lshf_params):
    """Calculates accuracy and the speed up of LSHForest.

    Fits an LSHForest on X with the given hyper-parameters, times the
    approximate queries, and scores them against `exact_neighbors`
    (the ground truth from calc_exact_neighbors).  Returns
    (speed_up, accuracy).
    """
    print('Building LSHForest for %d samples in %d dimensions' %
          (X.shape[0], X.shape[1]))
    lshf = LSHForest(**lshf_params)
    # Time index construction separately from querying.
    t0 = time()
    lshf.fit(X)
    lshf_build_time = time() - t0
    print('Done in %0.3fs' % lshf_build_time)

    accuracy = 0
    t0 = time()
    approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
                                       return_distance=False)
    average_time_approx = (time() - t0) / n_queries

    # Per-query overlap between approximate and exact neighbor sets,
    # averaged over all queries.
    for i in range(len(queries)):
        accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
    accuracy /= n_queries
    # Speed-up relative to the brute-force per-query time.
    speed_up = average_time_exact / average_time_approx

    print('Average time for lshf neighbor queries: %0.3fs' %
          average_time_approx)
    print ('Average time for exact neighbor queries: %0.3fs' %
           average_time_exact)
    print ('Average Accuracy : %0.2f' % accuracy)
    print ('Speed up: %0.1fx' % speed_up)

    return speed_up, accuracy
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    # Initialize index sizes
    n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
    n_features = int(1e2)
    n_queries = 100
    n_neighbors = 10

    # Generate data once at the largest size; smaller runs slice a prefix.
    X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
                                 random_state=0)

    # LSHForest hyper-parameter settings to compare at every index size.
    params_list = [{'n_estimators': 3, 'n_candidates': 50},
                   {'n_estimators': 5, 'n_candidates': 70},
                   {'n_estimators': 10, 'n_candidates': 100}]

    # Result grids: rows = index sizes, columns = parameter settings.
    accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
    speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)

    for i, sample_size in enumerate(n_samples):
        print ('==========================================================')
        print ('Sample size: %i' % sample_size)
        print ('------------------------')
        # Ground truth (and its timing baseline) for this index size.
        exact_neighbors, average_time_exact = calc_exact_neighbors(
            X_index[:sample_size], X_query, n_queries, n_neighbors)
        for j, params in enumerate(params_list):
            print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
                   (params['n_estimators'], params['n_candidates']))
            speed_ups[i, j], accuracies[i, j] = calc_accuracy(
                X_index[:sample_size], X_query, n_queries, n_neighbors,
                exact_neighbors, average_time_exact, random_state=0, **params)
        print ('')
        print ('==========================================================')

    # Set labels for LSHForest parameters
    colors = ['c', 'm', 'y']
    legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
                    for color in colors]
    legend_labels = ['n_estimators={n_estimators}, '
                     'n_candidates={n_candidates}'.format(**p)
                     for p in params_list]

    # Plot precision (accuracy vs index size, one curve per setting).
    plt.figure()
    plt.legend(legend_rects, legend_labels,
               loc='upper left')

    for i in range(len(params_list)):
        plt.scatter(n_samples, accuracies[:, i], c=colors[i])
        plt.plot(n_samples, accuracies[:, i], c=colors[i])
    plt.ylim([0, 1.3])
    plt.xlim(np.min(n_samples), np.max(n_samples))
    plt.semilogx()
    plt.ylabel("Precision@10")
    plt.xlabel("Index size")
    plt.grid(which='both')
    plt.title("Precision of first 10 neighbors with index size")

    # Plot speed up (relative to brute force) vs index size.
    plt.figure()
    plt.legend(legend_rects, legend_labels,
               loc='upper left')

    for i in range(len(params_list)):
        plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
        plt.plot(n_samples, speed_ups[:, i], c=colors[i])
    plt.ylim(0, np.max(speed_ups))
    plt.xlim(np.min(n_samples), np.max(n_samples))
    plt.semilogx()
    plt.ylabel("Speed up")
    plt.xlabel("Index size")
    plt.grid(which='both')
    plt.title("Relationship between Speed up and index size")

    plt.show()
neilslater/nn_practice | 01_basic_mlp/mlp_pytorch.py | 1 | 5143 | import numpy as np
import sklearn
import sklearn.datasets
import matplotlib.pyplot as plt
import math
import torch
from torch import nn
def get_data():
    """Sample the two-moons dataset (600 points, noise 0.30) and split it
    400/100/100 into train / validation / test partitions.

    Returns (X_train, y_train, X_cv, y_cv, X_test, y_test) with labels
    reshaped to column vectors.
    """
    X, y = sklearn.datasets.make_moons(600, noise=0.30)
    y = y.reshape([600, 1])
    # Contiguous slices: [0,400) train, [400,500) validation, [500,600) test.
    bounds = ((0, 400), (400, 500), (500, 600))
    parts = []
    for lo, hi in bounds:
        parts.append(X[lo:hi])
        parts.append(y[lo:hi])
    return tuple(parts)
class NN():
def __init__(self, layer_sizes, hidden_activation='relu', l2_reg=0.0005, learning_rate=0.005, momentum=0.9):
layers = []
nlayers = len(layer_sizes) - 1
for l in range(nlayers):
n_in = layer_sizes[l]
n_out = layer_sizes[l+1]
layers.append(nn.Linear(n_in, n_out))
activation = hidden_activation
if l == nlayers - 1:
activation = 'sigmoid'
if activation=='sigmoid':
layers.append(nn.Sigmoid())
elif activation=='tanh':
layers.append(nn.Tanh())
elif activation=='relu':
layers.append(nn.ReLU())
elif activation=='linear':
# Do nothing
layers
else:
raise('Unrecognised activation function {}'.format(activation))
self.model = nn.Sequential(*layers)
self.loss = nn.BCELoss() # TODO: Use BCEWithLogitsLoss()
self.optimiser = torch.optim.SGD(self.model.parameters(), lr=learning_rate,
momentum=momentum, weight_decay=l2_reg, nesterov=True)
def __train_on_batch(self, X_batch, Y_batch):
xt = torch.Tensor(X_batch)
yt = torch.Tensor(Y_batch)
out = self.model(xt)
loss_val = self.loss(out, yt) # Change when using BCEWithLogitsLoss()
loss_val.backward()
self.optimiser.step()
self.optimiser.zero_grad()
batch_accuracy = (yt.byte() == (out > 0.5)).float().mean().item()
return (loss_val.item(), batch_accuracy)
def __fwd_cost(self, X_batch, Y_batch):
xt = torch.Tensor(X_batch)
yt = torch.Tensor(Y_batch)
out = self.model(xt)
loss_val = self.loss(out, yt) # Change when using BCEWithLogitsLoss()
batch_accuracy = (yt.byte() == (out > 0.5)).float().mean()
return (loss_val.item(), batch_accuracy)
def fit(self, X_train, y_train, X_cv, y_cv, epochs, batch_size):
train_accuracies = []
val_accuracies = []
train_costs = []
val_costs = []
nbatches = int(X_train.shape[0]/batch_size)
for epoch in range(epochs):
batch_idx = np.random.permutation(X_train.shape[0])
train_cost = 0.0
train_accuracy = 0.0
for b in range(nbatches):
this_batch = batch_idx[b*batch_size:(b+1)*batch_size]
X_batch = X_train[this_batch, :]
Y_batch = y_train[this_batch, :]
batch_cost, batch_accuracy = self.__train_on_batch(X_batch, Y_batch)
train_cost += batch_cost
train_accuracy += batch_accuracy
train_cost /= nbatches
train_accuracy /= nbatches
val_cost, val_accuracy = self.__fwd_cost(X_cv, y_cv)
train_accuracies.append(train_accuracy)
val_accuracies.append(val_accuracy)
train_costs.append(train_cost)
val_costs.append(val_cost)
return { 'acc': train_accuracies, 'val_acc': val_accuracies,
'cost': train_costs, 'val_cost': val_costs }
def predict_classes(self, X_batch):
A = self.model(torch.Tensor(X_batch))
return np.array(A > 0.5).astype('float') # Probably don't need to convert
def build_model(layer_sizes, hidden_activation='relu', l2_reg=0.0005, learning_rate=0.005, momentum=0.9):
    """Factory wrapper: construct an NN with the given hyper-parameters."""
    hyperparams = dict(hidden_activation=hidden_activation,
                       l2_reg=l2_reg,
                       learning_rate=learning_rate,
                       momentum=momentum)
    return NN(layer_sizes, **hyperparams)
def train_model(model, X_train, y_train, X_cv, y_cv, epochs=200, batch_size=10):
    """Delegate to the model's own fit() and return its history dict."""
    fit_kwargs = {'epochs': epochs, 'batch_size': batch_size}
    return model.fit(X_train, y_train, X_cv, y_cv, **fit_kwargs)
def test_model(model, X_test, y_test):
y_pred = model.predict_classes(X_test)
accuracy = np.mean( (y_pred == y_test).astype('float') )
print("Final accuracy {}".format(accuracy * 100))
def plot_history(history):
    """Plot train vs validation accuracy per epoch from a fit() history dict.

    NOTE(review): the title ('Accuracy') and ylabel ('Accuracy vs Epoch')
    read as if swapped -- kept as written to preserve behaviour.
    """
    plt.plot(history['acc'])
    plt.plot(history['val_acc'])
    plt.title('Accuracy')
    plt.ylabel('Accuracy vs Epoch')
    plt.xlabel('Epoch')
    plt.legend(['train', 'val'], loc='lower right')
    plt.show()
def main():
    """End-to-end demo: load data, train a 2-20-20-1 MLP, evaluate, plot."""
    # Fixed seed so the data split and training run are reproducible.
    np.random.seed(1234)
    X_train, y_train, X_cv, y_cv, X_test, y_test = get_data()
    # Input width follows the data; two hidden ReLU layers of 20 units,
    # sigmoid output for binary classification.
    model = build_model( layer_sizes=[X_train.shape[1], 20, 20, 1],
                         hidden_activation='relu',
                         l2_reg=0.0005,
                         learning_rate=0.005,
                         momentum=0.9 )
    history = train_model(model, X_train, y_train, X_cv, y_cv)
    test_model(model, X_test, y_test)
    plot_history(history)

if __name__ == '__main__':
    main()
| mit |
slinderman/pyhawkes | data/gifs/gif_demo.py | 1 | 5035 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
import pyhawkes.models
import imp
imp.reload(pyhawkes.models)
from pyhawkes.models import DiscreteTimeNetworkHawkesModelSpikeAndSlab
from pyhawkes.internals.network import ErdosRenyiFixedSparsity
from pyhawkes.plotting.plotting import plot_network
np.random.seed(0)
def demo(K=3, T=1000, dt_max=20, p=0.25):
    """
    Generate data from a hand-wired Hawkes network, then fit a fresh
    spike-and-slab model to it with Gibbs sampling, saving one frame per
    iteration into gifs/ for animation.

    :param K: Number of nodes
    :param T: Number of time bins to simulate
    :param dt_max: Number of future time bins an event can influence
    :param p: Sparsity of network
    :return:
    """
    ###########################################################
    # Generate synthetic data
    ###########################################################
    network = ErdosRenyiFixedSparsity(K, p, v=1., allow_self_connections=False)
    bkgd_hypers = {"alpha": 1.0, "beta": 20.0}
    true_model = DiscreteTimeNetworkHawkesModelSpikeAndSlab(
        K=K, dt_max=dt_max, bkgd_hypers=bkgd_hypers, network=network)

    # Hand-wire the true graph: node 0 excites nodes 1 and 2, nothing else.
    A_true = np.zeros((K,K))
    A_true[0,1] = A_true[0,2] = 1
    W_true = np.zeros((K,K))
    W_true[0,1] = W_true[0,2] = 1.0
    true_model.weight_model.A = A_true
    true_model.weight_model.W = W_true
    true_model.bias_model.lambda0[0] = 0.2
    # The process must be stable (non-explosive) before sampling from it.
    assert true_model.check_stability()

    # Sample from the true model
    S,R = true_model.generate(T=T, keep=True, print_interval=50)

    plt.ion()
    true_figure, _ = true_model.plot(color="#377eb8", T_slice=(0,100))

    # Save the true figure
    true_figure.savefig("gifs/true.gif")

    ###########################################################
    # Create a test spike and slab model
    ###########################################################
    test_model = DiscreteTimeNetworkHawkesModelSpikeAndSlab(
        K=K, dt_max=dt_max, network=network)
    test_model.add_data(S)

    # Initialize plots
    test_figure, test_handles = test_model.plot(color="#e41a1c", T_slice=(0,100))
    test_figure.savefig("gifs/test0.gif")

    ###########################################################
    # Fit the test model with Gibbs sampling
    ###########################################################
    N_samples = 100
    samples = []
    lps = []
    for itr in range(N_samples):
        print("Gibbs iteration ", itr)
        test_model.resample_model()
        lps.append(test_model.log_probability())
        samples.append(test_model.copy_sample())

        # Update plots and save one animation frame per iteration.
        test_model.plot(handles=test_handles)
        test_figure.savefig("gifs/test%d.gif" % (itr+1))

    # ###########################################################
    # # Analyze the samples
    # ###########################################################
    # analyze_samples(true_model, samples, lps)
def initialize_plots(true_model, test_model, S):
    """Set up live figures comparing the true and test Hawkes models.

    Parameters
    ----------
    true_model : ground-truth model; provides K, C, compute_rate and
        weight_model (A, W).
    test_model : model being fit; its current rate is drawn in red.
    S : spike array of shape (T, K); only axis 0 (time) is used here.

    Returns
    -------
    ln : Line2D handle for the inferred-rate trace, updated by
        ``update_plots``.
    im_net : BUG(review): never assigned in this function, so the return
        raises NameError. Presumably it should be the image handle of the
        network plot, i.e. ``im_net = plot_network(...)`` — confirm against
        plot_network's return value.
    """
    K = true_model.K
    C = true_model.C  # unused below; kept for parity with update_plots
    R = true_model.compute_rate(S=S)  # true firing rate given the spikes
    T = S.shape[0]
    # Plot the true network
    plt.ion()
    plot_network(true_model.weight_model.A,
                 true_model.weight_model.W)
    plt.pause(0.001)
    # Plot the true and inferred firing rate (neuron 0 only)
    plt.figure(2)
    plt.plot(np.arange(T), R[:,0], '-k', lw=2)  # true rate in black
    plt.ion()
    ln = plt.plot(np.arange(T), test_model.compute_rate()[:,0], '-r')[0]
    plt.show()
    plt.pause(0.001)
    return ln, im_net  # BUG(review): im_net undefined -- see docstring
def update_plots(itr, test_model, S, ln, im_net):
    """Refresh the live rate and weight-matrix figures for one iteration.

    Parameters
    ----------
    itr : int
        Current sampler iteration; shown in the figure titles.
    test_model : model being fit; provides ``compute_rate()`` and
        ``weight_model.W_effective``.
    S : spike array of shape (T, K); only its number of time bins is used.
    ln : Line2D handle for the inferred-rate trace (from initialize_plots).
    im_net : image handle for the effective-weight matrix (figure 4).
    """
    K = test_model.K  # unused; kept for parity with initialize_plots
    C = test_model.C  # unused
    T = S.shape[0]
    # Figure 2: inferred firing rate of neuron 0.
    plt.figure(2)
    ln.set_data(np.arange(T), test_model.compute_rate()[:,0])
    # Raw string: '\l' is an invalid escape sequence in a normal literal
    # (SyntaxWarning on modern Python); the displayed text is unchanged.
    plt.title(r"\lambda_{%d}. Iteration %d" % (0, itr))
    plt.pause(0.001)
    # Figure 4: current effective weight matrix.
    plt.figure(4)
    plt.title("W: Iteration %d" % itr)
    im_net.set_data(test_model.weight_model.W_effective)
    plt.pause(0.001)
def analyze_samples(true_model, samples, lps):
    """Summarize posterior samples: log-probability trace and link AUC.

    The second half of the chain is treated as post-burn-in when forming
    posterior means of the adjacency (A) and weight (W) matrices; each
    sample's A is then scored against the true adjacency with ROC AUC.
    """
    n_samples = len(samples)
    # Stack the per-sample network variables into arrays.
    adj_samples = np.array([smpl.weight_model.A for smpl in samples])
    wgt_samples = np.array([smpl.weight_model.W for smpl in samples])
    lps = np.array(lps)
    # Posterior means over the second half of the chain.
    burn = n_samples // 2
    adj_mean = adj_samples[burn:, ...].mean(axis=0)
    wgt_mean = wgt_samples[burn:, ...].mean(axis=0)
    # Log-probability trace over iterations.
    plt.figure()
    plt.plot(np.arange(n_samples), lps, 'k')
    plt.xlabel("Iteration")
    plt.ylabel("Log probability")
    plt.show()
    # Link-prediction accuracy of the posterior means...
    true_links = true_model.weight_model.A.ravel()
    auc_adj_mean = roc_auc_score(true_links, adj_mean.ravel())
    auc_wgt_mean = roc_auc_score(true_links, wgt_mean.ravel())
    # ...and of every individual sample's adjacency matrix.
    aucs = [roc_auc_score(true_links, adj.ravel()) for adj in adj_samples]
    plt.figure()
    plt.plot(aucs, '-r')
    plt.plot(auc_adj_mean * np.ones_like(aucs), '--r')
    plt.plot(auc_wgt_mean * np.ones_like(aucs), '--b')
    plt.xlabel("Iteration")
    plt.ylabel("Link prediction AUC")
    plt.show()
    plt.ioff()
    plt.show()
demo() | mit |
tbtraltaa/medianshape | medianshape/experiment/median/test.py | 1 | 11430 | # encoding: utf-8
'''
2D Median surface embedded in 3D
--------------------------------
'''
from __future__ import absolute_import
import importlib
import os
import numpy as np
from scipy.spatial import Delaunay
from medianshape.simplicial import pointgen3d, mesh
from medianshape.simplicial.meshgen import meshgen2d, meshgen3d, get_mesh_surface
from medianshape import inout
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import cm
from medianshape.viz import plot2d, plot3d
from distmesh.plotting import axes_simpplot3d
from meshpy.tet import MeshInfo, Options, build
from medianshape.simplicial.utils import boundary_points
def func(x, y, sign=1):
    """Return sin(pi*x) * cos(pi*y); ``sign`` is accepted but unused."""
    sx = np.sin(np.pi * x)
    cy = np.cos(np.pi * y)
    return sx * cy
def _bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0, mux=0.0, muy=0.0,
                      sigmaxy=0.0):
    """Bivariate Gaussian density evaluated on the grid (X, Y).

    Drop-in replacement for ``matplotlib.mlab.bivariate_normal``, which was
    deprecated in matplotlib 2.2 and removed in 3.1, so importing it would
    fail on any current matplotlib.
    """
    Xmu = X - mux
    Ymu = Y - muy
    rho = sigmaxy / (sigmax * sigmay)
    z = (Xmu ** 2 / sigmax ** 2 + Ymu ** 2 / sigmay ** 2 -
         2 * rho * Xmu * Ymu / (sigmax * sigmay))
    denom = 2 * np.pi * sigmax * sigmay * np.sqrt(1 - rho ** 2)
    return np.exp(-z / (2 * (1 - rho ** 2))) / denom

def sample_surf(scale, step=0.2):
    '''
    Return a tuple X, Y, Z with a test surface.

    The surface is a sum of Gaussian bumps ("ups") minus another sum
    ("downs"), sampled on [-4, 4) x [-4, 4) with the given step and then
    rescaled so the X/Y extents are scale[0]/4 and scale[1]/4 of the raw
    grid and max(Z) == scale[2].

    Parameters
    ----------
    scale : sequence of 3 floats -- target x, y, z scaling factors.
    step : float -- grid spacing of the raw sample grid (default 0.2).
    '''
    x = y = np.arange(-4.0, 4.0, step)
    X, Y = np.meshgrid(x, y)
    # Upward bumps
    ZU1 = _bivariate_normal(X, Y, 1.5, 1, 0, -2)
    ZU2 = _bivariate_normal(X, Y, 1.5, 1.5, 4, 1)
    ZU3 = _bivariate_normal(X, Y, 1, 1, -4, 1)
    ZU4 = _bivariate_normal(X, Y, 4, 0.5, 0, -4)
    # Downward bumps
    ZD1 = _bivariate_normal(X, Y, 1.5, 1, 0, 1)
    ZD2 = _bivariate_normal(X, Y, 1.5, 1.5, -4, -2)
    ZD3 = _bivariate_normal(X, Y, 1, 1, 4, -2)
    ZD4 = _bivariate_normal(X, Y, 4, 1, 0, 4)
    Z = ZU1 + ZU2 + ZU3 - ZD1 - ZD2 - ZD3 - ZD4
    Zmax = np.amax(Z)
    # Rescale to the requested extents; Z is normalized so its max is
    # exactly scale[2].
    X = X * scale[0]/4.0
    Y = Y * scale[1]/4.0
    Z = Z/Zmax * scale[2]
    return X, Y, Z
def interpolate_surf(points, values, ipoints, method="cubic"):
    """Interpolate scattered ``values`` at ``ipoints`` using scipy griddata."""
    from scipy.interpolate import griddata
    interpolated = griddata(points, values, ipoints, method=method)
    return interpolated
def surfaces(bbox=[-10,-10,-10, 10,10,10], l=0.5, overlaps =[0.4, 0.7]):
    '''
    Build two sample surfaces inside a bounding box, tetrahedralize them
    with TetGen, and return the mesh plus input-surface indicators.

    NOTE(review): exploratory code. It calls exit() part-way through, so
    everything after the first exit() -- including the meshing and the
    final return -- is currently unreachable dead code, and the dead code
    itself references several undefined names (see NOTE comments below).

    Parameters
    ----------
    bbox : [xmin, ymin, zmin, xmax, ymax, zmax] bounding box.
    l : float -- target 2D mesh edge length.
    overlaps : fractions of the x-extent where vertical overlap lines are
        pinned into the 2D mesh.
    '''
    # Generating point grids for two surfaces
    xmin = bbox[0]
    xmax = bbox[3]
    ymin = bbox[1]
    ymax = bbox[4]
    zmin = bbox[2]  # unused below
    zmax = bbox[5]
    xlen = xmax - xmin
    # y samples along the box edge, with ymax forced to be included.
    y = np.arange(ymin, ymax, l)
    y = np.append(y, ymax)
    # Fixed points along the x = xmin and x = xmax edges.
    xmin_points = np.ndarray((len(y), 2))
    xmin_points[:,0] = xmin
    xmin_points[:, 1] = y
    xmax_points = np.ndarray((len(y), 2))
    xmax_points[:,0] = xmax
    xmax_points[:, 1] = y
    # Vertical lines at the requested overlap fractions of the x-extent.
    xoverlaps = [xmin + xlen*o for o in overlaps]
    xo_points = None
    for i, o in enumerate(xoverlaps):
        xo_tmp = np.ndarray((len(y), 2))
        xo_tmp[:,0] = o
        xo_tmp[:, 1] = y
        if i == 0:
            xo_points = xo_tmp
        else:
            xo_points = np.vstack((xo_points, xo_tmp))
    fixed_points = np.concatenate((xmin_points, xmax_points, xo_points), axis=0)
    print fixed_points
    # 2D triangulation of the box footprint with the fixed points pinned.
    mesh = meshgen2d([xmin, ymin, xmax, ymax], l, fixed_points, include_corners=False)
    #plot2d.plotmesh2d(mesh)
    # Two mirrored bump surfaces, offset vertically by +/- 0.3*zmax.
    X, Y, Z1 = sample_surf([xmax*0.8, ymax*0.8, zmax*0.2])
    Z2 = -Z1 - zmax*0.3
    Z1 = Z1 + zmax*0.3
    #z2 = elevate_surf(mesh.points[:,0], mesh.points[:,1])
    fig = plt.figure()
    ax = fig.gca(projection="3d")
    surf = ax.plot_surface(X, Y, Z1, rstride=1, cstride=1, cmap=cm.winter,
                           linewidth=0, antialiased=False)
    #surf = ax.plot_surface(X, Y, Z2, rstride=1, cstride=1, cmap=cm.autumn,
    #                       linewidth=0, antialiased=False)
    plt.show()
    sample_points = np.hstack((X.reshape(-1,1), Y.reshape(-1,1)))
    fig = plt.figure()
    ax = fig.gca(projection="3d")
    surf = ax.scatter(X, Y, Z1.reshape(-1,1), color='b')
    surf = ax.scatter(X, Y, Z2.reshape(-1,1), color='r')
    plt.show()
    # Resample both surfaces onto the 2D mesh vertices.
    Z1 = interpolate_surf(sample_points, Z1.reshape(-1,1), mesh.points)
    Z2 = interpolate_surf(sample_points, Z2.reshape(-1,1), mesh.points)
    fig = plt.figure()
    ax = fig.gca(projection="3d")
    surf = ax.scatter(mesh.points[:,0], mesh.points[:,1], Z1, color='b')
    surf = ax.scatter(mesh.points[:,0],mesh.points[:,1], Z2, color='r')
    plt.show()
    # Clamp surface 1 to z = 0 along the y boundaries.
    Z1[np.argwhere(mesh.points[:,1]==ymin)] = 0
    Z1[np.argwhere(mesh.points[:,1]==ymax)] = 0
    '''
    for xo in xoverlaps:
        Z1[np.argwhere(mesh.points[:,0]==xo)] = 0
    '''
    fig = plt.figure()
    ax = fig.gca(projection="3d")
    surf = ax.scatter(mesh.points[:,0], mesh.points[:,1], Z1, color='b')
    surf = ax.scatter(mesh.points[:,0],mesh.points[:,1], Z2, color='r')
    plt.show()
    # NOTE(review): execution always stops here; everything below is
    # unreachable dead code kept from earlier experiments.
    exit()
    #surf = ax.scatter(mesh.points[:,0], mesh.points[:,1], z2, color="r")
    ax.set_zlim(-1, 1)
    plt.show()
    # NOTE(review): 'x' is undefined in this scope (dead code).
    X, Y = np.meshgrid(x, y, sparse=False)
    # Z coordinate of surface1
    Z1 = 7*np.sin(X)
    # Z coordinate of surface2
    Z2 = -7*np.sin(X)
    # Illustrating the surfaces
    # Triangulating the surfaces
    s1 = np.concatenate((X.reshape(-1,1), Y.reshape(-1,1), Z1.reshape(-1,1)), axis=1).reshape(-1,3)
    s2 = np.concatenate((X.reshape(-1,1), Y.reshape(-1,1), Z2.reshape(-1,1)), axis=1).reshape(-1,3)
    fig = plt.figure()
    ax = fig.gca(projection="3d")
    surf = ax.scatter(s1[:,0], s1[:,1], s1[:,2])
    surf = ax.scatter(s2[:,0], s2[:,1], s2[:,2])
    ax.set_zlim(-10, 10)
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    plt.show()
    # Boundary vertex indices of each surface.
    leftline1 = np.where(s1[:,0]==xmin)[0]
    rightline1 = np.where(s1[:, 0] == xmax)[0]
    backline1 = np.where(s1[:,1]==xmin)[0]
    frontline1 = np.where(s1[:, 1] == xmax)[0]
    b1 = np.unique(np.concatenate((leftline1, rightline1, backline1, frontline1), axis=0))
    print b1
    leftline2 = np.where(s2[:,0]==xmin)[0]
    rightline2 = np.where(s2[:, 0] == xmax)[0]
    backline2 = np.where(s2[:,1]==xmin)[0]
    frontline2 = np.where(s2[:, 1] == xmax)[0]
    b2 = np.unique(np.concatenate((leftline2, rightline2, backline2, frontline2), axis=0))
    # NOTE(review): 'intersect' is undefined (dead code).
    intersection = np.where(s1[:,0]== intersect)[0]
    closed_boundary = np.concatenate((leftline1, rightline2), axis=0)
    print b2
    print leftline1
    print rightline1
    print leftline2
    print leftline2
    fig = plt.figure()
    ax = fig.gca(projection="3d")
    # NOTE(review): the *line arrays are 1-D index arrays, so the [:,0]
    # indexing below would raise IndexError if this code were reachable.
    surf = ax.scatter(backline1[:,0], backline1[:,1], backline1[:,2])
    surf = ax.scatter(frontline1[:,0],frontline1[:,1],frontline1[:,2])
    surf = ax.scatter(leftline1[:,0], leftline1[:,1], leftline1[:,2])
    surf = ax.scatter(rightline1[:,0], rightline1[:,1], rightline1[:,2])
    surf = ax.scatter(backline2[:,0], backline2[:,1], backline2[:,2])
    surf = ax.scatter(frontline2[:,0],frontline2[:,1],frontline2[:,2])
    surf = ax.scatter(leftline2[:,0], leftline2[:,1], leftline2[:,2])
    surf = ax.scatter(rightline2[:,0], rightline2[:,1], rightline2[:,2])
    ax.set_zlim(-10, 10)
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    plt.show()
    # Planar Delaunay triangulation of each surface (drop the z column).
    s1_complex = Delaunay(s1[:,:-1])
    s2_complex = Delaunay(s2[:,:-1])
    fig = plt.figure()
    ax = fig.gca(projection="3d")
    ax.set_zlim(-10, 10)
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    ax.plot_trisurf(s1[:,0], s1[:,1], s1[:,2], triangles = s1_complex.simplices, cmap=cm.autumn)
    ax.plot_trisurf(s2[:,0], s2[:,1], s2[:,2], triangles = s2_complex.simplices, cmap=cm.winter)
    plt.show()
    exit()
    # Combined point/triangle lists; surface-2 indices are offset past
    # surface-1's points.
    s_points = np.vstack((s1, s2))
    s1_triangles = s1_complex.simplices
    s2_triangles = s2_complex.simplices + len(s1_complex.points)
    # NOTE(review): this local shadows the enclosing function name.
    surfaces = np.vstack((s1_triangles, s2_triangles))
    # Plotting the surfaces
    fig = plt.figure()
    ax = fig.gca(projection="3d")
    ax.set_zlim(-10, 10)
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    ax.plot_trisurf(s_points[:,0], s_points[:,1], s_points[:,2], triangles = s1_triangles, cmap=cm.autumn)
    ax.plot_trisurf(s_points[:,0], s_points[:,1], s_points[:,2], triangles = s2_triangles, cmap=cm.winter)
    plt.show()
    '''
    # Plotting the surfaces
    fig = plt.figure()
    ax = fig.gca(projection="3d")
    ax.set_zlim(-10, 10)
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    ax.plot_trisurf(points[:,0], points[:,1], points[:,2], triangles = surfaces, cmap=cm.autumn)
    plt.show()
    '''
    # Grid Points sampled from boundary box
    corners = boundary_points(bbox)
    box = np.array(bbox).reshape(2, -1)
    # NOTE(review): the loop variables shadow the builtins min/max.
    points = np.mgrid[tuple(slice(min, max+1, 1) for min, max in box.T)]
    points = points.reshape(3, -1).T
    # Points of PLC.
    # Uncomment the line below to include sample poinds in bbox along with the surface points
    #points = np.concatenate((s_points, points), axis=0).reshape(-1,3)
    points = np.concatenate((s_points, corners), axis=0).reshape(-1,3)
    fig = plt.figure()
    ax = fig.gca(projection="3d")
    surf = ax.scatter(points[:,0], points[:,1], points[:,2])
    ax.set_zlim(-12, 12)
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    plt.show()
    # Saving the current PLC to a file in .smesh format
    with open('surf.smesh', 'w') as f:
        f.write('%d %d %d %d\n'%(len(points), 3, 0,0))
        for i, p in enumerate(points):
            f.write("%d %f %f %f\n"%(i, p[0], p[1], p[2]))
        f.write('%d %d\n'%(len(surfaces), 0))
        for t in surfaces:
            f.write("%d %d %d %d\n"%(3, t[0], t[1], t[2]))
        f.write("%d\n"%0)
        f.write("%d\n"%0)
    np.savetxt("surface.face", surfaces)
    np.savetxt("points.node", points)
    # Build the mesh using Tetgen
    mesh_info = MeshInfo()
    mesh_info.set_points(points.tolist())
    mesh_info.set_facets((surfaces.reshape(-1, 3)).tolist())
    print len(mesh_info.facets)
    opts = Options("YVfeq", verbose=True, nobisect=True, facesout=True, edgesout=True, docheck=True) # Overriding 'pq' with no options or flags
    #opts = Options("", verbose=True, nobisect=True, facesout=True, edgesout=True, docheck=True, insertaddpoints=True) # Overriding 'pq' with no options or flags
    mesh = build(mesh_info, options=opts, volume_constraints=True, max_volume=1)
    # Plot the mesh
    ax = plt.gca(projection='3d')
    fig = ax.figure
    m1 = np.amin(bbox[0:3])
    m2 = np.amax(bbox[3:])
    ax.set_xlim([m1, m2])
    ax.set_ylim([m1, m2])
    ax.set_zlim([m1, m2])
    ax.set_aspect('equal')
    axes_simpplot3d(ax, np.array(list(mesh.points)), np.array(list(mesh.elements)))
    plt.show()
    # Write it as a file in vtk format, so you can use Paraview to see it.
    mesh.write_vtk("test.vtk")
    # Mark which tet-mesh faces belong to each input surface.
    input_surfaces = np.zeros((2,len(mesh.faces)))
    # NOTE(review): list() takes a single iterable; this would raise
    # TypeError -- probably meant [s1_triangles, s2_triangles].
    inputs = list (s1_triangles, s2_triangles)
    for i, s in enumerate(inputs):
        for j, t in enumerate(np.array(mesh.faces)):
            if np.all(s==t):
                input_surfaces[i,j] = 1
    lambdas = [0.001]
    mus = [0.00001]
    # NOTE(review): Mesh3D is not imported in this module.
    mesh1 = Mesh3D()
    mesh1.simplices = np.array(mesh.elements)
    mesh1.triangles = np.array(mesh.faces)
    mesh1.edges = np.array(mesh.edges)
    mesh1.points = np.array(mesh.points)
    return mesh1, mesh1.simplices, mesh1.triangles, input_surfaces, lambdas, mus
# Script entry point: run the surface-generation experiment.
if __name__ == "__main__":
    surfaces()
| gpl-3.0 |
wronk/mne-python | mne/viz/epochs.py | 1 | 62947 | """Functions to plot epochs data
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Jaakko Leppakangas <jaeilepp@student.jyu.fi>
#
# License: Simplified BSD
from functools import partial
import copy
import numpy as np
from ..utils import verbose, get_config, set_config, logger, warn
from ..io.pick import pick_types, channel_type
from ..io.proj import setup_proj
from ..fixes import Counter, _in1d
from ..time_frequency import psd_multitaper
from .utils import (tight_layout, figure_nobar, _toggle_proj, _toggle_options,
_layout_figure, _setup_vmin_vmax, _channels_changed,
_plot_raw_onscroll, _onclick_help, plt_show)
from ..defaults import _handle_default
def plot_epochs_image(epochs, picks=None, sigma=0., vmin=None,
                      vmax=None, colorbar=True, order=None, show=True,
                      units=None, scalings=None, cmap='RdBu_r',
                      fig=None, overlay_times=None):
    """Plot Event Related Potential / Fields image

    Parameters
    ----------
    epochs : instance of Epochs
        The epochs.
    picks : int | array-like of int | None
        The indices of the channels to consider. If None, the first
        five good channels are plotted.
    sigma : float
        The standard deviation of the Gaussian smoothing to apply along
        the epoch axis to apply in the image. If 0., no smoothing is applied.
    vmin : float
        The min value in the image. The unit is uV for EEG channels,
        fT for magnetometers and fT/cm for gradiometers.
    vmax : float
        The max value in the image. The unit is uV for EEG channels,
        fT for magnetometers and fT/cm for gradiometers.
    colorbar : bool
        Display or not a colorbar.
    order : None | array of int | callable
        If not None, order is used to reorder the epochs on the y-axis
        of the image. If it's an array of int it should be of length
        the number of good epochs. If it's a callable the arguments
        passed are the times vector and the data as 2d array
        (data.shape[1] == len(times).
    show : bool
        Show figure if True.
    units : dict | None
        The units of the channel types used for axes lables. If None,
        defaults to `units=dict(eeg='uV', grad='fT/cm', mag='fT')`.
    scalings : dict | None
        The scalings of the channel types to be applied for plotting.
        If None, defaults to `scalings=dict(eeg=1e6, grad=1e13, mag=1e15,
        eog=1e6)`.
    cmap : matplotlib colormap
        Colormap.
    fig : matplotlib figure | None
        Figure instance to draw the image to. Figure must contain two axes for
        drawing the single trials and evoked responses. If None a new figure is
        created. Defaults to None.
    overlay_times : array-like, shape (n_epochs,) | None
        If not None the parameter is interpreted as time instants in seconds
        and is added to the image. It is typically useful to display reaction
        times. Note that it is defined with respect to the order
        of epochs such that overlay_times[0] corresponds to epochs[0].

    Returns
    -------
    figs : lists of matplotlib figures
        One figure per channel displayed.
    """
    from scipy import ndimage
    units = _handle_default('units', units)
    scalings = _handle_default('scalings', scalings)
    import matplotlib.pyplot as plt
    if picks is None:
        picks = pick_types(epochs.info, meg=True, eeg=True, ref_meg=False,
                           exclude='bads')[:5]
    if set(units.keys()) != set(scalings.keys()):
        raise ValueError('Scalings and units must have the same keys.')
    picks = np.atleast_1d(picks)
    if fig is not None and len(picks) > 1:
        raise ValueError('Only single pick can be drawn to a figure.')
    evoked = epochs.average(picks)
    data = epochs.get_data()[:, picks, :]
    # Remember whether the color limits were auto-computed: they then get
    # rescaled into each channel type's display units inside the loop.
    scale_vmin = True if vmin is None else False
    scale_vmax = True if vmax is None else False
    vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
    if overlay_times is not None and len(overlay_times) != len(data):
        raise ValueError('size of overlay_times parameter (%s) do not '
                         'match the number of epochs (%s).'
                         % (len(overlay_times), len(data)))
    if overlay_times is not None:
        overlay_times = np.array(overlay_times)
        times_min = np.min(overlay_times)
        times_max = np.max(overlay_times)
        if ((times_min < epochs.tmin) or (times_max > epochs.tmax)):
            warn('Some values in overlay_times fall outside of the epochs '
                 'time interval (between %s s and %s s)'
                 % (epochs.tmin, epochs.tmax))
    figs = list()
    for i, (this_data, idx) in enumerate(zip(np.swapaxes(data, 0, 1), picks)):
        if fig is None:
            this_fig = plt.figure()
        else:
            this_fig = fig
        figs.append(this_fig)
        ch_type = channel_type(epochs.info, idx)
        if ch_type not in scalings:
            # We know it's not in either scalings or units since keys match
            raise KeyError('%s type not in scalings and units' % ch_type)
        this_data *= scalings[ch_type]
        this_order = order
        if callable(order):
            this_order = order(epochs.times, this_data)
        if this_order is not None and (len(this_order) != len(this_data)):
            raise ValueError('size of order parameter (%s) does not '
                             'match the number of epochs (%s).'
                             % (len(this_order), len(this_data)))
        this_overlay_times = None
        if overlay_times is not None:
            this_overlay_times = overlay_times
        if this_order is not None:
            this_order = np.asarray(this_order)
            this_data = this_data[this_order]
            if this_overlay_times is not None:
                this_overlay_times = this_overlay_times[this_order]
        if sigma > 0.:
            this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma,
                                                  axis=0)
        plt.figure(this_fig.number)
        ax1 = plt.subplot2grid((3, 10), (0, 0), colspan=9, rowspan=2)
        # Scale auto-computed limits into this channel type's units WITHOUT
        # mutating vmin/vmax: the previous in-place ``vmin *= ...`` form
        # compounded the scaling on every loop iteration, corrupting the
        # color limits whenever more than one channel was plotted.
        this_vmin = vmin * scalings[ch_type] if scale_vmin else vmin
        this_vmax = vmax * scalings[ch_type] if scale_vmax else vmax
        im = ax1.imshow(this_data,
                        extent=[1e3 * epochs.times[0], 1e3 * epochs.times[-1],
                                0, len(data)],
                        aspect='auto', origin='lower', interpolation='nearest',
                        vmin=this_vmin, vmax=this_vmax, cmap=cmap)
        if this_overlay_times is not None:
            plt.plot(1e3 * this_overlay_times, 0.5 + np.arange(len(this_data)),
                     'k', linewidth=2)
        ax2 = plt.subplot2grid((3, 10), (2, 0), colspan=9, rowspan=1)
        if colorbar:
            ax3 = plt.subplot2grid((3, 10), (0, 9), colspan=1, rowspan=3)
        ax1.set_title(epochs.ch_names[idx])
        ax1.set_ylabel('Epochs')
        ax1.axis('auto')
        ax1.axis('tight')
        ax1.axvline(0, color='m', linewidth=3, linestyle='--')
        evoked_data = scalings[ch_type] * evoked.data[i]
        ax2.plot(1e3 * evoked.times, evoked_data)
        ax2.set_xlabel('Time (ms)')
        ax2.set_xlim([1e3 * evoked.times[0], 1e3 * evoked.times[-1]])
        ax2.set_ylabel(units[ch_type])
        # Symmetrize the evoked y-limits when they were auto-computed.
        evoked_vmin = min(evoked_data) * 1.1 if scale_vmin else this_vmin
        evoked_vmax = max(evoked_data) * 1.1 if scale_vmax else this_vmax
        if scale_vmin or scale_vmax:
            evoked_vmax = max(np.abs([evoked_vmax, evoked_vmin]))
            evoked_vmin = -evoked_vmax
        ax2.set_ylim([evoked_vmin, evoked_vmax])
        ax2.axvline(0, color='m', linewidth=3, linestyle='--')
        if colorbar:
            plt.colorbar(im, cax=ax3)
            tight_layout(fig=this_fig)
    plt_show(show)
    return figs
def plot_drop_log(drop_log, threshold=0, n_max_plot=20, subject='Unknown',
                  color=(0.9, 0.9, 0.9), width=0.8, ignore=('IGNORED',),
                  show=True):
    """Show the channel stats based on a drop_log from Epochs

    Parameters
    ----------
    drop_log : list of lists
        Epoch drop log from Epochs.drop_log.
    threshold : float
        The percentage threshold to use to decide whether or not to
        plot. Default is zero (always plot).
    n_max_plot : int
        Maximum number of channels to show stats for.
    subject : str
        The subject name to use in the title of the plot.
    color : tuple | str
        Color to use for the bars.
    width : float
        Width of the bars.
    ignore : list
        The drop reasons to ignore.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : Instance of matplotlib.figure.Figure
        The figure.
    """
    import matplotlib.pyplot as plt
    from ..epochs import _drop_log_stats
    perc = _drop_log_stats(drop_log, ignore)
    # Per-channel drop counts, ignoring the requested reasons.
    scores = Counter(ch for entry in drop_log for ch in entry
                     if ch not in ignore)
    ch_names = np.array(list(scores.keys()))
    fig = plt.figure()
    # Nothing worth plotting: below threshold or no channel ever dropped.
    if perc < threshold or len(ch_names) == 0:
        plt.text(0, 0, 'No drops')
        return fig
    # Number of epochs that were not ignored (kept epochs have empty
    # entries; dropped-for-a-real-reason epochs have a non-ignored reason).
    n_used = sum(1 for entry in drop_log
                 if len(entry) == 0 or any(ch not in ignore for ch in entry))
    counts = 100 * np.array(list(scores.values()), dtype=float) / n_used
    n_plot = min(n_max_plot, len(ch_names))
    order = np.flipud(np.argsort(counts))  # worst offenders first
    plt.title('%s: %0.1f%%' % (subject, perc))
    x = np.arange(n_plot)
    plt.bar(x, counts[order[:n_plot]], color=color, width=width)
    plt.xticks(x + width / 2.0, ch_names[order[:n_plot]], rotation=45,
               horizontalalignment='right')
    plt.tick_params(axis='x', which='major', labelsize=10)
    plt.ylabel('% of epochs rejected')
    plt.xlim((-width / 2.0, (n_plot - 1) + width * 3 / 2))
    plt.grid(True, axis='y')
    tight_layout(pad=1, fig=fig)
    plt_show(show)
    return fig
def _draw_epochs_axes(epoch_idx, good_ch_idx, bad_ch_idx, data, times, axes,
                      title_str, axes_handler):
    """Redraw one page of per-epoch axes with the given epochs' traces.

    Parameters
    ----------
    epoch_idx : indices of the epochs to draw, one per axes.
    good_ch_idx / bad_ch_idx : channel index arrays; bad_ch_idx may be None.
    data : array of epoch data, indexed per epoch then per channel.
    times : x-axis values shared by every trace.
    axes : the matplotlib axes, one per epoch shown.
    title_str : format string taking the epoch index, or None for no title.
    axes_handler : rotating deque of page keys; element 0 is the current
        page. Reject state is stored per page in each axes' __dict__
        (via vars(ax)) -- presumably set up by the caller; confirm there.
    """
    this = axes_handler[0]
    for ii, data_, ax in zip(epoch_idx, data, axes):
        # Update good-channel traces in place (one Line2D per channel).
        for l, d in zip(ax.lines, data_[good_ch_idx]):
            l.set_data(times, d)
        if bad_ch_idx is not None:
            bad_lines = [ax.lines[k] for k in bad_ch_idx]
            for l, d in zip(bad_lines, data_[bad_ch_idx]):
                l.set_data(times, d)
        if title_str is not None:
            ax.set_title(title_str % ii, fontsize=12)
        # Common y-range across the whole page; hide ticks.
        ax.set_ylim(data.min(), data.max())
        ax.set_yticks(list())
        ax.set_xticks(list())
        if vars(ax)[this]['reject'] is True:
            # memorizing reject: grey out every trace on this axes
            for l in ax.lines:
                l.set_color((0.8, 0.8, 0.8))
            ax.get_figure().canvas.draw()
        else:
            # forgetting previous reject: if any OTHER page left this axes
            # greyed out, restore good channels to black and bad to red
            for k in axes_handler:
                if k == this:
                    continue
                if vars(ax).get(k, {}).get('reject', None) is True:
                    for l in ax.lines[:len(good_ch_idx)]:
                        l.set_color('k')
                    if bad_ch_idx is not None:
                        for l in ax.lines[-len(bad_ch_idx):]:
                            l.set_color('r')
                    ax.get_figure().canvas.draw()
                    break
def _epochs_navigation_onclick(event, params):
    """Handle clicks on the back/next/reject-quit navigation buttons.

    ``params`` is the shared GUI state dict: it holds the button axes
    ('back', 'next', 'reject-quit'), the rotating deques 'idx_handler'
    and 'axes_handler' that implement paging, the epoch 'data', and the
    list of epochs marked for rejection ('reject_idx').
    """
    import matplotlib.pyplot as plt
    p = params
    here = None
    if event.inaxes == p['back'].ax:
        here = 1   # rotate one page backward
    elif event.inaxes == p['next'].ax:
        here = -1  # rotate one page forward
    elif event.inaxes == p['reject-quit'].ax:
        # Drop all epochs marked so far, then close both figures.
        if p['reject_idx']:
            p['epochs'].drop(p['reject_idx'])
        plt.close(p['fig'])
        plt.close(event.inaxes.get_figure())
    if here is not None:
        # Rotate the page deques in lockstep, then redraw the new page.
        p['idx_handler'].rotate(here)
        p['axes_handler'].rotate(here)
        this_idx = p['idx_handler'][0]
        _draw_epochs_axes(this_idx, p['good_ch_idx'], p['bad_ch_idx'],
                          p['data'][this_idx],
                          p['times'], p['axes'], p['title_str'],
                          p['axes_handler'])
        # XXX don't ask me why
        p['axes'][0].get_figure().canvas.draw()
def _epochs_axes_onclick(event, params):
    """Toggle an epoch's reject state when its axes is clicked.

    The per-axes reject flag for the current page lives in the axes'
    __dict__ (vars(ax)[page_key]); 'reject_idx' in ``params`` accumulates
    the epoch indices to drop on quit. Rejected epochs are greyed out,
    restored epochs get good channels in black and bad channels in red.
    """
    reject_color = (0.8, 0.8, 0.8)
    ax = event.inaxes
    if event.inaxes is None:
        return
    p = params
    # State of this axes for the currently shown page.
    here = vars(ax)[p['axes_handler'][0]]
    if here.get('reject', None) is False:
        # Mark for rejection: record the index and grey out all traces.
        idx = here['idx']
        if idx not in p['reject_idx']:
            p['reject_idx'].append(idx)
            for l in ax.lines:
                l.set_color(reject_color)
            here['reject'] = True
    elif here.get('reject', None) is True:
        # Un-reject: remove the index and restore the trace colors.
        idx = here['idx']
        if idx in p['reject_idx']:
            p['reject_idx'].pop(p['reject_idx'].index(idx))
            good_lines = [ax.lines[k] for k in p['good_ch_idx']]
            for l in good_lines:
                l.set_color('k')
            if p['bad_ch_idx'] is not None:
                bad_lines = ax.lines[-len(p['bad_ch_idx']):]
                for l in bad_lines:
                    l.set_color('r')
            here['reject'] = False
    ax.get_figure().canvas.draw()
def plot_epochs(epochs, picks=None, scalings=None, n_epochs=20,
                n_channels=20, title=None, show=True, block=False):
    """ Visualize epochs

    Bad epochs can be marked with a left click on top of the epoch. Bad
    channels can be selected by clicking the channel name on the left side of
    the main axes. Calling this function drops all the selected bad epochs as
    well as bad epochs marked beforehand with rejection parameters.

    Parameters
    ----------
    epochs : instance of Epochs
        The epochs object
    picks : array-like of int | None
        Channels to be included. If None only good data channels are used.
        Defaults to None
    scalings : dict | None
        Scale factors for the traces. If None, defaults to::

            dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
                 emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1, resp=1, chpi=1e-4)

    n_epochs : int
        The number of epochs per view. Defaults to 20.
    n_channels : int
        The number of channels per view. Defaults to 20.
    title : str | None
        The title of the window. If None, epochs name will be displayed.
        Defaults to None.
    show : bool
        Show figure if True. Defaults to True
    block : bool
        Whether to halt program execution until the figure is closed.
        Useful for rejecting bad trials on the fly by clicking on an epoch.
        Defaults to False.

    Returns
    -------
    fig : Instance of matplotlib.figure.Figure
        The figure.

    Notes
    -----
    The arrow keys (up/down/left/right) can be used to navigate between
    channels and epochs and the scaling can be adjusted with - and + (or =)
    keys, but this depends on the backend matplotlib is configured to use
    (e.g., mpl.use(``TkAgg``) should work). Full screen mode can be toggled
    with f11 key. The amount of epochs and channels per view can be adjusted
    with home/end and page down/page up keys. Butterfly plot can be toggled
    with ``b`` key. Right mouse click adds a vertical line to the plot.
    """
    # Apply any pending rejection parameters before display.
    epochs.drop_bad()
    scalings = _handle_default('scalings_plot_raw', scalings)
    projs = epochs.info['projs']
    # Shared mutable state passed to every callback; 'info' is deep-copied
    # so interactive changes do not alter the user's epochs.info.
    params = {'epochs': epochs,
              'info': copy.deepcopy(epochs.info),
              'bad_color': (0.8, 0.8, 0.8),
              't_start': 0,
              'histogram': None}
    # _pick_bad_channels is a module helper defined elsewhere in this file.
    params['label_click_fun'] = partial(_pick_bad_channels, params=params)
    _prepare_mne_browse_epochs(params, projs, n_channels, n_epochs, scalings,
                               title, picks)
    _prepare_projectors(params)
    _layout_figure(params)
    callback_close = partial(_close_event, params=params)
    params['fig'].canvas.mpl_connect('close_event', callback_close)
    try:
        plt_show(show, block=block)
    except TypeError:  # not all versions have this
        plt_show(show)
    return params['fig']
@verbose
def plot_epochs_psd(epochs, fmin=0, fmax=np.inf, tmin=None, tmax=None,
                    proj=False, bandwidth=None, adaptive=False, low_bias=True,
                    normalization='length', picks=None, ax=None, color='black',
                    area_mode='std', area_alpha=0.33, dB=True, n_jobs=1,
                    show=True, verbose=None):
    """Plot the power spectral density across epochs

    Parameters
    ----------
    epochs : instance of Epochs
        The epochs object
    fmin : float
        Start frequency to consider.
    fmax : float
        End frequency to consider.
    tmin : float | None
        Start time to consider.
    tmax : float | None
        End time to consider.
    proj : bool
        Apply projection.
    bandwidth : float
        The bandwidth of the multi taper windowing function in Hz. The default
        value is a window half-bandwidth of 4.
    adaptive : bool
        Use adaptive weights to combine the tapered spectra into PSD
        (slow, use n_jobs >> 1 to speed up computation).
    low_bias : bool
        Only use tapers with more than 90% spectral concentration within
        bandwidth.
    normalization : str
        Either "full" or "length" (default). If "full", the PSD will
        be normalized by the sampling rate as well as the length of
        the signal (as in nitime).
    picks : array-like of int | None
        List of channels to use.
    ax : instance of matplotlib Axes | None
        Axes to plot into. If None, axes will be created.
    color : str | tuple
        A matplotlib-compatible color to use.
    area_mode : str | None
        Mode for plotting area. If 'std', the mean +/- 1 STD (across channels)
        will be plotted. If 'range', the min and max (across channels) will be
        plotted. Bad channels will be excluded from these calculations.
        If None, no area will be plotted.
    area_alpha : float
        Alpha for the area.
    dB : bool
        If True, transform data to decibels.
    n_jobs : int
        Number of jobs to run in parallel.
    show : bool
        Show figure if True.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    fig : instance of matplotlib figure
        Figure distributing one image per channel across sensor topography.
    """
    from .raw import _set_psd_plot_params
    # One (picks, title, axes) triple per channel-type group.
    fig, picks_list, titles_list, ax_list, make_label = _set_psd_plot_params(
        epochs.info, proj, picks, ax, area_mode)
    for ii, (picks, title, ax) in enumerate(zip(picks_list, titles_list,
                                                ax_list)):
        psds, freqs = psd_multitaper(epochs, picks=picks, fmin=fmin,
                                     fmax=fmax, tmin=tmin, tmax=tmax,
                                     bandwidth=bandwidth, adaptive=adaptive,
                                     low_bias=low_bias,
                                     normalization=normalization, proj=proj,
                                     n_jobs=n_jobs)
        # Convert PSDs to dB
        if dB:
            psds = 10 * np.log10(psds)
            unit = 'dB'
        else:
            unit = 'power'
        # mean across epochs and channels
        psd_mean = np.mean(psds, axis=0).mean(axis=0)
        if area_mode == 'std':
            # std across channels (after averaging over epochs)
            psd_std = np.std(np.mean(psds, axis=0), axis=0)
            hyp_limits = (psd_mean - psd_std, psd_mean + psd_std)
        elif area_mode == 'range':
            hyp_limits = (np.min(np.mean(psds, axis=0), axis=0),
                          np.max(np.mean(psds, axis=0), axis=0))
        else:  # area_mode is None
            hyp_limits = None
        ax.plot(freqs, psd_mean, color=color)
        if hyp_limits is not None:
            # Shaded band around the mean PSD.
            ax.fill_between(freqs, hyp_limits[0], y2=hyp_limits[1],
                            color=color, alpha=area_alpha)
        if make_label:
            # Only label the shared x axis on the bottom plot and the
            # shared y axis on the middle plot.
            if ii == len(picks_list) - 1:
                ax.set_xlabel('Freq (Hz)')
            if ii == len(picks_list) // 2:
                ax.set_ylabel('Power Spectral Density (%s/Hz)' % unit)
            ax.set_title(title)
            ax.set_xlim(freqs[0], freqs[-1])
    if make_label:
        tight_layout(pad=0.1, h_pad=0.1, w_pad=0.1, fig=fig)
    plt_show(show)
    return fig
def _prepare_mne_browse_epochs(params, projs, n_channels, n_epochs, scalings,
                               title, picks, order=None):
    """Helper for setting up the mne_browse_epochs window.

    Populates ``params`` in place with the figure, axes, scrollbars, line
    collections and bookkeeping state used by the interactive epochs
    browser, then connects the scroll/click/key/resize/pick callbacks.

    Parameters
    ----------
    params : dict
        Shared state dict; must already contain 'epochs', 'info' and
        'bad_color'.
    projs : list
        SSP projectors to expose via the projector dialog.
    n_channels : int
        Number of channels visible at once (capped at len(picks)).
    n_epochs : int
        Number of epochs visible at once (capped at the number of events).
    scalings : dict
        Scaling factors per channel type.
    title : str | None
        Window title; falls back to ``epochs.name`` when None.
    picks : array-like of int | None
        Channels to show; defaults to the data channels (see _handle_picks).
    order : list of str | None
        Channel-type ordering for the vertical layout; grad/mag always
        come first, this list orders the remaining types.
    """
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    from matplotlib.collections import LineCollection
    from matplotlib.colors import colorConverter
    epochs = params['epochs']
    if picks is None:
        picks = _handle_picks(epochs)
    if len(picks) < 1:
        raise RuntimeError('No appropriate channels found. Please'
                           ' check your picks')
    picks = sorted(picks)
    # Reorganize channels: group picked channel indices by type so that
    # channels of the same type are displayed together (grad/mag first).
    inds = list()
    types = list()
    for t in ['grad', 'mag']:
        idxs = pick_types(params['info'], meg=t, ref_meg=False, exclude=[])
        if len(idxs) < 1:
            continue
        mask = _in1d(idxs, picks, assume_unique=True)
        inds.append(idxs[mask])
        types += [t] * len(inds[-1])
    pick_kwargs = dict(meg=False, ref_meg=False, exclude=[])
    if order is None:
        order = ['eeg', 'seeg', 'ecog', 'eog', 'ecg', 'emg', 'ref_meg', 'stim',
                 'resp', 'misc', 'chpi', 'syst', 'ias', 'exci']
    for ch_type in order:
        # Enable exactly one type per pick_types call, then switch it off.
        pick_kwargs[ch_type] = True
        idxs = pick_types(params['info'], **pick_kwargs)
        if len(idxs) < 1:
            continue
        mask = _in1d(idxs, picks, assume_unique=True)
        inds.append(idxs[mask])
        types += [ch_type] * len(inds[-1])
        pick_kwargs[ch_type] = False
    inds = np.concatenate(inds).astype(int)
    # Every picked channel must have been classified into some type group.
    if not len(inds) == len(picks):
        raise RuntimeError('Some channels not classified. Please'
                           ' check your picks')
    ch_names = [params['info']['ch_names'][x] for x in inds]

    # set up plotting
    size = get_config('MNE_BROWSE_RAW_SIZE')
    n_epochs = min(n_epochs, len(epochs.events))
    duration = len(epochs.times) * n_epochs
    n_channels = min(n_channels, len(picks))
    if size is not None:
        # Stored as a "width,height" string in the config.
        size = size.split(',')
        size = tuple(float(s) for s in size)
    if title is None:
        title = epochs.name
        if epochs.name is None or len(title) == 0:
            title = ''
    fig = figure_nobar(facecolor='w', figsize=size, dpi=80)
    fig.canvas.set_window_title('mne_browse_epochs')
    # Main trace axes with a secondary top axis (ax2) for event ids.
    ax = plt.subplot2grid((10, 15), (0, 1), colspan=13, rowspan=9)
    ax.annotate(title, xy=(0.5, 1), xytext=(0, ax.get_ylim()[1] + 15),
                ha='center', va='bottom', size=12, xycoords='axes fraction',
                textcoords='offset points')
    color = _handle_default('color', None)
    ax.axis([0, duration, 0, 200])
    ax2 = ax.twiny()
    ax2.set_zorder(-1)
    ax2.axis([0, duration, 0, 200])
    # Horizontal (epochs) and vertical (channels) scrollbar axes.
    ax_hscroll = plt.subplot2grid((10, 15), (9, 1), colspan=13)
    ax_hscroll.get_yaxis().set_visible(False)
    ax_hscroll.set_xlabel('Epochs')
    ax_vscroll = plt.subplot2grid((10, 15), (0, 14), rowspan=9)
    ax_vscroll.set_axis_off()
    # White background rectangle behind the per-channel patches.
    ax_vscroll.add_patch(mpl.patches.Rectangle((0, 0), 1, len(picks),
                                               facecolor='w', zorder=3))
    ax_help_button = plt.subplot2grid((10, 15), (9, 0), colspan=1)
    help_button = mpl.widgets.Button(ax_help_button, 'Help')
    help_button.on_clicked(partial(_onclick_help, params=params))
    # populate vertical and horizontal scrollbars
    for ci in range(len(picks)):
        if ch_names[ci] in params['info']['bads']:
            this_color = params['bad_color']
        else:
            this_color = color[types[ci]]
        ax_vscroll.add_patch(mpl.patches.Rectangle((0, ci), 1, 1,
                                                   facecolor=this_color,
                                                   edgecolor=this_color,
                                                   zorder=4))
    # Semi-transparent patch showing the currently visible channel window.
    vsel_patch = mpl.patches.Rectangle((0, 0), 1, n_channels, alpha=0.5,
                                       edgecolor='w', facecolor='w', zorder=5)
    ax_vscroll.add_patch(vsel_patch)
    ax_vscroll.set_ylim(len(types), 0)
    ax_vscroll.set_title('Ch.')

    # populate colors list: one RGBA color per (channel, epoch) so epochs
    # can be recolored individually when marked bad.
    type_colors = [colorConverter.to_rgba(color[c]) for c in types]
    colors = list()
    for color_idx in range(len(type_colors)):
        colors.append([type_colors[color_idx]] * len(epochs.events))
    lines = list()
    n_times = len(epochs.times)

    for ch_idx in range(n_channels):
        if len(colors) - 1 < ch_idx:
            break
        lc = LineCollection(list(), antialiased=False, linewidths=0.5,
                            zorder=3, picker=3.)
        ax.add_collection(lc)
        lines.append(lc)

    times = epochs.times
    data = np.zeros((params['info']['nchan'], len(times) * n_epochs))

    ylim = (25., 0.)  # Hardcoded 25 because butterfly has max 5 rows (5*5=25).
    # make shells for plotting traces
    offset = ylim[0] / n_channels
    offsets = np.arange(n_channels) * offset + (offset / 2.)

    # 'times' becomes the sample axis over the concatenated epochs.
    times = np.arange(len(times) * len(epochs.events))
    epoch_times = np.arange(0, len(times), n_times)

    ax.set_yticks(offsets)
    ax.set_ylim(ylim)
    ticks = epoch_times + 0.5 * n_times
    ax.set_xticks(ticks)
    ax2.set_xticks(ticks[:n_epochs])
    labels = list(range(1, len(ticks) + 1))  # epoch numbers
    ax.set_xticklabels(labels)
    ax2.set_xticklabels(labels)
    xlim = epoch_times[-1] + len(epochs.times)
    ax_hscroll.set_xlim(0, xlim)
    vertline_t = ax_hscroll.text(0, 1, '', color='y', va='bottom', ha='right')

    # fit horizontal scroll bar ticks
    hscroll_ticks = np.arange(0, xlim, xlim / 7.0)
    hscroll_ticks = np.append(hscroll_ticks, epoch_times[-1])
    hticks = list()
    for tick in hscroll_ticks:
        # Snap every tick to the nearest epoch start.
        hticks.append(epoch_times.flat[np.abs(epoch_times - tick).argmin()])
    hlabels = [x / n_times + 1 for x in hticks]
    ax_hscroll.set_xticks(hticks)
    ax_hscroll.set_xticklabels(hlabels)

    for epoch_idx in range(len(epoch_times)):
        ax_hscroll.add_patch(mpl.patches.Rectangle((epoch_idx * n_times, 0),
                                                   n_times, 1, facecolor='w',
                                                   edgecolor='w', alpha=0.6))
    # Patch showing the currently visible epoch window.
    hsel_patch = mpl.patches.Rectangle((0, 0), duration, 1,
                                       edgecolor='k',
                                       facecolor=(0.75, 0.75, 0.75),
                                       alpha=0.25, linewidth=1, clip_on=False)
    ax_hscroll.add_patch(hsel_patch)
    # Hidden text used to show a channel name on pick in butterfly mode.
    text = ax.text(0, 0, 'blank', zorder=3, verticalalignment='baseline',
                   ha='left', fontweight='bold')
    text.set_visible(False)

    # Stash everything the callbacks need in the shared params dict.
    params.update({'fig': fig,
                   'ax': ax,
                   'ax2': ax2,
                   'ax_hscroll': ax_hscroll,
                   'ax_vscroll': ax_vscroll,
                   'vsel_patch': vsel_patch,
                   'hsel_patch': hsel_patch,
                   'lines': lines,
                   'projs': projs,
                   'ch_names': ch_names,
                   'n_channels': n_channels,
                   'n_epochs': n_epochs,
                   'scalings': scalings,
                   'duration': duration,
                   'ch_start': 0,
                   'colors': colors,
                   'def_colors': type_colors,  # don't change at runtime
                   'picks': picks,
                   'bads': np.array(list(), dtype=int),
                   'data': data,
                   'times': times,
                   'epoch_times': epoch_times,
                   'offsets': offsets,
                   'labels': labels,
                   'scale_factor': 1.0,
                   'butterfly_scale': 1.0,
                   'fig_proj': None,
                   'types': np.array(types),
                   'inds': inds,
                   'vert_lines': list(),
                   'vertline_t': vertline_t,
                   'butterfly': False,
                   'text': text,
                   'ax_help_button': ax_help_button,  # needed for positioning
                   'help_button': help_button,  # reference needed for clicks
                   'fig_options': None,
                   'settings': [True, True, True, True],
                   'image_plot': None})

    params['plot_fun'] = partial(_plot_traces, params=params)

    # callbacks
    callback_scroll = partial(_plot_onscroll, params=params)
    fig.canvas.mpl_connect('scroll_event', callback_scroll)
    callback_click = partial(_mouse_click, params=params)
    fig.canvas.mpl_connect('button_press_event', callback_click)
    callback_key = partial(_plot_onkey, params=params)
    fig.canvas.mpl_connect('key_press_event', callback_key)
    callback_resize = partial(_resize_event, params=params)
    fig.canvas.mpl_connect('resize_event', callback_resize)
    fig.canvas.mpl_connect('pick_event', partial(_onpick, params=params))
    params['callback_key'] = callback_key

    # Draw event lines for the first time.
    _plot_vert_lines(params)
def _prepare_projectors(params):
    """Set up SSP projector handling for the epochs browser.

    Adds a 'Proj' button when unapplied projectors exist, and wires up the
    projector toggle callbacks that are shared with the evoked plotting
    machinery. All state is stored back into ``params``.
    """
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    epochs = params['epochs']
    projs = params['projs']
    # Only offer the interactive 'Proj' button when there are projectors
    # that have not been applied to the data yet.
    if len(projs) > 0 and not epochs.proj:
        button_ax = plt.subplot2grid((10, 15), (9, 14))
        proj_button = mpl.widgets.Button(button_ax, 'Proj')
        proj_button.on_clicked(partial(_toggle_options, params=params))
        # Keep references alive so the widget is not garbage-collected.
        params['opt_button'] = proj_button
        params['ax_button'] = button_ax
    # This machinery is shared with plot_evoked: register the plot update
    # function and the projector toggle handler, keeping references so the
    # callbacks in the options figure can reach them later.
    params['plot_update_proj_callback'] = _plot_update_epochs_proj
    toggle_proj_callback = partial(_toggle_proj, params=params)
    params['callback_proj'] = toggle_proj_callback
    toggle_proj_callback('none')
def _plot_traces(params):
    """Redraw the visible epoch traces.

    Reads the pre-scaled data from ``params['data']`` and updates the
    LineCollections in place. Handles both the stacked per-channel view
    and the butterfly (overlaid-by-type) view, bad-channel and bad-epoch
    coloring, and the y-axis tick labels.
    """
    params['text'].set_visible(False)
    ax = params['ax']
    butterfly = params['butterfly']
    if butterfly:
        # In butterfly mode all picked channels are drawn, overlaid by type.
        ch_start = 0
        n_channels = len(params['picks'])
        data = params['data'] * params['butterfly_scale']
    else:
        ch_start = params['ch_start']
        n_channels = params['n_channels']
        data = params['data'] * params['scale_factor']
    offsets = params['offsets']
    lines = params['lines']
    epochs = params['epochs']
    n_times = len(epochs.times)
    tick_list = list()
    # Indices of the first/last epoch currently in view.
    start_idx = int(params['t_start'] / n_times)
    end = params['t_start'] + params['duration']
    end_idx = int(end / n_times)
    xlabels = params['labels'][start_idx:]
    event_ids = params['epochs'].events[:, 2]
    params['ax2'].set_xticklabels(event_ids[start_idx:])
    ax.set_xticklabels(xlabels)
    ylabels = ax.yaxis.get_ticklabels()
    # do the plotting
    for line_idx in range(n_channels):
        ch_idx = line_idx + ch_start
        if line_idx >= len(lines):
            break
        elif ch_idx < len(params['ch_names']):
            if butterfly:
                # Vertical offset depends on channel type (one row per type).
                ch_type = params['types'][ch_idx]
                if ch_type == 'grad':
                    offset = offsets[0]
                elif ch_type == 'mag':
                    offset = offsets[1]
                elif ch_type == 'eeg':
                    offset = offsets[2]
                elif ch_type == 'eog':
                    offset = offsets[3]
                elif ch_type == 'ecg':
                    offset = offsets[4]
                else:
                    # Unsupported type in butterfly view: clear the line.
                    lines[line_idx].set_segments(list())
            else:
                tick_list += [params['ch_names'][ch_idx]]
                offset = offsets[line_idx]
            this_data = data[ch_idx]

            # subtraction here gets correct orientation for flipped ylim
            ydata = offset - this_data
            xdata = params['times'][:params['duration']]
            num_epochs = np.min([params['n_epochs'],
                                 len(epochs.events)])
            # One segment per epoch so epochs can be colored individually.
            segments = np.split(np.array((xdata, ydata)).T, num_epochs)

            ch_name = params['ch_names'][ch_idx]
            if ch_name in params['info']['bads']:
                if not butterfly:
                    this_color = params['bad_color']
                    ylabels[line_idx].set_color(this_color)
                this_color = np.tile((params['bad_color']), (num_epochs, 1))
                # Bad epochs of a bad channel are drawn in pure red.
                for bad_idx in params['bads']:
                    if bad_idx < start_idx or bad_idx > end_idx:
                        continue
                    this_color[bad_idx - start_idx] = (1., 0., 0.)
                lines[line_idx].set_zorder(2)
            else:
                this_color = params['colors'][ch_idx][start_idx:end_idx]
                lines[line_idx].set_zorder(3)
                if not butterfly:
                    ylabels[line_idx].set_color('black')
            lines[line_idx].set_segments(segments)
            lines[line_idx].set_color(this_color)
        else:
            # No channel for this line slot; clear it.
            lines[line_idx].set_segments(list())

    # finalize plot
    ax.set_xlim(params['times'][0], params['times'][0] + params['duration'],
                False)
    params['ax2'].set_xlim(params['times'][0],
                           params['times'][0] + params['duration'], False)
    if butterfly:
        # Compute y-tick labels in physical units for each type row; each
        # row gets 4 tick slots (value, 0.00, value, blank).
        factor = -1. / params['butterfly_scale']
        labels = np.empty(20, dtype='S15')
        labels.fill('')
        ticks = ax.get_yticks()
        idx_offset = 1
        if 'grad' in params['types']:
            labels[idx_offset + 1] = '0.00'
            for idx in [idx_offset, idx_offset + 2]:
                labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[0]) *
                                               params['scalings']['grad'] *
                                               1e13 * factor)
            idx_offset += 4
        if 'mag' in params['types']:
            labels[idx_offset + 1] = '0.00'
            for idx in [idx_offset, idx_offset + 2]:
                labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[1]) *
                                               params['scalings']['mag'] *
                                               1e15 * factor)
            idx_offset += 4
        if 'eeg' in params['types']:
            labels[idx_offset + 1] = '0.00'
            for idx in [idx_offset, idx_offset + 2]:
                labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[2]) *
                                               params['scalings']['eeg'] *
                                               1e6 * factor)
            idx_offset += 4
        if 'eog' in params['types']:
            labels[idx_offset + 1] = '0.00'
            for idx in [idx_offset, idx_offset + 2]:
                labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[3]) *
                                               params['scalings']['eog'] *
                                               1e6 * factor)
            idx_offset += 4
        if 'ecg' in params['types']:
            labels[idx_offset + 1] = '0.00'
            for idx in [idx_offset, idx_offset + 2]:
                labels[idx] = '{0:.2f}'.format((ticks[idx] - offsets[4]) *
                                               params['scalings']['ecg'] *
                                               1e6 * factor)
        ax.set_yticklabels(labels, fontsize=12, color='black')
    else:
        ax.set_yticklabels(tick_list, fontsize=12)
    params['vsel_patch'].set_y(ch_start)
    params['fig'].canvas.draw()
    # XXX This is a hack to make sure this figure gets drawn last
    # so that when matplotlib goes to calculate bounds we don't get a
    # CGContextRef error on the MacOSX backend :(
    if params['fig_proj'] is not None:
        params['fig_proj'].canvas.draw()
def _plot_update_epochs_proj(params, bools=None):
    """Recompute the displayed data; only needs to be called when proj changed.

    Parameters
    ----------
    params : dict
        Shared browser state.
    bools : list of bool | None
        Which projectors are active. When None, the current projector
        selection is kept and only the visible data window is recomputed.
    """
    if bools is not None:
        inds = np.where(bools)[0]
        # Deep-copy so toggling in the dialog cannot mutate the originals.
        params['info']['projs'] = [copy.deepcopy(params['projs'][ii])
                                   for ii in inds]
        params['proj_bools'] = bools
    params['projector'], _ = setup_proj(params['info'], add_eeg_ref=False,
                                        verbose=False)

    # Slice out the epochs currently in view and concatenate along time.
    start = int(params['t_start'] / len(params['epochs'].times))
    n_epochs = params['n_epochs']
    end = start + n_epochs
    data = np.concatenate(params['epochs'][start:end].get_data(), axis=1)
    if params['projector'] is not None:
        data = np.dot(params['projector'], data)
    types = params['types']
    # Store the data scaled per channel type, reordered to display order.
    for pick, ind in enumerate(params['inds']):
        params['data'][pick] = data[ind] / params['scalings'][types[pick]]
    params['plot_fun']()
def _handle_picks(epochs):
    """Pick the channels to display in the epochs browser.

    If any channel name contains 'ICA' (i.e. the epochs hold ICA sources,
    which are stored as misc channels), select the misc channels;
    otherwise select the usual data channel types.
    """
    has_ica_sources = any('ICA' in name for name in epochs.ch_names)
    if has_ica_sources:
        return pick_types(epochs.info, misc=True, ref_meg=False, exclude=[])
    return pick_types(epochs.info, meg=True, eeg=True, eog=True, ecg=True,
                      seeg=True, ecog=True, ref_meg=False, exclude=[])
def _plot_window(value, params):
"""Deal with horizontal shift of the viewport."""
max_times = len(params['times']) - params['duration']
if value > max_times:
value = len(params['times']) - params['duration']
if value < 0:
value = 0
if params['t_start'] != value:
params['t_start'] = value
params['hsel_patch'].set_x(value)
params['plot_update_proj_callback'](params)
def _plot_vert_lines(params):
""" Helper function for plotting vertical lines."""
ax = params['ax']
while len(ax.lines) > 0:
ax.lines.pop()
params['vert_lines'] = list()
params['vertline_t'].set_text('')
epochs = params['epochs']
if params['settings'][3]: # if zeroline visible
t_zero = np.where(epochs.times == 0.)[0]
if len(t_zero) == 1:
for event_idx in range(len(epochs.events)):
pos = [event_idx * len(epochs.times) + t_zero[0],
event_idx * len(epochs.times) + t_zero[0]]
ax.plot(pos, ax.get_ylim(), 'g', zorder=4, alpha=0.4)
for epoch_idx in range(len(epochs.events)):
pos = [epoch_idx * len(epochs.times), epoch_idx * len(epochs.times)]
ax.plot(pos, ax.get_ylim(), color='black', linestyle='--', zorder=2)
def _pick_bad_epochs(event, params):
    """Toggle the bad status of the epoch under the mouse click.

    In ICA mode the click is forwarded to bad-channel selection instead.
    Marking/unmarking updates both the trace colors and the horizontal
    scrollbar patch for that epoch.
    """
    if 'ica' in params:
        pos = (event.xdata, event.ydata)
        _pick_bad_channels(pos, params)
        return
    n_times = len(params['epochs'].times)
    start_idx = int(params['t_start'] / n_times)
    xdata = event.xdata
    xlim = event.inaxes.get_xlim()
    # Translate the click x position into an absolute epoch index.
    epoch_idx = start_idx + int(xdata / (xlim[1] / params['n_epochs']))
    total_epochs = len(params['epochs'].events)
    if epoch_idx > total_epochs - 1:
        return  # clicked past the last epoch
    # remove bad epoch
    if epoch_idx in params['bads']:
        params['bads'] = params['bads'][(params['bads'] != epoch_idx)]
        for ch_idx in range(len(params['ch_names'])):
            # Restore the default per-type color for every channel.
            params['colors'][ch_idx][epoch_idx] = params['def_colors'][ch_idx]
        params['ax_hscroll'].patches[epoch_idx].set_color('w')
        params['ax_hscroll'].patches[epoch_idx].set_zorder(2)
        params['plot_fun']()
        return
    # add bad epoch
    params['bads'] = np.append(params['bads'], epoch_idx)
    params['ax_hscroll'].patches[epoch_idx].set_color((1., 0., 0., 1.))
    params['ax_hscroll'].patches[epoch_idx].set_zorder(3)
    params['ax_hscroll'].patches[epoch_idx].set_edgecolor('w')
    for ch_idx in range(len(params['ch_names'])):
        # Paint the epoch red on every channel.
        params['colors'][ch_idx][epoch_idx] = (1., 0., 0., 1.)
    params['plot_fun']()
def _pick_bad_channels(pos, params):
    """Toggle the bad status of the channel whose y-axis label is at ``pos``."""
    name, ch_idx = _label2idx(params, pos)
    if name is None:
        return  # the click did not land on a channel label
    bads = params['info']['bads']
    if name in bads:
        # Un-mark: drop every occurrence and restore the default color.
        while name in bads:
            bads.remove(name)
        new_color = params['def_colors'][ch_idx]
    else:
        # Mark as bad and color the scrollbar patch accordingly.
        bads.append(name)
        new_color = params['bad_color']
    # patches[0] is the white background rectangle, hence the +1 offset.
    params['ax_vscroll'].patches[ch_idx + 1].set_color(new_color)
    if 'ica' in params:
        params['plot_fun']()
    else:
        # Projector state may interact with bads; do a full data update.
        params['plot_update_proj_callback'](params)
def _plot_onscroll(event, params):
    """Handle mouse-wheel scrolling.

    Ctrl+scroll rescales the traces (reusing the '+'/'-' key handlers);
    a plain scroll moves through channels, except in butterfly mode where
    the channel set is fixed.
    """
    if event.key == 'control':
        # Translate the scroll direction into the corresponding key press.
        event.key = '-' if event.step < 0 else '+'
        _plot_onkey(event, params)
        return
    if not params['butterfly']:
        _plot_raw_onscroll(event, params, len(params['ch_names']))
def _mouse_click(event, params):
    """Dispatch mouse button presses to the right browser action.

    Left click on a y-axis label toggles it; left click on a scrollbar
    scrolls; left click on the main axes marks/unmarks a bad epoch;
    middle click redraws; right click places/removes vertical time
    markers, or (on a label, non-ICA mode) opens an ERP image.
    """
    if event.inaxes is None:
        # Click landed outside all axes: it may still be on a y-axis label.
        if params['butterfly'] or not params['settings'][0]:
            return
        ax = params['ax']
        ylim = ax.get_ylim()
        # Convert display coords to data coords to test the label region.
        pos = ax.transData.inverted().transform((event.x, event.y))
        if pos[0] > 0 or pos[1] < 0 or pos[1] > ylim[0]:
            return
        if event.button == 1:  # left click
            params['label_click_fun'](pos)
        elif event.button == 3:  # right click
            if 'ica' not in params:
                _, ch_idx = _label2idx(params, pos)
                if ch_idx is None:
                    return
                if channel_type(params['info'], ch_idx) not in ['mag', 'grad',
                                                                'eeg', 'eog']:
                    logger.info('Event related fields / potentials only '
                                'available for MEG and EEG channels.')
                    return
                # Open (or update) the ERP image figure for this channel.
                fig = plot_epochs_image(params['epochs'],
                                        picks=params['inds'][ch_idx],
                                        fig=params['image_plot'])[0]
                params['image_plot'] = fig
    elif event.button == 1:  # left click
        # vertical scroll bar changed
        if event.inaxes == params['ax_vscroll']:
            if params['butterfly']:
                return
            # Don't let scrollbar go outside vertical scrollbar limits
            # XXX: floating point exception on some machines if this happens.
            ch_start = min(
                max(int(event.ydata) - params['n_channels'] // 2, 0),
                len(params['ch_names']) - params['n_channels'])
            if params['ch_start'] != ch_start:
                params['ch_start'] = ch_start
                params['plot_fun']()
        # horizontal scroll bar changed
        elif event.inaxes == params['ax_hscroll']:
            # find the closest epoch time
            times = params['epoch_times']
            offset = 0.5 * params['n_epochs'] * len(params['epochs'].times)
            xdata = times.flat[np.abs(times - (event.xdata - offset)).argmin()]
            _plot_window(xdata, params)
        # main axes
        elif event.inaxes == params['ax']:
            _pick_bad_epochs(event, params)

    elif event.inaxes == params['ax'] and event.button == 2:  # middle click
        params['fig'].canvas.draw()
        if params['fig_proj'] is not None:
            params['fig_proj'].canvas.draw()
    elif event.inaxes == params['ax'] and event.button == 3:  # right click
        n_times = len(params['epochs'].times)
        # Position within one epoch; the marker repeats in every epoch.
        xdata = int(event.xdata % n_times)
        prev_xdata = 0
        if len(params['vert_lines']) > 0:
            prev_xdata = params['vert_lines'][0][0].get_data()[0][0]
        # Clear any existing markers first.
        while len(params['vert_lines']) > 0:
            params['ax'].lines.remove(params['vert_lines'][0][0])
            params['vert_lines'].pop(0)
        if prev_xdata == xdata:  # lines removed
            # Clicking the same time again just removes the markers.
            params['vertline_t'].set_text('')
            params['plot_fun']()
            return
        ylim = params['ax'].get_ylim()
        for epoch_idx in range(params['n_epochs']):  # plot lines
            pos = [epoch_idx * n_times + xdata, epoch_idx * n_times + xdata]
            params['vert_lines'].append(params['ax'].plot(pos, ylim, 'y',
                                                          zorder=5))
        params['vertline_t'].set_text('%0.3f' % params['epochs'].times[xdata])
        params['plot_fun']()
def _plot_onkey(event, params):
    """Handle key presses in the epochs browser.

    up/down scroll channels, left/right scroll epochs, '+'/'-' rescale
    the traces, pageup/pagedown change the number of visible channels,
    home/end change the number of visible epochs, 'b' toggles butterfly
    view, 'o' opens the options dialog, 'h' shows the peak-to-peak
    histogram, '?' shows help, 'f11' toggles full screen, and escape
    closes the figure.
    """
    import matplotlib.pyplot as plt
    if event.key == 'down':
        if params['butterfly']:
            return
        params['ch_start'] += params['n_channels']
        _channels_changed(params, len(params['ch_names']))
    elif event.key == 'up':
        if params['butterfly']:
            return
        params['ch_start'] -= params['n_channels']
        _channels_changed(params, len(params['ch_names']))
    elif event.key == 'left':
        sample = params['t_start'] - params['duration']
        sample = np.max([0, sample])
        _plot_window(sample, params)
    elif event.key == 'right':
        sample = params['t_start'] + params['duration']
        sample = np.min([sample, params['times'][-1] - params['duration']])
        times = params['epoch_times']
        # Snap the new start to the nearest epoch boundary.
        xdata = times.flat[np.abs(times - sample).argmin()]
        _plot_window(xdata, params)
    elif event.key == '-':
        if params['butterfly']:
            params['butterfly_scale'] /= 1.1
        else:
            params['scale_factor'] /= 1.1
        params['plot_fun']()
    elif event.key in ['+', '=']:
        if params['butterfly']:
            params['butterfly_scale'] *= 1.1
        else:
            params['scale_factor'] *= 1.1
        params['plot_fun']()
    elif event.key == 'f11':
        mng = plt.get_current_fig_manager()
        mng.full_screen_toggle()
    elif event.key == 'pagedown':
        if params['n_channels'] == 1 or params['butterfly']:
            return
        # Show one channel less: recompute offsets and drop one line.
        n_channels = params['n_channels'] - 1
        ylim = params['ax'].get_ylim()
        offset = ylim[0] / n_channels
        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
        params['n_channels'] = n_channels
        params['ax'].collections.pop()
        params['ax'].set_yticks(params['offsets'])
        params['lines'].pop()
        params['vsel_patch'].set_height(n_channels)
        params['plot_fun']()
    elif event.key == 'pageup':
        if params['butterfly']:
            return
        from matplotlib.collections import LineCollection
        # Show one channel more: recompute offsets and add one line.
        n_channels = params['n_channels'] + 1
        ylim = params['ax'].get_ylim()
        offset = ylim[0] / n_channels
        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
        params['n_channels'] = n_channels
        lc = LineCollection(list(), antialiased=False, linewidths=0.5,
                            zorder=3, picker=3.)
        params['ax'].add_collection(lc)
        params['ax'].set_yticks(params['offsets'])
        params['lines'].append(lc)
        params['vsel_patch'].set_height(n_channels)
        params['plot_fun']()
    elif event.key == 'home':
        # Show one epoch less: shrink the data buffer and view patches.
        n_epochs = params['n_epochs'] - 1
        if n_epochs <= 0:
            return
        n_times = len(params['epochs'].times)
        ticks = params['epoch_times'] + 0.5 * n_times
        params['ax2'].set_xticks(ticks[:n_epochs])
        params['n_epochs'] = n_epochs
        params['duration'] -= n_times
        params['hsel_patch'].set_width(params['duration'])
        params['data'] = params['data'][:, :-n_times]
        params['plot_update_proj_callback'](params)
    elif event.key == 'end':
        # Show one epoch more: grow the data buffer and view patches.
        n_epochs = params['n_epochs'] + 1
        n_times = len(params['epochs'].times)
        if n_times * n_epochs > len(params['times']):
            return
        ticks = params['epoch_times'] + 0.5 * n_times
        params['ax2'].set_xticks(ticks[:n_epochs])
        params['n_epochs'] = n_epochs
        if len(params['vert_lines']) > 0:
            # Extend the vertical markers into the newly visible epoch.
            ax = params['ax']
            pos = params['vert_lines'][0][0].get_data()[0] + params['duration']
            params['vert_lines'].append(ax.plot(pos, ax.get_ylim(), 'y',
                                                zorder=4))
        params['duration'] += n_times
        if params['t_start'] + params['duration'] > len(params['times']):
            # Keep the viewport inside the data.
            params['t_start'] -= n_times
            params['hsel_patch'].set_x(params['t_start'])
        params['hsel_patch'].set_width(params['duration'])
        params['data'] = np.zeros((len(params['data']), params['duration']))
        params['plot_update_proj_callback'](params)
    elif event.key == 'b':
        # Toggle butterfly view; close the options dialog if it is open.
        if params['fig_options'] is not None:
            plt.close(params['fig_options'])
            params['fig_options'] = None
        _prepare_butterfly(params)
        _plot_traces(params)
    elif event.key == 'o':
        if not params['butterfly']:
            _open_options(params)
    elif event.key == 'h':
        _plot_histogram(params)
    elif event.key == '?':
        _onclick_help(event, params)
    elif event.key == 'escape':
        plt.close(params['fig'])
def _prepare_butterfly(params):
    """Toggle between the stacked view and the butterfly view.

    Entering butterfly mode overlays all channels of each supported type
    (grad/mag/eeg/eog/ecg) on one row per type, annotates the rows with
    their units, and ensures there is one LineCollection per picked
    channel. Leaving it restores the regular per-channel layout.
    """
    from matplotlib.collections import LineCollection
    butterfly = not params['butterfly']
    if butterfly:
        types = set(['grad', 'mag', 'eeg', 'eog',
                     'ecg']) & set(params['types'])
        if len(types) < 1:
            return  # nothing the butterfly view can display
        params['ax_vscroll'].set_visible(False)
        ax = params['ax']
        labels = ax.yaxis.get_ticklabels()
        for label in labels:
            label.set_visible(True)
        # One row of height 5 per channel type present; 4 ticks per row.
        ylim = (5. * len(types), 0.)
        ax.set_ylim(ylim)
        offset = ylim[0] / (4. * len(types))
        ticks = np.arange(0, ylim[0], offset)
        # Pad/truncate to exactly 20 tick slots (max 5 rows * 4 ticks).
        ticks = [ticks[x] if x < len(ticks) else 0 for x in range(20)]
        ax.set_yticks(ticks)
        used_types = 0
        params['offsets'] = [ticks[2]]
        # Annotate each present type with its unit at its row center.
        if 'grad' in types:
            pos = (0, 1 - (ticks[2] / ylim[0]))
            params['ax2'].annotate('Grad (fT/cm)', xy=pos, xytext=(-70, 0),
                                   ha='left', size=12, va='center',
                                   xycoords='axes fraction', rotation=90,
                                   textcoords='offset points')
            used_types += 1
        params['offsets'].append(ticks[2 + used_types * 4])
        if 'mag' in types:
            pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
            params['ax2'].annotate('Mag (fT)', xy=pos, xytext=(-70, 0),
                                   ha='left', size=12, va='center',
                                   xycoords='axes fraction', rotation=90,
                                   textcoords='offset points')
            used_types += 1
        params['offsets'].append(ticks[2 + used_types * 4])
        if 'eeg' in types:
            pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
            params['ax2'].annotate('EEG (uV)', xy=pos, xytext=(-70, 0),
                                   ha='left', size=12, va='center',
                                   xycoords='axes fraction', rotation=90,
                                   textcoords='offset points')
            used_types += 1
        params['offsets'].append(ticks[2 + used_types * 4])
        if 'eog' in types:
            pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
            params['ax2'].annotate('EOG (uV)', xy=pos, xytext=(-70, 0),
                                   ha='left', size=12, va='center',
                                   xycoords='axes fraction', rotation=90,
                                   textcoords='offset points')
            used_types += 1
        params['offsets'].append(ticks[2 + used_types * 4])
        if 'ecg' in types:
            pos = (0, 1 - (ticks[2 + used_types * 4] / ylim[0]))
            params['ax2'].annotate('ECG (uV)', xy=pos, xytext=(-70, 0),
                                   ha='left', size=12, va='center',
                                   xycoords='axes fraction', rotation=90,
                                   textcoords='offset points')
            used_types += 1
        # Every picked channel is drawn at once: add missing collections.
        while len(params['lines']) < len(params['picks']):
            lc = LineCollection(list(), antialiased=False, linewidths=0.5,
                                zorder=3, picker=3.)
            ax.add_collection(lc)
            params['lines'].append(lc)
    else:  # change back to default view
        labels = params['ax'].yaxis.get_ticklabels()
        for label in labels:
            label.set_visible(params['settings'][0])
        params['ax_vscroll'].set_visible(True)
        # Remove the unit annotations added above.
        while len(params['ax2'].texts) > 0:
            params['ax2'].texts.pop()
        n_channels = params['n_channels']
        # Trim the extra collections added for butterfly mode.
        while len(params['lines']) > n_channels:
            params['ax'].collections.pop()
            params['lines'].pop()
        ylim = (25., 0.)
        params['ax'].set_ylim(ylim)
        offset = ylim[0] / n_channels
        params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
        params['ax'].set_yticks(params['offsets'])
    params['butterfly'] = butterfly
def _onpick(event, params):
"""Helper to add a channel name on click"""
if event.mouseevent.button != 2 or not params['butterfly']:
return # text label added with a middle mouse button
lidx = np.where([l is event.artist for l in params['lines']])[0][0]
text = params['text']
text.set_x(event.mouseevent.xdata)
text.set_y(event.mouseevent.ydata)
text.set_text(params['ch_names'][lidx])
text.set_visible(True)
# do NOT redraw here, since for butterfly plots hundreds of lines could
# potentially be picked -- use _mouse_click (happens once per click)
# to do the drawing
def _close_event(event, params):
    """On figure close: drop the epochs marked bad and persist bad channels."""
    epochs = params['epochs']
    epochs.drop(params['bads'])
    # Copy the (possibly edited) bad-channel list back onto the epochs.
    epochs.info['bads'] = params['info']['bads']
    logger.info('Channels marked as bad: %s' % epochs.info['bads'])
def _resize_event(event, params):
    """Persist the new figure size and re-layout the axes on window resize."""
    dims = params['fig'].get_size_inches()
    # Remember the size (as "w,h") so the next browser opens equally sized.
    set_config('MNE_BROWSE_RAW_SIZE', ','.join(str(dim) for dim in dims))
    _layout_figure(params)
def _update_channels_epochs(event, params):
    """Apply the channel/epoch count sliders from the options dialog.

    Rebuilds the line collections to match the requested number of
    channels, resizes the data buffer and selection patches for the
    requested number of epochs, and triggers a full redraw.
    """
    from matplotlib.collections import LineCollection
    # Channels
    n_channels = int(np.around(params['channel_slider'].val))
    offset = params['ax'].get_ylim()[0] / n_channels
    params['offsets'] = np.arange(n_channels) * offset + (offset / 2.)
    # Grow/shrink the set of LineCollections to exactly n_channels.
    while len(params['lines']) > n_channels:
        params['ax'].collections.pop()
        params['lines'].pop()
    while len(params['lines']) < n_channels:
        lc = LineCollection(list(), linewidths=0.5, antialiased=False,
                            zorder=3, picker=3.)
        params['ax'].add_collection(lc)
        params['lines'].append(lc)
    params['ax'].set_yticks(params['offsets'])
    params['vsel_patch'].set_height(n_channels)
    params['n_channels'] = n_channels

    # Epochs
    n_epochs = int(np.around(params['epoch_slider'].val))
    n_times = len(params['epochs'].times)
    ticks = params['epoch_times'] + 0.5 * n_times
    params['ax2'].set_xticks(ticks[:n_epochs])
    params['n_epochs'] = n_epochs
    params['duration'] = n_times * n_epochs
    params['hsel_patch'].set_width(params['duration'])
    # Fresh buffer; it is refilled by the update callback below.
    params['data'] = np.zeros((len(params['data']), params['duration']))
    if params['t_start'] + n_times * n_epochs > len(params['times']):
        # Clamp the viewport so it does not run past the last epoch.
        params['t_start'] = len(params['times']) - n_times * n_epochs
        params['hsel_patch'].set_x(params['t_start'])
    params['plot_update_proj_callback'](params)
def _toggle_labels(label, params):
"""Function for toggling axis labels on/off."""
if label == 'Channel names visible':
params['settings'][0] = not params['settings'][0]
labels = params['ax'].yaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][0])
elif label == 'Event-id visible':
params['settings'][1] = not params['settings'][1]
labels = params['ax2'].xaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][1])
elif label == 'Epoch-id visible':
params['settings'][2] = not params['settings'][2]
labels = params['ax'].xaxis.get_ticklabels()
for label in labels:
label.set_visible(params['settings'][2])
elif label == 'Zeroline visible':
params['settings'][3] = not params['settings'][3]
_plot_vert_lines(params)
params['fig'].canvas.draw()
if params['fig_proj'] is not None:
params['fig_proj'].canvas.draw()
def _open_options(params):
    """Open (or close, if already open) the view-settings dialog.

    The dialog has sliders for the number of visible channels and epochs,
    checkboxes for label/zero-line visibility, and an Update button. All
    widget references are kept in ``params`` so they stay alive.
    """
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    if params['fig_options'] is not None:
        # turn off options dialog
        plt.close(params['fig_options'])
        params['fig_options'] = None
        return
    width = 10
    height = 3
    fig_options = figure_nobar(figsize=(width, height), dpi=80)
    fig_options.canvas.set_window_title('View settings')
    params['fig_options'] = fig_options
    # Lay out the slider, button and checkbox axes in figure coordinates.
    ax_channels = plt.axes([0.15, 0.1, 0.65, 0.1])
    ax_epochs = plt.axes([0.15, 0.25, 0.65, 0.1])
    ax_button = plt.axes([0.85, 0.1, 0.1, 0.25])
    ax_check = plt.axes([0.15, 0.4, 0.4, 0.55])
    plt.axis('off')
    params['update_button'] = mpl.widgets.Button(ax_button, 'Update')
    params['channel_slider'] = mpl.widgets.Slider(ax_channels, 'Channels', 1,
                                                  len(params['ch_names']),
                                                  valfmt='%0.0f',
                                                  valinit=params['n_channels'])
    params['epoch_slider'] = mpl.widgets.Slider(ax_epochs, 'Epochs', 1,
                                                len(params['epoch_times']),
                                                valfmt='%0.0f',
                                                valinit=params['n_epochs'])
    params['checkbox'] = mpl.widgets.CheckButtons(ax_check,
                                                  ['Channel names visible',
                                                   'Event-id visible',
                                                   'Epoch-id visible',
                                                   'Zeroline visible'],
                                                  actives=params['settings'])
    update = partial(_update_channels_epochs, params=params)
    params['update_button'].on_clicked(update)
    labels_callback = partial(_toggle_labels, params=params)
    params['checkbox'].on_clicked(labels_callback)
    close_callback = partial(_settings_closed, params=params)
    params['fig_options'].canvas.mpl_connect('close_event', close_callback)
    try:
        params['fig_options'].canvas.draw()
        params['fig_options'].show(warn=False)
        if params['fig_proj'] is not None:
            params['fig_proj'].canvas.draw()
    except Exception:
        # Some backends raise when showing without a display; best-effort.
        pass
def _settings_closed(events, params):
"""Function to handle close event from settings dialog."""
params['fig_options'] = None
def _plot_histogram(params):
    """Plot a histogram of peak-to-peak amplitudes per channel type.

    One subplot per channel type present in the data (EEG, magnetometers,
    gradiometers, in that display order). If a rejection threshold is set
    for a type, it is drawn as a red vertical line and the histogram
    range is limited to slightly past it.
    """
    import matplotlib.pyplot as plt
    epochs = params['epochs']
    # Peak-to-peak amplitude of every channel in every epoch.
    p2p = np.ptp(epochs.get_data(), axis=2)
    types = list()
    data = list()
    # Collect the flattened p2p values per channel type in a fixed display
    # order (this replaces three near-identical copy-pasted blocks).
    for ch_type in ('eeg', 'mag', 'grad'):
        if ch_type not in params['types']:
            continue
        picked = np.array([p2p.T[i] for i, x in enumerate(params['types'])
                           if x == ch_type])
        data.append(picked.ravel())
        types.append(ch_type)
    params['histogram'] = plt.figure()
    scalings = _handle_default('scalings')
    units = _handle_default('units')
    titles = _handle_default('titles')
    colors = _handle_default('color')
    for idx in range(len(types)):
        ax = plt.subplot(len(types), 1, idx + 1)
        plt.xlabel(units[types[idx]])
        plt.ylabel('count')
        color = colors[types[idx]]
        rej = None
        if epochs.reject is not None and types[idx] in epochs.reject.keys():
            # Convert the rejection threshold to display units and pad the
            # histogram range slightly past it.
            rej = epochs.reject[types[idx]] * scalings[types[idx]]
            rng = [0., rej * 1.1]
        else:
            rng = None
        plt.hist(data[idx] * scalings[types[idx]], bins=100, color=color,
                 range=rng)
        if rej is not None:
            ax.plot((rej, rej), (0, ax.get_ylim()[1]), color='r')
        plt.title(titles[types[idx]])
    params['histogram'].suptitle('Peak-to-peak histogram', y=0.99)
    params['histogram'].subplots_adjust(hspace=0.6)
    try:
        params['histogram'].show(warn=False)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit; now consistent with _open_options' best-effort show.
        pass
    if params['fig_proj'] is not None:
        params['fig_proj'].canvas.draw()
def _label2idx(params, pos):
"""Aux function for click on labels. Returns channel name and idx."""
labels = params['ax'].yaxis.get_ticklabels()
offsets = np.array(params['offsets']) + params['offsets'][0]
line_idx = np.searchsorted(offsets, pos[1])
text = labels[line_idx].get_text()
if len(text) == 0:
return None, None
ch_idx = params['ch_start'] + line_idx
return text, ch_idx
| bsd-3-clause |
hsuantien/scikit-learn | sklearn/metrics/ranking.py | 75 | 25426 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
    """Compute Area Under the Curve (AUC) using the trapezoidal rule

    This is a general function, given points on a curve.  For computing the
    area under the ROC-curve, see :func:`roc_auc_score`.

    Parameters
    ----------
    x : array, shape = [n]
        x coordinates.

    y : array, shape = [n]
        y coordinates.

    reorder : boolean, optional (default=False)
        If True, assume that the curve is ascending in the case of ties, as for
        an ROC curve. If the curve is non-ascending, the result will be wrong.

    Returns
    -------
    auc : float

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import metrics
    >>> y = np.array([1, 1, 2, 2])
    >>> pred = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
    >>> metrics.auc(fpr, tpr)
    0.75

    See also
    --------
    roc_auc_score : Computes the area under the ROC curve
    precision_recall_curve :
        Compute precision-recall pairs for different probability thresholds
    """
    check_consistent_length(x, y)
    x = column_or_1d(x)
    y = column_or_1d(y)

    if x.shape[0] < 2:
        raise ValueError('At least 2 points are needed to compute'
                         ' area under curve, but x.shape = %s' % x.shape)

    direction = 1
    if reorder:
        # Sort the points along the x axis, breaking ties with y.
        sort_idx = np.lexsort((y, x))
        x, y = x[sort_idx], y[sort_idx]
    else:
        dx = np.diff(x)
        if np.any(dx < 0):
            # A monotonically decreasing x is integrated with flipped sign;
            # anything non-monotonic is an error when reordering is disabled.
            if not np.all(dx <= 0):
                raise ValueError("Reordering is not turned on, and "
                                 "the x array is not increasing: %s" % x)
            direction = -1

    return direction * np.trapz(y, x)
def average_precision_score(y_true, y_score, average="macro",
                            sample_weight=None):
    """Compute average precision (AP) from prediction scores

    This score corresponds to the area under the precision-recall curve.

    Note: this implementation is restricted to the binary classification task
    or multilabel classification task.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.

    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        If ``None``, the scores for each class are returned. Otherwise,
        this determines the type of averaging performed on the data:

        ``'micro'``:
            Calculate metrics globally by considering each element of the label
            indicator matrix as a label.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean. This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label).
        ``'samples'``:
            Calculate metrics for each instance, and find their average.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    average_precision : float

    References
    ----------
    .. [1] `Wikipedia entry for the Average precision
           <http://en.wikipedia.org/wiki/Average_precision>`_

    See also
    --------
    roc_auc_score : Area under the ROC curve

    precision_recall_curve :
        Compute precision-recall pairs for different probability thresholds

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import average_precision_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> average_precision_score(y_true, y_scores)  # doctest: +ELLIPSIS
    0.79...
    """
    def _binary_average_precision(y_true, y_score, sample_weight=None):
        # AP for one label: area under that label's precision-recall curve.
        prec, rec, _ = precision_recall_curve(y_true, y_score,
                                              sample_weight=sample_weight)
        return auc(rec, prec)

    return _average_binary_score(_binary_average_precision, y_true, y_score,
                                 average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
    """Compute Area Under the Curve (AUC) from prediction scores

    Note: this implementation is restricted to the binary classification task
    or multilabel classification task in label indicator format.

    Read more in the :ref:`User Guide <roc_metrics>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.

    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        If ``None``, the scores for each class are returned. Otherwise,
        this determines the type of averaging performed on the data:

        ``'micro'``:
            Calculate metrics globally by considering each element of the label
            indicator matrix as a label.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean. This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label).
        ``'samples'``:
            Calculate metrics for each instance, and find their average.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    auc : float

    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
           <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_

    See also
    --------
    average_precision_score : Area under the precision-recall curve

    roc_curve : Compute Receiver operating characteristic (ROC)

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import roc_auc_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> roc_auc_score(y_true, y_scores)
    0.75
    """
    def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
        # ROC AUC needs both classes to be present in the column.
        if len(np.unique(y_true)) != 2:
            raise ValueError("Only one class present in y_true. ROC AUC score "
                             "is not defined in that case.")

        fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=sample_weight)
        return auc(fpr, tpr, reorder=True)

    return _average_binary_score(_binary_roc_auc_score, y_true, y_score,
                                 average, sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Calculate true and false positives per binary classification threshold.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification

    y_score : array, shape = [n_samples]
        Estimated probabilities or decision function

    pos_label : int, optional (default=None)
        The label of the positive class

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fps : array, shape = [n_thresholds]
        A count of false positives, at index i being the number of negative
        samples assigned a score >= thresholds[i]. The total number of
        negative samples is equal to fps[-1] (thus true negatives are given by
        fps[-1] - fps).

    tps : array, shape = [n_thresholds := len(np.unique(y_score))]
        An increasing count of true positives, at index i being the number
        of positive samples assigned a score >= thresholds[i]. The total
        number of positive samples is equal to tps[-1] (thus false negatives
        are given by tps[-1] - tps).

    thresholds : array, shape = [n_thresholds]
        Decreasing score values.
    """
    check_consistent_length(y_true, y_score)
    y_true = column_or_1d(y_true)
    y_score = column_or_1d(y_score)
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)

    # ensure binary classification if pos_label is not specified
    # (accepted encodings: {0, 1}, {-1, 1}, or a single class from those sets)
    classes = np.unique(y_true)
    if (pos_label is None and
        not (np.all(classes == [0, 1]) or
             np.all(classes == [-1, 1]) or
             np.all(classes == [0]) or
             np.all(classes == [-1]) or
             np.all(classes == [1]))):
        raise ValueError("Data is not binary and pos_label is not specified")
    elif pos_label is None:
        pos_label = 1.

    # make y_true a boolean vector
    y_true = (y_true == pos_label)

    # sort scores and corresponding truth values; mergesort is stable, so
    # tied scores keep their relative input order
    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
    y_score = y_score[desc_score_indices]
    y_true = y_true[desc_score_indices]
    if sample_weight is not None:
        weight = sample_weight[desc_score_indices]
    else:
        # scalar weight: broadcasting makes the weighted and unweighted
        # paths below share the same expressions
        weight = 1.

    # y_score typically has many tied values. Here we extract
    # the indices associated with the distinct values. We also
    # concatenate a value for the end of the curve.
    # We need to use isclose to avoid spurious repeated thresholds
    # stemming from floating point roundoff errors.
    distinct_value_indices = np.where(np.logical_not(isclose(
        np.diff(y_score), 0)))[0]
    threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]

    # accumulate the true positives with decreasing threshold
    tps = (y_true * weight).cumsum()[threshold_idxs]
    if sample_weight is not None:
        fps = weight.cumsum()[threshold_idxs] - tps
    else:
        # unweighted: false positives = samples seen so far minus the true
        # positives (threshold_idxs is zero-based, hence the +1)
        fps = 1 + threshold_idxs - tps
    return fps, tps, y_score[threshold_idxs]
def precision_recall_curve(y_true, probas_pred, pos_label=None,
                           sample_weight=None):
    """Compute precision-recall pairs for different probability thresholds

    Note: this implementation is restricted to the binary classification task.

    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
    true positives and ``fp`` the number of false positives. The precision is
    intuitively the ability of the classifier not to label as positive a sample
    that is negative.

    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
    true positives and ``fn`` the number of false negatives. The recall is
    intuitively the ability of the classifier to find all the positive samples.

    The last precision and recall values are 1. and 0. respectively and do not
    have a corresponding threshold. This ensures that the graph starts on the
    x axis.

    Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification in range {-1, 1} or {0, 1}.

    probas_pred : array, shape = [n_samples]
        Estimated probabilities or decision function.

    pos_label : int, optional (default=None)
        The label of the positive class

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    precision : array, shape = [n_thresholds + 1]
        Precision values such that element i is the precision of
        predictions with score >= thresholds[i] and the last element is 1.

    recall : array, shape = [n_thresholds + 1]
        Decreasing recall values such that element i is the recall of
        predictions with score >= thresholds[i] and the last element is 0.

    thresholds : array, shape = [n_thresholds := len(np.unique(probas_pred))]
        Increasing thresholds on the decision function used to compute
        precision and recall.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import precision_recall_curve
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> precision, recall, thresholds = precision_recall_curve(
    ...     y_true, y_scores)
    >>> precision  # doctest: +ELLIPSIS
    array([ 0.66...,  0.5       ,  1.        ,  1.        ])
    >>> recall
    array([ 1. ,  0.5,  0.5,  0. ])
    >>> thresholds
    array([ 0.35,  0.4 ,  0.8 ])
    """
    fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
                                             pos_label=pos_label,
                                             sample_weight=sample_weight)

    precision = tps / (tps + fps)
    recall = tps / tps[-1]

    # Truncate once full recall is attained and reverse the arrays so that
    # recall is decreasing; append the conventional endpoint
    # (precision=1, recall=0), which carries no threshold.
    full_recall_idx = tps.searchsorted(tps[-1])
    rev = slice(full_recall_idx, None, -1)
    return (np.r_[precision[rev], 1], np.r_[recall[rev], 0], thresholds[rev])
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Compute Receiver operating characteristic (ROC)

    Note: this implementation is restricted to the binary classification task.

    Read more in the :ref:`User Guide <roc_metrics>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True binary labels in range {0, 1} or {-1, 1}.  If labels are not
        binary, pos_label should be explicitly given.

    y_score : array, shape = [n_samples]
        Target scores, can either be probability estimates of the positive
        class or confidence values.

    pos_label : int
        Label considered as positive and others are considered negative.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    fpr : array, shape = [>2]
        Increasing false positive rates such that element i is the false
        positive rate of predictions with score >= thresholds[i].

    tpr : array, shape = [>2]
        Increasing true positive rates such that element i is the true
        positive rate of predictions with score >= thresholds[i].

    thresholds : array, shape = [n_thresholds]
        Decreasing thresholds on the decision function used to compute
        fpr and tpr. `thresholds[0]` represents no instances being predicted
        and is arbitrarily set to `max(y_score) + 1`.

    See also
    --------
    roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores

    Notes
    -----
    Since the thresholds are sorted from low to high values, they
    are reversed upon returning them to ensure they correspond to both ``fpr``
    and ``tpr``, which are sorted in reversed order during their calculation.

    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
           <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import metrics
    >>> y = np.array([1, 1, 2, 2])
    >>> scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
    >>> fpr
    array([ 0. ,  0.5,  0.5,  1. ])
    >>> tpr
    array([ 0.5,  0.5,  1. ,  1. ])
    >>> thresholds
    array([ 0.8 ,  0.4 ,  0.35,  0.1 ])
    """
    fps, tps, thresholds = _binary_clf_curve(
        y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)

    if tps.size == 0 or fps[0] != 0:
        # Make the curve start at (0, 0): prepend a threshold above the
        # highest score, at which nothing is predicted positive.
        tps = np.r_[0, tps]
        fps = np.r_[0, fps]
        thresholds = np.r_[thresholds[0] + 1, thresholds]

    n_neg = fps[-1]
    if n_neg > 0:
        fpr = fps / n_neg
    else:
        warnings.warn("No negative samples in y_true, "
                      "false positive value should be meaningless",
                      UndefinedMetricWarning)
        fpr = np.repeat(np.nan, fps.shape)

    n_pos = tps[-1]
    if n_pos > 0:
        tpr = tps / n_pos
    else:
        warnings.warn("No positive samples in y_true, "
                      "true positive value should be meaningless",
                      UndefinedMetricWarning)
        tpr = np.repeat(np.nan, tps.shape)

    return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
    """Compute ranking-based average precision

    Label ranking average precision (LRAP) is the average over each ground
    truth label assigned to each sample, of the ratio of true vs. total
    labels with lower score.

    This metric is used in multilabel ranking problem, where the goal
    is to give better rank to the labels associated to each sample.

    The obtained score is always strictly greater than 0 and
    the best value is 1.

    Read more in the :ref:`User Guide <label_ranking_average_precision>`.

    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.

    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    Returns
    -------
    score : float

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import label_ranking_average_precision_score
    >>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
    >>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
    >>> label_ranking_average_precision_score(y_true, y_score) \
        # doctest: +ELLIPSIS
    0.416...
    """
    check_consistent_length(y_true, y_score)
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)

    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    # Handle badly formatted array and the degenerate case with one label
    y_type = type_of_target(y_true)
    if (y_type != "multilabel-indicator" and
            not (y_type == "binary" and y_true.ndim == 2)):
        raise ValueError("{0} format is not supported".format(y_type))

    # CSR gives cheap per-sample access to the indices of the true labels.
    y_true = csr_matrix(y_true)
    # Negate scores so that ``rankdata`` (which ranks ascending) assigns
    # rank 1 to the highest-scoring label.
    y_score = -y_score

    n_samples, n_labels = y_true.shape

    out = 0.
    for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        # Column indices of the relevant (true) labels of sample i.
        relevant = y_true.indices[start:stop]

        if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
            # equal to 1. The label ranking has no meaning.
            out += 1.
            continue

        scores_i = y_score[i]
        # rank: position of each relevant label among all labels;
        # L: its position among the relevant labels only.
        rank = rankdata(scores_i, 'max')[relevant]
        L = rankdata(scores_i[relevant], 'max')
        out += (L / rank).mean()

    return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
    """Coverage error measure

    Compute how far we need to go through the ranked scores to cover all
    true labels. The best value is equal to the average number
    of labels in ``y_true`` per sample.

    Ties in ``y_scores`` are broken by giving maximal rank that would have
    been assigned to all tied values.

    Read more in the :ref:`User Guide <coverage_error>`.

    Parameters
    ----------
    y_true : array, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.

    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    coverage_error : float

    References
    ----------
    .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
           Mining multi-label data. In Data mining and knowledge discovery
           handbook (pp. 667-685). Springer US.
    """
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)

    y_type = type_of_target(y_true)
    if y_type != "multilabel-indicator":
        raise ValueError("{0} format is not supported".format(y_type))
    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    # Mask out the irrelevant labels, then find the lowest-scoring relevant
    # label of every sample.
    masked_scores = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
    min_relevant = masked_scores.min(axis=1).reshape((-1, 1))
    # Coverage = how many labels score at least as high as that one;
    # masked entries (samples without any relevant label) are filled with 0.
    coverage = (y_score >= min_relevant).sum(axis=1)
    coverage = coverage.filled(0)

    return np.average(coverage, weights=sample_weight)
def label_ranking_loss(y_true, y_score, sample_weight=None):
    """Compute Ranking loss measure

    Compute the average number of label pairs that are incorrectly ordered
    given y_score weighted by the size of the label set and the number of
    labels not in the label set.

    This is similar to the error set size, but weighted by the number of
    relevant and irrelevant labels. The best performance is achieved with
    a ranking loss of zero.

    Read more in the :ref:`User Guide <label_ranking_loss>`.

    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.

    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    Returns
    -------
    loss : float

    References
    ----------
    .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
           Mining multi-label data. In Data mining and knowledge discovery
           handbook (pp. 667-685). Springer US.
    """
    y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)

    y_type = type_of_target(y_true)
    if y_type not in ("multilabel-indicator",):
        raise ValueError("{0} format is not supported".format(y_type))

    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")

    n_samples, n_labels = y_true.shape

    y_true = csr_matrix(y_true)

    loss = np.zeros(n_samples)
    for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        # Sort and bin the label scores
        unique_scores, unique_inverse = np.unique(y_score[i],
                                                  return_inverse=True)
        # Per distinct score value (ascending): how many true labels have it
        # and how many labels of any kind, so the difference gives the false
        # labels at each score value.
        true_at_reversed_rank = bincount(
            unique_inverse[y_true.indices[start:stop]],
            minlength=len(unique_scores))
        all_at_reversed_rank = bincount(unique_inverse,
                                        minlength=len(unique_scores))
        false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank

        # if the scores are sorted, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
        # how many false labels of a given score have a score higher than the
        # accumulated true labels with lower score.
        loss[i] = np.dot(true_at_reversed_rank.cumsum(),
                         false_at_reversed_rank)

    n_positives = count_nonzero(y_true, axis=1)
    # Samples with no positive or no negative label divide by zero below;
    # those entries are overwritten right after.
    with np.errstate(divide="ignore", invalid="ignore"):
        loss /= ((n_labels - n_positives) * n_positives)

    # When there is no positive or no negative labels, those values should
    # be considered correct, i.e. the ranking doesn't matter.
    loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.

    return np.average(loss, weights=sample_weight)
| bsd-3-clause |
RPGOne/scikit-learn | sklearn/tests/test_discriminant_analysis.py | 4 | 13141 | import sys
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import SkipTest
from sklearn.datasets import make_blobs
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.discriminant_analysis import _cov
# import reload
version = sys.version_info
if version[0] == 3:
    # Python 3+ import for reload. Builtin in Python2
    if version[1] == 3:
        # importlib.reload does not exist on Python 3.3; leave it as None so
        # tests that need it can skip themselves.
        reload = None
    else:
        from importlib import reload

# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])  # an alternative labeling of X

# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')

# Data is just 9 separable points in the plane
X6 = np.array([[0, 0], [-2, -2], [-2, -1], [-1, -1], [-1, -2],
               [1, 3], [1, 2], [2, 1], [2, 2]])
y6 = np.array([1, 1, 1, 1, 1, 2, 2, 2, 2])
y7 = np.array([1, 2, 3, 2, 3, 1, 2, 3, 1])  # an alternative labeling of X6

# Degenerate data with 1 feature (still should be separable)
X7 = np.array([[-3, ], [-2, ], [-1, ], [-1, ], [0, ], [1, ], [1, ],
               [2, ], [3, ]])

# Data that has zero variance in one dimension and needs regularization
X2 = np.array([[-3, 0], [-2, 0], [-1, 0], [-1, 0], [0, 0], [1, 0], [1, 0],
               [2, 0], [3, 0]])

# One element class
y4 = np.array([1, 1, 1, 1, 1, 1, 1, 1, 2])

# Data with less samples in a class than n_features
X5 = np.c_[np.arange(8), np.zeros((8, 3))]
y5 = np.array([0, 0, 0, 0, 0, 1, 1, 1])

# (solver, shrinkage) combinations exercised by test_lda_predict
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
                    ('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
                    ('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
    # LDA must implement fit and predict and classify the simple toy data
    # correctly for every (solver, shrinkage) combination.
    for solver, shrinkage in solver_shrinkage:
        clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)

        pred = clf.fit(X, y).predict(X)
        assert_array_equal(pred, y, 'solver %s' % solver)

        # 1D data must work as well
        pred1 = clf.fit(X1, y).predict(X1)
        assert_array_equal(pred1, y, 'solver %s' % solver)

        # probability estimates must agree with the hard predictions
        proba1 = clf.predict_proba(X1)
        assert_array_equal((proba1[:, 1] > 0.5) + 1, y,
                           'solver %s' % solver)
        log_proba1 = clf.predict_log_proba(X1)
        assert_array_almost_equal(np.exp(log_proba1), proba1, 8,
                                  'solver %s' % solver)

        # Primarily test for commit 2f34950 -- "reuse" of priors
        pred3 = clf.fit(X, y3).predict(X)
        # LDA shouldn't be able to separate those
        assert_true(np.any(pred3 != y3), 'solver %s' % solver)

    # invalid shrinkage / solver combinations must raise
    for solver, shrinkage, err in (("lsqr", -0.2231, ValueError),
                                   ("eigen", "dummy", ValueError),
                                   ("svd", "auto", NotImplementedError)):
        clf = LinearDiscriminantAnalysis(solver=solver, shrinkage=shrinkage)
        assert_raises(err, clf.fit, X, y)

    # unknown solver
    clf = LinearDiscriminantAnalysis(solver="dummy")
    assert_raises(ValueError, clf.fit, X, y)
def test_lda_priors():
    # Negative priors must be rejected.
    clf = LinearDiscriminantAnalysis(priors=np.array([0.5, -0.5]))
    assert_raise_message(ValueError, "priors must be non-negative",
                         clf.fit, X, y)

    # Priors passed as a plain list must be accepted (smoke test).
    LinearDiscriminantAnalysis(priors=[0.5, 0.5]).fit(X, y)

    # Priors that do not sum to one are normalized, with a warning.
    clf = LinearDiscriminantAnalysis(priors=np.array([0.5, 0.6]))
    assert_warns(UserWarning, clf.fit, X, y)
    assert_array_almost_equal(clf.priors_, np.array([0.45, 0.55]), 2)
def test_lda_coefs():
    # All three solvers should find approximately the same coefficients.
    X, y = make_blobs(n_samples=1000, n_features=2, centers=2,
                      random_state=11)

    fitted = {solver: LinearDiscriminantAnalysis(solver=solver).fit(X, y)
              for solver in ("svd", "lsqr", "eigen")}

    assert_array_almost_equal(fitted["svd"].coef_, fitted["lsqr"].coef_, 1)
    assert_array_almost_equal(fitted["svd"].coef_, fitted["eigen"].coef_, 1)
    assert_array_almost_equal(fitted["eigen"].coef_, fitted["lsqr"].coef_, 1)
def test_lda_transform():
    # 'svd' and 'eigen' support transform; 'lsqr' does not.
    for solver in ("svd", "eigen"):
        clf = LinearDiscriminantAnalysis(solver=solver, n_components=1)
        transformed = clf.fit(X, y).transform(X)
        assert_equal(transformed.shape[1], 1)

    clf = LinearDiscriminantAnalysis(solver="lsqr", n_components=1)
    clf.fit(X, y)
    assert_raise_message(NotImplementedError,
                         "transform not implemented for 'lsqr'",
                         clf.transform, X)
def test_lda_explained_variance_ratio():
    # The normalized eigenvalues should sum to one, and the 'eigen' and
    # 'svd' solvers should agree on the leading ratios.
    state = np.random.RandomState(0)
    X = state.normal(loc=0, scale=100, size=(40, 20))
    y = state.randint(0, 3, size=(40,))

    clf_eigen = LinearDiscriminantAnalysis(solver="eigen")
    clf_eigen.fit(X, y)
    assert_almost_equal(clf_eigen.explained_variance_ratio_.sum(), 1.0, 3)

    clf_svd = LinearDiscriminantAnalysis(solver="svd")
    clf_svd.fit(X, y)
    assert_almost_equal(clf_svd.explained_variance_ratio_.sum(), 1.0, 3)

    # NOTE: the 'eigen' ratio vector is not of n_components length; compare
    # only the prefix both solvers share.
    n_common = min(clf_svd.explained_variance_ratio_.shape[0],
                   clf_eigen.explained_variance_ratio_.shape[0])
    assert_array_almost_equal(
        clf_svd.explained_variance_ratio_,
        clf_eigen.explained_variance_ratio_[:n_common])
def test_lda_orthogonality():
    # arrange four classes with their means in a kite-shaped pattern
    # the longer distance should be transformed to the first component, and
    # the shorter distance to the second component.
    means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])

    # Perfectly symmetric per-class scatter lets LDA estimate the means
    # precisely.
    scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
                        [0, 0, 0.1], [0, 0, -0.1]])

    X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
    y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])

    # Fit LDA and transform the class means.
    clf = LinearDiscriminantAnalysis(solver="svd").fit(X, y)
    means_t = clf.transform(means)

    long_axis = means_t[3] - means_t[0]
    short_axis = means_t[2] - means_t[1]
    long_axis /= np.sqrt(np.sum(long_axis ** 2))
    short_axis /= np.sqrt(np.sum(short_axis ** 2))

    # the transformed within-class covariance should be the identity matrix
    assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))

    # the means of classes 0 and 3 should lie on the first component
    assert_almost_equal(np.abs(np.dot(long_axis[:2], [1, 0])), 1.0)

    # the means of classes 1 and 2 should lie on the second component
    assert_almost_equal(np.abs(np.dot(short_axis[:2], [0, 1])), 1.0)
def test_lda_scaling():
    # Classification must be unaffected by wildly different feature scales.
    n = 100
    rng = np.random.RandomState(1234)
    # Uniform features guarantee zero overlap between the two classes.
    neg = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
    pos = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
    x = np.vstack((neg, pos)) * [1, 100, 10000]
    y = [-1] * n + [1] * n

    for solver in ('svd', 'lsqr', 'eigen'):
        clf = LinearDiscriminantAnalysis(solver=solver)
        # should be able to separate the data perfectly
        assert_equal(clf.fit(x, y).score(x, y), 1.0,
                     'using covariance: %s' % solver)
def test_qda():
    # QDA must implement fit and predict and classify the toy data correctly.
    qda = QuadraticDiscriminantAnalysis()
    assert_array_equal(qda.fit(X6, y6).predict(X6), y6)

    # 1D data must work as well
    pred1 = qda.fit(X7, y6).predict(X7)
    assert_array_equal(pred1, y6)

    # probability estimates must agree with the hard predictions
    proba1 = qda.predict_proba(X7)
    assert_array_equal((proba1[:, 1] > 0.5) + 1, y6)
    log_proba1 = qda.predict_log_proba(X7)
    assert_array_almost_equal(np.exp(log_proba1), proba1, 8)

    pred3 = qda.fit(X6, y7).predict(X6)
    # QDA shouldn't be able to separate those
    assert_true(np.any(pred3 != y7))

    # Classes should have at least 2 elements
    assert_raises(ValueError, qda.fit, X6, y4)
def test_qda_priors():
    # A near-zero prior on class 1 should push predictions towards class 2.
    baseline = QuadraticDiscriminantAnalysis().fit(X6, y6).predict(X6)
    n_pos = np.sum(baseline == 2)

    eps = 1e-10
    skewed = QuadraticDiscriminantAnalysis(priors=np.array([eps, 1 - eps]))
    n_pos2 = np.sum(skewed.fit(X6, y6).predict(X6) == 2)

    assert_greater(n_pos2, n_pos)
def test_qda_store_covariances():
    """covariances_ must only be exposed when store_covariances=True."""
    # Default fit: no covariance attribute is stored.
    fitted = QuadraticDiscriminantAnalysis().fit(X6, y6)
    assert_true(not hasattr(fitted, 'covariances_'))
    # With the flag enabled, per-class covariances are kept and must match
    # the known values for this toy dataset.
    fitted = QuadraticDiscriminantAnalysis(store_covariances=True).fit(X6, y6)
    assert_true(hasattr(fitted, 'covariances_'))
    expected = (
        np.array([[0.7, 0.45], [0.45, 0.7]]),
        np.array([[0.33333333, -0.33333333], [-0.33333333, 0.66666667]]),
    )
    for covariance, reference in zip(fitted.covariances_, expected):
        assert_array_almost_equal(covariance, reference)
def test_qda_regularization():
    # the default is reg_param=0. and will cause issues
    # when there is a constant variable
    clf = QuadraticDiscriminantAnalysis()
    with ignore_warnings():
        y_pred = clf.fit(X2, y6).predict(X2)
    # Without regularization at least one sample must be misclassified.
    assert_true(np.any(y_pred != y6))
    # adding a little regularization fixes the problem
    clf = QuadraticDiscriminantAnalysis(reg_param=0.01)
    with ignore_warnings():
        clf.fit(X2, y6)
    y_pred = clf.predict(X2)
    assert_array_equal(y_pred, y6)
    # Case n_samples_in_a_class < n_features: regularization is likewise
    # needed for the per-class covariance estimate to be usable.
    clf = QuadraticDiscriminantAnalysis(reg_param=0.1)
    with ignore_warnings():
        clf.fit(X5, y5)
    y_pred5 = clf.predict(X5)
    assert_array_equal(y_pred5, y5)
def test_deprecated_lda_qda_deprecation():
    # The legacy sklearn.lda / sklearn.qda modules must raise a
    # DeprecationWarning on (re)import and alias the replacement classes.
    if reload is None:
        # No reload() available -- cannot force a fresh module import.
        raise SkipTest("Can't reload module on Python3.3")

    def import_lda_module():
        import sklearn.lda
        # ensure that we trigger DeprecationWarning even if the sklearn.lda
        # was loaded previously by another test.
        reload(sklearn.lda)
        return sklearn.lda
    lda = assert_warns(DeprecationWarning, import_lda_module)
    assert lda.LDA is LinearDiscriminantAnalysis

    def import_qda_module():
        import sklearn.qda
        # ensure that we trigger DeprecationWarning even if the sklearn.qda
        # was loaded previously by another test.
        reload(sklearn.qda)
        return sklearn.qda
    qda = assert_warns(DeprecationWarning, import_qda_module)
    assert qda.QDA is QuadraticDiscriminantAnalysis
def test_covariance():
    """_cov must return a symmetric matrix for both shrinkage modes."""
    x, y = make_blobs(n_samples=100, n_features=5,
                      centers=1, random_state=42)
    # Mix the columns so that the features become correlated.
    mixing = np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1])
    x = np.dot(x, mixing)
    for shrinkage in ('empirical', 'auto'):
        covariance = _cov(x, shrinkage)
        assert_almost_equal(covariance, covariance.T)
| bsd-3-clause |
sinhrks/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
    """Draw the separating line of a fitted linear classifier."""
    # Solve w0*x + w1*y + b = 0 for y to express the boundary as a line.
    weights = clf.coef_[0]
    slope = -weights[0] / weights[1]
    # Extend a little beyond the data range so the line spans the plot.
    xs = np.linspace(min_x - 5, max_x + 5)
    ys = slope * xs - (clf.intercept_[0]) / weights[1]
    plt.plot(xs, ys, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
    """Project X to 2D (PCA or CCA), fit a one-vs-rest linear SVC and draw
    the samples plus both class boundaries into subplot ``subplot``."""
    if transform == "pca":
        X = PCA(n_components=2).fit_transform(X)
    elif transform == "cca":
        # CCA is supervised: the labels Y take part in the projection.
        X = CCA(n_components=2).fit(X, Y).transform(X)
    else:
        raise ValueError
    min_x = np.min(X[:, 0])
    max_x = np.max(X[:, 0])
    min_y = np.min(X[:, 1])
    max_y = np.max(X[:, 1])
    classif = OneVsRestClassifier(SVC(kernel='linear'))
    classif.fit(X, Y)
    plt.subplot(2, 2, subplot)
    plt.title(title)
    # Index sets of samples carrying each label; a sample may be in both.
    zero_class = np.where(Y[:, 0])
    one_class = np.where(Y[:, 1])
    plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
    # Labeled samples are ringed with (possibly two) colored circles.
    plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
                facecolors='none', linewidths=2, label='Class 1')
    plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
                facecolors='none', linewidths=2, label='Class 2')
    plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
                    'Boundary\nfor class 1')
    plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
                    'Boundary\nfor class 2')
    plt.xticks(())
    plt.yticks(())
    # Pad the axes so markers near the border stay fully visible.
    plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
    plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
    if subplot == 2:
        plt.xlabel('First principal component')
        plt.ylabel('Second principal component')
        plt.legend(loc="upper left")
# Draw the four subplots: with/without unlabeled samples, CCA vs PCA.
plt.figure(figsize=(8, 6))

X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=True,
                                      random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")

# Same generator but every sample is forced to carry at least one label.
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=False,
                                      random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")

plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
vincent-noel/SigNetSim | signetsim/settings/Settings.py | 2 | 1240 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel (vincent.noel@butantan.gov.br)
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
""" Settings.py
This file ...
"""
import seaborn
class Settings(object):
    """Global presentation and limit settings for the application."""

    # Hex colour cycle used for plots, taken from seaborn's pastel palette.
    default_colors = seaborn.color_palette(palette="pastel").as_hex()
    # default_colors = ["#FFB300", "#803E75", "#FF6800", "#A6BDD7", "#C10020", "#CEA262", "#817066", "#007D34", "#F6768E", "#00538A",
    #                   "#FF7A5C", "#53377A", "#FF8E00", "#B32851", "#F4C800", "#7F180D", "#93AA00", "#593315", "#F13A13", "#232C16"]
    maxVisitorCPUTime = 60  # 1 min computation max for visitors
zhakui/QMarkdowner | setup.py | 4 | 14173 | # -*- coding: utf-8 -*-
"""
利用Python脚本从svn获取最新版本信息,然后利用py2exe进行打包,最新进行相应的清理操作
"""
import os
import time
import glob
import shutil
import subprocess
import sys
import stat
import zipfile
import json
from distribution import Distribution
from Cheetah.Template import Template
def change_package_fromLib(package_name):
    '''
    Replace the package named ``package_name`` inside dist/library.zip with
    the version installed in the Python Lib (site-packages) directory.
    '''
    library_zippath = os.getcwd() + os.sep + os.sep.join(['dist', 'library.zip'])
    library_path = os.getcwd() + os.sep + os.sep.join(['dist', 'library'])
    # Unpack the bundled library so the stale package can be swapped out.
    with zipfile.ZipFile(library_zippath, 'r') as zip_file:
        zip_file.extractall(path=library_path)
    shutil.rmtree(library_path + os.sep + package_name)
    for item in [package_name]:
        # Import the package to discover where it lives on disk.
        package = __import__(item)
        package_path = os.path.dirname(package.__file__)
        shutil.copytree(package_path, library_path + os.sep + package_name)
    # Rebuild the zip from the patched directory tree and clean up.
    os.remove(os.getcwd() + os.sep + os.sep.join(['dist', 'library.zip']))
    addFolderToZip(library_path, 'dist\library.zip')
    shutil.rmtree(library_path)
def change_package_fromLocal(package_name):
    '''
    Replace the package named ``package_name`` inside dist/library.zip with
    the copy living in the current project directory.
    '''
    library_zippath = os.getcwd() + os.sep + os.sep.join(['dist', 'library.zip'])
    library_path = os.getcwd() + os.sep + os.sep.join(['dist', 'library'])
    # Unpack the bundled library so the stale package can be swapped out.
    with zipfile.ZipFile(library_zippath, 'r') as zip_file:
        zip_file.extractall(path=library_path)
    shutil.rmtree(library_path + os.sep + package_name)
    for item in [package_name]:
        package_path = os.getcwd() + os.sep + item
        shutil.copytree(package_path, library_path + os.sep + package_name)
    # Rebuild the zip from the patched directory tree and clean up.
    os.remove(os.getcwd() + os.sep + os.sep.join(['dist', 'library.zip']))
    addFolderToZip(library_path, 'dist\library.zip')
    shutil.rmtree(library_path)
def addFolderToZip(folder, zip_filename):
    '''
    Add the directory tree rooted at ``folder`` to a freshly created zip
    archive named ``zip_filename`` (mode 'w' discards existing content).
    '''
    with zipfile.ZipFile(zip_filename, 'w') as zip_file:
        def addhandle(folder, zip_file):
            # Recurse over the tree; archive names are the path portion
            # after 'library\'.
            # NOTE(review): this assumes ``folder`` lies under a directory
            # literally named 'library' -- confirm with callers.
            for f in os.listdir(folder):
                full_path = os.path.join(folder, f)
                if os.path.isfile(full_path):
                    print 'Add file: %s' % full_path
                    zip_file.write(full_path, full_path.split('library\\')[1])
                elif os.path.isdir(full_path):
                    print 'add folder: %s' % full_path
                    addhandle(full_path, zip_file)
        addhandle(folder, zip_file)
def delete_file_folder(src):
    """Recursively delete ``src``, whether it is a file or a directory.

    Deletion is best-effort: entries that cannot be removed (permission
    errors, files in use, ...) are skipped silently instead of aborting
    the cleanup.  Paths that do not exist are ignored.

    :param src: path of the file or directory to remove.
    """
    if os.path.isfile(src):
        try:
            os.remove(src)
        except OSError:
            # Bug fix: the bare ``except:`` also swallowed SystemExit and
            # KeyboardInterrupt; only filesystem errors should be ignored.
            pass
    elif os.path.isdir(src):
        for item in os.listdir(src):
            itemsrc = os.path.join(src, item)
            delete_file_folder(itemsrc)
        try:
            os.rmdir(src)
        except OSError:
            pass
def get_py2exe_datafiles(datapath, relativehead):
    """Collect (relative_dir, [file, ...]) pairs for py2exe's data_files.

    Walks ``datapath`` and rewrites each directory path so that it starts
    at ``relativehead`` instead of the on-disk location.
    """
    tail = os.path.split(datapath)[1]
    mapping = {}
    for dirpath, dirnames, filenames in os.walk(datapath):
        full_files = [os.path.join(dirpath, name) for name in filenames]
        # Swap the top-level directory name for the relative head, then
        # keep everything from the head onwards.
        relative_dir = dirpath.replace(tail, relativehead)
        relative_dir = relative_dir[relative_dir.index(relativehead):]
        mapping[relative_dir] = full_files
    return mapping.items()
def write_file(filename, content):
    '''
    Write ``content`` to ``filename``, overwriting any previous content.

    :param filename: target file path.
    :param content: string to write.
    '''
    # Bug fix: ``with`` guarantees the handle is closed even if the write
    # raises; the previous open()/write()/close() sequence leaked it.
    with open(filename, "w") as fd:
        fd.write(content)
def get_sw_distributedname(sw_name, sw_version, project_svnpath):
    '''
    Input:
        sw_name: software name
        sw_version: software version number
        project_svnpath: path of the project in the svn repository
    Output:
        project_localpath: local path of the checked-out project
        distributedname: release name built from name/version/revision/date
        info: [svn revision, build date] pair
    Behaviour:
        Check the project out of svn into the working directory, read the
        svn revision number and assemble the release identifier.
    '''
    project_localpath = os.sep.join([os.getcwd(), sw_name])
    subprocess.call('svn co %s %s' % (project_svnpath, project_localpath))
    svn_info = subprocess.check_output(['svn', 'info'])
    svn_infolines = svn_info.split(os.linesep)
    # NOTE(review): the revision is read from a fixed line/column of the
    # `svn info` output -- fragile if the svn client format changes.
    svn_version = svn_infolines[6][6:]
    buildtime = time.strftime("%Y%m%d", time.localtime(int(time.time()))).decode('UTF8')
    distributedname = '%s-v%s-r%s-b%s' % (sw_name, sw_version, svn_version, buildtime)
    info = [svn_version, buildtime]
    return project_localpath, distributedname, info
def clearsvn(sw_path):
    '''
    Input:
        sw_path: project directory
    Output:
        None
    Behaviour:
        Walk the tree and delete every directory whose path contains
        '.svn' (svn metadata) with shutil.rmtree.
    '''
    # NOTE(review): the tree is mutated while os.walk is iterating over
    # it; os.walk ignores listing errors by default, but confirm that
    # nested '.svn' removal behaves as intended.
    for (p, d, f) in os.walk(sw_path):
        if p.find('.svn') > 0:
            shutil.rmtree(p)
def getfiles(path):
    '''
    Return the list of all file paths found (recursively) under ``path``.
    '''
    collected = []
    for dirpath, dirnames, filenames in os.walk(path):
        collected.extend(os.sep.join([dirpath, filename])
                         for filename in filenames)
    return collected
def getjsfiles(path):
    '''
    Return every file under ``path`` whose name ends with '.js'.
    '''
    return [name for name in getfiles(path) if name[-3:] == '.js']
def jsminhandle(jsfiles):
    '''
    Minify the given .js files in place with the YUI Compressor jar,
    which is expected to sit next to this script.
    '''
    yuicompressor_path = os.sep.join([os.getcwd(), 'yuicompressor-2.4.6.jar'])
    for jsfile in jsfiles:
        if jsfile[-3:] == '.js':
            print 'js min : %s' % jsfile
            # -o <input path> overwrites each file with its minified form.
            os.system('java -jar %s --type js -o %s %s' % (yuicompressor_path, jsfile, jsfile))
def getpyfiles(path):
    '''
    Return every file under ``path`` whose name ends with '.py'.
    '''
    return [name for name in getfiles(path) if name[-3:] == '.py']
def py2pyohandle(pyfiles):
    '''
    Byte-compile the given .py files with py_compile and delete the
    sources, keeping the entry points (ifpms.py, userifpms.py, mark.py)
    as plain .py files.
    '''
    for filename in pyfiles:
        if os.path.basename(filename) not in ['ifpms.py', 'userifpms.py', 'mark.py']:
            print 'py 2 pyo: %s' % filename
            # NOTE(review): hard-coded Python 2.5 interpreter path -- the
            # build machine layout is assumed here.
            os.system("C:\Python25\python.exe -m py_compile %s" % filename)
            os.remove(filename)
def generate_markpy(sw_path, product='default'):
    """Write a tiny Python module recording the product logo name."""
    content = '''
#!/usr/bin/env python
# -*- coding: UTF8 -*-
logo = '%s'
''' % product
    import codecs
    # NOTE(review): 'pylib' looks like a directory name, yet it is opened
    # as a file below -- confirm the intended target filename.
    filepath = os.sep.join([sw_path, 'extensions', 'ifpms@ov-orange.com', 'pylib'])
    with codecs.open(filepath, 'w', 'utf-8') as f:
        f.write(content)
def generate_license(sw_path, product='default'):
    """Install the product-specific license file as chrome/license.txt."""
    source = os.sep.join([sw_path, 'license.%s.txt' % product])
    target = os.sep.join([sw_path, 'chrome', 'license.txt'])
    os.rename(source, target)
def generate_chromemanifest(sw_path, product='default'):
    """Write chrome/chrome.manifest, pointing the skin at ``product``."""
    content = '''
content ifpms jar:ifpms.jar!/content/
content etc etc/
content wav wavs/
content sample sample/
content mapImg mapImg/
skin ifpms default jar:ifpms.jar!/skin/%s/
locale ifpms zh-CN jar:ifpms.jar!/locale/zh-CN/
locale ifpms en-US jar:ifpms.jar!/locale/en-US/
''' % product
    import codecs
    filepath = os.sep.join([sw_path, 'chrome', 'chrome.manifest'])
    with codecs.open(filepath, 'w', 'utf-8') as f:
        f.write(content)
def generate_nsi(sw_name, build, datatime):
    """Write an (empty, placeholder) NSIS installer script for ``sw_name``.

    The script is created as '<sw_name>-setup.nsi' in the current working
    directory.  ``build`` and ``datatime`` are currently unused and kept
    only for interface compatibility.
    """
    content = '''
'''
    import codecs
    # Bug fix: the original referenced an undefined global ``path``
    # (NameError on every call); anchor the output in the current working
    # directory like the other generate_* helpers in this script.
    filepath = os.sep.join([os.getcwd(), '%s-setup.nsi' % sw_name])
    with codecs.open(filepath, 'w', 'utf-8') as f:
        f.write(content)
def auto_iss(template, nameSpace):
    '''
    Render the Cheetah ``template`` with the values in ``nameSpace`` and
    return the result as unicode text.
    '''
    t = Template(template, searchList=[nameSpace])
    code = unicode(t)
    return code
# Inno Setup (.iss) script template.  The $-prefixed placeholders are
# substituted by auto_iss() via Cheetah before the script is compiled
# with iscc.
templateDef_iss ='''
; Script generated by the Inno Setup Script Wizard.
; SEE THE DOCUMENTATION FOR DETAILS ON CREATING INNO SETUP SCRIPT FILES!
#define MyAppName "$MyAppName"
#define MyAppVersion "$MyAppVersion"
#define MyAppPublisher "$MyAppPublisher"
#define MyAppURL "$MyAppURL"
#define MyAppExeName "$MyAppExeName"
[Setup]
; NOTE: The value of AppId uniquely identifies this application.
; Do not use the same AppId value in installers for other applications.
; (To generate a new GUID, click Tools | Generate GUID inside the IDE.)
AppId={{8136D515-0A08-46A2-85CD-5BA9EC33D909}
AppName={#MyAppName}
AppVersion={#MyAppVersion}
;AppVerName={#MyAppName} {#MyAppVersion}
AppPublisher={#MyAppPublisher}
AppPublisherURL={#MyAppURL}
AppSupportURL={#MyAppURL}
AppUpdatesURL={#MyAppURL}
DefaultDirName={pf}\{#MyAppName}
DefaultGroupName={#MyAppName}
OutputBaseFilename=$distributedname
SetupIconFile=$SetupIconFile
Compression=lzma
SolidCompression=yes
[Languages]
Name: "english"; MessagesFile: "compiler:Default.isl"
[Tasks]
Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}"; Flags: unchecked
[Files]
Source: "$source_exe"; DestDir: "{app}"; Flags: ignoreversion
Source: "$source_folder"; DestDir: "{app}"; Flags: ignoreversion recursesubdirs createallsubdirs
; NOTE: Don't use "Flags: ignoreversion" on any shared system files
[Icons]
Name: "{group}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"
Name: "{commondesktop}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"; Tasks: desktopicon
[Run]
Filename: "{app}\{#MyAppExeName}"; Description: "{cm:LaunchProgram,{#StringChange(MyAppName, '&', '&&')}}"; Flags: nowait postinstall skipifsilent
'''
if __name__ == '__main__':
    # Read the basic software metadata from the project's config module.
    import config
    sw_name = config.__softwarename__
    sw_version = config.__version__
    sw_publisher = config.__author__
    sw_url = config.__url__
    sw_description = config.__description__
    sw_logoico = config.__logoico__
    # Derive a pseudo revision number from the length of the git log.
    # NOTE(review): shell=True combined with a list argv is unusual -- it
    # happens to work on Windows, but confirm before porting elsewhere.
    p = subprocess.Popen(
        ['git','log'],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        shell = True,
    )
    svn_version = str(360 + len(p.stdout.readlines())/ 6)
    t = time.gmtime()
    # Build a zero-padded YYYYMMDD build timestamp.
    if t.tm_mon < 10 or t.tm_mday < 10:
        mon = '0%d' % t.tm_mon
        day = '0%d' % t.tm_mday
        if t.tm_mon >= 10:
            mon = t.tm_mon
        if t.tm_mday >= 10:
            day = t.tm_mday
        buildtime = ''.join([str(i) for i in [t.tm_year, mon, day]])
    else:
        buildtime = ''.join([str(i) for i in [t.tm_year, t.tm_mon, t.tm_mday]])
    info = [svn_version, buildtime]
    # NOTE(review): duplicated assignment target below looks like a typo.
    distributedname = distributedname = '%s-v%s-r%s-b%s' % (sw_name, sw_version, svn_version, buildtime)
    # Generate the setup.iss packaging script from the Cheetah template.
    setup_iss_file = '%s-setup.iss' % sw_name
    nameSpace = {
        'MyAppName': sw_name,
        'MyAppVersion': sw_version,
        'MyAppPublisher': sw_publisher,
        'MyAppURL': sw_url,
        'MyAppExeName': '%s.exe' % sw_name,
        'distributedname': distributedname,
        'SetupIconFile': sw_logoico,
        'source_exe': os.sep.join([os.getcwd(), 'dist', '%s.exe' % sw_name]),
        'source_folder': os.sep.join([os.getcwd(), 'dist', '*'])
    }
    setup_iss_content = auto_iss(templateDef_iss, nameSpace)
    with open(setup_iss_file, 'w') as f:
        f.write(setup_iss_content)
    # Bundle the project into an executable with py2exe (Windows only).
    if os.name == "nt":
        dist = Distribution()
        dist.vs2008 = None
        dist.setup(
            name=sw_name,
            version=sw_version,
            description=u"Application based on PyQt4",
            script=os.sep.join([os.getcwd(), 'QMain.py']),
            target_name='QMarkdowner',
            icon=sw_logoico)
        dist.add_modules('PyQt4')
        dist.bin_excludes += ["libzmq.dll"]
        dist.includes += []
        # dist.data_files += matplotlibdata_files
        dist.data_files += get_py2exe_datafiles(os.sep.join([os.getcwd(), 'utildialog', 'utildialogskin']), 'utildialogskin')
        dist.data_files += [('phonon_backend', [
            'C:\Python27\Lib\site-packages\PyQt4\plugins\phonon_backend\phonon_ds94.dll'
        ]),
            ('imageplugins', [
                'c:\Python27\lib\site-packages\PyQt4\plugins\imageformats\qgif4.dll',
                'c:\Python27\lib\site-packages\PyQt4\plugins\imageformats\qjpeg4.dll',
                'c:\Python27\lib\site-packages\PyQt4\plugins\imageformats\qsvg4.dll',
                'c:\Python27\lib\site-packages\PyQt4\plugins\imageformats\qico4.dll',
            ])]
        dist.excludes += [
            '_gtkagg',
            '_tkagg',
            '_agg2',
            '_cairo',
            '_cocoaagg',
            '_fltkagg',
            '_gtk',
            '_gtkcairo', ]
        dist.build('py2exe')
    '''
    拷贝响应的图片皮肤和与项目有关的资源文件到打包目录
    '''
    # Copy skins and project resource folders next to the built executable.
    for item in ['skin', 'webjscss', 'dependtool', 'doc']:
        shutil.copytree(os.getcwd() + os.sep + item, os.getcwd() + os.sep + os.sep.join(['dist', item]))
    for item in ['log', 'options']:
        os.mkdir(os.getcwd() + os.sep + os.sep.join(['dist', item]))
    change_package_fromLocal('Cheetah')
    # Record the version metadata in options/ver.json.
    sw_info = json.dumps({
        "sw_name": sw_name,
        "sw_version": 'v%s' % sw_version,
        "svn_version": 'r%s' % info[0],
        "buildtime": 'b%s' % info[1]},
        indent=3)
    write_file(os.sep.join([os.getcwd(), 'dist', 'options', 'ver.json']), sw_info)
    # Run Inno Setup's iscc on the generated script to build the installer.
    os.system("iscc %s" % setup_iss_file)
    # Clean up the intermediate build artifacts.
    for key in ['build', 'dist', sw_name, 'QMarkdowner-setup.iss']:
        path = os.getcwd() + os.sep + key
        delete_file_folder(path)
| mit |
arju88nair/projectCulminate | venv/lib/python3.5/site-packages/nltk/sentiment/util.py | 7 | 31020 | # coding: utf-8
#
# Natural Language Toolkit: Sentiment Analyzer
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Pierpaolo Pantone <24alsecondo@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Utility methods for Sentiment Analysis.
"""
from __future__ import division
from copy import deepcopy
import codecs
import csv
import json
import pickle
import random
import re
import sys
import time
import nltk
from nltk.corpus import CategorizedPlaintextCorpusReader
from nltk.data import load
from nltk.tokenize.casual import EMOTICON_RE
from nltk.twitter.common import outf_writer_compat, extract_fields
#////////////////////////////////////////////////////////////
#{ Regular expressions
#////////////////////////////////////////////////////////////
# Regular expression for negation by Christopher Potts
NEGATION = r"""
(?:
^(?:never|no|nothing|nowhere|noone|none|not|
havent|hasnt|hadnt|cant|couldnt|shouldnt|
wont|wouldnt|dont|doesnt|didnt|isnt|arent|aint
)$
)
|
n't"""
NEGATION_RE = re.compile(NEGATION, re.VERBOSE)
CLAUSE_PUNCT = r'^[.:;!?]$'
CLAUSE_PUNCT_RE = re.compile(CLAUSE_PUNCT)
# Happy and sad emoticons
HAPPY = set([
':-)', ':)', ';)', ':o)', ':]', ':3', ':c)', ':>', '=]', '8)', '=)', ':}',
':^)', ':-D', ':D', '8-D', '8D', 'x-D', 'xD', 'X-D', 'XD', '=-D', '=D',
'=-3', '=3', ':-))', ":'-)", ":')", ':*', ':^*', '>:P', ':-P', ':P', 'X-P',
'x-p', 'xp', 'XP', ':-p', ':p', '=p', ':-b', ':b', '>:)', '>;)', '>:-)',
'<3'
])
SAD = set([
':L', ':-/', '>:/', ':S', '>:[', ':@', ':-(', ':[', ':-||', '=L', ':<',
':-[', ':-<', '=\\', '=/', '>:(', ':(', '>.<', ":'-(", ":'(", ':\\', ':-c',
':c', ':{', '>:\\', ';('
])
def timer(method):
    """
    A timer decorator to measure execution performance of methods.

    Prints the wall-clock duration of each call and returns the wrapped
    method's result unchanged.
    """
    def timed(*args, **kw):
        start = time.time()
        result = method(*args, **kw)
        end = time.time()
        tot_time = end - start
        # Cast to int so the h/m figures print as whole numbers
        # (``//`` on floats yields floats).
        hours = int(tot_time // 3600)
        mins = int(tot_time // 60 % 60)
        # in Python 2.x round() will return a float, so we convert it to int
        secs = int(round(tot_time % 60))
        if hours == 0 and mins == 0 and secs < 10:
            # Bug fix: the original format string mixed manual ('{0}') and
            # automatic ('{:.3f}') field numbering, which raises ValueError.
            print('[TIMER] {0}(): {1:.3f} seconds'.format(method.__name__, tot_time))
        else:
            print('[TIMER] {0}(): {1}h {2}m {3}s'.format(method.__name__, hours, mins, secs))
        return result
    return timed
#////////////////////////////////////////////////////////////
#{ Feature extractor functions
#////////////////////////////////////////////////////////////
"""
Feature extractor functions are declared outside the SentimentAnalyzer class.
Users should have the possibility to create their own feature extractors
without modifying SentimentAnalyzer.
"""
def extract_unigram_feats(document, unigrams, handle_negation=False):
    """
    Build a dictionary of unigram presence features for ``document``.

    :param document: a list of words/tokens.
    :param unigrams: a list of words/tokens whose presence/absence has to be
        checked in `document`.
    :param handle_negation: if `handle_negation == True` apply `mark_negation`
        method to `document` before checking for unigram presence/absence.
    :return: a dictionary of unigram features {unigram : boolean}.

    >>> words = ['ice', 'police', 'riot']
    >>> document = 'ice is melting due to global warming'.split()
    >>> sorted(extract_unigram_feats(document, words).items())
    [('contains(ice)', True), ('contains(police)', False), ('contains(riot)', False)]
    """
    if handle_negation:
        document = mark_negation(document)
    # Build the membership set once; results are identical to testing
    # each unigram against set(document) individually.
    tokens = set(document)
    return {'contains({0})'.format(word): word in tokens
            for word in unigrams}
def extract_bigram_feats(document, bigrams):
    """
    Build a dictionary of contiguous-bigram presence features for
    ``document``.

    :param document: a list of words/tokens.
    :param bigrams: a list of (w1, w2) pairs whose presence/absence has to
        be checked in `document`.
    :return: a dictionary of bigram features {bigram : boolean}.

    >>> bigrams = [('global', 'warming'), ('police', 'prevented'), ('love', 'you')]
    >>> document = 'ice is melting due to global warming'.split()
    >>> sorted(extract_bigram_feats(document, bigrams).items())
    [('contains(global - warming)', True), ('contains(love - you)', False),
    ('contains(police - prevented)', False)]
    """
    # Contiguous word pairs of the document, computed once up front
    # (equivalent to nltk.bigrams(document) for list input).
    present = set(zip(document, document[1:]))
    return {'contains({0} - {1})'.format(first, second): (first, second) in present
            for first, second in bigrams}
#////////////////////////////////////////////////////////////
#{ Helper Functions
#////////////////////////////////////////////////////////////
def mark_negation(document, double_neg_flip=False, shallow=False):
    """
    Append _NEG suffix to words that appear in the scope between a negation
    and a punctuation mark.

    :param document: a list of words/tokens, or a tuple (words, label).
    :param shallow: if True, the method will modify the original document in place.
    :param double_neg_flip: if True, double negation is considered affirmation
        (we activate/deactivate negation scope everytime we find a negation).
    :return: if `shallow == True` the method will modify the original document
        and return it. If `shallow == False` the method will return a modified
        document, leaving the original unmodified.

    >>> sent = "I didn't like this movie . It was bad .".split()
    >>> mark_negation(sent)
    ['I', "didn't", 'like_NEG', 'this_NEG', 'movie_NEG', '.', 'It', 'was', 'bad', '.']
    """
    if not shallow:
        document = deepcopy(document)
    # check if the document is labeled. If so, do not consider the label.
    labeled = document and isinstance(document[0], (tuple, list))
    if labeled:
        doc = document[0]
    else:
        doc = document
    neg_scope = False
    for i, word in enumerate(doc):
        if NEGATION_RE.search(word):
            if not neg_scope or (neg_scope and double_neg_flip):
                # Open (or, with double_neg_flip, toggle) a negation scope;
                # the negation word itself stays unmarked.
                neg_scope = not neg_scope
                continue
            else:
                # A further negation inside an active scope is marked too.
                doc[i] += '_NEG'
        elif neg_scope and CLAUSE_PUNCT_RE.search(word):
            # Clause punctuation closes the active negation scope.
            neg_scope = not neg_scope
        elif neg_scope and not CLAUSE_PUNCT_RE.search(word):
            doc[i] += '_NEG'
    return document
def output_markdown(filename, **kwargs):
    """
    Append the results of an analysis to ``filename`` as a markdown
    section.  Each keyword argument becomes a bullet entry; dict and list
    values are expanded into nested bullets.
    """
    # 'at' = append text: each call adds a new timestamped section.
    with codecs.open(filename, 'at') as outfile:
        text = '\n*** \n\n'
        text += '{0} \n\n'.format(time.strftime("%d/%m/%Y, %H:%M"))
        for k in sorted(kwargs):
            if isinstance(kwargs[k], dict):
                dictionary = kwargs[k]
                text += ' - **{0}:**\n'.format(k)
                for entry in sorted(dictionary):
                    text += '   - {0}: {1} \n'.format(entry, dictionary[entry])
            elif isinstance(kwargs[k], list):
                text += ' - **{0}:**\n'.format(k)
                for entry in kwargs[k]:
                    text += '   - {0}\n'.format(entry)
            else:
                text += ' - **{0}:** {1} \n'.format(k, kwargs[k])
        outfile.write(text)
def save_file(content, filename):
    """
    Serialize ``content`` into ``filename`` with pickle.  Can be used to
    store a SentimentAnalyzer.
    """
    print("Saving", filename)
    with codecs.open(filename, 'wb') as storage:
        # protocol=2 keeps the pickles loadable from Python 2.
        pickle.dump(content, storage, protocol=2)
def split_train_test(all_instances, n=None):
    """
    Randomly split ``n`` instances of the dataset into train (8/10) and
    test (2/10) sets.

    The input list is shuffled *in place* with a fixed seed, so the split
    is deterministic across runs.

    :param all_instances: a list of instances (e.g. documents) to split.
    :param n: optional cap on how many instances to use.
    :return: (train_set, test_set) tuple of lists.
    """
    random.seed(12345)
    random.shuffle(all_instances)
    # ``not n`` also catches n == 0, matching the original behaviour of
    # falling back to the full length.
    if not n or n > len(all_instances):
        n = len(all_instances)
    cut = int(.8 * n)
    return all_instances[:cut], all_instances[cut:n]
def _show_plot(x_values, y_values, x_labels=None, y_labels=None):
    """Display y_values against x_values as red markers on a fixed
    [-1.2, 1.2] vertical axis (requires matplotlib)."""
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError('The plot function requires matplotlib to be installed.'
                          'See http://matplotlib.org/')

    plt.locator_params(axis='y', nbins=3)
    axes = plt.axes()
    axes.yaxis.grid()
    plt.plot(x_values, y_values, 'ro', color='red')
    # Leave headroom above/below the [-1, 1] tick range.
    plt.ylim(ymin=-1.2, ymax=1.2)
    plt.tight_layout(pad=5)
    if x_labels:
        plt.xticks(x_values, x_labels, rotation='vertical')
    if y_labels:
        plt.yticks([-1, 0, 1], y_labels, rotation='horizontal')
    # Pad margins so that markers are not clipped by the axes
    plt.margins(0.2)
    plt.show()
#////////////////////////////////////////////////////////////
#{ Parsing and conversion functions
#////////////////////////////////////////////////////////////
def json2csv_preprocess(json_file, outfile, fields, encoding='utf8', errors='replace',
                        gzip_compress=False, skip_retweets=True, skip_tongue_tweets=True,
                        skip_ambiguous_tweets=True, strip_off_emoticons=True, remove_duplicates=True,
                        limit=None):
    """
    Convert json file to csv file, preprocessing each row to obtain a suitable
    dataset for tweets Semantic Analysis.

    :param json_file: the original json file containing tweets.
    :param outfile: the output csv filename.
    :param fields: a list of fields that will be extracted from the json file and
        kept in the output csv file.
    :param encoding: the encoding of the files.
    :param errors: the error handling strategy for the output writer.
    :param gzip_compress: if True, create a compressed GZIP file.
    :param skip_retweets: if True, remove retweets.
    :param skip_tongue_tweets: if True, remove tweets containing ":P" and ":-P"
        emoticons.
    :param skip_ambiguous_tweets: if True, remove tweets containing both happy
        and sad emoticons.
    :param strip_off_emoticons: if True, strip off emoticons from all tweets.
    :param remove_duplicates: if True, remove tweets appearing more than once.
    :param limit: an integer to set the number of tweets to convert. After the
        limit is reached the conversion will stop. It can be useful to create
        subsets of the original tweets json data.
    """
    with codecs.open(json_file, encoding=encoding) as fp:
        (writer, outf) = outf_writer_compat(outfile, encoding, errors, gzip_compress)
        # write the list of fields as header
        writer.writerow(fields)
        if remove_duplicates == True:
            tweets_cache = []
        i = 0
        # One JSON-encoded tweet per input line.
        for line in fp:
            tweet = json.loads(line)
            row = extract_fields(tweet, fields)
            try:
                text = row[fields.index('text')]
                # Remove retweets
                if skip_retweets == True:
                    if re.search(r'\bRT\b', text):
                        continue
                # Remove tweets containing ":P" and ":-P" emoticons
                if skip_tongue_tweets == True:
                    if re.search(r'\:\-?P\b', text):
                        continue
                # Remove tweets containing both happy and sad emoticons
                if skip_ambiguous_tweets == True:
                    all_emoticons = EMOTICON_RE.findall(text)
                    if all_emoticons:
                        if (set(all_emoticons) & HAPPY) and (set(all_emoticons) & SAD):
                            continue
                # Strip off emoticons from all tweets, collapsing the
                # leftover whitespace to single spaces.
                if strip_off_emoticons == True:
                    row[fields.index('text')] = re.sub(r'(?!\n)\s+', ' ', EMOTICON_RE.sub('', text))
                # Remove duplicate tweets
                if remove_duplicates == True:
                    if row[fields.index('text')] in tweets_cache:
                        continue
                    else:
                        tweets_cache.append(row[fields.index('text')])
            except ValueError:
                # 'text' is not among the requested fields: skip filtering.
                pass
            writer.writerow(row)
            i += 1
            if limit and i >= limit:
                break
        outf.close()
def parse_tweets_set(filename, label, word_tokenizer=None, sent_tokenizer=None,
                     skip_header=True):
    """
    Parse csv file containing tweets and output data a list of (text, label) tuples.

    :param filename: the input csv filename.
    :param label: the label to be appended to each tweet contained in the csv file.
    :param word_tokenizer: the tokenizer instance that will be used to tokenize
        each sentence into tokens (e.g. WordPunctTokenizer() or BlanklineTokenizer()).
        If no word_tokenizer is specified, tweets will not be tokenized.
    :param sent_tokenizer: the tokenizer that will be used to split each tweet into
        sentences.
    :param skip_header: if True, skip the first line of the csv file (which usually
        contains headers).

    :return: a list of (text, label) tuples.
    """
    tweets = []
    if not sent_tokenizer:
        # Default to the pre-trained English Punkt sentence tokenizer.
        sent_tokenizer = load('tokenizers/punkt/english.pickle')
    # If we use Python3.x we can proceed using the 'rt' flag
    if sys.version_info[0] == 3:
        with codecs.open(filename, 'rt') as csvfile:
            reader = csv.reader(csvfile)
            if skip_header == True:
                next(reader, None)  # skip the header
            i = 0
            for tweet_id, text in reader:
                # text = text[1]
                i += 1
                sys.stdout.write('Loaded {0} tweets\r'.format(i))
                # Apply sentence and word tokenizer to text
                if word_tokenizer:
                    tweet = [w for sent in sent_tokenizer.tokenize(text)
                             for w in word_tokenizer.tokenize(sent)]
                else:
                    tweet = text
                tweets.append((tweet, label))
    # If we use Python2.x we need to handle encoding problems
    elif sys.version_info[0] < 3:
        with codecs.open(filename) as csvfile:
            reader = csv.reader(csvfile)
            if skip_header == True:
                next(reader, None)  # skip the header
            i = 0
            for row in reader:
                # csv gives byte strings on Python 2: decode before use.
                unicode_row = [x.decode('utf8') for x in row]
                text = unicode_row[1]
                i += 1
                sys.stdout.write('Loaded {0} tweets\r'.format(i))
                # Apply sentence and word tokenizer to text
                if word_tokenizer:
                    tweet = [w.encode('utf8') for sent in sent_tokenizer.tokenize(text)
                             for w in word_tokenizer.tokenize(sent)]
                else:
                    tweet = text
                tweets.append((tweet, label))
    print("Loaded {0} tweets".format(i))
    return tweets
#////////////////////////////////////////////////////////////
#{ Demos
#////////////////////////////////////////////////////////////
def demo_tweets(trainer, n_instances=None, output=None):
    """
    Train and test Naive Bayes classifier on 10000 tweets, tokenized using
    TweetTokenizer.
    Features are composed of:
        - 1000 most frequent unigrams
        - 100 top bigrams (using BigramAssocMeasures.pmi)
    :param trainer: `train` method of a classifier.
    :param n_instances: the number of total tweets that have to be used for
        training and testing. Tweets will be equally split between positive and
        negative.
    :param output: the output file where results have to be reported.
    """
    from nltk.tokenize import TweetTokenizer
    from nltk.sentiment import SentimentAnalyzer
    from nltk.corpus import twitter_samples, stopwords
    # Different customizations for the TweetTokenizer
    tokenizer = TweetTokenizer(preserve_case=False)
    # tokenizer = TweetTokenizer(preserve_case=True, strip_handles=True)
    # tokenizer = TweetTokenizer(reduce_len=True, strip_handles=True)
    if n_instances is not None:
        # Half of the requested tweets come from each polarity class.
        n_instances = int(n_instances/2)
    fields = ['id', 'text']
    # Convert the raw twitter_samples JSON files into CSV files, optionally
    # truncated to n_instances rows per class.
    positive_json = twitter_samples.abspath("positive_tweets.json")
    positive_csv = 'positive_tweets.csv'
    json2csv_preprocess(positive_json, positive_csv, fields, limit=n_instances)
    negative_json = twitter_samples.abspath("negative_tweets.json")
    negative_csv = 'negative_tweets.csv'
    json2csv_preprocess(negative_json, negative_csv, fields, limit=n_instances)
    neg_docs = parse_tweets_set(negative_csv, label='neg', word_tokenizer=tokenizer)
    pos_docs = parse_tweets_set(positive_csv, label='pos', word_tokenizer=tokenizer)
    # We separately split positive and negative instances to keep a balanced
    # uniform class distribution in both train and test sets.
    train_pos_docs, test_pos_docs = split_train_test(pos_docs)
    train_neg_docs, test_neg_docs = split_train_test(neg_docs)
    training_tweets = train_pos_docs+train_neg_docs
    testing_tweets = test_pos_docs+test_neg_docs
    sentim_analyzer = SentimentAnalyzer()
    # stopwords = stopwords.words('english')
    # all_words = [word for word in sentim_analyzer.all_words(training_tweets) if word.lower() not in stopwords]
    all_words = [word for word in sentim_analyzer.all_words(training_tweets)]
    # Add simple unigram word features
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words, top_n=1000)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
    # Add bigram collocation features
    bigram_collocs_feats = sentim_analyzer.bigram_collocation_feats([tweet[0] for tweet in training_tweets],
        top_n=100, min_freq=12)
    sentim_analyzer.add_feat_extractor(extract_bigram_feats, bigrams=bigram_collocs_feats)
    training_set = sentim_analyzer.apply_features(training_tweets)
    test_set = sentim_analyzer.apply_features(testing_tweets)
    classifier = sentim_analyzer.train(trainer, training_set)
    # classifier = sentim_analyzer.train(trainer, training_set, max_iter=4)
    try:
        classifier.show_most_informative_features()
    except AttributeError:
        # Not every classifier (e.g. sklearn wrappers) exposes this method.
        print('Your classifier does not provide a show_most_informative_features() method.')
    results = sentim_analyzer.evaluate(test_set)
    if output:
        extr = [f.__name__ for f in sentim_analyzer.feat_extractors]
        output_markdown(output, Dataset='labeled_tweets', Classifier=type(classifier).__name__,
                        Tokenizer=tokenizer.__class__.__name__, Feats=extr,
                        Results=results, Instances=n_instances)
def demo_movie_reviews(trainer, n_instances=None, output=None):
    """
    Train classifier on all instances of the Movie Reviews dataset.
    The corpus has been preprocessed using the default sentence tokenizer and
    WordPunctTokenizer.
    Features are composed of:
        - most frequent unigrams
    :param trainer: `train` method of a classifier.
    :param n_instances: the number of total reviews that have to be used for
        training and testing. Reviews will be equally split between positive and
        negative.
    :param output: the output file where results have to be reported.
    """
    from nltk.corpus import movie_reviews
    from nltk.sentiment import SentimentAnalyzer
    if n_instances is not None:
        # Half of the requested reviews come from each polarity class.
        n_instances = int(n_instances/2)
    positive = [(list(movie_reviews.words(fileid)), 'pos')
                for fileid in movie_reviews.fileids('pos')[:n_instances]]
    negative = [(list(movie_reviews.words(fileid)), 'neg')
                for fileid in movie_reviews.fileids('neg')[:n_instances]]
    # Split each polarity separately so both train and test keep a balanced
    # uniform class distribution.
    pos_train, pos_test = split_train_test(positive)
    neg_train, neg_test = split_train_test(negative)
    train_docs = pos_train + neg_train
    test_docs = pos_test + neg_test
    analyzer = SentimentAnalyzer()
    vocabulary = analyzer.all_words(train_docs)
    # Unigram features: every word that occurs at least 4 times.
    unigrams = analyzer.unigram_word_feats(vocabulary, min_freq=4)
    analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigrams)
    # Turn both datasets into feature-value representations.
    train_set = analyzer.apply_features(train_docs)
    test_set = analyzer.apply_features(test_docs)
    classifier = analyzer.train(trainer, train_set)
    try:
        classifier.show_most_informative_features()
    except AttributeError:
        print('Your classifier does not provide a show_most_informative_features() method.')
    results = analyzer.evaluate(test_set)
    if output:
        extractor_names = [f.__name__ for f in analyzer.feat_extractors]
        output_markdown(output, Dataset='Movie_reviews', Classifier=type(classifier).__name__,
                        Tokenizer='WordPunctTokenizer', Feats=extractor_names,
                        Results=results, Instances=n_instances)
def demo_subjectivity(trainer, save_analyzer=False, n_instances=None, output=None):
    """
    Train and test a classifier on instances of the Subjective Dataset by Pang and
    Lee. The dataset is made of 5000 subjective and 5000 objective sentences.
    All tokens (words and punctuation marks) are separated by a whitespace, so
    we use the basic WhitespaceTokenizer to parse the data.
    :param trainer: `train` method of a classifier.
    :param save_analyzer: if `True`, store the SentimentAnalyzer in a pickle file.
    :param n_instances: the number of total sentences that have to be used for
        training and testing. Sentences will be equally split between positive
        and negative.
    :param output: the output file where results have to be reported.
    """
    from nltk.sentiment import SentimentAnalyzer
    from nltk.corpus import subjectivity
    if n_instances is not None:
        # Half of the requested sentences come from each category.
        n_instances = int(n_instances/2)
    subjective = [(sent, 'subj')
                  for sent in subjectivity.sents(categories='subj')[:n_instances]]
    objective = [(sent, 'obj')
                 for sent in subjectivity.sents(categories='obj')[:n_instances]]
    # Split each category separately so both train and test keep a balanced
    # uniform class distribution.
    subj_train, subj_test = split_train_test(subjective)
    obj_train, obj_test = split_train_test(objective)
    train_docs = subj_train + obj_train
    test_docs = subj_test + obj_test
    analyzer = SentimentAnalyzer()
    # Build the vocabulary after marking words in negated contexts.
    words_with_negation = analyzer.all_words(
        [mark_negation(doc) for doc in train_docs])
    # Unigram features: every (possibly negation-marked) word seen >= 4 times.
    unigrams = analyzer.unigram_word_feats(words_with_negation, min_freq=4)
    analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigrams)
    # Turn both datasets into feature-value representations.
    train_set = analyzer.apply_features(train_docs)
    test_set = analyzer.apply_features(test_docs)
    classifier = analyzer.train(trainer, train_set)
    try:
        classifier.show_most_informative_features()
    except AttributeError:
        print('Your classifier does not provide a show_most_informative_features() method.')
    results = analyzer.evaluate(test_set)
    if save_analyzer == True:
        save_file(analyzer, 'sa_subjectivity.pickle')
    if output:
        extractor_names = [f.__name__ for f in analyzer.feat_extractors]
        output_markdown(output, Dataset='subjectivity', Classifier=type(classifier).__name__,
                        Tokenizer='WhitespaceTokenizer', Feats=extractor_names,
                        Instances=n_instances, Results=results)
    return analyzer
def demo_sent_subjectivity(text):
    """
    Classify a single sentence as subjective or objective using a stored
    SentimentAnalyzer.
    :param text: a sentence whose subjectivity has to be classified.
    """
    from nltk.classify import NaiveBayesClassifier
    from nltk.tokenize import regexp
    tokenizer = regexp.WhitespaceTokenizer()
    try:
        analyzer = load('sa_subjectivity.pickle')
    except LookupError:
        # No pickled analyzer available: train a fresh one (and save it).
        print('Cannot find the sentiment analyzer you want to load.')
        print('Training a new one using NaiveBayesClassifier.')
        analyzer = demo_subjectivity(NaiveBayesClassifier.train, True)
    # Lower-case the whitespace-separated tokens before classifying.
    lowered_tokens = [tok.lower() for tok in tokenizer.tokenize(text)]
    print(analyzer.classify(lowered_tokens))
def demo_liu_hu_lexicon(sentence, plot=False):
    """
    Basic example of sentiment classification using Liu and Hu opinion lexicon.
    This function simply counts the number of positive, negative and neutral words
    in the sentence and classifies it depending on which polarity is more represented.
    Words that do not appear in the lexicon are considered as neutral.
    :param sentence: a sentence whose polarity has to be classified.
    :param plot: if True, plot a visual representation of the sentence polarity.
    """
    from nltk.corpus import opinion_lexicon
    from nltk.tokenize import treebank
    tokenizer = treebank.TreebankWordTokenizer()
    pos_words = 0
    neg_words = 0
    tokenized_sent = [word.lower() for word in tokenizer.tokenize(sentence)]
    # Fix: the original tested each token with `word in opinion_lexicon.positive()`
    # (and .negative()), which re-scans the full lexicon word list for every
    # token — O(len(lexicon)) per membership test. Building each lexicon set
    # once makes every lookup O(1) without changing the classification.
    positive_words = set(opinion_lexicon.positive())
    negative_words = set(opinion_lexicon.negative())
    x = list(range(len(tokenized_sent)))  # x axis for the plot
    y = []
    for word in tokenized_sent:
        if word in positive_words:
            pos_words += 1
            y.append(1)  # positive
        elif word in negative_words:
            neg_words += 1
            y.append(-1)  # negative
        else:
            y.append(0)  # neutral
    if pos_words > neg_words:
        print('Positive')
    elif pos_words < neg_words:
        print('Negative')
    elif pos_words == neg_words:
        print('Neutral')
    if plot:
        _show_plot(x, y, x_labels=tokenized_sent, y_labels=['Negative', 'Neutral', 'Positive'])
def demo_vader_instance(text):
    """
    Output polarity scores for a text using Vader approach.
    :param text: a text whose polarity has to be evaluated.
    """
    from nltk.sentiment import SentimentIntensityAnalyzer
    analyzer = SentimentIntensityAnalyzer()
    scores = analyzer.polarity_scores(text)
    print(scores)
def demo_vader_tweets(n_instances=None, output=None):
    """
    Classify 10000 positive and negative tweets using Vader approach.
    :param n_instances: the number of total tweets that have to be classified.
    :param output: the output file where results have to be reported.
    """
    from collections import defaultdict
    from nltk.corpus import twitter_samples
    from nltk.sentiment import SentimentIntensityAnalyzer
    from nltk.metrics import (accuracy as eval_accuracy, precision as eval_precision,
        recall as eval_recall, f_measure as eval_f_measure)
    if n_instances is not None:
        # Half of the requested tweets come from each polarity class.
        n_instances = int(n_instances/2)
    fields = ['id', 'text']
    # Keep emoticons in the text (strip_off_emoticons=False): Vader scores them.
    positive_json = twitter_samples.abspath("positive_tweets.json")
    positive_csv = 'positive_tweets.csv'
    json2csv_preprocess(positive_json, positive_csv, fields, strip_off_emoticons=False,
                        limit=n_instances)
    negative_json = twitter_samples.abspath("negative_tweets.json")
    negative_csv = 'negative_tweets.csv'
    json2csv_preprocess(negative_json, negative_csv, fields, strip_off_emoticons=False,
                        limit=n_instances)
    pos_docs = parse_tweets_set(positive_csv, label='pos')
    neg_docs = parse_tweets_set(negative_csv, label='neg')
    # We separately split positive and negative instances to keep a balanced
    # uniform class distribution in both train and test sets.
    train_pos_docs, test_pos_docs = split_train_test(pos_docs)
    train_neg_docs, test_neg_docs = split_train_test(neg_docs)
    training_tweets = train_pos_docs+train_neg_docs
    testing_tweets = test_pos_docs+test_neg_docs
    vader_analyzer = SentimentIntensityAnalyzer()
    gold_results = defaultdict(set)
    test_results = defaultdict(set)
    acc_gold_results = []
    acc_test_results = []
    labels = set()
    for i, (text, label) in enumerate(testing_tweets):
        labels.add(label)
        gold_results[label].add(i)
        acc_gold_results.append(label)
        score = vader_analyzer.polarity_scores(text)['compound']
        # Binary decision: a strictly positive compound score counts as 'pos'.
        if score > 0:
            observed = 'pos'
        else:
            observed = 'neg'
        acc_test_results.append(observed)
        test_results[observed].add(i)
    metrics_results = {}
    # Fix: accuracy does not depend on the label, so compute it once here.
    # The original recomputed the identical value on every iteration of the
    # per-label loop below; it also kept a `num` counter that was never read.
    metrics_results['Accuracy'] = eval_accuracy(acc_gold_results,
                                                acc_test_results)
    for label in labels:
        precision_score = eval_precision(gold_results[label],
                                         test_results[label])
        metrics_results['Precision [{0}]'.format(label)] = precision_score
        recall_score = eval_recall(gold_results[label],
                                   test_results[label])
        metrics_results['Recall [{0}]'.format(label)] = recall_score
        f_measure_score = eval_f_measure(gold_results[label],
                                         test_results[label])
        metrics_results['F-measure [{0}]'.format(label)] = f_measure_score
    for result in sorted(metrics_results):
        print('{0}: {1}'.format(result, metrics_results[result]))
    if output:
        output_markdown(output, Approach='Vader', Dataset='labeled_tweets',
                        Instances=n_instances, Results=metrics_results)
# Script entry point: run the Naive Bayes tweet demo by default; the other
# demos are left commented out for manual experimentation.
if __name__ == '__main__':
    from nltk.classify import NaiveBayesClassifier, MaxentClassifier
    from nltk.classify.scikitlearn import SklearnClassifier
    from sklearn.svm import LinearSVC
    # Bound `train` callables, ready to hand to the demo_* functions.
    naive_bayes = NaiveBayesClassifier.train
    svm = SklearnClassifier(LinearSVC()).train
    maxent = MaxentClassifier.train
    demo_tweets(naive_bayes)
    # demo_movie_reviews(svm)
    # demo_subjectivity(svm)
    # demo_sent_subjectivity("she's an artist , but hasn't picked up a brush in a year . ")
    # demo_liu_hu_lexicon("This movie was actually neither that funny, nor super witty.", plot=True)
    # demo_vader_instance("This movie was actually neither that funny, nor super witty.")
    # demo_vader_tweets()
| apache-2.0 |
DrigerG/IIITB-ML | experiments/image-analysis/mp-assignment-2/image_classification.py | 1 | 5100 | #!/usr/bin/env python
"""image_classification.py: Classify images to horses, bikes"""
import argparse
import os
import cv2
import numpy as np
from scipy.cluster import vq
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
from sklearn.neighbors import KNeighborsClassifier
# Helper functions
def _load_img(path):
    """
    :param path: path of image to be loaded.
    :return: cv2 image object
    """
    bgr_img = cv2.imread(path)
    # OpenCV reads images in BGR channel order; convert to RGB for convenience.
    rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
    return rgb_img
def _pretty_print(msg):
print
print '=' * len(msg)
print msg
print '=' * len(msg)
def _detect_and_describe(image):
    """
    :param image: Input RGB color image
    :return: SIFT keypoints and features tuple
    """
    # Run SIFT detection + description over the full image (no mask).
    sift = cv2.xfeatures2d.SIFT_create()
    keypoints, descriptors = sift.detectAndCompute(image, None)
    # Keep only the (x, y) coordinates of each KeyPoint object, as float32.
    coords = np.float32([keypoint.pt for keypoint in keypoints])
    return coords, descriptors
def _kmeans_clustering(data, k=7):
    """
    :param data: input data
    :param k: K value
    :return: k-Means clusters
    """
    # Stop after 10 iterations or once the movement drops below epsilon 1.0;
    # 10 attempts with random initial centers, best compactness wins.
    term_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    _compactness, _labels, centers = cv2.kmeans(
        data, k, None, term_criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    return centers
# Main wrapper methods
def extract_img_features(img_data_dir):
    """
    :param img_data_dir: directory path where the images reside.
    The images should start with 'horses' to denote horse image,
    'bikes' to denote bike image
    :return: (im_features, resp) tuple: a (n_images, 7) scaled bag-of-visual-words
    histogram array and a (n_images, 1) float32 label array (1=bikes, 2=horses,
    0 for files matching neither prefix).
    """
    dataset_size = len(os.listdir(img_data_dir))
    resp = np.zeros((dataset_size, 1))
    ctr = 0
    print "\nProcessing images, and generating SIFT descriptors..\n"
    des_list = []
    # Pass 1: compute SIFT descriptors per image and assign numeric labels
    # from the filename prefix.
    for f in os.listdir(img_data_dir):
        print "Processing image %s" % f
        img = _load_img('%s/%s' % (img_data_dir, f))
        kpts, des = _detect_and_describe(img)
        des_list.append((f, des))
        if f.startswith('bikes'):
            resp[ctr] = 1
        elif f.startswith('horses'):
            resp[ctr] = 2
        ctr += 1
    # Stack all per-image descriptor arrays into one matrix for clustering.
    descriptors = des_list[0][1]
    for image_path, descriptor in des_list[1:]:
        descriptors = np.vstack((descriptors, descriptor))
    # k visual words form the bag-of-visual-words (BOVW) dictionary.
    k = 7
    print "\nClustering the SIFT descriptors to form BOVW dictionary..\n"
    centers = _kmeans_clustering(descriptors, k)
    # Pass 2: histogram each image's descriptors over the k cluster centers.
    im_features = np.zeros((dataset_size, k), "float32")
    for i in xrange(dataset_size):
        words, distance = vq.vq(des_list[i][1], centers)
        for w in words:
            im_features[i][w] += 1
    # Scaling the values of features
    stdSlr = StandardScaler().fit(im_features)
    im_features = stdSlr.transform(im_features)
    resp = np.float32(resp)
    return im_features, resp
def train_classifier(train_data, train_resp, classifier='SVM'):
    """
    :param train_data: training data array
    :param train_resp: training data labels
    :param classifier: Classifier model ('SVM' or 'KNN')
    :return: trained classifier object
    """
    # Any value other than 'KNN' falls back to the linear SVM.
    model = (KNeighborsClassifier(weights='distance', n_jobs=-1)
             if classifier == 'KNN' else LinearSVC())
    model.fit(train_data, train_resp)
    return model
def test_classifier(model, test_data, test_resp):
"""
:param model: trained kNN classifier object
:param test_data: test data array
:param test_resp: test data labels
:return: accuracy in float
"""
result = model.predict(test_data)
positives = 0
for entry in zip(test_resp, result):
if entry[0] == entry[1]:
positives += 1
accuracy = float(positives) / len(test_resp)
return accuracy
def main():
    """
    Main wrapper to call the classifier.

    Parses command-line arguments, trains the selected classifier on the
    training directory and reports its accuracy on the testing directory.
    :return: None
    """
    ap = argparse.ArgumentParser()
    # Fix: the original help strings ("path to the first/second image") were
    # copy-pasted from an image-pair script; these flags take directories.
    ap.add_argument("-tr", "--training-data-dir", required=True,
                    help="path to the training data directory")
    ap.add_argument("-te", "--testing-data-dir", required=True,
                    help="path to the testing data directory")
    ap.add_argument("-m", "--model", required=True,
                    help="Model to be used 'KNN' or 'SVM'")
    args = vars(ap.parse_args())
    # Extract features and train the classifier
    _pretty_print("Extracting training image features")
    train_data, train_resp = extract_img_features(args['training_data_dir'])
    _pretty_print("Training the classifier")
    model = train_classifier(train_data, train_resp, args['model'])
    # Extract features and test the classifier
    _pretty_print("Extracting testing image features")
    test_data, test_resp = extract_img_features(args['testing_data_dir'])
    _pretty_print("Testing the classifier")
    accuracy = test_classifier(model, test_data, test_resp)
    # Fix: single-argument print() behaves identically on Python 2 and 3,
    # unlike the original Python-2-only print statement.
    print("\nAccuracy of the classifier: %s %%" % (accuracy * 100))
# Script entry point: parse CLI args, train and evaluate the classifier.
if __name__ == '__main__':
    main()
| apache-2.0 |
ebaste/word_cloud | doc/sphinxext/gen_rst.py | 17 | 33207 | """
Example generation for the python wordcloud project. Stolen from scikit-learn with modifications from PyStruct.
Generate the rst files for the examples by iterating over the python
example files.
Hacked to plot every example (not only those that start with 'plot').
"""
from time import time
import os
import shutil
import traceback
import glob
import sys
from StringIO import StringIO
import cPickle
import re
import urllib2
import gzip
import posixpath
import codecs
try:
from PIL import Image
except:
import Image
import matplotlib
matplotlib.use('Agg')
import token
import tokenize
import numpy as np
###############################################################################
# A tee object to redict streams to multiple outputs
class Tee(object):
    """File-like object that duplicates every write to two output streams."""
    def __init__(self, file1, file2):
        self.file1 = file1
        self.file2 = file2
    def write(self, data):
        # Forward the payload to both underlying streams, in order.
        for stream in (self.file1, self.file2):
            stream.write(data)
    def flush(self):
        for stream in (self.file1, self.file2):
            stream.flush()
###############################################################################
# Documentation link resolver objects
def get_data(url):
    """Helper function to get data over http or from a local file"""
    if url.startswith('http://'):
        resp = urllib2.urlopen(url)
        # The server may deliver the payload gzip-compressed.
        encoding = resp.headers.dict.get('content-encoding', 'plain')
        data = resp.read()
        if encoding == 'plain':
            pass
        elif encoding == 'gzip':
            data = StringIO(data)
            data = gzip.GzipFile(fileobj=data).read()
        else:
            raise RuntimeError('unknown encoding')
    else:
        # Local path. Fix: the original also called fid.close() inside the
        # with-block, which is redundant — the context manager already
        # closes the file on exit.
        with open(url, 'r') as fid:
            data = fid.read()
    return data
def parse_sphinx_searchindex(searchindex):
    """Parse a Sphinx search index
    Parameters
    ----------
    searchindex : str
        The Sphinx search index (contents of searchindex.js)
    Returns
    -------
    filenames : list of str
        The file names parsed from the search index.
    objects : dict
        The objects parsed from the search index.
    """
    def _select_block(str_in, start_tag, end_tag):
        """Select first block delimited by start_tag and end_tag"""
        start_pos = str_in.find(start_tag)
        if start_pos < 0:
            raise ValueError('start_tag not found')
        # Track nesting depth so inner tag pairs are skipped over.
        depth = 0
        for pos in range(start_pos, len(str_in)):
            if str_in[pos] == start_tag:
                depth += 1
            elif str_in[pos] == end_tag:
                depth -= 1
                if depth == 0:
                    break
        # Contents between (not including) the outermost tag pair.
        sel = str_in[start_pos + 1:pos]
        return sel
    def _parse_dict_recursive(dict_str):
        """Parse a dictionary from the search index"""
        dict_out = dict()
        pos_last = 0
        # Walk `key:value` pairs; each ':' marks the end of the next key.
        pos = dict_str.find(':')
        while pos >= 0:
            key = dict_str[pos_last:pos]
            if dict_str[pos + 1] == '[':
                # value is a list
                pos_tmp = dict_str.find(']', pos + 1)
                if pos_tmp < 0:
                    raise RuntimeError('error when parsing dict')
                value = dict_str[pos + 2: pos_tmp].split(',')
                # try to convert elements to int
                for i in range(len(value)):
                    try:
                        value[i] = int(value[i])
                    except ValueError:
                        pass
            elif dict_str[pos + 1] == '{':
                # value is another dictionary
                subdict_str = _select_block(dict_str[pos:], '{', '}')
                value = _parse_dict_recursive(subdict_str)
                pos_tmp = pos + len(subdict_str)
            else:
                raise ValueError('error when parsing dict: unknown elem')
            # Keys are JS string literals: strip the surrounding quotes.
            key = key.strip('"')
            if len(key) > 0:
                dict_out[key] = value
            # Advance past the ',' separating this pair from the next.
            pos_last = dict_str.find(',', pos_tmp)
            if pos_last < 0:
                break
            pos_last += 1
            pos = dict_str.find(':', pos_last)
        return dict_out
    # parse objects
    query = 'objects:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"objects:" not found in search index')
    sel = _select_block(searchindex[pos:], '{', '}')
    objects = _parse_dict_recursive(sel)
    # parse filenames
    query = 'filenames:'
    pos = searchindex.find(query)
    if pos < 0:
        raise ValueError('"filenames:" not found in search index')
    # Filenames are stored as a flat JS array of quoted strings.
    filenames = searchindex[pos + len(query) + 1:]
    filenames = filenames[:filenames.find(']')]
    filenames = [f.strip('"') for f in filenames.split(',')]
    return filenames, objects
class SphinxDocLinkResolver(object):
    """ Resolve documentation links using searchindex.js generated by Sphinx
    Parameters
    ----------
    doc_url : str
        The base URL of the project website.
    searchindex : str
        Filename of searchindex, relative to doc_url.
    extra_modules_test : list of str
        List of extra module names to test.
    relative : bool
        Return relative links (only useful for links to documentation of this
        package).
    """
    def __init__(self, doc_url, searchindex='searchindex.js',
                 extra_modules_test=None, relative=False):
        self.doc_url = doc_url
        self.relative = relative
        # Cache of full_name -> resolved URL (False when resolution failed).
        self._link_cache = {}
        self.extra_modules_test = extra_modules_test
        # Cache of already-downloaded HTML pages, keyed by URL.
        self._page_cache = {}
        if doc_url.startswith('http://'):
            if relative:
                raise ValueError('Relative links are only supported for local '
                                 'URLs (doc_url cannot start with "http://)"')
            searchindex_url = doc_url + '/' + searchindex
        else:
            searchindex_url = os.path.join(doc_url, searchindex)
        # detect if we are using relative links on a Windows system
        if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
            if not relative:
                raise ValueError('You have to use relative=True for the local'
                                 'package on a Windows system.')
            self._is_windows = True
        else:
            self._is_windows = False
        # download and initialize the search index
        sindex = get_data(searchindex_url)
        filenames, objects = parse_sphinx_searchindex(sindex)
        self._searchindex = dict(filenames=filenames, objects=objects)
    def _get_link(self, cobj):
        """Get a valid link, False if not found"""
        fname_idx = None
        full_name = cobj['module_short'] + '.' + cobj['name']
        # Look the object up in the search index, either under its
        # fully-qualified name or nested under its short module name.
        if full_name in self._searchindex['objects']:
            value = self._searchindex['objects'][full_name]
            if isinstance(value, dict):
                value = value[value.keys()[0]]
            fname_idx = value[0]
        elif cobj['module_short'] in self._searchindex['objects']:
            value = self._searchindex['objects'][cobj['module_short']]
            if cobj['name'] in value.keys():
                fname_idx = value[cobj['name']][0]
        if fname_idx is not None:
            fname = self._searchindex['filenames'][fname_idx] + '.html'
            if self._is_windows:
                fname = fname.replace('/', '\\')
                link = os.path.join(self.doc_url, fname)
            else:
                link = posixpath.join(self.doc_url, fname)
            # Fetch the candidate page (with caching) so we can verify that
            # the object's anchor really exists on it.
            if link in self._page_cache:
                html = self._page_cache[link]
            else:
                html = get_data(link)
                self._page_cache[link] = html
            # test if cobj appears in page
            comb_names = [cobj['module_short'] + '.' + cobj['name']]
            if self.extra_modules_test is not None:
                for mod in self.extra_modules_test:
                    comb_names.append(mod + '.' + cobj['name'])
            url = False
            for comb_name in comb_names:
                if html.find(comb_name) >= 0:
                    url = link + '#' + comb_name
            link = url
        else:
            link = False
        return link
    def resolve(self, cobj, this_url):
        """Resolve the link to the documentation, returns None if not found
        Parameters
        ----------
        cobj : dict
            Dict with information about the "code object" for which we are
            resolving a link.
            cobj['name'] : function or class name (str)
            cobj['module_short'] : shortened module name (str)
            cobj['module'] : module name (str)
        this_url: str
            URL of the current page. Needed to construct relative URLs
            (only used if relative=True in constructor).
        Returns
        -------
        link : str | None
            The link (URL) to the documentation.
        """
        full_name = cobj['module_short'] + '.' + cobj['name']
        link = self._link_cache.get(full_name, None)
        if link is None:
            # we don't have it cached
            link = self._get_link(cobj)
            # cache it for the future
            self._link_cache[full_name] = link
        if link is False or link is None:
            # failed to resolve
            return None
        if self.relative:
            link = os.path.relpath(link, start=this_url)
            if self._is_windows:
                # replace '\' with '/' so it on the web
                link = link.replace('\\', '/')
            # for some reason, the relative link goes one directory too high up
            link = link[3:]
        return link
###############################################################################
rst_template = """
.. _%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
def extract_docstring(filename):
    """ Extract a module-level docstring, if any
    Returns a (docstring, first_paragraph, end_row) tuple, where end_row is
    the 1-based line right after the docstring (adjusted for a shebang line).
    """
    lines = file(filename).readlines()  # NOTE(review): Python 2 file() builtin
    start_row = 0
    if lines[0].startswith('#!'):
        # Skip a shebang line; remember the offset for row numbering.
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    tokens = tokenize.generate_tokens(iter(lines).next)
    # Scan tokens until the first significant one; a STRING there is the
    # module docstring. Either way, stop after the first significant token.
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(line.rstrip()
                                   for line in docstring.split('\n')).split('\n\n')
            if len(paragraphs) > 0:
                first_par = paragraphs[0]
        break
    return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
    """ Generate the list of examples, as well as the contents of
    examples.
    Sphinx builder callback: writes auto_examples/index.rst plus one rst
    page per example found under the sibling `examples` directory.
    """
    root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
    example_dir = os.path.abspath(app.builder.srcdir + '/../' + 'examples')
    # plot_gallery may be a string ("True"/"False") or already a bool.
    try:
        plot_gallery = eval(app.builder.config.plot_gallery)
    except TypeError:
        plot_gallery = bool(app.builder.config.plot_gallery)
    if not os.path.exists(example_dir):
        os.makedirs(example_dir)
    if not os.path.exists(root_dir):
        os.makedirs(root_dir)
    # we create an index.rst with all examples
    fhindex = file(os.path.join(root_dir, 'index.rst'), 'w')
    #Note: The sidebar button has been removed from the examples page for now
    # due to how it messes up the layout. Will be fixed at a later point
    fhindex.write("""\
.. raw:: html
    <style type="text/css">
    div#sidebarbutton {
        display: none;
    }
    .figure {
        float: left;
        margin: 16px;
        top: 0;
        left: 0;
        -webkit-border-radius: 10px; /* Saf3-4, iOS 1-3.2, Android <1.6 */
        -moz-border-radius: 10px; /* FF1-3.6 */
        border-radius: 10px; /* Opera 10.5, IE9, Saf5, Chrome, FF4, iOS 4, Android 2.1+ */
        border: 2px solid #fff;
        -webkit-transition: all 0.15s ease-out;  /* Saf3.2+, Chrome */
        -moz-transition: all 0.15s ease-out;  /* FF4+ */
        -ms-transition: all 0.15s ease-out;  /* IE10? */
        -o-transition: all 0.15s ease-out;  /* Opera 10.5+ */
        transition: all 0.15s ease-out;
        background-repeat: no-repeat;
        /* --> Thumbnail image size */
        width: 150px;
        height: 130px;
    }
    .figure img {
        display: inline;
    }
    .figure .caption {
        text-align: center !important;
    }
    </style>
Examples
========
.. _examples-index:
""")
    # Here we don't use an os.walk, but we recurse only twice: flat is
    # better than nested.
    generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery)
    for dir in sorted(os.listdir(example_dir)):
        if os.path.isdir(os.path.join(example_dir, dir)):
            generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery)
    fhindex.flush()
def extract_line_count(filename, target_dir):
    # Extract the line count of a file
    # Returns (docstring_end_row, total_end_row), both 1-based and adjusted
    # for an optional shebang line.
    example_file = os.path.join(target_dir, filename)
    lines = file(example_file).readlines()  # NOTE(review): Python 2 file() builtin
    start_row = 0
    if lines[0].startswith('#!'):
        # Skip a shebang line; remember the offset for row numbering.
        lines.pop(0)
        start_row = 1
    tokens = tokenize.generate_tokens(lines.__iter__().next)
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif ((tok_type == 'STRING') and (check_docstring == True)):
            # The first significant STRING token is the module docstring:
            # record the row where it ends, then keep consuming tokens so
            # `erow` ends up at the last token of the file.
            erow_docstring = erow
            check_docstring = False
    return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
    # Sort the example files by their number of non-docstring code lines,
    # breaking ties alphabetically by file name.
    python_files = [name for name in file_list if name.endswith('.py')]
    table = np.zeros(shape=(len(python_files), 2))
    table = table.astype(np.object)
    for row, name in enumerate(python_files):
        docstr_lines, total_lines = extract_line_count(name, target_dir)
        table[row][1] = total_lines - docstr_lines
        table[row][0] = name
    # lexsort: primary key is the line count, secondary key the name.
    order = np.lexsort((table[:,0].astype(np.str),
                        table[:,1].astype(np.float)))
    return np.array(table[order][:,0]).tolist()
def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery):
    """ Generate the rst file for an example directory.
    Appends the directory's gallery section (README text + one thumbnail
    figure and hidden toctree entry per example) to the open index file.
    """
    if not dir == '.':
        target_dir = os.path.join(root_dir, dir)
        src_dir = os.path.join(example_dir, dir)
    else:
        # '.' means the flat top-level examples directory.
        target_dir = root_dir
        src_dir = example_dir
    if not os.path.exists(os.path.join(src_dir, 'README.txt')):
        # A README.txt is mandatory: it supplies the section header text.
        print 80 * '_'
        print ('Example directory %s does not have a README.txt file'
               % src_dir)
        print 'Skipping this directory'
        print 80 * '_'
        return
    fhindex.write("""
%s
""" % file(os.path.join(src_dir, 'README.txt')).read())
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # Show shorter examples first: sort by non-docstring line count.
    sorted_listdir = line_count_sort(os.listdir(src_dir),
                                     src_dir)
    for fname in sorted_listdir:
        if fname.endswith('py'):
            generate_file_rst(fname, target_dir, src_dir, plot_gallery)
            thumb = os.path.join(dir, 'images', 'thumb', fname[:-3] + '.png')
            link_name = os.path.join(dir, fname).replace(os.path.sep, '_')
            fhindex.write("""
.. raw:: html
    <div class="thumbnailContainer">
""")
            fhindex.write('.. figure:: %s\n' % thumb)
            if link_name.startswith('._'):
                link_name = link_name[2:]
            if dir != '.':
                fhindex.write('   :target: ./%s/%s.html\n\n' % (dir,
                                                                fname[:-3]))
            else:
                fhindex.write('   :target: ./%s.html\n\n' % link_name[:-3])
            fhindex.write("""   :ref:`%s`
.. raw:: html
    </div>
.. toctree::
   :hidden:
   %s/%s
""" % (link_name, dir, fname[:-3]))
    fhindex.write("""
.. raw:: html
    <div style="clear: both"></div>
""")  # clear at the end of the section
# modules for which we embed links into example code
# NOTE(review): presumably consumed when rewriting example sources to link
# API names to their documentation — the consumer is outside this chunk.
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy', 'wordcloud']
def make_thumbnail(in_fname, out_fname, width, height):
    """Make a thumbnail with the same aspect ratio centered in an
       image with a given width and height
    :param in_fname: path of the source image.
    :param out_fname: path the thumbnail is saved to.
    :param width: width of the output canvas, in pixels.
    :param height: height of the output canvas, in pixels.
    """
    img = Image.open(in_fname)
    width_in, height_in = img.size
    # Pick the scale factor that fits the image inside the canvas while
    # preserving its aspect ratio.
    scale_w = width / float(width_in)
    scale_h = height / float(height_in)
    if height_in * scale_w <= height:
        scale = scale_w
    else:
        scale = scale_h
    width_sc = int(round(scale * width_in))
    height_sc = int(round(scale * height_in))
    # resize the image
    img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
    # insert centered on a white canvas
    thumb = Image.new('RGB', (width, height), (255, 255, 255))
    # Fix: use floor division. The original `/ 2` relied on Python 2 integer
    # division; under Python 3 (or `from __future__ import division`) it
    # yields floats, and PIL's paste() requires integer offsets. `//` keeps
    # the Python 2 result unchanged.
    pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
    thumb.paste(img, pos_insert)
    thumb.save(out_fname)
def get_short_module_name(module_name, obj_name):
    """Return the shortest dotted prefix of *module_name* exporting *obj_name*."""
    parts = module_name.split('.')
    short_name = module_name
    i = len(parts) - 1
    while i > 0:
        candidate = '.'.join(parts[:i])
        try:
            exec('from %s import %s' % (candidate, obj_name))
        except ImportError:
            # the previous, one-segment-longer prefix was the last working one
            short_name = '.'.join(parts[:(i + 1)])
            break
        short_name = candidate
        i -= 1
    return short_name
def generate_file_rst(fname, target_dir, src_dir, plot_gallery):
    """ Generate the rst file for a given example.

    Copies the example source into *target_dir*, optionally executes it to
    produce figures, records its stdout and run time, builds a thumbnail,
    and writes the .rst page.  NOTE(review): this is Python 2 only code
    (print statements, execfile, iteritems, basestring, cPickle); several
    module-level helpers (Tee, rst_template, plot_rst_template,
    extract_docstring, SINGLE_IMAGE, HLIST_*) are defined elsewhere in the
    file.
    """
    base_image_name = os.path.splitext(fname)[0]
    # image_fname keeps a literal %s so it can be filled with a figure number
    image_fname = '%s_%%s.png' % base_image_name
    this_template = rst_template
    last_dir = os.path.split(src_dir)[-1]
    # to avoid leading . in file names, and wrong names in links
    if last_dir == '.' or last_dir == 'examples':
        last_dir = ''
    else:
        last_dir += '_'
    short_fname = last_dir + fname
    src_file = os.path.join(src_dir, fname)
    example_file = os.path.join(target_dir, fname)
    shutil.copyfile(src_file, example_file)
    # The following is a list containing all the figure names
    figure_list = []
    image_dir = os.path.join(target_dir, 'images')
    thumb_dir = os.path.join(image_dir, 'thumb')
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)
    if not os.path.exists(thumb_dir):
        os.makedirs(thumb_dir)
    image_path = os.path.join(image_dir, image_fname)
    stdout_path = os.path.join(image_dir,
                               'stdout_%s.txt' % base_image_name)
    time_path = os.path.join(image_dir,
                             'time_%s.txt' % base_image_name)
    thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
    time_elapsed = 0
    if plot_gallery:
        # generate the plot as png image if it is more recent than an
        # existing image.
        first_image_file = image_path % 1
        # reuse any previously cached stdout / timing for this example
        if os.path.exists(stdout_path):
            stdout = open(stdout_path).read()
        else:
            stdout = ''
        if os.path.exists(time_path):
            time_elapsed = float(open(time_path).read())
        if (not os.path.exists(first_image_file) or
                os.stat(first_image_file).st_mtime <=
                os.stat(src_file).st_mtime):
            # We need to execute the code
            print 'plotting %s' % fname
            t0 = time()
            import matplotlib.pyplot as plt
            plt.close('all')
            cwd = os.getcwd()
            try:
                # First CD in the original example dir, so that any file
                # created by the example get created in this directory
                orig_stdout = sys.stdout
                os.chdir(os.path.dirname(src_file))
                # capture the example's stdout while still echoing it (Tee)
                my_buffer = StringIO()
                my_stdout = Tee(sys.stdout, my_buffer)
                sys.stdout = my_stdout
                my_globals = {'pl': plt, '__file__': src_file}
                # Python 2 only: run the example in a fresh namespace
                execfile(os.path.basename(src_file), my_globals)
                time_elapsed = time() - t0
                sys.stdout = orig_stdout
                my_stdout = my_buffer.getvalue()
                # get variables so we can later add links to the documentation
                example_code_obj = {}
                for var_name, var in my_globals.iteritems():
                    if not hasattr(var, '__module__'):
                        continue
                    if not isinstance(var.__module__, basestring):
                        continue
                    if var.__module__.split('.')[0] not in DOCMODULES:
                        continue
                    # get the type as a string with other things stripped
                    tstr = str(type(var))
                    tstr = (tstr[tstr.find('\'')
                            + 1:tstr.rfind('\'')].split('.')[-1])
                    # get shortened module name
                    module_short = get_short_module_name(var.__module__,
                                                         tstr)
                    cobj = {'name': tstr, 'module': var.__module__,
                            'module_short': module_short,
                            'obj_type': 'object'}
                    example_code_obj[var_name] = cobj
                # find functions so we can later add links to the documentation
                funregex = re.compile('[\w.]+\(')
                with open(src_file, 'rt') as fid:
                    for line in fid.readlines():
                        if line.startswith('#'):
                            continue
                        for match in funregex.findall(line):
                            fun_name = match[:-1]
                            # resolve the dotted name inside the example's
                            # namespace; anything unresolvable is skipped
                            try:
                                exec('this_fun = %s' % fun_name, my_globals)
                            except Exception:
                                #print 'extracting function failed'
                                #print err
                                continue
                            this_fun = my_globals['this_fun']
                            if not callable(this_fun):
                                continue
                            if not hasattr(this_fun, '__module__'):
                                continue
                            if not isinstance(this_fun.__module__, basestring):
                                continue
                            if (this_fun.__module__.split('.')[0]
                                    not in DOCMODULES):
                                continue
                            # get shortened module name
                            fun_name_short = fun_name.split('.')[-1]
                            module_short = get_short_module_name(
                                this_fun.__module__, fun_name_short)
                            cobj = {'name': fun_name_short,
                                    'module': this_fun.__module__,
                                    'module_short': module_short,
                                    'obj_type': 'function'}
                            example_code_obj[fun_name] = cobj
                    # redundant: the with-block already closes the file
                    fid.close()
                if len(example_code_obj) > 0:
                    # save the dictionary, so we can later add hyperlinks
                    codeobj_fname = example_file[:-3] + '_codeobj.pickle'
                    with open(codeobj_fname, 'wb') as fid:
                        cPickle.dump(example_code_obj, fid,
                                     cPickle.HIGHEST_PROTOCOL)
                        # redundant: the with-block already closes the file
                        fid.close()
                if '__doc__' in my_globals:
                    # The __doc__ is often printed in the example, we
                    # don't with to echo it
                    my_stdout = my_stdout.replace(
                        my_globals['__doc__'],
                        '')
                my_stdout = my_stdout.strip()
                if my_stdout:
                    stdout = '**Script output**::\n\n %s\n\n' % (
                        '\n '.join(my_stdout.split('\n')))
                # cache stdout and timing so unchanged examples can be skipped
                open(stdout_path, 'w').write(stdout)
                open(time_path, 'w').write('%f' % time_elapsed)
                os.chdir(cwd)
                # In order to save every figure we have two solutions :
                # * iterate from 1 to infinity and call plt.fignum_exists(n)
                #   (this requires the figures to be numbered
                #    incrementally: 1, 2, 3 and not 1, 2, 5)
                # * iterate over [fig_mngr.num for fig_mngr in
                #   matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
                for fig_num in (fig_mngr.num for fig_mngr in
                        matplotlib._pylab_helpers.Gcf.get_all_fig_managers()):
                    # Set the fig_num figure as the current figure as we can't
                    # save a figure that's not the current figure.
                    plt.figure(fig_num)
                    plt.savefig(image_path % fig_num)
                    figure_list.append(image_fname % fig_num)
            except:
                # deliberately broad: a broken example must not kill the build
                print 80 * '_'
                print '%s is not compiling:' % fname
                traceback.print_exc()
                print 80 * '_'
            finally:
                os.chdir(cwd)
                sys.stdout = orig_stdout
            print " - time elapsed : %.2g sec" % time_elapsed
        else:
            # example unchanged: recover figure names from the existing pngs
            figure_list = [f[len(image_dir):]
                           for f in glob.glob(image_path % '[1-9]')]
            #for f in glob.glob(image_path % '*')]
        # generate thumb file
        this_template = plot_rst_template
        if os.path.exists(first_image_file):
            make_thumbnail(first_image_file, thumb_file, 200, 140)
    if not os.path.exists(thumb_file):
        # create something to replace the thumbnail
        make_thumbnail('images/no_image.png', thumb_file, 200, 140)
    docstring, short_desc, end_row = extract_docstring(example_file)
    # Depending on whether we have one or more figures, we're using a
    # horizontal list or a single rst call to 'image'.
    if len(figure_list) == 1:
        figure_name = figure_list[0]
        image_list = SINGLE_IMAGE % figure_name.lstrip('/')
    else:
        image_list = HLIST_HEADER
        for figure_name in figure_list:
            image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
    # the rst template is filled from the local variables defined above
    f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
    f.write(this_template % locals())
    f.flush()
def embed_code_links(app, exception):
    """Embed hyperlinks to documentation into example code

    Sphinx 'build-finished' handler: walks the rendered auto_examples HTML,
    loads the *_codeobj.pickle files written by generate_file_rst, and wraps
    identifier spans with links to the relevant package documentation.
    NOTE(review): Python 2 only (print statements, iteritems, cPickle,
    urllib2, `except X, e` syntax); SphinxDocLinkResolver is defined
    elsewhere in the file.
    """
    try:
        # *exception* is non-None when the build failed; do nothing then
        if exception is not None:
            return
        print 'Embedding documentation hyperlinks in examples..'
        # Add resolvers for the packages for which we want to show links
        doc_resolvers = {}
        doc_resolvers['wordcloud'] = SphinxDocLinkResolver(app.builder.outdir,
                                                           relative=True)
        doc_resolvers['sklearn'] = SphinxDocLinkResolver(
            'http://scikit-learn.org/stable')
        doc_resolvers['matplotlib'] = SphinxDocLinkResolver(
            'http://matplotlib.org')
        doc_resolvers['numpy'] = SphinxDocLinkResolver(
            'http://docs.scipy.org/doc/numpy-1.6.0')
        doc_resolvers['scipy'] = SphinxDocLinkResolver(
            'http://docs.scipy.org/doc/scipy-0.11.0/reference')
        example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
        html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
                                                        'auto_examples'))
        # patterns for replacement: these match pygments' HTML output
        link_pattern = '<a href="%s">%s</a>'
        orig_pattern = '<span class="n">%s</span>'
        period = '<span class="o">.</span>'
        for dirpath, _, filenames in os.walk(html_example_dir):
            for fname in filenames:
                print '\tprocessing: %s' % fname
                full_fname = os.path.join(html_example_dir, dirpath, fname)
                subpath = dirpath[len(html_example_dir) + 1:]
                # fname[:-5] strips the '.html' suffix
                pickle_fname = os.path.join(example_dir, subpath,
                                            fname[:-5] + '_codeobj.pickle')
                if os.path.exists(pickle_fname):
                    # we have a pickle file with the objects to embed links for
                    with open(pickle_fname, 'rb') as fid:
                        example_code_obj = cPickle.load(fid)
                        # redundant: the with-block already closes the file
                        fid.close()
                    str_repl = {}
                    # generate replacement strings with the links
                    for name, cobj in example_code_obj.iteritems():
                        this_module = cobj['module'].split('.')[0]
                        if this_module not in doc_resolvers:
                            continue
                        link = doc_resolvers[this_module].resolve(cobj,
                                                                  full_fname)
                        if link is not None:
                            # rebuild the pygments markup for a dotted name
                            parts = name.split('.')
                            name_html = orig_pattern % parts[0]
                            for part in parts[1:]:
                                name_html += period + orig_pattern % part
                            str_repl[name_html] = link_pattern % (link, name_html)
                    # do the replacement in the html file
                    if len(str_repl) > 0:
                        with codecs.open(full_fname, 'rt', encoding='utf-8') as fid:
                            lines_in = fid.readlines()
                            # redundant: the with-block already closes the file
                            fid.close()
                        with open(full_fname, 'wt') as fid:
                            for line in lines_in:
                                for name, link in str_repl.iteritems():
                                    try:
                                        # ascii-ignore drops non-ascii chars
                                        # before the substring replacement
                                        line = line.encode("ascii", 'ignore').replace(name, link)
                                    except Exception as e:
                                        print(line)
                                        print(name)
                                        print(link)
                                        raise e
                                fid.write(line)
                            # redundant: the with-block already closes the file
                            fid.close()
    except urllib2.HTTPError, e:
        print ("The following HTTP Error has occurred:\n")
        print e.code
    except urllib2.URLError, e:
        print ("\n...\n"
               "Warning: Embedding the documentation hyperlinks requires "
               "internet access.\nPlease check your network connection.\n"
               "Unable to continue embedding due to a URL Error: \n")
        print e.args
    print '[done]'
def setup(app):
    """Sphinx entry point: wire the gallery generation into the build."""
    app.connect('builder-inited', generate_example_rst)
    app.add_config_value('plot_gallery', True, 'html')
    # embed links after build is finished
    app.connect('build-finished', embed_code_links)

    # Sphinx hack: sphinx copies generated images to the build directory
    # each time the docs are made.  If the desired image name already
    # exists, it appends a digit to prevent overwrites -- the directory is
    # never cleared, so it grows with every build.
    #
    # This question has been asked on the sphinx development list, but there
    # was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
    #
    # Work around it by clearing the image build directory each time the
    # docs are built.  If sphinx changes their layout between versions this
    # stops working (though it should probably not cause a crash).  Tested
    # successfully on Sphinx 1.0.7.
    build_image_dir = '_build/html/_images'
    if os.path.exists(build_image_dir):
        for stale_name in os.listdir(build_image_dir):
            if stale_name.endswith('png'):
                os.remove(os.path.join(build_image_dir, stale_name))
| mit |
jseabold/scikit-learn | sklearn/_build_utils/cythonize.py | 19 | 5126 | #!/usr/bin/env python
""" cythonize
Cythonize pyx files into C files as needed.
Usage: cythonize [root_dir]
Default [root_dir] is 'sklearn'.
Checks pyx files to see if they have been changed relative to their
corresponding C files. If they have, then runs cython on these files to
recreate the C files.
The script detects changes in the pyx files using checksums [or hashes] stored
in a database file
Simple script to invoke Cython on all .pyx
files; while waiting for a proper build system. Uses file hashes to
figure out if rebuild is needed.
It is called by ./setup.py sdist so that sdist package can be installed without
cython
Originally written by Dag Sverre Seljebotn, and adapted from statsmodel 0.6.1
(Modified BSD 3-clause)
We copied it for scikit-learn.
Note: this script does not check any of the dependent C libraries; it only
operates on the Cython .pyx files.
"""
# author: Arthur Mensch
# license: BSD
from __future__ import division, print_function, absolute_import
import os
import re
import sys
import hashlib
import subprocess
# Name of the checksum database written next to the working directory.
HASH_FILE = 'cythonize.dat'
# Directory scanned for .pyx files when no argument is given.
DEFAULT_ROOT = 'sklearn'
# WindowsError is not defined on unix systems
try:
    WindowsError
except NameError:
    WindowsError = None
#
# Rules
#
def process_pyx(fromfile, tofile):
    """Run Cython on *fromfile*, writing the generated C/C++ source to *tofile*.

    Raises an Exception when Cython is too old or the compilation fails, and
    OSError when Cython cannot be invoked at all.
    """
    # Enforce a minimum Cython version when it is importable; a missing
    # Cython is tolerated here because invocation below will surface it.
    try:
        from Cython.Compiler.Version import version as cython_version
        from distutils.version import LooseVersion
        if LooseVersion(cython_version) < LooseVersion('0.21'):
            raise Exception('Building scikit-learn requires Cython >= 0.21')
    except ImportError:
        pass

    flags = ['--fast-fail']
    if tofile.endswith('.cpp'):
        flags += ['--cplus']

    try:
        try:
            # Prefer the `cython` executable when it is on the PATH.
            status = subprocess.call(['cython'] + flags +
                                     ["-o", tofile, fromfile])
            if status != 0:
                raise Exception('Cython failed')
        except OSError:
            # There are ways of installing Cython that don't result in a cython
            # executable on the path, see gh-2397: fall back to invoking the
            # compiler through the current Python interpreter.
            status = subprocess.call(
                [sys.executable, '-c',
                 'import sys; from Cython.Compiler.Main '
                 'import setuptools_main as main;'
                 ' sys.exit(main())'] + flags + ["-o", tofile, fromfile])
            if status != 0:
                raise Exception('Cython failed')
    except OSError:
        raise OSError('Cython needs to be installed')
# Maps a source-file extension to the function that regenerates its target.
rules = {
    '.pyx': process_pyx,
}
#
# Hash db
#
def load_hashes(filename):
    """Load the checksum database from *filename*.

    Returns a dict mapping source path -> (sha1 of input, sha1 of output).
    A missing database file yields an empty dict.  Fix: the original loop
    reused the name `filename` for each record, shadowing the parameter.
    """
    hashes = {}
    if os.path.isfile(filename):
        with open(filename, 'r') as f:
            for line in f:
                # each record is "<path> <input-hash> <output-hash>"
                path, inhash, outhash = line.split()
                hashes[path] = (inhash, outhash)
    return hashes
def save_hashes(hash_db, filename):
    """Write the checksum database as '<path> <inhash> <outhash>' lines."""
    records = ["%s %s %s\n" % (path, pair[0], pair[1])
               for path, pair in hash_db.items()]
    with open(filename, 'w') as f:
        f.writelines(records)
def sha1_of_file(filename):
    """Return the hex SHA1 digest of the file's contents."""
    with open(filename, "rb") as f:
        content = f.read()
    return hashlib.sha1(content).hexdigest()
#
# Main program
#
def normpath(path):
    """Normalize *path* to forward slashes and strip a leading './'."""
    path = path.replace(os.sep, '/')
    return path[2:] if path.startswith('./') else path
def get_hash(frompath, topath):
    """Return (sha1 of source, sha1 of target or None when target is absent)."""
    to_hash = None
    if os.path.exists(topath):
        to_hash = sha1_of_file(topath)
    return sha1_of_file(frompath), to_hash
def process(path, fromfile, tofile, processor_function, hash_db):
    """Regenerate *tofile* from *fromfile* when the stored hashes are stale.

    Consults *hash_db* (keyed by the normalized source path) to skip files
    whose (input, output) hash pair is unchanged; otherwise runs
    *processor_function* and refreshes the database entry.
    """
    src = os.path.join(path, fromfile)
    dst = os.path.join(path, tofile)
    key = normpath(src)
    if get_hash(src, dst) == hash_db.get(key):
        print('%s has not changed' % src)
        return
    print('Processing %s' % src)
    processor_function(src, dst)
    # the target changed: recompute and store the new hash pair
    hash_db[key] = get_hash(src, dst)
def find_process_files(root_dir):
    """Walk *root_dir* and run the matching rule on every source that needs it."""
    print(root_dir)
    hash_db = load_hashes(HASH_FILE)
    for cur_dir, dirs, files in os.walk(root_dir):
        for filename in files:
            for fromext, function in rules.items():
                if not filename.endswith(fromext):
                    continue
                # Sources mentioning libcpp must be cythonized to C++.
                with open(os.path.join(cur_dir, filename), 'rb') as f:
                    data = f.read()
                has_libcpp = re.search(b"libcpp", data, re.I | re.M)
                toext = ".cpp" if has_libcpp else ".c"
                tofile = filename[:-len(fromext)] + toext
                process(cur_dir, filename, tofile, function, hash_db)
    save_hashes(hash_db, HASH_FILE)
def main(root_dir=DEFAULT_ROOT):
    # Entry point: cythonize everything under root_dir (default 'sklearn').
    find_process_files(root_dir)
if __name__ == '__main__':
    # An optional positional argument selects the root directory.
    root_dir_arg = sys.argv[1] if len(sys.argv) > 1 else DEFAULT_ROOT
    main(root_dir_arg)
| bsd-3-clause |
blink1073/scikit-image | doc/examples/features_detection/plot_orb.py | 33 | 1807 | """
==========================================
ORB feature detector and binary descriptor
==========================================
This example demonstrates the ORB feature detection and binary description
algorithm. It uses an oriented FAST detection method and the rotated BRIEF
descriptors.
Unlike BRIEF, ORB is comparatively scale- and rotation-invariant while still
employing the very efficient Hamming distance metric for matching. As such, it
is preferred for real-time applications.
"""
from skimage import data
from skimage import transform as tf
from skimage.feature import (match_descriptors, corner_harris,
                             corner_peaks, ORB, plot_matches)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt

# Prepare three versions of the same image: the original (grayscale), a
# 180-degree rotation, and an affine-warped copy (anisotropic scale,
# rotation, translation).
img1 = rgb2gray(data.astronaut())
img2 = tf.rotate(img1, 180)
tform = tf.AffineTransform(scale=(1.3, 1.1), rotation=0.5,
                           translation=(0, -200))
img3 = tf.warp(img1, tform)

# Detect ORB keypoints and extract binary descriptors for each image.
descriptor_extractor = ORB(n_keypoints=200)
descriptor_extractor.detect_and_extract(img1)
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors
descriptor_extractor.detect_and_extract(img2)
keypoints2 = descriptor_extractor.keypoints
descriptors2 = descriptor_extractor.descriptors
descriptor_extractor.detect_and_extract(img3)
keypoints3 = descriptor_extractor.keypoints
descriptors3 = descriptor_extractor.descriptors

# Match descriptors; cross_check keeps only mutual nearest neighbours.
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)

# Visualize the matches for both image pairs, one subplot each.
fig, ax = plt.subplots(nrows=2, ncols=1)
plt.gray()
plot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12)
ax[0].axis('off')
plot_matches(ax[1], img1, img3, keypoints1, keypoints3, matches13)
ax[1].axis('off')
plt.show()
| bsd-3-clause |
potash/drain | drain/exploration.py | 1 | 10781 | from tempfile import NamedTemporaryFile
from pprint import pformat
from itertools import product
from sklearn import tree
import pandas as pd
from collections import Counter
from six import StringIO
from drain import util, step
def explore(steps, reload=False):
    # Load the given drain steps (optionally forcing a reload) and wrap the
    # loaded step objects as the index of a new StepFrame.
    return StepFrame(index=step.load(steps, reload=reload))
def expand(self, prefix=False, index=True, diff=True, existence=True):
    """
    This function is a member of StepFrame and StepSeries. It is used to
    expand the kwargs of the steps either into the index (index=True) or
    as columns (index=False). By default (diff=True) only the kwargs which
    differ among steps are expanded.
    Note that index objects in pandas must be hashable so any unhashable
    argument values are converted to string representations (using pprint)
    when index=True.
    If "inputs" is an argument those steps' kwargs are also expanded (and
    their inputs recursively). If there are multiple steps with the same
    argument names they are prefixed by their names or if those are not set
    then by their class names. To enable prefixing for all args set
    prefix=True.
    Sometimes the difference between pipelines is that a step exists or it
    doesn't. When diff=True and existence=True, instead of expanding all
    the kwargs for that step, we expand a single column whose name is the
    step name and whose value is a boolean indicating whether the step exists
    in the given tree.
    Args:
        prefix: whether to always use step name prefix for kwarg name.
            Default False, which uses prefixes when necessary, i.e. for
            keywords that are shared by multiple step names.
        index: If True expand args into index. Otherwise expand into
            columns
        diff: whether to only expand keywords whose values that are
            non-constant
        existence: whether to check for existence of a step in the tree
            instead of a full diff. Only applicable when diff=True. See
            note above.
    Returns: a DatFrame with the arguments of the steps expanded.

    NOTE(review): Python 2 only (`diff_iter.next()` below).
    """
    # collect kwargs resulting in a list of {name: kwargs} dicts
    dicts = [step._collect_kwargs(s, drop_duplicate_names=True) for s in self.index]
    # if any of the kwargs are themselves dicts, expand them
    dicts = [{k: util.dict_expand(v) for k, v in s.items()} for s in dicts]
    if diff:
        diff_dicts = [{} for d in dicts]  # the desired list of dicts
        names = util.union([set(d.keys()) for d in dicts])  # all names among these steps
        for name in names:
            if existence:
                ndicts = [d[name] for d in dicts if name in d.keys()]  # all dicts for this name
            else:
                ndicts = [d[name] if name in d.keys() else {} for d in dicts]
            ndiffs = util.dict_diff(ndicts)  # diffs for this name
            if sum(map(len, ndiffs)) == 0:  # if they're all the same
                # but not all had the key and existence=True
                if existence and len(ndicts) < len(self):
                    # expand a single boolean "does this step exist" entry
                    for m, d in zip(diff_dicts, dicts):
                        m[name] = {tuple(): name in d.keys()}
            else:  # if there was a diff
                diff_iter = iter(ndiffs)
                for m, d in zip(diff_dicts, dicts):
                    if name in d.keys() or not existence:
                        m[name] = diff_iter.next()  # get the corresponding diff
        dicts = diff_dicts
    # restructure so name is in the key
    merged_dicts = []
    for dd in dicts:
        merged_dicts.append(util.dict_merge(*({tuple([name] + list(util.make_tuple(k))): v
                                               for k, v in d.items()} for name, d in dd.items())))
    # prefix_keys are the keys that will keep their prefix
    keys = [list((k[1:] for k in d.keys())) for d in merged_dicts]
    if not prefix:
        # only keep a prefix for keywords shared by multiple step names
        key_count = [Counter(kk) for kk in keys]
        prefix_keys = util.union({k for k in c if c[k] > 1} for c in key_count)
    else:
        prefix_keys = util.union((set(kk) for kk in keys))
    merged_dicts = [{str.join('_', map(str, k if k[1:] in prefix_keys else k[1:])): v
                     for k, v in d.items()} for d in merged_dicts]
    expanded = pd.DataFrame(merged_dicts, index=self.index)
    if index:
        columns = list(expanded.columns)
        try:
            if len(columns) > 0:
                expanded.set_index(columns, inplace=True)
            else:
                expanded.index = [None]*len(expanded)
        except TypeError:
            # index values must be hashable: stringify the offenders and retry
            _print_unhashable(expanded, columns)
            expanded.set_index(columns, inplace=True)
        # rebuild on the plain pandas base class, then attach the new index
        df = self.__class__.__bases__[0](self, copy=True)
        df.index = expanded.index
    else:
        df = pd.concat((expanded, self), axis=1)
        # When index=False, the index is still a Step collection
        # NOTE(review): the concat result above is immediately overwritten
        # here, so `self`'s own columns are dropped -- confirm this is the
        # intended behavior.
        df = StepFrame(expanded)
    return df
def dapply(self, fn, pairwise=False, symmetric=True, diagonal=False, block=None, **kwargs):
    """
    Apply function to each step object in the index
    Args:
        fn: function to apply. If a list then each function is applied
        pairwise: whether to apply the function to pairs of steps
        symmetric, diagonal, block: passed to apply_pairwise when pairwise=True
        kwargs: a keyword arguments to pass to each function. Arguments
            with list value are grid searched using util.dict_product.
    Returns: a StepFrame or StepSeries
    """
    # keyword arguments given as lists define a grid search
    search_keys = [k for k, v in kwargs.items() if isinstance(v, list) and len(v) > 1]
    functions = util.make_list(fn)
    # every (function, kwargs-combination) pair is evaluated
    search = list(product(functions, util.dict_product(kwargs)))
    results = []
    for fn, kw in search:
        if not pairwise:
            r = self.index.to_series().apply(lambda step: fn(step, **kw))
        else:
            r = apply_pairwise(self, fn,
                               symmetric=symmetric, diagonal=diagonal, block=block,
                               **kw)
        # label each result by function name (when several) and the
        # searched kwarg values so columns are distinguishable
        name = [] if len(functions) == 1 else [fn.__name__]
        name += util.dict_subset(kw, search_keys).values()
        if isinstance(r, pd.DataFrame):
            columns = pd.MultiIndex.from_tuples(
                [tuple(name + util.make_list(c)) for c in r.columns])
            r.columns = columns
        else:
            r.name = tuple(name)
        results.append(r)
    if len(results) > 1:
        result = pd.concat(results, axis=1)
        # get subset of parameters that were searched over
        column_names = [] if len(functions) == 1 else [None]
        column_names += search_keys
        column_names += [None]*(len(result.columns.names)-len(column_names))
        result.columns.names = column_names
        return StepFrame(result)
    else:
        result = results[0]
        if isinstance(result, pd.DataFrame):
            return StepFrame(result)
        else:
            result.name = functions[0].__name__
            return StepSeries(result)
def apply_pairwise(self, function, symmetric=True, diagonal=False, block=None, **kwargs):
    """
    Helper function for pairwise apply.
    Args:
        steps: an ordered collection of steps
        function: function to apply, first two positional arguments are steps
        symmetric: whether function is symmetric in the two steps
        diagonal: whether to apply on the diagonal
        block: apply only when the given columns match
        kwargs: keyword arguments to pass to the function
    Returns:
        DataFrame with index and columns equal to the steps argument

    NOTE(review): Python 2 / old-pandas only: `range(...).remove(...)`
    requires the list-returning Python 2 range, and `.ix` was removed in
    modern pandas.
    """
    steps = self.index
    # result matrix: rows and columns are both the step collection
    r = pd.DataFrame(index=steps, columns=steps)
    for i, s1 in enumerate(steps):
        # symmetric functions only need the lower triangle
        j = range(i+1 if symmetric else len(steps))
        if not diagonal:
            j.remove(i)
        other = set(steps[j])
        if block is not None:
            # self-merge on the block columns to find same-block partners
            df = self.reset_index()
            df = df.merge(df, on=block)
            other &= set(df[df.index_x == s1].index_y)
        for s2 in other:
            r.ix[s1, s2] = function(s1, s2, **kwargs)
    return r
def _assert_step_collection(steps):
    """Raise ValueError unless *steps* are all unique drain Step objects."""
    for candidate in steps:
        if not isinstance(candidate, step.Step):
            raise ValueError("StepFrame index must consist of drain.step.Step objects")
    if len(steps) != len(set(steps)):
        raise ValueError("StepFrame steps must be unique")
class StepFrame(pd.DataFrame):
    """A DataFrame whose index is a collection of unique drain Steps.

    Printing and HTML rendering go through expand() so step kwargs are shown.
    """
    expand = expand
    dapply = dapply

    def __init__(self, *args, **kwargs):
        pd.DataFrame.__init__(self, *args, **kwargs)
        _assert_step_collection(self.index.values)

    @property
    def _constructor(self):
        # pandas uses this to rebuild the frame after most operations
        return StepFrame

    @property
    def _constructor_sliced(self):
        # Fix: was misspelled `_contructor_sliced`, so pandas never saw it.
        return pd.Series

    def __str__(self):
        return self.expand().__str__()

    def to_html(self, *args, **kwargs):
        return self.expand().to_html(*args, **kwargs)

    # resetting index makes it no longer a StepFrame
    def reset_index(self, *args, **kwargs):
        return pd.DataFrame(self).reset_index(*args, **kwargs)
class StepSeries(pd.Series):
    """A Series whose index is a collection of unique drain Steps.

    Printing and HTML rendering go through expand() so step kwargs are shown.
    """
    expand = expand
    dapply = dapply

    def __init__(self, *args, **kwargs):
        pd.Series.__init__(self, *args, **kwargs)
        _assert_step_collection(self.index.values)

    @property
    def _constructor(self):
        # pandas uses this to rebuild the series after most operations
        return StepSeries

    @property
    def _constructor_expanddim(self):
        # Fix: was misspelled `_contructor_expanddim`, so pandas never saw it.
        return StepFrame

    def __str__(self):
        return self.expand().__str__()

    def to_html(self, *args, **kwargs):
        return self.expand().to_html(*args, **kwargs)

    def reset_index(self, *args, **kwargs):
        return pd.Series(self).reset_index(*args, **kwargs)
def _print_unhashable(df, columns=None):
    """
    Replace unhashable values in a DataFrame with their string repr

    Args:
        df: DataFrame, modified in place
        columns: columns to replace, if necessary. Default None replaces
            all columns.

    Returns: the (modified) DataFrame
    """
    for c in df.columns if columns is None else columns:
        if df.dtypes[c] == object:
            # hashability probe: raises TypeError on e.g. lists or dicts
            try:
                df[c].apply(hash)
            except TypeError:
                # Fix: `.ix` was removed from pandas; reindex reproduces the
                # original label alignment (NaN where values were dropped).
                df[c] = df[c].dropna().apply(pformat).reindex(df.index)
    return df
def show_tree(tree, feature_names, max_depth=None):
    """Render a fitted decision tree to a temp PDF and return it as a wand Image."""
    import wand.image
    tmp_name = NamedTemporaryFile(delete=False).name
    ascii_names = [c.encode('ascii') for c in feature_names]
    export_tree(tree, tmp_name, ascii_names, max_depth)
    return wand.image.Image(filename=tmp_name)
def export_tree(clf, filename, feature_names=None, max_depth=None):
    # Write a graphviz rendering of the fitted tree `clf` to `filename` as PDF.
    import pydot
    dot_data = StringIO()
    tree.export_graphviz(clf, out_file=dot_data,
    feature_names=feature_names, max_depth=max_depth)
    # NOTE(review): newer pydot versions return a *list* of graphs from
    # graph_from_dot_data -- confirm the pinned pydot returns a single graph.
    graph = pydot.graph_from_dot_data(dot_data.getvalue())
    graph.write_pdf(filename)
| mit |
zfrenchee/pandas | pandas/tests/indexing/interval/test_interval_new.py | 1 | 7320 | import pytest
import numpy as np
import pandas as pd
from pandas import Series, IntervalIndex, Interval
import pandas.util.testing as tm
pytestmark = pytest.mark.skip(reason="new indexing tests for issue 16316")
class TestIntervalIndex(object):
    """Indexing tests for Series with an IntervalIndex (GH 16316).

    Fixes applied: `pd.interval(...)` does not exist (pandas exports
    `pd.Interval`; the module-level skip hid the AttributeError), and the
    scalar-result checks in test_loc_with_overlap compared an int against
    `tm.assert_series_equal`, which can never pass -- they now use plain
    equality like the other scalar checks in this class.
    """

    def setup_method(self, method):
        # five unit-width intervals: (0,1], (1,2], ..., (4,5]
        self.s = Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6)))

    def test_loc_with_interval(self):
        # loc with single label / list of labels:
        # - Intervals: only exact matches
        # - scalars: those that contain it
        s = self.s

        expected = 0
        result = s.loc[Interval(0, 1)]
        assert result == expected
        result = s[Interval(0, 1)]
        assert result == expected

        expected = s.iloc[3:5]
        result = s.loc[[Interval(3, 4), Interval(4, 5)]]
        tm.assert_series_equal(expected, result)
        result = s[[Interval(3, 4), Interval(4, 5)]]
        tm.assert_series_equal(expected, result)

        # missing or not exact
        with pytest.raises(KeyError):
            s.loc[Interval(3, 5, closed='left')]
        with pytest.raises(KeyError):
            s[Interval(3, 5, closed='left')]
        with pytest.raises(KeyError):
            s[Interval(3, 5)]
        with pytest.raises(KeyError):
            s.loc[Interval(3, 5)]
        with pytest.raises(KeyError):
            s[Interval(3, 5)]
        with pytest.raises(KeyError):
            s.loc[Interval(-2, 0)]
        with pytest.raises(KeyError):
            s[Interval(-2, 0)]
        with pytest.raises(KeyError):
            s.loc[Interval(5, 6)]
        with pytest.raises(KeyError):
            s[Interval(5, 6)]

    def test_loc_with_scalar(self):
        # loc with single label / list of labels:
        # - Intervals: only exact matches
        # - scalars: those that contain it
        s = self.s

        assert s.loc[1] == 0
        assert s.loc[1.5] == 1
        assert s.loc[2] == 1

        # TODO with __getitem__ same rules as loc, or positional ?
        # assert s[1] == 0
        # assert s[1.5] == 1
        # assert s[2] == 1

        expected = s.iloc[1:4]
        tm.assert_series_equal(expected, s.loc[[1.5, 2.5, 3.5]])
        tm.assert_series_equal(expected, s.loc[[2, 3, 4]])
        tm.assert_series_equal(expected, s.loc[[1.5, 3, 4]])

        expected = s.iloc[[1, 1, 2, 1]]
        tm.assert_series_equal(expected, s.loc[[1.5, 2, 2.5, 1.5]])

        expected = s.iloc[2:5]
        tm.assert_series_equal(expected, s.loc[s >= 2])

    def test_loc_with_slices(self):
        # loc with slices:
        # - Interval objects: only works with exact matches
        # - scalars: only works for non-overlapping, monotonic intervals,
        #   and start/stop select location based on the interval that
        #   contains them:
        #   (slice_loc(start, stop) == (idx.get_loc(start), idx.get_loc(stop))
        s = self.s

        # slice of interval
        expected = s.iloc[:3]
        result = s.loc[Interval(0, 1):Interval(2, 3)]
        tm.assert_series_equal(expected, result)
        result = s[Interval(0, 1):Interval(2, 3)]
        tm.assert_series_equal(expected, result)

        expected = s.iloc[4:]
        result = s.loc[Interval(3, 4):]
        tm.assert_series_equal(expected, result)
        result = s[Interval(3, 4):]
        tm.assert_series_equal(expected, result)

        with pytest.raises(KeyError):
            s.loc[Interval(3, 6):]
        with pytest.raises(KeyError):
            s[Interval(3, 6):]
        with pytest.raises(KeyError):
            s.loc[Interval(3, 4, closed='left'):]
        with pytest.raises(KeyError):
            s[Interval(3, 4, closed='left'):]

        # TODO with non-existing intervals ?
        # s.loc[Interval(-1, 0):Interval(2, 3)]

        # slice of scalar
        expected = s.iloc[:3]
        tm.assert_series_equal(expected, s.loc[:3])
        tm.assert_series_equal(expected, s.loc[:2.5])
        tm.assert_series_equal(expected, s.loc[0.1:2.5])

        # TODO should this work? (-1 is not contained in any of the Intervals)
        # tm.assert_series_equal(expected, s.loc[-1:3])

        # TODO with __getitem__ same rules as loc, or positional ?
        # tm.assert_series_equal(expected, s[:3])
        # tm.assert_series_equal(expected, s[:2.5])
        # tm.assert_series_equal(expected, s[0.1:2.5])

        # slice of scalar with step != 1
        with pytest.raises(NotImplementedError):
            s[0:4:2]

    def test_loc_with_overlap(self):
        # overlapping intervals: a scalar can match several rows
        idx = IntervalIndex.from_tuples([(1, 5), (3, 7)])
        s = Series(range(len(idx)), index=idx)

        # scalar
        expected = s
        result = s.loc[4]
        tm.assert_series_equal(expected, result)

        result = s[4]
        tm.assert_series_equal(expected, result)

        result = s.loc[[4]]
        tm.assert_series_equal(expected, result)

        result = s[[4]]
        tm.assert_series_equal(expected, result)

        # interval
        expected = 0
        result = s.loc[Interval(1, 5)]
        assert result == expected
        result = s[Interval(1, 5)]
        assert result == expected

        expected = s
        result = s.loc[[Interval(1, 5), Interval(3, 7)]]
        tm.assert_series_equal(expected, result)
        result = s[[Interval(1, 5), Interval(3, 7)]]
        tm.assert_series_equal(expected, result)

        with pytest.raises(KeyError):
            s.loc[Interval(3, 5)]
        with pytest.raises(KeyError):
            s.loc[[Interval(3, 5)]]
        with pytest.raises(KeyError):
            s[Interval(3, 5)]
        with pytest.raises(KeyError):
            s[[Interval(3, 5)]]

        # slices with interval (only exact matches)
        expected = s
        result = s.loc[Interval(1, 5):Interval(3, 7)]
        tm.assert_series_equal(expected, result)
        result = s[Interval(1, 5):Interval(3, 7)]
        tm.assert_series_equal(expected, result)

        with pytest.raises(KeyError):
            s.loc[Interval(1, 6):Interval(3, 8)]
        with pytest.raises(KeyError):
            s[Interval(1, 6):Interval(3, 8)]

        # slices with scalar raise for overlapping intervals
        # TODO KeyError is the appropriate error?
        with pytest.raises(KeyError):
            s.loc[1:4]

    def test_non_unique(self):
        idx = IntervalIndex.from_tuples([(1, 3), (3, 7)])
        s = pd.Series(range(len(idx)), index=idx)

        result = s.loc[Interval(1, 3)]
        assert result == 0

        result = s.loc[[Interval(1, 3)]]
        expected = s.iloc[0:1]
        tm.assert_series_equal(expected, result)

    def test_non_unique_moar(self):
        # duplicate intervals in the index
        idx = IntervalIndex.from_tuples([(1, 3), (1, 3), (3, 7)])
        s = Series(range(len(idx)), index=idx)

        expected = s.iloc[[0, 1]]
        result = s.loc[Interval(1, 3)]
        tm.assert_series_equal(expected, result)

        expected = s
        result = s.loc[Interval(1, 3):]
        tm.assert_series_equal(expected, result)

        expected = s
        result = s[Interval(1, 3):]
        tm.assert_series_equal(expected, result)

        expected = s.iloc[[0, 1]]
        result = s[[Interval(1, 3)]]
        tm.assert_series_equal(expected, result)
| bsd-3-clause |
buguen/pylayers | pylayers/simul/examples/ex_simulem_fur.py | 3 | 1157 | from pylayers.simul.simulem import *
from pylayers.signal.bsignal import *
from pylayers.measures.mesuwb import *
import matplotlib.pyplot as plt
from pylayers.gis.layout import *
# Select one UWB measurement record (alternatives left for quick switching).
#M=UWBMesure(173)
M=UWBMesure(13)
#M=UWBMesure(1)
# Channel impulse responses: simulated without (cir) and with (cirf) furniture.
cir=TUsignal()
cirf=TUsignal()
#cir.readcir("where2cir-tx001-rx145.mat","Tx001")
#cirf.readcir("where2-furcir-tx001-rx145.mat","Tx001")
cir.readcir("where2cir-tx002-rx012.mat","Tx002")
#cirf.readcir("where2-furcir-tx002-rx012.mat","Tx002")
#cir.readcir("where2cir-tx001-rx001.mat","Tx001")
#cirf.readcir("where2-furcir-tx001-rx001.mat","Tx001")
plt.ion()
fig = plt.figure()
fig.subplots_adjust(hspace=0.5)
# Top panel: layout of the building with Tx/Rx positions overlaid.
ax1 = fig.add_subplot(411,title="points and layout")
L=Layout()
L.load('siradel-cut-fur.ini')
#L.build()
L.showGs(fig=fig,ax=ax1)
ax1.plot(M.tx[0],M.tx[1],'or')
#ax1.plot(M.rx[1][0],M.rx[1][1],'ob')
ax1.plot(M.rx[2][0],M.rx[2][1],'ob')
# Second panel: the measured channel response for comparison.
ax2 = fig.add_subplot(412,title="Measurement")
M.tdd.ch2.plot()
#ax3 = fig.add_subplot(413,title="Simulation with furniture",sharex=ax2,sharey=ax2)
#cirf.plot(col='red')
# Bottom panel: the simulated response, sharing axes with the measurement.
ax4 = fig.add_subplot(414,title="Simulation",sharex=ax2,sharey=ax2)
cir.plot(col='blue')
plt.show()
| lgpl-3.0 |
AyoubBelhadji/random_matrix_factorization | package/fast_svd.py | 1 | 2031 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 12 11:45:41 2017
@author: ayoubbelhadji1
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pyplot
from scipy.stats import chi2
import pylab as mp
### Parameters
N = 1000 # Number of points
d = 2 # Dimension
s_n = 10 # Number of leverage sampling size
s_num_iterations = 1000 # Number of leverage sampling iterations
mean = [0, 0]
cov = [[1, 0], [0, 1]]
### Data Generation
# r_X is d x N: each column is one standard-normal sample point.
r_X = np.random.multivariate_normal(mean, cov, N).T
r_X_index = list(range(0,N))
### Computing leverage scores
# Column scores: squared column norm over total squared Frobenius norm,
# so the scores sum to 1 and form a valid sampling distribution.
leverage_scores_r_X = np.sum(r_X*r_X, axis=0)/(np.linalg.norm(r_X)**2)
leverage_sampling = np.zeros((s_n,s_num_iterations))
C = np.zeros((d,s_n))
delta_quadratic_norm_sum = 0
delta_matrix = np.zeros((d,d))
# NOTE(review): the loop starts at 1, so only s_num_iterations - 1 trials
# run (column 0 of leverage_sampling stays zero), yet the sums below are
# divided by s_num_iterations — slight downward bias; confirm intent.
for l in range(1,s_num_iterations):
    ### Sampling according to leverage scores
    # Draw s_n distinct column indices with probability proportional
    # to their leverage scores.
    leverage_sampling[:,l] = np.random.choice(r_X_index, s_n, p=leverage_scores_r_X,replace=False)
    # 1/sqrt(p_i) importance weights for the sampled columns.
    sqrt_p_vector = np.divide(np.ones((d,s_n)),np.sqrt(leverage_scores_r_X[np.ndarray.tolist(leverage_sampling[:,l].astype(int))]))
    # Rescaled sketch C so that E[C C^T] = r_X r_X^T.
    C = (1/np.sqrt(s_n))*(np.multiply(r_X[:,np.ndarray.tolist(leverage_sampling[:,l].astype(int))],sqrt_p_vector))
    # Accumulate squared Frobenius error and the raw error matrix.
    delta_quadratic_norm_sum = delta_quadratic_norm_sum + (np.linalg.norm(np.dot(C,C.T) - np.dot(r_X,r_X.T)))**2
    delta_matrix = delta_matrix + np.dot(C,C.T)-np.dot(r_X,r_X.T)
delta_quadratic_norm_sum = delta_quadratic_norm_sum/s_num_iterations
delta_matrix = delta_matrix/s_num_iterations
# Theoretical bound (1/s_n) * ||X||_F^4 on the expected squared error.
norm_sum_bound = (1/s_n)*np.linalg.norm(r_X)**4
# Ratio <= 1 indicates the empirical error respects the bound.
print(delta_quadratic_norm_sum/norm_sum_bound)
print(np.linalg.norm(delta_matrix))
## Plots
#matplotlib.pyplot.scatter(C[0,:],C[1,:])
#matplotlib.pyplot.show()
#matplotlib.pyplot.scatter(r_X[leverage_sampling,0],r_X[leverage_sampling,1])
#matplotlib.pyplot.show()
##empirical_cov = np.cov(r_X.T)
##plot_ellipse(r_X,cov=empirical_cov)
##leverage_empirical_cov = np.cov(r_X[leverage_sampling,:].T)
##plot_ellipse(r_X[leverage_sampling,:],cov=leverage_empirical_cov)
| mit |
evanl/vesa_tough_comparison | vesa/vesa_v02_13/vesa_writing_functions.py | 2 | 13731 | #Author - Evan Leister
import eclipse_cells as ec
import numpy as np
import matplotlib.pyplot as plt
class Injector(object):
    """A single CO2 injection well with a piecewise-constant rate schedule.

    Parameters
    ----------
    index : well index (written verbatim to InjWells.txt)
    x, y : well coordinates
    ratio : well ratio (written verbatim)
    layer_id : id of the layer the well injects into
    end_days : list of schedule-interval end times in integer days
    mass_rate : list of mass rates in Mt/yr, one entry per interval

    Raises
    ------
    ValueError : if end_days and mass_rate differ in length.
    """
    def __init__(self, index, x, y, ratio, layer_id, end_days, mass_rate):
        self.index = index
        self.x = x
        self.y = y
        self.ratio = ratio
        self.layer_id = layer_id
        self.end_days = end_days # list of interval ends in integer days
        self.mass_rate = mass_rate # list of mass rates in Mt/yr
        self.radius = 1.0
        if len(mass_rate) != len(end_days):
            # BUG FIX: the original printed a message and then executed
            # "return 1"; returning non-None from __init__ raises an
            # uninformative TypeError.  Raise a descriptive error instead.
            raise ValueError("Mass inflow and interval ends must match")
    def write_injector(self, f):
        """Append this well's record to the open file handle f.

        Format: one header line
            index, x, y, ratio, layer_id, radius, n_intervals
        followed by one "end_day, mass_rate" line per schedule interval.
        Returns f so calls can be chained.
        """
        print("mass rate is")
        print(self.mass_rate)
        f.write(', '.join([str(self.index), str(self.x), str(self.y),
                           str(self.ratio), str(self.layer_id),
                           str(self.radius), str(len(self.mass_rate))]))
        f.write('\n')
        for i in range(len(self.mass_rate)):
            # (dropped the original per-iteration debug print of the list
            # lengths; the lengths are validated once in __init__)
            f.write(', '.join([str(self.end_days[i]),
                               str(self.mass_rate[i])]))
            f.write('\n')
        return f
def write_injwells(injectors):
    """Write every Injector record into InjWells.txt in the working
    directory (overwriting any existing file).  Returns 0 on completion."""
    f = open("InjWells.txt","w")
    print "writing Injwells.txt"
    # Each injector appends its own header + schedule lines to the handle.
    for inj in injectors:
        inj.write_injector(f)
    f.close()
    return 0
def write_system(timestep_days, output_days, simtime_years, output_control, layers):
    """Write the VESA System.txt control file.

    One value per line: timestep [days], output interval [days], total
    simulation time [years], output-control keyword, mass-balance flag,
    layer count, then one grid-file name per layer.  Returns 0.
    """
    f = open("System.txt", "w")
    print "writing System.txt"
    f.write(''.join([str(timestep_days),'\n']))
    f.write(''.join([str(output_days),'\n']))
    years = simtime_years  # NOTE(review): local alias is unused
    f.write(''.join([str(simtime_years),'\n']))
    f.write(''.join([output_control,'\n']))
    # mass balance output 'string' massbalance nomassbalance
    f.write('massbalance \n')
    # number of layers in the model [int]
    f.write(''.join([str(len(layers)),'\n']))
    # file names that contain the grid data
    # (each entry must already include its own trailing newline)
    for s in layers:
        f.write(s)
    f.close()
    return 0
class Layer(object):
    """One geological layer: fluid/rock properties plus a 2-D grid of
    vertically-averaged GridCell objects, with writers for the VESA
    input-file format."""
    def __init__(self, layer_name, l_type, l_id, l_co2_rho, l_bri_rho, l_co2_mu,\
            l_bri_mu, sc_res, sb_res, c_co2, c_bri, c_roc, cap_rp_id, \
            nx, ny, nz = 1, gradient = 10.,\
            homogeneous = False, permval = 2000., poroval = False):
        """
        self.layer_name : Name of Text File
        l_type : Layer Type
        l_id : Layer ID
        l_co2_rho : CO2 density
        self.l_bri_rho : Brine Density
        self.l_co2_mu : CO2 viscosity
        self.l_bri_mu : brine viscosity
        self.sc_res : CO2 residual saturation
        self.sb_res : brine residual saturation
        self.c_co2 : CO2 compressibility
        self.c_bri : Brine Compressibility
        self.c_roc : Rock Compressibility
        self.cap_rp_id : Capillary - Rel/perm ID
        self.gradient : Pressure gradient
        """
        self.layer_name = layer_name
        self.l_type = l_type
        self.l_id = l_id
        self.l_co2_rho = l_co2_rho
        self.l_bri_rho = l_bri_rho
        self.l_co2_mu = l_co2_mu
        self.l_bri_mu = l_bri_mu
        self.sc_res = sc_res
        self.sb_res = sb_res
        self.c_co2 = c_co2
        self.c_bri = c_bri
        self.c_roc = c_roc
        self.cap_rp_id = cap_rp_id
        # list of GridCell objects, filled by fill_uniform_grid or
        # fill_nonuniform_grid
        self.grid_cells = []
        self.gradient = gradient #[MPa/km]
        self.nx = nx
        self.ny = ny
        self.nz = nz
        # if homogeneous, permval replaces the upscaled permeability
        self.homogeneous = homogeneous
        self.permval = permval
        # poroval == False -> use upscaled porosity; otherwise override
        self.poroval = poroval
    def fill_uniform_grid(self, dx, dy, dz, center_depth, phi, k):
        """Populate grid_cells with an nx*ny uniform grid of identical
        cells (constant spacing, depth, porosity, permeability).
        Edge cells get constant-pressure (3) boundaries, interior ones
        internal (1).  Returns 0."""
        for j in range(self.ny):
            for i in range(self.nx):
                # cell-center coordinates
                x = (dx/2. + dx * i)
                y = (dy/2. + dy * j)
                # elevations are negative depths
                top_b = -center_depth + dz/2.
                bottom_b = -center_depth - dz/2.
                if i == (self.nx - 1):
                    east_bc = 3
                else:
                    east_bc = 1
                if j == (self.ny - 1):
                    north_bc = 3
                else:
                    north_bc = 1
                if i == 0:
                    west_bc = 3
                else:
                    west_bc = 1
                if j == 0:
                    south_bc = 3
                else:
                    south_bc = 1
                # hydrostatic pressure at cell bottom; gradient is MPa/km
                # so *1000 converts to Pa per metre of depth
                pressure = -self.gradient * bottom_b * 1000
                gc = GridCell(top_b, bottom_b, x, y, dx, dy, phi, k,\
                    west_bc, east_bc, south_bc, north_bc, pressure)
                self.grid_cells.append(gc)
        return 0
    def plot_perm_data(self, e_cells):
        """Save three diagnostic PNGs from a list of Eclipse cells:
        anisotropy ratio vs. cell index, permeability vs. depth and
        porosity vs. depth.  Returns 0."""
        cell_ind = np.zeros(len(e_cells))
        anis = np.zeros(len(e_cells))
        depth = np.zeros(len(e_cells))
        perm = np.zeros(len(e_cells))
        poro = np.zeros(len(e_cells))
        for i in range(len(e_cells)):
            cell_ind[i] = i
            perm[i] = e_cells[i].getXPermeability()
            # vertical-to-horizontal permeability ratio
            anis[i] = e_cells[i].getZPermeability() / \
                e_cells[i].getXPermeability()
            depth[i] = e_cells[i].getTopZ()
            poro[i] = e_cells[i].getPorosity()
        print "plotting anisotropy ratio"
        fig1 = plt.figure()
        ax1 = fig1.add_subplot(111)
        a = ax1.plot(cell_ind, anis)
        ax1.set_xlabel('cell_index []')
        ax1.set_ylabel('anisotropy kz/kx')
        plt.savefig('ec_anis_cells.png')
        plt.close()
        print "plotting permeabilityvsdepth"
        fig2 = plt.figure()
        ax2 = fig2.add_subplot(111)
        b = ax2.scatter(depth, perm)
        ax2.set_xlabel('depth [m]')
        ax2.set_ylabel('permeability [md]')
        plt.savefig('ec_perm_depth.png')
        plt.close()
        print "plotting porosity vsdepth"
        fig3 = plt.figure()
        ax3 = fig3.add_subplot(111)
        b = ax3.scatter(depth, poro)
        ax3.set_xlabel('depth [m]')
        ax3.set_ylabel('porosity []')
        plt.savefig('ec_poro_depth.png')
        plt.close()
        return 0
    def fill_nonuniform_grid(self, e_cells):
        """Vertically upscale a 3-D Eclipse grid into 2-D grid_cells.

        For each (i, j) column, cells with kx > 1 md contribute to the
        column's top/bottom elevation and to mean permeability/porosity
        (overridden by self.permval / self.poroval when requested).
        NOTE(review): j loops from ny-1 down to 0, so the north/south
        boundary flags are assigned opposite to fill_uniform_grid's
        orientation — presumably intentional for the Eclipse row order;
        confirm.  Returns 0."""
        print "Filling nonuniform grid"
        count = 0
        k = 0
        columnz = []
        columnk = []
        columnphi = []
        check_col = self.nz  # NOTE(review): unused
        check_plane = self.nx*self.ny
        for j in range(self.ny-1,-1,-1):
            for i in range(0,self.nx):
                for k in range(0,self.nz):
                    # flat index into the Eclipse cell list, plane by plane
                    ind = (i + self.nx *j) + check_plane*k
                    if i == 32 and j == 77:
                        # leftover debug output for one specific column
                        print k, e_cells[ind].getZPermeability()
                    # only include "active" cells (kx above 1 md cutoff)
                    if e_cells[ind].getXPermeability() > 1:
                        columnz.append(e_cells[ind].getTopZ())
                        columnk.append(e_cells[ind].getXPermeability())
                        columnphi.append(e_cells[ind].getPorosity())
                # spits out the averages after the column index is filled.
                kmean, kvar = stats(columnk)
                if self.homogeneous == True:
                    k_write = self.permval
                else:
                    k_write = kmean
                if self.poroval == False:
                    phimean , phivar = stats(columnphi)
                else:
                    phimean = self.poroval
                # elevations: negative of shallowest/deepest included cell
                top_b = -columnz[0]
                bottom_b = -columnz[-1]
                x = e_cells[ i + self.nx * j].getCenterX()
                y = e_cells[ i + self.nx * j].getCenterY()
                # get correct dx and dy
                # (forward difference of centers; backward at the far edge)
                if i != (self.nx-1):
                    x_1 = e_cells[i+1 + self.nx*j].getCenterX()
                    x_0 = e_cells[ i + self.nx * j].getCenterX()
                    dx = (x_1 - x_0)
                else:
                    x_1 = e_cells[i + self.nx*j].getCenterX()
                    x_0 = e_cells[ i-1 + self.nx * j].getCenterX()
                    dx = (x_1 - x_0)
                if j != 0:
                    y_1 = e_cells[i + self.nx *(j-1)].getCenterY()
                    y_0 = e_cells[i + self.nx * j].getCenterY()
                    dy = y_1 - y_0
                else:
                    y_1 = e_cells[i + self.nx *j].getCenterY()
                    y_0 = e_cells[i + self.nx *(j+1)].getCenterY()
                    dy = y_1 - y_0
                #boundary condition key [integers]
                # 1 = internal
                # 3 = constant pressure
                # 4 = no flow
                if i == (self.nx - 1):
                    east_bc = 3
                else:
                    east_bc = 1
                if j == 0:
                    north_bc = 3
                else:
                    north_bc = 1
                if i == 0:
                    west_bc = 3
                else:
                    west_bc = 1
                if j == (self.ny - 1):
                    south_bc = 3
                else:
                    south_bc = 1
                # hydrostatic pressure at cell bottom (gradient in MPa/km)
                pressure = -self.gradient * bottom_b * 1000
                gc = GridCell(top_b, bottom_b, x, y, dx, dy, phimean, k_write,\
                    west_bc, east_bc, south_bc, north_bc, pressure)
                self.grid_cells.append(gc)
                # increment loops
                count += 1
                columnz = []
                columnk = []
                columnphi = []
                k = 0
        return 0
    def write_layer(self):
        """Write this layer as <layer_name>.txt (header of fluid/rock
        properties, capillary/rel-perm block, cell count, then one line
        per GridCell) and the per-cell thicknesses to thickness.txt.
        NOTE(review): handle g (thickness.txt) is never closed — relies
        on interpreter cleanup.  Returns 0."""
        print "writing layer " + self.layer_name
        f = open("".join([self.layer_name,'.txt']),"w")
        g = open("thickness.txt","w")
        # layer type
        f.write(''.join([str(self.l_type) + '\n']))
        # layer id
        f.write(''.join([str(self.l_id) + '\n']))
        # fluid parameters
        f.write(''.join([str(self.l_co2_rho),'\n']))
        f.write(''.join([str(self.l_bri_rho),'\n']))
        f.write(''.join([str(self.l_co2_mu),'\n']))
        f.write(''.join([str(self.l_bri_mu),'\n']))
        f.write(''.join([str(self.sc_res),'\n']))
        f.write(''.join([str(self.sb_res),'\n']))
        f.write(''.join([str(self.c_co2),'\n']))
        f.write(''.join([str(self.c_bri),'\n']))
        f.write(''.join([str(self.c_roc),'\n']))
        if self.cap_rp_id == 0:
            f.write(''.join([str(self.cap_rp_id),'\n']))
        elif self.cap_rp_id == 1:
            # Brooks-Corey capillary pressure: hard-coded pore-size
            # distribution index and entry pressure [Pa]
            lamb = 3.
            p_entry = 3000000.
            f.write(''.join([str(self.cap_rp_id), ', ',\
                str(lamb), ', ', \
                str(p_entry), '\n']))
            self.plot_cap_rp_bc(lamb, p_entry)
        # number of cells
        f.write(''.join([str(self.nx * self.ny), '\n']))
        for cel in self.grid_cells:
            g.write(''.join([str((cel.top_b - cel.bottom_b)),', ']))
            cel.write_cell(f)
        f.close()
        return 0
    def bc_cap(self, pentry, lamb):
        # NOTE(review): `pcap` is never defined, so calling this raises
        # NameError — appears to be an unfinished stub.
        return pcap
    def plot_cap_rp_bc(self, lamb, p_entry):
        """Save Brooks-Corey capillary pressure (pcap.png) and relative
        permeability (relperm.png) curves over brine saturations from
        sb_res to 1.  NOTE(review): at sb = sb_res the effective
        saturation is 0, so pow(0, negative) in the pcap line raises
        ZeroDivisionError — confirm whether sb_res > first sample was
        intended.  Returns 0."""
        sb = np.linspace(self.sb_res,1.)
        pc = np.zeros(len(sb))
        krb = np.zeros(len(sb))
        krc = np.zeros(len(sb))
        for i in range(len(sb)):
            # effective (normalized) brine saturation
            seff = (sb[i] - self.sb_res) / (1 - self.sb_res)
            pc[i] = p_entry * pow(seff, -1/lamb)
            # Burdine relative permeabilities for the Brooks-Corey model
            krb[i] = pow(seff, (2 + 3 * lamb)/lamb)
            krc[i] = (1 - seff)**2 * (1 - pow(seff, (2 + lamb) / lamb))
        fig1 = plt.figure()
        ax1 = fig1.add_subplot(111)
        ax1.plot(sb, pc)
        ax1.set_xlabel('sb []')
        ax1.set_ylabel('pcap [Pa]')
        plt.savefig('pcap.png')
        plt.clf()
        fig2 = plt.figure()
        ax2 = fig2.add_subplot(111)
        ax2.plot(sb, krb, label = 'krb')
        ax2.plot(sb, krc, label = 'krc')
        ax2.legend(loc=1)
        ax2.set_xlabel('sb')
        plt.savefig('relperm.png')
        return 0
class GridCell(object):
    """One VESA grid cell: geometry, rock properties, boundary-condition
    flags and the initial pressure at the formation bottom."""

    def __init__(self, top_b, bottom_b, x, y, dx, dy, phi, k,
                 west_bc, east_bc, south_bc, north_bc, pressure):
        self.top_b = top_b          # top elevation (negative depth) [m]
        self.bottom_b = bottom_b    # bottom elevation [m]
        self.x = x                  # cell-center x coordinate
        self.y = y                  # cell-center y coordinate
        self.dx = dx                # cell width in x
        self.dy = dy                # cell width in y
        self.phi = phi              # porosity []
        self.k = k                  # permeability [md]
        self.west_bc = west_bc      # boundary flags: 1 internal,
        self.east_bc = east_bc      # 3 constant pressure, 4 no flow
        self.south_bc = south_bc
        self.north_bc = north_bc
        self.pressure = pressure    # initial pressure at cell bottom [Pa]

    def write_cell(self, f):
        """Write this cell as one comma-separated record to file f and
        return f.  Field order matches the VESA layer-file format."""
        fields = [
            str(self.x), str(self.y), str(self.dx), str(self.dy),
            '%.4f' % self.phi,
            # The formation is isotropic in the plane and VESA ignores the
            # z permeability, so the same value is written three times.
            '%.0f' % self.k, '%.0f' % self.k, '%.0f' % self.k,
            '%.4f' % self.bottom_b,
            '%.4f' % self.top_b,
            '0.0',   # initial CO2 saturation
            '0.0',   # past CO2 saturation
            '%.0f' % self.pressure,  # initial pressure at bottom [Pa]
            str(self.east_bc), str(self.north_bc),
            str(self.west_bc), str(self.south_bc),
        ]
        f.write(', '.join(fields) + ', \n')
        return f
def stats(data):
    """Return (mean, sample variance) of a sequence of numbers.

    Uses the unbiased (n - 1) denominator, so ``data`` must contain at
    least two values; a single-element input raises ZeroDivisionError,
    matching the original behavior.  Idiom cleanup: the two manual
    accumulation loops are replaced with builtin sum().
    """
    mean = sum(data) / len(data)
    variance = sum((value - mean) ** 2 for value in data) / (len(data) - 1)
    return (mean, variance)
| mit |
idlead/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 17 | 5726 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index (full scan). LSHForest index
have sub-linear scalability profile but can be slower for small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundreds of dimensions. Higher dimensional datasets tends to
benefit more from LSHForest indexing.
The break even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
# (log-spaced index sizes from n_samples_min to n_samples_max)
n_samples_values = np.logspace(np.log10(n_samples_min),
                               np.log10(n_samples_max),
                               n_steps).astype(np.int)
# Generate some structured data
# (blobs give clustered structure; the first n_queries rows are held out
# as query points, the rest are indexed)
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
                         n_features=n_features, centers=n_centers, shuffle=True,
                         random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
# NOTE(review): LSHForest was removed from recent scikit-learn releases;
# this script presumably targets the 0.1x series — confirm before running.
for n_samples in n_samples_values:
    X = index_data[:n_samples]
    # Initialize LSHForest for queries of a single neighbor
    lshf = LSHForest(n_estimators=20, n_candidates=200,
                     n_neighbors=10).fit(X)
    nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
                            n_neighbors=10).fit(X)
    time_approx = []
    time_exact = []
    accuracy = []
    for i in range(n_iter):
        # pick one query at random to study query time variability in LSHForest
        query = queries[rng.randint(0, n_queries)]
        t0 = time.time()
        exact_neighbors = nbrs.kneighbors(query, return_distance=False)
        time_exact.append(time.time() - t0)
        t0 = time.time()
        approx_neighbors = lshf.kneighbors(query, return_distance=False)
        time_approx.append(time.time() - t0)
        # fraction of approximate neighbors that are true neighbors
        accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
    average_time_exact = np.mean(time_exact)
    average_time_approx = np.mean(time_approx)
    # per-query speedup ratios (exact time / approximate time)
    speedup = np.array(time_exact) / np.array(time_approx)
    average_speedup = np.mean(speedup)
    mean_accuracy = np.mean(accuracy)
    std_accuracy = np.std(accuracy)
    print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
          "accuracy: %0.2f +/-%0.2f" %
          (n_samples, average_time_exact, average_time_approx, average_speedup,
           mean_accuracy, std_accuracy))
    # accumulate one point per index size for the plots below
    accuracies.append(mean_accuracy)
    std_accuracies.append(std_accuracy)
    average_times_exact.append(average_time_exact)
    average_times_approx.append(average_time_approx)
    std_times_approx.append(np.std(time_approx))
    average_speedups.append(average_speedup)
    std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
# (error bars show the standard deviation over the n_iter repeated queries)
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
             fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
         label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', prop=dict(size='small'))
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
          "nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
             fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
# (y-limit slightly above 1 so the top error bar stays visible)
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
r-mart/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
    """Incremental PCA on dense arrays matches batch PCA on iris."""
    X = iris.data
    batch_size = X.shape[0] // 3
    ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
    pca = PCA(n_components=2)
    pca.fit_transform(X)
    X_transformed = ipca.fit_transform(X)
    np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
    # explained-variance totals only need to agree to 1 decimal place
    assert_almost_equal(ipca.explained_variance_ratio_.sum(),
                        pca.explained_variance_ratio_.sum(), 1)
    for n_components in [1, 2, X.shape[1]]:
        ipca = IncrementalPCA(n_components, batch_size=batch_size)
        ipca.fit(X)
        cov = ipca.get_covariance()
        precision = ipca.get_precision()
        # covariance and precision matrices must be numerical inverses
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
    # Test that the projection of data is correct.
    rng = np.random.RandomState(1999)
    n, p = 100, 3
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5])
    Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
    # Get the reconstruction of the generated data X
    # Note that Xt has the same "components" as X, just separated
    # This is what we want to ensure is recreated correctly
    Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
    # Normalize
    Yt /= np.sqrt((Yt ** 2).sum())
    # Make sure that the first element of Yt is ~1, this means
    # the reconstruction worked as expected
    assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
    # Test that the projection of data can be inverted.
    rng = np.random.RandomState(1999)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean
    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
    Y = ipca.transform(X)
    Y_inverse = ipca.inverse_transform(Y)
    assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
    """n_components outside [1, n_features] must raise ValueError."""
    X = [[0, 1], [1, 0]]
    for n_components in [-1, 0, .99, 3]:
        assert_raises(ValueError, IncrementalPCA(n_components,
                                                 batch_size=10).fit, X)
def test_incremental_pca_set_params():
    """Changing n_components between partial_fit calls must raise."""
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 20
    X = rng.randn(n_samples, n_features)
    X2 = rng.randn(n_samples, n_features)
    X3 = rng.randn(n_samples, n_features)
    ipca = IncrementalPCA(n_components=20)
    ipca.fit(X)
    # Decreasing number of components
    ipca.set_params(n_components=10)
    assert_raises(ValueError, ipca.partial_fit, X2)
    # Increasing number of components
    ipca.set_params(n_components=15)
    assert_raises(ValueError, ipca.partial_fit, X3)
    # Returning to original setting
    ipca.set_params(n_components=20)
    ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
    """partial_fit with a different n_features must raise ValueError."""
    rng = np.random.RandomState(1999)
    n_samples = 100
    X = rng.randn(n_samples, 20)
    X2 = rng.randn(n_samples, 50)
    ipca = IncrementalPCA(n_components=None)
    ipca.fit(X)
    assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
    # Test that components_ sign is stable over batch sizes.
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 3
    X = rng.randn(n_samples, n_features)
    all_components = []
    batch_sizes = np.arange(10, 20)
    for batch_size in batch_sizes:
        ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
        all_components.append(ipca.components_)
    # compare each fit's component signs against the next batch size's
    for i, j in zip(all_components[:-1], all_components[1:]):
        assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
    # Test that components_ values are stable over batch sizes.
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 3
    X = rng.randn(n_samples, n_features)
    all_components = []
    batch_sizes = np.arange(20, 40, 3)
    for batch_size in batch_sizes:
        ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
        all_components.append(ipca.components_)
    # values only need to agree loosely (1 decimal) across batch sizes
    for i, j in zip(all_components[:-1], all_components[1:]):
        assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
    # Test that fit and partial_fit get equivalent results.
    rng = np.random.RandomState(1999)
    n, p = 50, 3
    X = rng.randn(n, p)  # spherical data
    X[:, 1] *= .00001  # make middle component relatively small
    X += [5, 4, 3]  # make a large mean
    # same check that we can find the original data from the transformed
    # signal (since the data is almost of rank n_components)
    batch_size = 10
    ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
    pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
    # Add one to make sure endpoint is included
    batch_itr = np.arange(0, n + 1, batch_size)
    for i, j in zip(batch_itr[:-1], batch_itr[1:]):
        pipca.partial_fit(X[i:j, :])
    assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
    # Test that IncrementalPCA and PCA are approximate (to a sign flip).
    X = iris.data
    Y_pca = PCA(n_components=2).fit_transform(X)
    Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
    # absolute values ignore the arbitrary sign of each component
    assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
    # Test that IncrementalPCA and PCA are approximate (to a sign flip).
    rng = np.random.RandomState(1999)
    n_samples = 100
    n_features = 3
    X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
    Y_pca = PCA(n_components=3).fit_transform(X)
    Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
    assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
    # Test that PCA and IncrementalPCA calculations match
    X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
                                      effective_rank=10, random_state=1999)
    prec = 3
    n_samples, n_features = X.shape
    for nc in [None, 99]:
        pca = PCA(n_components=nc).fit(X)
        ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
        assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
                            decimal=prec)
        assert_almost_equal(pca.explained_variance_ratio_,
                            ipca.explained_variance_ratio_, decimal=prec)
        assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
                            decimal=prec)
def test_whitening():
    # Test that PCA and IncrementalPCA transforms match to sign flip.
    X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
                                      effective_rank=2, random_state=1999)
    prec = 3
    n_samples, n_features = X.shape
    for nc in [None, 9]:
        pca = PCA(whiten=True, n_components=nc).fit(X)
        ipca = IncrementalPCA(whiten=True, n_components=nc,
                              batch_size=250).fit(X)
        Xt_pca = pca.transform(X)
        Xt_ipca = ipca.transform(X)
        # whitened projections agree up to component sign
        assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
        Xinv_ipca = ipca.inverse_transform(Xt_ipca)
        Xinv_pca = pca.inverse_transform(Xt_pca)
        # both inverse transforms recover the data and agree with each other
        assert_almost_equal(X, Xinv_ipca, decimal=prec)
        assert_almost_equal(X, Xinv_pca, decimal=prec)
        assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
justincassidy/scikit-learn | sklearn/linear_model/least_angle.py | 57 | 49338 | """
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_X_y
from ..cross_validation import check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
# scipy >= 0.12 accepts check_finite=False in linalg.solve_triangular,
# which skips input validation for a speed win; older versions reject the
# keyword, so only pass it when the installed scipy is new enough.
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
    solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
              alpha_min=0, method='lar', copy_X=True,
              eps=np.finfo(np.float).eps,
              copy_Gram=True, verbose=0, return_path=True,
              return_n_iter=False):
    """Compute Least Angle Regression or Lasso path using LARS algorithm [1]

    The optimization objective for the case method='lasso' is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    in the case of method='lars', the objective function is only known in
    the form of an implicit equation (see discussion in [1])

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    -----------
    X : array, shape: (n_samples, n_features)
        Input data.

    y : array, shape: (n_samples)
        Input targets.

    Xy : array-like, shape (n_samples,) or (n_samples, n_targets), optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    max_iter : integer, optional (default=500)
        Maximum number of iterations to perform, set to infinity for no limit.

    Gram : None, 'auto', array, shape: (n_features, n_features), optional
        Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
        matrix is precomputed from the given X, if there are more samples
        than features.

    alpha_min : float, optional (default=0)
        Minimum correlation along the path. It corresponds to the
        regularization parameter alpha parameter in the Lasso.

    method : {'lar', 'lasso'}, optional (default='lar')
        Specifies the returned model. Select ``'lar'`` for Least Angle
        Regression, ``'lasso'`` for the Lasso.

    eps : float, optional (default=``np.finfo(np.float).eps``)
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.

    copy_X : bool, optional (default=True)
        If ``False``, ``X`` is overwritten.

    copy_Gram : bool, optional (default=True)
        If ``False``, ``Gram`` is overwritten.

    verbose : int (default=0)
        Controls output verbosity.

    return_path : bool, optional (default=True)
        If ``return_path==True`` returns the entire path, else returns only the
        last point of the path.

    return_n_iter : bool, optional (default=False)
        Whether to return the number of iterations.

    Returns
    --------
    alphas : array, shape: [n_alphas + 1]
        Maximum of covariances (in absolute value) at each iteration.
        ``n_alphas`` is either ``max_iter``, ``n_features`` or the
        number of nodes in the path with ``alpha >= alpha_min``, whichever
        is smaller.

    active : array, shape [n_alphas]
        Indices of active variables at the end of the path.

    coefs : array, shape (n_features, n_alphas + 1)
        Coefficients along the path

    n_iter : int
        Number of iterations run. Returned only if return_n_iter is set
        to True.

    See also
    --------
    lasso_path
    LassoLars
    Lars
    LassoLarsCV
    LarsCV
    sklearn.decomposition.sparse_encode

    References
    ----------
    .. [1] "Least Angle Regression", Efron et al.
           http://www-stat.stanford.edu/~tibs/ftp/lars.pdf

    .. [2] `Wikipedia entry on the Least-angle regression
           <http://en.wikipedia.org/wiki/Least-angle_regression>`_

    .. [3] `Wikipedia entry on the Lasso
           <http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_

    """
    n_features = X.shape[1]
    n_samples = y.size
    max_features = min(max_iter, n_features)
    if return_path:
        # One row per iteration (plus the all-zero starting point).
        coefs = np.zeros((max_features + 1, n_features))
        alphas = np.zeros(max_features + 1)
    else:
        coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
        alpha, prev_alpha = np.array([0.]), np.array([0.])  # better ideas?
    n_iter, n_active = 0, 0
    active, indices = list(), np.arange(n_features)
    # holds the sign of covariance
    sign_active = np.empty(max_features, dtype=np.int8)
    drop = False
    # will hold the cholesky factorization. Only lower part is
    # referenced.
    # We are initializing this to "zeros" and not empty, because
    # it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that it not used, we
    # get errors raised.
    # Once we support only scipy > 0.12 we can use check_finite=False and
    # go back to "empty"
    L = np.zeros((max_features, max_features), dtype=X.dtype)
    swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
    solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
    if Gram is None:
        if copy_X:
            # force copy. setting the array to be fortran-ordered
            # speeds up the calculation of the (partial) Gram matrix
            # and allows to easily swap columns
            X = X.copy('F')
    elif Gram == 'auto':
        Gram = None
        if X.shape[0] > X.shape[1]:
            Gram = np.dot(X.T, X)
    elif copy_Gram:
        Gram = Gram.copy()
    if Xy is None:
        Cov = np.dot(X.T, y)
    else:
        Cov = Xy.copy()
    if verbose:
        if verbose > 1:
            print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
        else:
            sys.stdout.write('.')
            sys.stdout.flush()
    tiny = np.finfo(np.float).tiny  # to avoid division by 0 warning
    tiny32 = np.finfo(np.float32).tiny  # to avoid division by 0 warning
    equality_tolerance = np.finfo(np.float32).eps
    while True:
        if Cov.size:
            # C is the largest absolute current correlation; its variable
            # is the next candidate to enter the active set.
            C_idx = np.argmax(np.abs(Cov))
            C_ = Cov[C_idx]
            C = np.fabs(C_)
        else:
            C = 0.
        if return_path:
            alpha = alphas[n_iter, np.newaxis]
            coef = coefs[n_iter]
            prev_alpha = alphas[n_iter - 1, np.newaxis]
            prev_coef = coefs[n_iter - 1]
        alpha[0] = C / n_samples
        if alpha[0] <= alpha_min + equality_tolerance:  # early stopping
            if abs(alpha[0] - alpha_min) > equality_tolerance:
                # interpolation factor 0 <= ss < 1
                if n_iter > 0:
                    # In the first iteration, all alphas are zero, the formula
                    # below would make ss a NaN
                    ss = ((prev_alpha[0] - alpha_min) /
                          (prev_alpha[0] - alpha[0]))
                    coef[:] = prev_coef + ss * (coef - prev_coef)
                alpha[0] = alpha_min
            if return_path:
                coefs[n_iter] = coef
            break
        if n_iter >= max_iter or n_active >= n_features:
            break
        if not drop:
            ##########################################################
            # Append x_j to the Cholesky factorization of (Xa * Xa') #
            #                                                        #
            #            ( L   0 )                                   #
            #     L  ->  (       )  , where L * w = Xa' x_j          #
            #            ( w   z )    and z = ||x_j||                #
            #                                                        #
            ##########################################################
            sign_active[n_active] = np.sign(C_)
            m, n = n_active, C_idx + n_active
            Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
            indices[n], indices[m] = indices[m], indices[n]
            Cov_not_shortened = Cov
            Cov = Cov[1:]  # remove Cov[0]
            if Gram is None:
                X.T[n], X.T[m] = swap(X.T[n], X.T[m])
                c = nrm2(X.T[n_active]) ** 2
                L[n_active, :n_active] = \
                    np.dot(X.T[n_active], X.T[:n_active].T)
            else:
                # swap does only work inplace if matrix is fortran
                # contiguous ...
                Gram[m], Gram[n] = swap(Gram[m], Gram[n])
                Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
                c = Gram[n_active, n_active]
                L[n_active, :n_active] = Gram[n_active, :n_active]
            # Update the cholesky decomposition for the Gram matrix
            if n_active:
                linalg.solve_triangular(L[:n_active, :n_active],
                                        L[n_active, :n_active],
                                        trans=0, lower=1,
                                        overwrite_b=True,
                                        **solve_triangular_args)
            v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
            diag = max(np.sqrt(np.abs(c - v)), eps)
            L[n_active, n_active] = diag
            if diag < 1e-7:
                # The system is becoming too ill-conditioned.
                # We have degenerate vectors in our active set.
                # We'll 'drop for good' the last regressor added.
                # Note: this case is very rare. It is no longer triggered by the
                # test suite. The `equality_tolerance` margin added in 0.16.0 to
                # get early stopping to work consistently on all versions of
                # Python including 32 bit Python under Windows seems to make it
                # very difficult to trigger the 'drop for good' strategy.
                warnings.warn('Regressors in active set degenerate. '
                              'Dropping a regressor, after %i iterations, '
                              'i.e. alpha=%.3e, '
                              'with an active set of %i regressors, and '
                              'the smallest cholesky pivot element being %.3e'
                              % (n_iter, alpha, n_active, diag),
                              ConvergenceWarning)
                # XXX: need to figure a 'drop for good' way
                Cov = Cov_not_shortened
                Cov[0] = 0
                Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
                continue
            active.append(indices[n_active])
            n_active += 1
            if verbose > 1:
                print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
                                                      n_active, C))
        if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
            # alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error that is greater than
            # than the remaining correlation with the
            # regressors. Time to bail out
            warnings.warn('Early stopping the lars path, as the residues '
                          'are small and the current value of alpha is no '
                          'longer well controlled. %i iterations, alpha=%.3e, '
                          'previous alpha=%.3e, with an active set of %i '
                          'regressors.'
                          % (n_iter, alpha, prev_alpha, n_active),
                          ConvergenceWarning)
            break
        # least squares solution
        least_squares, info = solve_cholesky(L[:n_active, :n_active],
                                             sign_active[:n_active],
                                             lower=True)
        if least_squares.size == 1 and least_squares == 0:
            # This happens because sign_active[:n_active] = 0
            least_squares[...] = 1
            AA = 1.
        else:
            # is this really needed ?
            AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
            if not np.isfinite(AA):
                # L is too ill-conditioned
                # Regularize the diagonal with geometrically growing jitter
                # until the normalization constant becomes finite.
                i = 0
                L_ = L[:n_active, :n_active].copy()
                while not np.isfinite(AA):
                    L_.flat[::n_active + 1] += (2 ** i) * eps
                    least_squares, info = solve_cholesky(
                        L_, sign_active[:n_active], lower=True)
                    tmp = max(np.sum(least_squares * sign_active[:n_active]),
                              eps)
                    AA = 1. / np.sqrt(tmp)
                    i += 1
            least_squares *= AA
        if Gram is None:
            # equiangular direction of variables in the active set
            eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each unactive variables and
            # eqiangular vector
            corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
        else:
            # if huge number of features, this takes 50% of time, I
            # think could be avoided if we just update it using an
            # orthogonal (QR) decomposition of X
            corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
                                 least_squares)
        # gamma_ is the step length along the equiangular direction.
        g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
        g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
        gamma_ = min(g1, g2, C / AA)
        # TODO: better names for these variables: z
        drop = False
        z = -coef[active] / (least_squares + tiny32)
        z_pos = arrayfuncs.min_pos(z)
        if z_pos < gamma_:
            # some coefficients have changed sign
            idx = np.where(z == z_pos)[0][::-1]
            # update the sign, important for LAR
            sign_active[idx] = -sign_active[idx]
            if method == 'lasso':
                gamma_ = z_pos
            drop = True
        n_iter += 1
        if return_path:
            if n_iter >= coefs.shape[0]:
                del coef, alpha, prev_alpha, prev_coef
                # resize the coefs and alphas array
                add_features = 2 * max(1, (max_features - n_active))
                coefs = np.resize(coefs, (n_iter + add_features, n_features))
                alphas = np.resize(alphas, n_iter + add_features)
            coef = coefs[n_iter]
            prev_coef = coefs[n_iter - 1]
            alpha = alphas[n_iter, np.newaxis]
            prev_alpha = alphas[n_iter - 1, np.newaxis]
        else:
            # mimic the effect of incrementing n_iter on the array references
            prev_coef = coef
            prev_alpha[0] = alpha[0]
            coef = np.zeros_like(coef)
        coef[active] = prev_coef[active] + gamma_ * least_squares
        # update correlations
        Cov -= gamma_ * corr_eq_dir
        # See if any coefficient has changed sign
        if drop and method == 'lasso':
            # handle the case when idx is not length of 1
            [arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
                idx]
            n_active -= 1
            m, n = idx, n_active
            # handle the case when idx is not length of 1
            drop_idx = [active.pop(ii) for ii in idx]
            if Gram is None:
                # propagate dropped variable
                for ii in idx:
                    for i in range(ii, n_active):
                        X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
                        # yeah this is stupid
                        indices[i], indices[i + 1] = indices[i + 1], indices[i]
                # TODO: this could be updated
                residual = y - np.dot(X[:, :n_active], coef[active])
                temp = np.dot(X.T[n_active], residual)
                Cov = np.r_[temp, Cov]
            else:
                for ii in idx:
                    for i in range(ii, n_active):
                        indices[i], indices[i + 1] = indices[i + 1], indices[i]
                        Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1])
                        Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
                                                          Gram[:, i + 1])
                # Cov_n = Cov_j + x_j * X + increment(betas) TODO:
                # will this still work with multiple drops ?
                # recompute covariance. Probably could be done better
                # wrong as Xy is not swapped with the rest of variables
                # TODO: this could be updated
                residual = y - np.dot(X, coef)
                temp = np.dot(X.T[drop_idx], residual)
                Cov = np.r_[temp, Cov]
            sign_active = np.delete(sign_active, idx)
            sign_active = np.append(sign_active, 0.)  # just to maintain size
            if verbose > 1:
                print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
                                                      n_active, abs(temp)))
    if return_path:
        # resize coefs in case of early stop
        alphas = alphas[:n_iter + 1]
        coefs = coefs[:n_iter + 1]
        if return_n_iter:
            return alphas, active, coefs.T, n_iter
        else:
            return alphas, active, coefs.T
    else:
        if return_n_iter:
            return alpha, active, coef, n_iter
        else:
            return alpha, active, coef
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
    """Least Angle Regression model a.k.a. LAR

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    n_nonzero_coefs : int, optional
        Target number of non-zero coefficients. Use ``np.inf`` for no limit.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default True
        If ``True``, the regressors X will be normalized before regression.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    fit_path : boolean
        If True the full path is stored in the ``coef_path_`` attribute.
        If you compute the solution for a large problem or many targets,
        setting ``fit_path`` to ``False`` will lead to a speedup, especially
        with a small alpha.

    Attributes
    ----------
    alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
        Maximum of covariances (in absolute value) at each iteration. \
        ``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
        whichever is smaller.

    active_ : list, length = n_alphas | list of n_targets such lists
        Indices of active variables at the end of the path.

    coef_path_ : array, shape (n_features, n_alphas + 1) \
        | list of n_targets such arrays
        The varying values of the coefficients along the path. It is not
        present if the ``fit_path`` parameter is ``False``.

    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the formulation formula).

    intercept_ : float | array, shape (n_targets,)
        Independent term in decision function.

    n_iter_ : array-like or int
        The number of iterations taken by lars_path to find the
        grid of alphas for each target.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lars(n_nonzero_coefs=1)
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
       n_nonzero_coefs=1, normalize=True, precompute='auto', verbose=False)
    >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0. -1.11...]

    See also
    --------
    lars_path, LarsCV
    sklearn.decomposition.sparse_encode
    """
    def __init__(self, fit_intercept=True, verbose=False, normalize=True,
                 precompute='auto', n_nonzero_coefs=500,
                 eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
        self.fit_intercept = fit_intercept
        self.verbose = verbose
        self.normalize = normalize
        # Subclasses (e.g. LassoLars) override this to 'lasso'.
        self.method = 'lar'
        self.precompute = precompute
        self.n_nonzero_coefs = n_nonzero_coefs
        self.eps = eps
        self.copy_X = copy_X
        self.fit_path = fit_path
    def _get_gram(self):
        """Translate the ``precompute`` setting into a ``Gram`` argument
        for :func:`lars_path` (array, 'auto', or None)."""
        # precompute if n_samples > n_features
        precompute = self.precompute
        if hasattr(precompute, '__array__'):
            Gram = precompute
        elif precompute == 'auto':
            Gram = 'auto'
        else:
            Gram = None
        return Gram
    def fit(self, X, y, Xy=None):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values.

        Xy : array-like, shape (n_samples,) or (n_samples, n_targets), \
                optional
            Xy = np.dot(X.T, y) that can be precomputed. It is useful
            only when the Gram matrix is precomputed.

        Returns
        -------
        self : object
            returns an instance of self.
        """
        X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
        n_features = X.shape[1]
        X, y, X_mean, y_mean, X_std = self._center_data(X, y,
                                                        self.fit_intercept,
                                                        self.normalize,
                                                        self.copy_X)
        # Treat single-target y as a 1-column multi-target problem.
        if y.ndim == 1:
            y = y[:, np.newaxis]
        n_targets = y.shape[1]
        alpha = getattr(self, 'alpha', 0.)
        if hasattr(self, 'n_nonzero_coefs'):
            alpha = 0.  # n_nonzero_coefs parametrization takes priority
            max_iter = self.n_nonzero_coefs
        else:
            max_iter = self.max_iter
        precompute = self.precompute
        if not hasattr(precompute, '__array__') and (
                precompute is True or
                (precompute == 'auto' and X.shape[0] > X.shape[1]) or
                (precompute == 'auto' and y.shape[1] > 1)):
            # Build the Gram matrix once and share it across all targets.
            Gram = np.dot(X.T, X)
        else:
            Gram = self._get_gram()
        self.alphas_ = []
        self.n_iter_ = []
        if self.fit_path:
            self.coef_ = []
            self.active_ = []
            self.coef_path_ = []
            for k in xrange(n_targets):
                this_Xy = None if Xy is None else Xy[:, k]
                alphas, active, coef_path, n_iter_ = lars_path(
                    X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
                    copy_Gram=True, alpha_min=alpha, method=self.method,
                    verbose=max(0, self.verbose - 1), max_iter=max_iter,
                    eps=self.eps, return_path=True,
                    return_n_iter=True)
                self.alphas_.append(alphas)
                self.active_.append(active)
                self.n_iter_.append(n_iter_)
                self.coef_path_.append(coef_path)
                self.coef_.append(coef_path[:, -1])
            # For a single target, unwrap the per-target lists to plain
            # arrays so attributes have the documented shapes.
            if n_targets == 1:
                self.alphas_, self.active_, self.coef_path_, self.coef_ = [
                    a[0] for a in (self.alphas_, self.active_, self.coef_path_,
                                   self.coef_)]
                self.n_iter_ = self.n_iter_[0]
        else:
            self.coef_ = np.empty((n_targets, n_features))
            for k in xrange(n_targets):
                this_Xy = None if Xy is None else Xy[:, k]
                alphas, _, self.coef_[k], n_iter_ = lars_path(
                    X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
                    copy_Gram=True, alpha_min=alpha, method=self.method,
                    verbose=max(0, self.verbose - 1), max_iter=max_iter,
                    eps=self.eps, return_path=False, return_n_iter=True)
                self.alphas_.append(alphas)
                self.n_iter_.append(n_iter_)
            if n_targets == 1:
                self.alphas_ = self.alphas_[0]
                self.n_iter_ = self.n_iter_[0]
        self._set_intercept(X_mean, y_mean, X_std)
        return self
class LassoLars(Lars):
    """Lasso model fit with Least Angle Regression a.k.a. Lars

    It is a Linear Model trained with an L1 prior as regularizer.

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    alpha : float
        Constant that multiplies the penalty term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by :class:`LinearRegression`. For numerical reasons, using
        ``alpha = 0`` with the LassoLars object is not advised and you
        should prefer the LinearRegression object.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default True
        If True, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : integer, optional
        Maximum number of iterations to perform.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    fit_path : boolean
        If ``True`` the full path is stored in the ``coef_path_`` attribute.
        If you compute the solution for a large problem or many targets,
        setting ``fit_path`` to ``False`` will lead to a speedup, especially
        with a small alpha.

    Attributes
    ----------
    alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
        Maximum of covariances (in absolute value) at each iteration. \
        ``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
        nodes in the path with correlation greater than ``alpha``, whichever \
        is smaller.

    active_ : list, length = n_alphas | list of n_targets such lists
        Indices of active variables at the end of the path.

    coef_path_ : array, shape (n_features, n_alphas + 1) or list
        If a list is passed it's expected to be one of n_targets such arrays.
        The varying values of the coefficients along the path. It is not
        present if the ``fit_path`` parameter is ``False``.

    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Parameter vector (w in the formulation formula).

    intercept_ : float | array, shape (n_targets,)
        Independent term in decision function.

    n_iter_ : array-like or int.
        The number of iterations taken by lars_path to find the
        grid of alphas for each target.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.LassoLars(alpha=0.01)
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
         fit_path=True, max_iter=500, normalize=True, precompute='auto',
         verbose=False)
    >>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0.         -0.963257...]

    See also
    --------
    lars_path
    lasso_path
    Lasso
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
                 normalize=True, precompute='auto', max_iter=500,
                 eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        # Selects the lasso variant of the LARS path in Lars.fit.
        self.method = 'lasso'
        self.precompute = precompute
        self.copy_X = copy_X
        self.eps = eps
        self.fit_path = fit_path
###############################################################################
# Cross-validated estimator classes
def _check_copy_and_writeable(array, copy=False):
if copy or not array.flags.writeable:
return array.copy()
return array
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
                        copy=True, method='lars', verbose=False,
                        fit_intercept=True, normalize=True, max_iter=500,
                        eps=np.finfo(np.float).eps):
    """Compute the residues on left-out data for a full LARS path

    Parameters
    -----------
    X_train : array, shape (n_samples, n_features)
        The data to fit the LARS on

    y_train : array, shape (n_samples)
        The target variable to fit LARS on

    X_test : array, shape (n_samples, n_features)
        The data to compute the residues on

    y_test : array, shape (n_samples)
        The target variable to compute the residues on

    Gram : None, 'auto', array, shape: (n_features, n_features), optional
        Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
        matrix is precomputed from the given X, if there are more samples
        than features

    copy : boolean, optional
        Whether X_train, X_test, y_train and y_test should be copied;
        if False, they may be overwritten.

    method : 'lar' | 'lasso'
        Specifies the returned model. Select ``'lar'`` for Least Angle
        Regression, ``'lasso'`` for the Lasso.
        NOTE(review): the signature default is ``'lars'``, which is not one
        of the documented values; lars_path only special-cases ``'lasso'``,
        so any other string behaves like ``'lar'`` — confirm before relying
        on the default.

    verbose : integer, optional
        Sets the amount of verbosity

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default True
        If True, the regressors X will be normalized before regression.

    max_iter : integer, optional
        Maximum number of iterations to perform.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    Returns
    --------
    alphas : array, shape (n_alphas,)
        Maximum of covariances (in absolute value) at each iteration.
        ``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
        is smaller.

    active : list
        Indices of active variables at the end of the path.

    coefs : array, shape (n_features, n_alphas)
        Coefficients along the path

    residues : array, shape (n_alphas, n_samples)
        Residues of the prediction on the test data
    """
    X_train = _check_copy_and_writeable(X_train, copy)
    y_train = _check_copy_and_writeable(y_train, copy)
    X_test = _check_copy_and_writeable(X_test, copy)
    y_test = _check_copy_and_writeable(y_test, copy)
    if fit_intercept:
        # Center train and test with the *training* statistics so the
        # left-out fold is scored in the same frame the model was fit in.
        X_mean = X_train.mean(axis=0)
        X_train -= X_mean
        X_test -= X_mean
        y_mean = y_train.mean(axis=0)
        y_train = as_float_array(y_train, copy=False)
        y_train -= y_mean
        y_test = as_float_array(y_test, copy=False)
        y_test -= y_mean
    if normalize:
        norms = np.sqrt(np.sum(X_train ** 2, axis=0))
        # Skip all-zero columns to avoid dividing by zero.
        nonzeros = np.flatnonzero(norms)
        X_train[:, nonzeros] /= norms[nonzeros]
    alphas, active, coefs = lars_path(
        X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
        method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps)
    if normalize:
        # Rescale coefficients back to the original (unnormalized) X scale.
        coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
    residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
    return alphas, active, coefs, residues.T
class LarsCV(Lars):
    """Cross-validated Least Angle Regression model

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default True
        If True, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : integer, optional
        Maximum number of iterations to perform.

    cv : cross-validation generator, optional
        see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
        a 5-fold strategy

    max_n_alphas : integer, optional
        The maximum number of points on the path used to compute the
        residuals in the cross-validation

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)

    intercept_ : float
        independent term in decision function

    coef_path_ : array, shape (n_features, n_alphas)
        the varying values of the coefficients along the path

    alpha_ : float
        the estimated regularization parameter alpha

    alphas_ : array, shape (n_alphas,)
        the different values of alpha along the path

    cv_alphas_ : array, shape (n_cv_alphas,)
        all the values of alpha along the path for the different folds

    cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
        the mean square error on left-out for each fold along the path
        (alpha values given by ``cv_alphas``)

    n_iter_ : array-like or int
        the number of iterations run by Lars with the optimal alpha.

    See also
    --------
    lars_path, LassoLars, LassoLarsCV
    """
    # Subclasses (LassoLarsCV) override this to select the lasso variant.
    method = 'lar'
    def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
                 normalize=True, precompute='auto', cv=None,
                 max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
                 copy_X=True):
        self.fit_intercept = fit_intercept
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        self.precompute = precompute
        self.copy_X = copy_X
        self.cv = cv
        self.max_n_alphas = max_n_alphas
        self.n_jobs = n_jobs
        self.eps = eps
    def fit(self, X, y):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        y : array-like, shape (n_samples,)
            Target values.

        Returns
        -------
        self : object
            returns an instance of self.
        """
        self.fit_path = True
        X, y = check_X_y(X, y, y_numeric=True)
        # init cross-validation generator
        cv = check_cv(self.cv, X, y, classifier=False)
        Gram = 'auto' if self.precompute else None
        # One LARS path (with left-out residues) per CV fold, in parallel.
        cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
            delayed(_lars_path_residues)(
                X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
                method=self.method, verbose=max(0, self.verbose - 1),
                normalize=self.normalize, fit_intercept=self.fit_intercept,
                max_iter=self.max_iter, eps=self.eps)
            for train, test in cv)
        all_alphas = np.concatenate(list(zip(*cv_paths))[0])
        # Unique also sorts
        all_alphas = np.unique(all_alphas)
        # Take at most max_n_alphas values
        stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
        all_alphas = all_alphas[::stride]
        mse_path = np.empty((len(all_alphas), len(cv_paths)))
        for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
            alphas = alphas[::-1]
            residues = residues[::-1]
            # Pad each fold's path so it spans the common alpha grid
            # before interpolating (endpoints are held constant).
            if alphas[0] != 0:
                alphas = np.r_[0, alphas]
                residues = np.r_[residues[0, np.newaxis], residues]
            if alphas[-1] != all_alphas[-1]:
                alphas = np.r_[alphas, all_alphas[-1]]
                residues = np.r_[residues, residues[-1, np.newaxis]]
            this_residues = interpolate.interp1d(alphas,
                                                 residues,
                                                 axis=0)(all_alphas)
            this_residues **= 2
            mse_path[:, index] = np.mean(this_residues, axis=-1)
        # Drop alphas where any fold produced a non-finite MSE.
        mask = np.all(np.isfinite(mse_path), axis=-1)
        all_alphas = all_alphas[mask]
        mse_path = mse_path[mask]
        # Select the alpha that minimizes left-out error
        i_best_alpha = np.argmin(mse_path.mean(axis=-1))
        best_alpha = all_alphas[i_best_alpha]
        # Store our parameters
        self.alpha_ = best_alpha
        self.cv_alphas_ = all_alphas
        self.cv_mse_path_ = mse_path
        # Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
        # as self.method == 'lasso'
        Lars.fit(self, X, y)
        return self
    @property
    def alpha(self):
        # impedance matching for the above Lars.fit (should not be documented)
        return self.alpha_
class LassoLarsCV(LarsCV):
    """Cross-validated Lasso, using the LARS algorithm

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default True
        If True, the regressors X will be normalized before regression.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : integer, optional
        Maximum number of iterations to perform.

    cv : cross-validation generator, optional
        see sklearn.cross_validation module. If None is passed, default to
        a 5-fold strategy

    max_n_alphas : integer, optional
        The maximum number of points on the path used to compute the
        residuals in the cross-validation

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)

    intercept_ : float
        independent term in decision function.

    coef_path_ : array, shape (n_features, n_alphas)
        the varying values of the coefficients along the path

    alpha_ : float
        the estimated regularization parameter alpha

    alphas_ : array, shape (n_alphas,)
        the different values of alpha along the path

    cv_alphas_ : array, shape (n_cv_alphas,)
        all the values of alpha along the path for the different folds

    cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
        the mean square error on left-out for each fold along the path
        (alpha values given by ``cv_alphas``)

    n_iter_ : array-like or int
        the number of iterations run by Lars with the optimal alpha.

    Notes
    -----
    The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alphas values by itself.
    In general, because of this property, it will be more stable.
    However, it is more fragile to heavily multicollinear datasets.

    It is more efficient than the LassoCV if only a small number of
    features are selected compared to the total number, for instance if
    there are very few samples compared to the number of features.

    See also
    --------
    lars_path, LassoLars, LarsCV, LassoCV
    """
    # All behavior is inherited from LarsCV; only the path variant differs.
    method = 'lasso'
class LassoLarsIC(LassoLars):
    """Lasso model fit with Lars using BIC or AIC for model selection

    The optimization objective for Lasso is::

    (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    AIC is the Akaike information criterion and BIC is the Bayes
    Information criterion. Such criteria are useful to select the value
    of the regularization parameter by making a trade-off between the
    goodness of fit and the complexity of the model. A good model should
    explain well the data while being simple.

    Read more in the :ref:`User Guide <least_angle_regression>`.

    Parameters
    ----------
    criterion : 'bic' | 'aic'
        The type of criterion to use.

    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    verbose : boolean or integer, optional
        Sets the verbosity amount

    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : integer, optional
        Maximum number of iterations to perform. Can be used for
        early stopping.

    eps : float, optional
        The machine-precision regularization in the computation of the
        Cholesky diagonal factors. Increase this for very ill-conditioned
        systems. Unlike the ``tol`` parameter in some iterative
        optimization-based algorithms, this parameter does not control
        the tolerance of the optimization.

    Attributes
    ----------
    coef_ : array, shape (n_features,)
        parameter vector (w in the formulation formula)

    intercept_ : float
        independent term in decision function.

    alpha_ : float
        the alpha parameter chosen by the information criterion

    n_iter_ : int
        number of iterations run by lars_path to find the grid of
        alphas.

    criterion_ : array, shape (n_alphas,)
        The value of the information criteria ('aic', 'bic') across all
        alphas. The alpha which has the smallest information criteria
        is chosen.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.LassoLarsIC(criterion='bic')
    >>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
          max_iter=500, normalize=True, precompute='auto',
          verbose=False)
    >>> print(clf.coef_)  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    [ 0.  -1.11...]

    Notes
    -----
    The estimation of the number of degrees of freedom is given by:

    "On the degrees of freedom of the lasso"
    Hui Zou, Trevor Hastie, and Robert Tibshirani
    Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.

    http://en.wikipedia.org/wiki/Akaike_information_criterion
    http://en.wikipedia.org/wiki/Bayesian_information_criterion

    See also
    --------
    lars_path, LassoLars, LassoLarsCV
    """
    def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
                 normalize=True, precompute='auto', max_iter=500,
                 eps=np.finfo(float).eps, copy_X=True):
        # ``np.finfo(float)`` replaces the former ``np.finfo(np.float)``:
        # ``np.float`` was only a deprecated alias for the builtin ``float``
        # and was removed in NumPy 1.24, so the old spelling raises
        # AttributeError on modern NumPy.  The default value is unchanged.
        self.criterion = criterion
        self.fit_intercept = fit_intercept
        self.max_iter = max_iter
        self.verbose = verbose
        self.normalize = normalize
        self.copy_X = copy_X
        self.precompute = precompute
        self.eps = eps

    def fit(self, X, y, copy_X=True):
        """Fit the model using X, y as training data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            training data.

        y : array-like, shape (n_samples,)
            target values.

        copy_X : boolean, optional, default True
            If ``True``, X will be copied; else, it may be overwritten.

        Returns
        -------
        self : object
            returns an instance of self.
        """
        # NOTE(review): ``fit_path`` is presumably consumed by the parent
        # class' prediction machinery -- confirm; it is set here rather
        # than in ``__init__``.
        self.fit_path = True
        X, y = check_X_y(X, y, y_numeric=True)

        X, y, Xmean, ymean, Xstd = LinearModel._center_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X)
        max_iter = self.max_iter

        Gram = self._get_gram()

        # Compute the whole Lars path once; the information criterion is
        # then evaluated at every knot of that path.
        alphas_, active_, coef_path_, self.n_iter_ = lars_path(
            X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
            method='lasso', verbose=self.verbose, max_iter=max_iter,
            eps=self.eps, return_n_iter=True)

        n_samples = X.shape[0]

        if self.criterion == 'aic':
            K = 2  # AIC
        elif self.criterion == 'bic':
            K = log(n_samples)  # BIC
        else:
            raise ValueError('criterion should be either bic or aic')

        R = y[:, np.newaxis] - np.dot(X, coef_path_)  # residuals
        mean_squared_error = np.mean(R ** 2, axis=0)

        # ``dtype=int`` replaces the removed ``np.int`` alias; the resulting
        # dtype is identical since ``np.int`` *was* the builtin ``int``.
        df = np.zeros(coef_path_.shape[1], dtype=int)  # Degrees of freedom
        for k, coef in enumerate(coef_path_.T):
            mask = np.abs(coef) > np.finfo(coef.dtype).eps
            if not np.any(mask):
                continue
            # get the number of degrees of freedom equal to:
            # Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs
            df[k] = np.sum(mask)

        self.alphas_ = alphas_
        # log(0) can legitimately occur for a perfect fit on some knot;
        # silence only the divide warning while forming the criterion.
        with np.errstate(divide='ignore'):
            self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
        n_best = np.argmin(self.criterion_)

        self.alpha_ = alphas_[n_best]
        self.coef_ = coef_path_[:, n_best]
        self._set_intercept(Xmean, ymean, Xstd)
        return self
| bsd-3-clause |
rosswhitfield/mantid | qt/python/mantidqt/plotting/test/test_figuretype.py | 3 | 2627 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
#
from __future__ import absolute_import
# std imports
from unittest import TestCase, main
# thirdparty imports
import matplotlib
matplotlib.use('AGG') # noqa
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
# local imports
from mantidqt.plotting.figuretype import figure_type, FigureType
class FigureTypeTest(TestCase):
    """Check that ``figure_type`` classifies each supported figure kind."""

    def test_figure_type_empty_figure_returns_empty(self):
        """A figure with no axes is classified as Empty."""
        empty_fig = plt.figure()
        self.assertEqual(FigureType.Empty, figure_type(empty_fig))

    def test_subplot_with_multiple_plots_returns_other(self):
        """A multi-subplot grid is classified as Other."""
        axes = plt.subplot(221)
        self.assertEqual(FigureType.Other, figure_type(axes.figure))

    def test_line_plot_returns_line(self):
        """A plain line plot is classified as Line."""
        axes = plt.subplot(111)
        axes.plot([1])
        self.assertEqual(FigureType.Line, figure_type(axes.figure))

    def test_error_plot_returns_error(self):
        """A plot with error bars is classified as Errorbar."""
        axes = plt.subplot(111)
        axes.errorbar([1], [1], yerr=[0.01])
        self.assertEqual(FigureType.Errorbar, figure_type(axes.figure))

    def test_image_plot_returns_image(self):
        """An imshow-based plot is classified as Image."""
        axes = plt.subplot(111)
        axes.imshow([[1], [1]])
        self.assertEqual(FigureType.Image, figure_type(axes.figure))

    def test_surface_plot_returns_surface(self):
        """A 3D surface on a mantid3d projection is classified as Surface."""
        point = np.array([[1]])
        _, axes = plt.subplots(subplot_kw={'projection': 'mantid3d'})
        axes.plot_surface(point, point, point)
        self.assertEqual(FigureType.Surface, figure_type(axes.figure))

    def test_wireframe_plot_returns_wireframe(self):
        """A 3D wireframe is classified as Wireframe."""
        point = np.array([[1]])
        axes = plt.subplot(111, projection='3d')
        axes.plot_wireframe(point, point, point)
        self.assertEqual(FigureType.Wireframe, figure_type(axes.figure))

    def test_contour_plot_returns_contour(self):
        """An image with contour overlays is classified as Contour."""
        axes = plt.subplot(111)
        axes.imshow([[1], [1]])
        axes.contour([[1, 1], [1, 1]])
        self.assertEqual(FigureType.Contour, figure_type(axes.figure))

    def test_mesh_plot_returns_mesh(self):
        """A Poly3DCollection added to a mantid3d axes is classified as Mesh."""
        vertices = np.array([[[1, 1, 1], [2, 2, 2], [3, 3, 3]]])
        mesh_polygon = Poly3DCollection(vertices)
        _, axes = plt.subplots(subplot_kw={'projection': 'mantid3d'})
        axes.add_collection3d(mesh_polygon)
        self.assertEqual(FigureType.Mesh, figure_type(axes.figure))
# Allow running this test module directly: ``python test_figuretype.py``.
if __name__ == '__main__':
    main()
| gpl-3.0 |
choupi/NDHUDLWorkshop | mnist/xgboost/xg.py | 1 | 2143 | '''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
import xgboost as xgb
from sklearn.metrics import *
batch_size = 128
nb_classes = 10
nb_epoch = 12
# input image dimensions
img_rows, img_cols = 28, 28
# number of convolutional filters to use
nb_filters = 32
# size of pooling area for max pooling
nb_pool = 2
# convolution kernel size
nb_conv = 3
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], img_rows*img_cols)
X_test = X_test.reshape(X_test.shape[0], img_rows*img_cols)
#X_train = X_train.astype('float32')
#X_test = X_test.astype('float32')
print('X_train shape:', X_train.shape)
#print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
#Y_train = np_utils.to_categorical(y_train, nb_classes)
#Y_test = np_utils.to_categorical(y_test, nb_classes)
Y_train = [int(y) for y in y_train]
Y_test = [int(y) for y in y_test]
param = {}
param['objective'] = 'multi:softprob'
param['booster'] = 'gbtree'
param['num_class'] = 10
param['eta'] = 0.1
param['max_depth'] = 4
param['min_child_weight'] = 0.316
param['silent'] = 1
plst = list(param.items())
num_round = 12
model_file='mnist.model'
xgmat = xgb.DMatrix(X_train, label=Y_train)
watchlist = [ (xgmat,'train') ]
bst = xgb.train(plst, xgmat, num_round, watchlist)
bst.save_model(model_file)
xgmat = xgb.DMatrix(X_test)
bst = xgb.Booster(model_file = model_file)
y_predict = bst.predict(xgmat)
#y_predict = bst.predict_proba(xgmat)
print(y_predict)
print(log_loss(Y_test, y_predict))
#print(y_predict)
#predict=y_predict
predict=[]
for yy in y_predict: predict.append(np.argmax(yy))
#print(predict)
print(f1_score(Y_test, predict))
print(classification_report(Y_test, predict))
print(confusion_matrix(Y_test, predict))
| mit |
sahat/bokeh | bokeh/protocol.py | 1 | 3541 | import json
import logging
import time
import datetime as dt
import numpy as np
from six.moves import cPickle as pickle
from .utils import get_ref
try:
import pandas as pd
is_pandas = True
except ImportError:
is_pandas = False
try:
from dateutil.relativedelta import relativedelta
is_dateutil = True
except ImportError:
is_dateutil = False
log = logging.getLogger(__name__)

# Divisor used to turn pandas ``Timestamp.value`` (nanoseconds) into
# milliseconds: 1e6 ns per ms.
millifactor = 10 ** 6.
class BokehJSONEncoder(json.JSONEncoder):
    """A ``json.JSONEncoder`` that serializes the objects Bokeh sends over
    the wire: numpy arrays and scalars, pandas Series/Index/Timestamp,
    datetimes, dateutil relativedeltas, and Bokeh plot objects, properties
    and colors.
    """

    def transform_series(self, obj):
        """Transform a pandas Series into a json-safe list."""
        vals = obj.values
        return self.transform_array(vals)

    def transform_array(self, obj):
        """Transform arrays into lists of json safe types
        also handles pandas series, and replacing
        nans and infs with strings
        """
        # not quite correct, truncates to ms..
        if obj.dtype.kind == 'M':
            return obj.astype('datetime64[ms]').astype('int64').tolist()
        elif obj.dtype.kind in ('u', 'i', 'f'):
            return self.transform_numerical_array(obj)
        return obj.tolist()

    def transform_numerical_array(self, obj):
        """Handle nan/inf conversion: JSON cannot represent them, so they
        are emitted as the strings 'NaN', 'Infinity' and '-Infinity'.
        """
        if not np.isnan(obj).any() and not np.isinf(obj).any():
            return obj.tolist()
        else:
            transformed = obj.astype('object')
            transformed[np.isnan(obj)] = 'NaN'
            transformed[np.isposinf(obj)] = 'Infinity'
            transformed[np.isneginf(obj)] = '-Infinity'
            return transformed.tolist()

    def transform_python_types(self, obj):
        """Handle special scalars; defer to the base JSON encoder otherwise."""
        if is_pandas and isinstance(obj, pd.Timestamp):
            # ``pd.Timestamp`` is the public name of the class formerly
            # reached via the private (and since removed) ``pd.tslib``.
            # ``Timestamp.value`` is in nanoseconds; convert to milliseconds.
            return obj.value / millifactor
        elif isinstance(obj, (float, np.floating)):
            # ``np.float`` was only an alias of the builtin float (removed
            # in NumPy 1.24).  ``np.floating`` additionally catches numpy
            # float scalars such as float32 that are not float subclasses.
            return float(obj)
        elif isinstance(obj, (int, np.integer)):
            # Same story for ``np.int``.  On Python 3 numpy integer scalars
            # (e.g. np.int64) are *not* subclasses of int, so they
            # previously fell through and raised TypeError.
            return int(obj)
        elif isinstance(obj, (dt.datetime, dt.date)):
            # Seconds since epoch -> milliseconds.
            return time.mktime(obj.timetuple()) * 1000.
        elif isinstance(obj, dt.time):
            # Milliseconds since midnight plus the sub-millisecond part.
            return (obj.hour * 3600 + obj.minute * 60 + obj.second) * 1000 + obj.microsecond
        elif is_dateutil and isinstance(obj, relativedelta):
            return dict(years=obj.years, months=obj.months, days=obj.days, hours=obj.hours,
                        minutes=obj.minutes, seconds=obj.seconds, microseconds=obj.microseconds)
        else:
            return super(BokehJSONEncoder, self).default(obj)

    def default(self, obj):
        # argh! local import! (kept local to avoid circular imports at
        # module load time)
        from .plot_object import PlotObject
        from .properties import HasProps
        from .colors import Color
        # array types
        if is_pandas and isinstance(obj, (pd.Series, pd.Index)):
            return self.transform_series(obj)
        elif isinstance(obj, np.ndarray):
            return self.transform_array(obj)
        elif isinstance(obj, PlotObject):
            return get_ref(obj)
        elif isinstance(obj, HasProps):
            return obj.to_dict()
        elif isinstance(obj, Color):
            return obj.toCSS()
        else:
            return self.transform_python_types(obj)
def serialize_json(obj, encoder=BokehJSONEncoder, **kwargs):
    """Encode *obj* as a JSON string.

    ``encoder`` (a ``json.JSONEncoder`` subclass, by default the Bokeh-aware
    :class:`BokehJSONEncoder`) is handed to :func:`json.dumps` as ``cls``,
    together with any extra keyword arguments.
    """
    return json.dumps(obj, cls=encoder, **kwargs)
# Deserialization is plain ``json.loads``; the ``*_web`` names are aliases
# for the web transport and are currently identical to the JSON
# (de)serializers.
deserialize_json = json.loads
serialize_web = serialize_json
deserialize_web = deserialize_json
def status_obj(status):
    """Wrap *status* in a status-message payload dict."""
    return dict(msgtype='status', status=status)
def error_obj(error_msg):
    """Wrap *error_msg* in an error-message payload dict."""
    return dict(msgtype='error', error_msg=error_msg)
| bsd-3-clause |
neutrons/FastGR | addie/processing/mantid/master_table/table_plot_handler.py | 1 | 6631 | from __future__ import (absolute_import, division, print_function)
from qtpy.QtCore import Qt
import os
import numpy as np
import glob
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm
from addie.processing.idl.sample_environment_handler import SampleEnvironmentHandler
class TablePlotHandler:
    """Plot helpers for the master table.

    Loads S(Q) / temperature data for the runs referenced by the current
    table row and renders them with matplotlib.  ``parent`` is expected to
    be the hosting widget and to expose a Qt ``table`` attribute.
    """
    def __init__(self, parent=None):
        # parent: hosting widget; provides ``parent.table`` (used by the
        # cell-retrieval helpers below).
        self.parent = parent

    def plot_sofq(self):
        # Plot S(Q) for every run of the current row, ordered by run number.
        sofq_datasets = self._plot_fetch_data()
        self._plot_datasets(sorted(sofq_datasets,
                                   key=lambda k: int(k['run'])),
                            title='S(Q)')

    def plot_sofq_diff_first_run_row(self):
        # Plot S(Q) differences relative to the first dataset of the row.
        sofq_datasets = self._plot_fetch_data()
        # Shallow copy: sofq_base['y'] aliases the first dataset's array,
        # but the loop below *rebinds* each sofq['y'] (no in-place update),
        # so the baseline array itself is never modified mid-iteration.
        sofq_base = dict(sofq_datasets[0])
        for sofq in sorted(sofq_datasets,
                           key=lambda k: int(k['run'])):
            sofq['y'] = sofq['y'] - sofq_base['y']
        self._plot_datasets(sofq_datasets,
                            shift_value=0.2,
                            title='S(Q) - S(Q) for run ' + sofq_base['run'])

    def plot_sofq_diff_average_row(self):
        # Plot S(Q) differences relative to the element-wise average of
        # all runs in the row.
        sofq_datasets = self._plot_fetch_data()
        sofq_data = [sofq['y'] for sofq in sofq_datasets]
        sofq_avg = np.average(sofq_data, axis=0)
        for sofq in sorted(sofq_datasets, key=lambda k: int(k['run'])):
            sofq['y'] = sofq['y'] - sofq_avg
        self._plot_datasets(sofq_datasets,
                            shift_value=0.2,
                            title='S(Q) - <S(Q)>')

    def _plot_temperature(self, samp_env_choice=None):
        # Overlay sample ('samp', solid line) and environment ('envi',
        # dashed line) temperature logs read from the row's NeXus files.
        file_list = self._plot_fetch_files(file_type='nexus')
        samp_env = SampleEnvironmentHandler(samp_env_choice)
        datasets = list()
        for data in file_list:
            samp_x, samp_y = samp_env.getDataFromFile(data['file'], 'samp')
            envi_x, envi_y = samp_env.getDataFromFile(data['file'], 'envi')
            datasets.append({'run': data['run'] + '_samp',
                             'x': samp_x,
                             'y': samp_y,
                             'linestyle': '-'})
            # run=None keeps the environment trace out of the legend labels.
            # NOTE(review): sorting below by k['run'] compares None with
            # str, which raises TypeError on Python 3 -- verify.
            datasets.append({'run': None,
                             'x': envi_x,
                             'y': envi_y,
                             'linestyle': '--'})
        self._plot_datasets(sorted(datasets,
                                   key=lambda k: k['run']),
                            shift_value=0.0,
                            title='Temperature: ' + samp_env_choice)

    # utilities functions
    def _plot_fetch_data(self):
        # Load the x/y columns of each run's S(Q) file into the file dicts.
        file_list = self._plot_fetch_files(file_type='SofQ')
        for data in file_list:
            with open(data['file'], 'r') as handle:
                x, y, e = np.loadtxt(handle, unpack=True)
            data['x'] = x
            data['y'] = y
        return file_list

    def _plot_fetch_files(self, file_type='SofQ'):
        # Resolve the on-disk files for every run listed in the current row.
        if file_type == 'SofQ':
            search_dir = './SofQ'
            prefix = 'NOM_'
            suffix = 'SQ.dat'
        elif file_type == 'nexus':
            cwd = os.getcwd()
            search_dir = cwd[:cwd.find('shared')] + '/nexus'
            prefix = 'NOM_'
            suffix = '.nxs.h5'
        # ipts = int(re.search(r"IPTS-(\d*)\/", os.getcwd()).group(1))
        # NOTE(review): ``self.current_row`` is never assigned in this
        # class -- presumably set by the owner before plotting; verify.
        _row = self.current_row
        _row_runs = self._collect_metadata(row_index=_row)['runs'].split(',')
        output_list = list()
        file_list = [a_file for a_file in glob.glob(search_dir + '/' + prefix + '*')]
        for run in _row_runs:
            the_file = search_dir + '/' + prefix + str(run) + suffix
            # Only keep runs whose file actually exists in the search dir.
            if the_file in file_list:
                output_list.append({'file': the_file, 'run': run})
        return output_list

    def _plot_datasets(self, datasets, shift_value=1.0, cmap_choice='inferno', title=None):
        # Render all datasets in one figure, each curve offset vertically
        # by ``shift_value`` and colored along the chosen colormap.
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        # configure plot
        cmap = plt.get_cmap(cmap_choice)
        cNorm = colors.Normalize(vmin=0, vmax=len(datasets))
        scalarMap = cm.ScalarMappable(norm=cNorm, cmap=cmap)
        mrks = [0, -1]  # only mark the first and last point of each curve
        # plot data
        shifter = 0.0
        for idx, data in enumerate(datasets):
            # In-place offset: mutates the caller-supplied 'y' arrays.
            data['y'] += shifter
            colorVal = scalarMap.to_rgba(idx)
            if 'linestyle' in data:
                ax.plot(data['x'], data['y'], data['linestyle'] + 'o', label=data['run'], color=colorVal, markevery=mrks,)
            else:
                ax.plot(data['x'], data['y'], label=data['run'], color=colorVal, markevery=mrks)
            shifter += shift_value
        # Shrink the axes to make room for the legend on the right.
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
        handles, labels = ax.get_legend_handles_labels()
        ax.legend(handles[::-1], labels[::-1], title='Runs', loc='center left', bbox_to_anchor=(1, 0.5))
        if title:
            fig.suptitle(title)
        plt.show()

    def _collect_metadata(self, row_index=-1):
        # Gather the row's cell values into a metadata dict; the column
        # indices mirror the master-table layout.
        if row_index == -1:
            return []
        _name = self.retrieve_item_text(row_index, 1)
        _runs = self.retrieve_item_text(row_index, 2)
        _sample_formula = self.retrieve_item_text(row_index, 3)
        _mass_density = self.retrieve_item_text(row_index, 4)
        _radius = self.retrieve_item_text(row_index, 5)
        _packing_fraction = self.retrieve_item_text(row_index, 6)
        _sample_shape = self._retrieve_sample_shape(row_index)
        _do_abs_correction = self._retrieve_do_abs_correction(row_index)
        _metadata = {'name': _name,
                     'runs': _runs,
                     'sample_formula': _sample_formula,
                     'mass_density': _mass_density,
                     'radius': _radius,
                     'packing_fraction': _packing_fraction,
                     'sample_shape': _sample_shape,
                     'do_abs_correction': _do_abs_correction}
        return _metadata

    def retrieve_item_text(self, row, column):
        # Text of the table cell at (row, column); '' when the cell is empty.
        _item = self.parent.table.item(row, column)
        if _item is None:
            return ''
        else:
            return str(_item.text())

    def _retrieve_sample_shape(self, row_index):
        # Currently selected entry of the sample-shape combo box (column 7).
        _widget = self.parent.table.cellWidget(row_index, 7)
        _selected_index = _widget.currentIndex()
        _sample_shape = _widget.itemText(_selected_index)
        return _sample_shape

    def _retrieve_do_abs_correction(self, row_index):
        # 'go'/'nogo' from the absorption-correction checkbox (column 8).
        _widget = self.parent.table.cellWidget(row_index, 8).children()[1]
        if (_widget.checkState() == Qt.Checked):
            return 'go'
        else:
            return 'nogo'
| mit |
rajat1994/scikit-learn | sklearn/linear_model/coordinate_descent.py | 43 | 75144 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Gael Varoquaux <gael.varoquaux@inria.fr>
#
# License: BSD 3 clause
import sys
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from .base import LinearModel, _pre_fit
from ..base import RegressorMixin
from .base import center_data, sparse_center_data
from ..utils import check_array, check_X_y, deprecated
from ..utils.validation import check_random_state
from ..cross_validation import check_cv
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import xrange
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.validation import column_or_1d
from ..utils import ConvergenceWarning
from . import cd_fast
###############################################################################
# Paths functions
def _alpha_grid(X, y, Xy=None, l1_ratio=1.0, fit_intercept=True,
                eps=1e-3, n_alphas=100, normalize=False, copy_X=True):
    """ Compute the grid of alpha values for elastic net parameter search

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication

    y : ndarray, shape (n_samples,)
        Target values

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed.

    l1_ratio : float
        The elastic net mixing parameter, with ``0 <= l1_ratio <= 1``.
        For ``l1_ratio = 0`` the penalty is an L2 penalty. ``For
        l1_ratio = 1`` it is an L1 penalty. For ``0 < l1_ratio <
        1``, the penalty is a combination of L1 and L2.

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    fit_intercept : boolean, default True
        Whether to fit an intercept or not

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    """
    n_samples = len(y)

    sparse_center = False
    if Xy is None:
        X_sparse = sparse.isspmatrix(X)
        sparse_center = X_sparse and (fit_intercept or normalize)
        X = check_array(X, 'csc',
                        copy=(copy_X and fit_intercept and not X_sparse))
        if not X_sparse:
            # X can be touched inplace thanks to the above line
            X, y, _, _, _ = center_data(X, y, fit_intercept,
                                        normalize, copy=False)
        Xy = safe_sparse_dot(X.T, y, dense_output=True)

        if sparse_center:
            # Workaround to find alpha_max for sparse matrices.
            # since we should not destroy the sparsity of such matrices.
            _, _, X_mean, _, X_std = sparse_center_data(X, y, fit_intercept,
                                                        normalize)
            mean_dot = X_mean * np.sum(y)

    if Xy.ndim == 1:
        Xy = Xy[:, np.newaxis]

    if sparse_center:
        # Apply the centering/normalization to the precomputed Xy instead
        # of to the sparse X itself (which must stay sparse).
        if fit_intercept:
            Xy -= mean_dot[:, np.newaxis]

        if normalize:
            Xy /= X_std[:, np.newaxis]

    # Smallest alpha for which all coefficients are exactly zero; the grid
    # spans [alpha_max * eps, alpha_max] on a log scale, largest first.
    alpha_max = (np.sqrt(np.sum(Xy ** 2, axis=1)).max() /
                 (n_samples * l1_ratio))

    if alpha_max <= np.finfo(float).resolution:
        # Degenerate case (e.g. y is all zeros): return a flat grid at the
        # smallest representable resolution instead of log10(0).
        alphas = np.empty(n_alphas)
        alphas.fill(np.finfo(float).resolution)
        return alphas

    return np.logspace(np.log10(alpha_max * eps), np.log10(alpha_max),
                       num=n_alphas)[::-1]
def lasso_path(X, y, eps=1e-3, n_alphas=100, alphas=None,
               precompute='auto', Xy=None, copy_X=True, coef_init=None,
               verbose=False, return_n_iter=False, positive=False, **params):
    """Compute Lasso path with coordinate descent

    The Lasso optimization function varies for mono and multi-outputs.

    For mono-output tasks it is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    For multi-output tasks it is::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.

    y : ndarray, shape (n_samples,), or (n_samples, n_outputs)
        Target values

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : ndarray, optional
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.

    verbose : bool or integer
        Amount of verbosity.

    params : kwargs
        keyword arguments passed to the coordinate descent solver.

    positive : bool, default False
        If set to True, forces coefficients to be positive.

    return_n_iter : bool
        whether to return the number of iterations or not.

    Returns
    -------
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.

    coefs : array, shape (n_features, n_alphas) or \
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.

    dual_gaps : array, shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.

    n_iters : array-like, shape (n_alphas,)
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.

    Notes
    -----
    See examples/linear_model/plot_lasso_coordinate_descent_path.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    Note that in certain cases, the Lars solver may be significantly
    faster to implement this functionality. In particular, linear
    interpolation can be used to retrieve model coefficients between the
    values output by lars_path

    Examples
    ---------

    Comparing lasso_path and lars_path with interpolation:

    >>> X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
    >>> y = np.array([1, 2, 3.1])
    >>> # Use lasso_path to compute a coefficient path
    >>> _, coef_path, _ = lasso_path(X, y, alphas=[5., 1., .5])
    >>> print(coef_path)
    [[ 0.          0.          0.46874778]
     [ 0.2159048   0.4425765   0.23689075]]

    >>> # Now use lars_path and 1D linear interpolation to compute the
    >>> # same path
    >>> from sklearn.linear_model import lars_path
    >>> alphas, active, coef_path_lars = lars_path(X, y, method='lasso')
    >>> from scipy import interpolate
    >>> coef_path_continuous = interpolate.interp1d(alphas[::-1],
    ...                                             coef_path_lars[:, ::-1])
    >>> print(coef_path_continuous([5., 1., .5]))
    [[ 0.          0.          0.46915237]
     [ 0.2159048   0.4425765   0.23668876]]


    See also
    --------
    lars_path
    Lasso
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode
    """
    # BUG FIX: ``return_n_iter`` is a named parameter of this function and
    # therefore never ends up in ``**params`` -- it must be forwarded
    # explicitly.  Previously it was dropped, so callers requesting the
    # iteration counts silently received the 3-tuple instead.
    return enet_path(X, y, l1_ratio=1., eps=eps, n_alphas=n_alphas,
                     alphas=alphas, precompute=precompute, Xy=Xy,
                     copy_X=copy_X, coef_init=coef_init, verbose=verbose,
                     positive=positive, return_n_iter=return_n_iter,
                     **params)
def enet_path(X, y, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
              precompute='auto', Xy=None, copy_X=True, coef_init=None,
              verbose=False, return_n_iter=False, positive=False, **params):
    """Compute elastic net path with coordinate descent

    The elastic net optimization function varies for mono and multi-outputs.

    For mono-output tasks it is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    For multi-output tasks it is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of norm of each row.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    X : {array-like}, shape (n_samples, n_features)
        Training data. Pass directly as Fortran-contiguous data to avoid
        unnecessary memory duplication. If ``y`` is mono-output then ``X``
        can be sparse.

    y : ndarray, shape (n_samples,) or (n_samples, n_outputs)
        Target values

    l1_ratio : float, optional
        float between 0 and 1 passed to elastic net (scaling between
        l1 and l2 penalties). ``l1_ratio=1`` corresponds to the Lasso

    eps : float
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``

    n_alphas : int, optional
        Number of alphas along the regularization path

    alphas : ndarray, optional
        List of alphas where to compute the models.
        If None alphas are set automatically

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    Xy : array-like, optional
        Xy = np.dot(X.T, y) that can be precomputed. It is useful
        only when the Gram matrix is precomputed.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    coef_init : array, shape (n_features, ) | None
        The initial values of the coefficients.

    verbose : bool or integer
        Amount of verbosity.

    params : kwargs
        keyword arguments passed to the coordinate descent solver.

    return_n_iter : bool
        whether to return the number of iterations or not.

    positive : bool, default False
        If set to True, forces coefficients to be positive.

    Returns
    -------
    alphas : array, shape (n_alphas,)
        The alphas along the path where models are computed.

    coefs : array, shape (n_features, n_alphas) or \
            (n_outputs, n_features, n_alphas)
        Coefficients along the path.

    dual_gaps : array, shape (n_alphas,)
        The dual gaps at the end of the optimization for each alpha.

    n_iters : array-like, shape (n_alphas,)
        The number of iterations taken by the coordinate descent optimizer to
        reach the specified tolerance for each alpha.
        (Is returned when ``return_n_iter`` is set to True).

    Notes
    -----
    See examples/plot_lasso_coordinate_descent_path.py for an example.

    See also
    --------
    MultiTaskElasticNet
    MultiTaskElasticNetCV
    ElasticNet
    ElasticNetCV
    """
    # We expect X and y to be already float64 Fortran ordered when bypassing
    # checks
    check_input = 'check_input' not in params or params['check_input']
    # BUG FIX: this flag previously keyed on 'check_input' but then read
    # params['pre_fit'], raising KeyError when only ``check_input`` was
    # supplied and silently ignoring a lone ``pre_fit``.  Default to True
    # when the caller does not pass ``pre_fit``.
    pre_fit = 'pre_fit' not in params or params['pre_fit']
    if check_input:
        X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
        y = check_array(y, 'csc', dtype=np.float64, order='F', copy=False,
                        ensure_2d=False)
        if Xy is not None:
            Xy = check_array(Xy, 'csc', dtype=np.float64, order='F',
                             copy=False,
                             ensure_2d=False)
    n_samples, n_features = X.shape

    multi_output = False
    if y.ndim != 1:
        multi_output = True
        _, n_outputs = y.shape

    # MultiTaskElasticNet does not support sparse matrices
    if not multi_output and sparse.isspmatrix(X):
        if 'X_mean' in params:
            # As sparse matrices are not actually centered we need this
            # to be passed to the CD solver.
            X_sparse_scaling = params['X_mean'] / params['X_std']
        else:
            X_sparse_scaling = np.zeros(n_features)

    # X should be normalized and fit already if function is called
    # from ElasticNet.fit
    if pre_fit:
        X, y, X_mean, y_mean, X_std, precompute, Xy = \
            _pre_fit(X, y, Xy, precompute, normalize=False,
                     fit_intercept=False,
                     copy=False, Xy_precompute_order='F')
    if alphas is None:
        # No need to normalize or fit_intercept: it has been done
        # above
        alphas = _alpha_grid(X, y, Xy=Xy, l1_ratio=l1_ratio,
                             fit_intercept=False, eps=eps, n_alphas=n_alphas,
                             normalize=False, copy_X=False)
    else:
        alphas = np.sort(alphas)[::-1]  # make sure alphas are properly ordered

    n_alphas = len(alphas)
    tol = params.get('tol', 1e-4)
    max_iter = params.get('max_iter', 1000)
    dual_gaps = np.empty(n_alphas)
    n_iters = []

    rng = check_random_state(params.get('random_state', None))
    selection = params.get('selection', 'cyclic')
    if selection not in ['random', 'cyclic']:
        raise ValueError("selection should be either random or cyclic.")
    random = (selection == 'random')

    if not multi_output:
        coefs = np.empty((n_features, n_alphas), dtype=np.float64)
    else:
        coefs = np.empty((n_outputs, n_features, n_alphas),
                         dtype=np.float64)

    if coef_init is None:
        coef_ = np.asfortranarray(np.zeros(coefs.shape[:-1]))
    else:
        coef_ = np.asfortranarray(coef_init)

    # Warm-started descent: each alpha starts from the previous solution.
    for i, alpha in enumerate(alphas):
        l1_reg = alpha * l1_ratio * n_samples
        l2_reg = alpha * (1.0 - l1_ratio) * n_samples
        if not multi_output and sparse.isspmatrix(X):
            model = cd_fast.sparse_enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X.data, X.indices,
                X.indptr, y, X_sparse_scaling,
                max_iter, tol, rng, random, positive)
        elif multi_output:
            model = cd_fast.enet_coordinate_descent_multi_task(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random)
        elif isinstance(precompute, np.ndarray):
            # We expect precompute to be already Fortran ordered when bypassing
            # checks
            if check_input:
                precompute = check_array(precompute, 'csc', dtype=np.float64,
                                         order='F')
            model = cd_fast.enet_coordinate_descent_gram(
                coef_, l1_reg, l2_reg, precompute, Xy, y, max_iter,
                tol, rng, random, positive)
        elif precompute is False:
            model = cd_fast.enet_coordinate_descent(
                coef_, l1_reg, l2_reg, X, y, max_iter, tol, rng, random,
                positive)
        else:
            raise ValueError("Precompute should be one of True, False, "
                             "'auto' or array-like")
        coef_, dual_gap_, eps_, n_iter_ = model
        coefs[..., i] = coef_
        dual_gaps[i] = dual_gap_
        n_iters.append(n_iter_)
        if dual_gap_ > eps_:
            warnings.warn('Objective did not converge.' +
                          ' You might want' +
                          ' to increase the number of iterations',
                          ConvergenceWarning)

        if verbose:
            if verbose > 2:
                print(model)
            elif verbose > 1:
                print('Path: %03i out of %03i' % (i, n_alphas))
            else:
                sys.stderr.write('.')

    if return_n_iter:
        return alphas, coefs, dual_gaps, n_iters

    return alphas, coefs, dual_gaps
###############################################################################
# ElasticNet model
class ElasticNet(LinearModel, RegressorMixin):
    """Linear regression with combined L1 and L2 priors as regularizer.
    Minimizes the objective function::
            1 / (2 * n_samples) * ||y - Xw||^2_2 +
            + alpha * l1_ratio * ||w||_1
            + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2
    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::
            a * L1 + b * L2
    where::
            alpha = a + b and l1_ratio = a / (a + b)
    The parameter l1_ratio corresponds to alpha in the glmnet R package while
    alpha corresponds to the lambda parameter in glmnet. Specifically, l1_ratio
    = 1 is the lasso penalty. Currently, l1_ratio <= 0.01 is not reliable,
    unless you supply your own sequence of alpha.
    Read more in the :ref:`User Guide <elastic_net>`.
    Parameters
    ----------
    alpha : float
        Constant that multiplies the penalty terms. Defaults to 1.0
        See the notes for the exact mathematical meaning of this
        parameter.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
        and you should prefer the LinearRegression object.
    l1_ratio : float
        The ElasticNet mixing parameter, with ``0 <= l1_ratio <= 1``. For
        ``l1_ratio = 0`` the penalty is an L2 penalty. ``For l1_ratio = 1`` it
        is an L1 penalty. For ``0 < l1_ratio < 1``, the penalty is a
        combination of L1 and L2.
    fit_intercept : bool
        Whether the intercept should be estimated or not. If ``False``, the
        data is assumed to be already centered.
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.
        WARNING : The ``'auto'`` option is deprecated and will
        be removed in 0.18.
    max_iter : int, optional
        The maximum number of iterations
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.
    Attributes
    ----------
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        parameter vector (w in the cost function formula)
    sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``
    intercept_ : float | array, shape (n_targets,)
        independent term in decision function.
    n_iter_ : array-like, shape (n_targets,)
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.
    Notes
    -----
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    See also
    --------
    SGDRegressor: implements elastic net regression with incremental training.
    SGDClassifier: implements logistic regression with elastic net penalty
        (``SGDClassifier(loss="log", penalty="elasticnet")``).
    """
    # The path function used by ``fit`` below; subclasses (e.g. Lasso)
    # keep the same attribute so ``fit`` is shared.
    path = staticmethod(enet_path)
    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, precompute=False, max_iter=1000,
                 copy_X=True, tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        self.alpha = alpha
        self.l1_ratio = l1_ratio
        # coef_ is initialized here so warm_start can test it in ``fit``.
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.positive = positive
        self.intercept_ = 0.0
        self.random_state = random_state
        self.selection = selection
    def fit(self, X, y, check_input=True):
        """Fit model with coordinate descent.
        Parameters
        -----------
        X : ndarray or scipy.sparse matrix, (n_samples, n_features)
            Data
        y : ndarray, shape (n_samples,) or (n_samples, n_targets)
            Target
        check_input : boolean, default True
            If False, skip input validation; the caller must then supply
            float64 Fortran-ordered arrays.
        Returns
        -------
        self : the fitted estimator.
        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.
        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        # alpha == 0 degenerates to ordinary least squares, for which
        # coordinate descent is a poor solver -- warn but proceed.
        if self.alpha == 0:
            warnings.warn("With alpha=0, this algorithm does not converge "
                          "well. You are advised to use the LinearRegression "
                          "estimator", stacklevel=2)
        if self.precompute == 'auto':
            warnings.warn("Setting precompute to 'auto', was found to be "
                          "slower even when n_samples > n_features. Hence "
                          "it will be removed in 0.18.",
                          DeprecationWarning, stacklevel=2)
        # We expect X and y to be already float64 Fortran ordered arrays
        # when bypassing checks
        if check_input:
            X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float64,
                             order='F',
                             copy=self.copy_X and self.fit_intercept,
                             multi_output=True, y_numeric=True)
        # Center/scale the data and optionally build Gram (precompute) and
        # X'y products for the solver.
        X, y, X_mean, y_mean, X_std, precompute, Xy = \
            _pre_fit(X, y, None, self.precompute, self.normalize,
                     self.fit_intercept, copy=False, Xy_precompute_order='F')
        # Promote 1-d targets to (n_samples, 1) so the per-target loop
        # below handles mono- and multi-output uniformly.
        if y.ndim == 1:
            y = y[:, np.newaxis]
        if Xy is not None and Xy.ndim == 1:
            Xy = Xy[:, np.newaxis]
        n_samples, n_features = X.shape
        n_targets = y.shape[1]
        if self.selection not in ['cyclic', 'random']:
            raise ValueError("selection should be either random or cyclic.")
        # Warm-start from the previous coefficients when requested and
        # available; otherwise start from zeros (Fortran order for the
        # Cython solver).
        if not self.warm_start or self.coef_ is None:
            coef_ = np.zeros((n_targets, n_features), dtype=np.float64,
                             order='F')
        else:
            coef_ = self.coef_
            if coef_.ndim == 1:
                coef_ = coef_[np.newaxis, :]
        dual_gaps_ = np.zeros(n_targets, dtype=np.float64)
        self.n_iter_ = []
        # One single-alpha coordinate-descent run per output column.
        for k in xrange(n_targets):
            if Xy is not None:
                this_Xy = Xy[:, k]
            else:
                this_Xy = None
            _, this_coef, this_dual_gap, this_iter = \
                self.path(X, y[:, k],
                          l1_ratio=self.l1_ratio, eps=None,
                          n_alphas=None, alphas=[self.alpha],
                          precompute=precompute, Xy=this_Xy,
                          fit_intercept=False, normalize=False, copy_X=True,
                          verbose=False, tol=self.tol, positive=self.positive,
                          X_mean=X_mean, X_std=X_std, return_n_iter=True,
                          coef_init=coef_[k], max_iter=self.max_iter,
                          random_state=self.random_state,
                          selection=self.selection,
                          check_input=False,
                          pre_fit=False)
            # path returns arrays over alphas; only one alpha was requested.
            coef_[k] = this_coef[:, 0]
            dual_gaps_[k] = this_dual_gap[0]
            self.n_iter_.append(this_iter[0])
        if n_targets == 1:
            self.n_iter_ = self.n_iter_[0]
        # Drop singleton target axes so mono-output fits expose 1-d coef_.
        self.coef_, self.dual_gap_ = map(np.squeeze, [coef_, dual_gaps_])
        self._set_intercept(X_mean, y_mean, X_std)
        # return self for chaining fit and predict calls
        return self
    @property
    def sparse_coef_(self):
        """ sparse representation of the fitted coef """
        return sparse.csr_matrix(self.coef_)
    @deprecated(" and will be removed in 0.19")
    def decision_function(self, X):
        """Decision function of the linear model
        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
        Returns
        -------
        T : array, shape (n_samples,)
            The predicted decision function
        """
        return self._decision_function(X)
    def _decision_function(self, X):
        """Decision function of the linear model
        Parameters
        ----------
        X : numpy array or scipy.sparse matrix of shape (n_samples, n_features)
        Returns
        -------
        T : array, shape (n_samples,)
            The predicted decision function
        """
        check_is_fitted(self, 'n_iter_')
        # Sparse X: compute X @ coef_.T + intercept without densifying X.
        if sparse.isspmatrix(X):
            return np.ravel(safe_sparse_dot(self.coef_, X.T, dense_output=True)
                            + self.intercept_)
        else:
            return super(ElasticNet, self)._decision_function(X)
###############################################################################
# Lasso model
class Lasso(ElasticNet):
    """Linear Model trained with L1 prior as regularizer (aka the Lasso)

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Technically the Lasso model is optimizing the same objective function as
    the Elastic Net with ``l1_ratio=1.0`` (no L2 penalty).

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1 term. Defaults to 1.0.
        ``alpha = 0`` is equivalent to an ordinary least square, solved
        by the :class:`LinearRegression` object. For numerical
        reasons, using ``alpha = 0`` with the Lasso object is not advised
        and you should prefer the LinearRegression object.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument. For sparse input
        this option is always ``True`` to preserve sparsity.
        WARNING : The ``'auto'`` option is deprecated and will
        be removed in 0.18.

    max_iter : int, optional
        The maximum number of iterations.

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    warm_start : bool, optional
        When set to True, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).

    sparse_coef_ : scipy.sparse matrix, shape (n_features, 1) | \
            (n_targets, n_features)
        ``sparse_coef_`` is a readonly property derived from ``coef_``.

    intercept_ : float | array, shape (n_targets,)
        Independent term in decision function.

    n_iter_ : int | array-like, shape (n_targets,)
        Number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.Lasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
    Lasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
       normalize=False, positive=False, precompute=False, random_state=None,
       selection='cyclic', tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [ 0.85  0.  ]
    >>> print(clf.intercept_)
    0.15

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    LassoCV
    LassoLarsCV
    sklearn.decomposition.sparse_encode

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    path = staticmethod(enet_path)

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 precompute=False, copy_X=True, max_iter=1000,
                 tol=1e-4, warm_start=False, positive=False,
                 random_state=None, selection='cyclic'):
        # A Lasso is simply an ElasticNet with a pure L1 penalty, so pin
        # l1_ratio to 1.0 and forward everything else to the parent.
        super(Lasso, self).__init__(
            alpha=alpha, l1_ratio=1.0,
            fit_intercept=fit_intercept, normalize=normalize,
            precompute=precompute, copy_X=copy_X,
            max_iter=max_iter, tol=tol,
            warm_start=warm_start, positive=positive,
            random_state=random_state, selection=selection)
###############################################################################
# Functions for CV with paths functions
def _path_residuals(X, y, train, test, path, path_params, alphas=None,
                    l1_ratio=1, X_order=None, dtype=None):
    """Returns the MSE for the models computed by 'path'
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data.
    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Target values
    train : list of indices
        The indices of the train set
    test : list of indices
        The indices of the test set
    path : callable
        function returning a list of models on the path. See
        enet_path for an example of signature
    path_params : dictionary
        Parameters passed to the path function
    alphas : array-like, optional
        Array of float that is used for cross-validation. If not
        provided, computed using 'path'
    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0`` the penalty is an
        L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty. For ``0
        < l1_ratio < 1``, the penalty is a combination of L1 and L2
    X_order : {'F', 'C', or None}, optional
        The order of the arrays expected by the path function to
        avoid memory copies
    dtype : a numpy dtype or None
        The dtype of the arrays expected by the path function to
        avoid memory copies

    Returns
    -------
    this_mses : array, one test-set MSE per alpha on the path.
    """
    # Split the fold.
    X_train = X[train]
    y_train = y[train]
    X_test = X[test]
    y_test = y[test]
    fit_intercept = path_params['fit_intercept']
    normalize = path_params['normalize']
    if y.ndim == 1:
        precompute = path_params['precompute']
    else:
        # No Gram variant of multi-task exists right now.
        # Fall back to default enet_multitask
        precompute = False
    # Center/scale the training fold and optionally build the Gram
    # (precompute) and X'y products consumed by the path function.
    X_train, y_train, X_mean, y_mean, X_std, precompute, Xy = \
        _pre_fit(X_train, y_train, None, precompute, normalize, fit_intercept,
                 copy=False)
    # Copy before mutating: path_params is shared across parallel jobs.
    path_params = path_params.copy()
    path_params['Xy'] = Xy
    path_params['X_mean'] = X_mean
    path_params['X_std'] = X_std
    path_params['precompute'] = precompute
    path_params['copy_X'] = False
    path_params['alphas'] = alphas
    if 'l1_ratio' in path_params:
        path_params['l1_ratio'] = l1_ratio
    # Do the ordering and type casting here, as if it is done in the path,
    # X is copied and a reference is kept here
    X_train = check_array(X_train, 'csc', dtype=dtype, order=X_order)
    alphas, coefs, _ = path(X_train, y_train, **path_params)
    del X_train, y_train
    if y.ndim == 1:
        # Doing this so that it becomes coherent with multioutput.
        coefs = coefs[np.newaxis, :, :]
        y_mean = np.atleast_1d(y_mean)
        y_test = y_test[:, np.newaxis]
    if normalize:
        # Undo the unit-std scaling on the coefficients; features with zero
        # training-fold std are skipped to avoid dividing by zero.
        nonzeros = np.flatnonzero(X_std)
        coefs[:, nonzeros] /= X_std[nonzeros][:, np.newaxis]
    # Intercepts implied by the training-fold centering, per (target, alpha).
    intercepts = y_mean[:, np.newaxis] - np.dot(X_mean, coefs)
    if sparse.issparse(X_test):
        n_order, n_features, n_alphas = coefs.shape
        # Work around for sparse matices since coefs is a 3-D numpy array.
        coefs_feature_major = np.rollaxis(coefs, 1)
        feature_2d = np.reshape(coefs_feature_major, (n_features, -1))
        X_test_coefs = safe_sparse_dot(X_test, feature_2d)
        X_test_coefs = X_test_coefs.reshape(X_test.shape[0], n_order, -1)
    else:
        X_test_coefs = safe_sparse_dot(X_test, coefs)
    # residues = predictions - y_test, shape (n_test, n_targets, n_alphas).
    residues = X_test_coefs - y_test[:, :, np.newaxis]
    residues += intercepts
    # Average over samples, then over targets: one MSE value per alpha.
    this_mses = ((residues ** 2).mean(axis=0)).mean(axis=0)
    return this_mses
class LinearModelCV(six.with_metaclass(ABCMeta, LinearModel)):
    """Base class for iterative model fitting along a regularization path"""
    @abstractmethod
    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False, random_state=None, selection='cyclic'):
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.copy_X = copy_X
        self.cv = cv
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
    def fit(self, X, y):
        """Fit linear model with coordinate descent
        Fit is on grid of alphas and best alpha estimated by cross-validation.
        Parameters
        ----------
        X : {array-like}, shape (n_samples, n_features)
            Training data. Pass directly as float64, Fortran-contiguous data
            to avoid unnecessary memory duplication. If y is mono-output,
            X can be sparse.
        y : array-like, shape (n_samples,) or (n_samples, n_targets)
            Target values
        Returns
        -------
        self : the fitted estimator.
        """
        y = np.asarray(y, dtype=np.float64)
        if y.shape[0] == 0:
            raise ValueError("y has 0 samples: %r" % y)
        # Pick the concrete estimator to refit at the end and validate that
        # the shape of y matches the (mono- vs multi-task) CV class used.
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        if isinstance(self, ElasticNetCV) or isinstance(self, LassoCV):
            if model_str == 'ElasticNet':
                model = ElasticNet()
            else:
                model = Lasso()
            if y.ndim > 1 and y.shape[1] > 1:
                raise ValueError("For multi-task outputs, use "
                                 "MultiTask%sCV" % (model_str))
            y = column_or_1d(y, warn=True)
        else:
            if sparse.isspmatrix(X):
                raise TypeError("X should be dense but a sparse matrix was"
                                "passed")
            elif y.ndim == 1:
                raise ValueError("For mono-task outputs, use "
                                 "%sCV" % (model_str))
            if model_str == 'ElasticNet':
                model = MultiTaskElasticNet()
            else:
                model = MultiTaskLasso()
        if self.selection not in ["random", "cyclic"]:
            raise ValueError("selection should be either random or cyclic.")
        # This makes sure that there is no duplication in memory.
        # Dealing right with copy_X is important in the following:
        # Multiple functions touch X and subsamples of X and can induce a
        # lot of duplication of memory
        copy_X = self.copy_X and self.fit_intercept
        if isinstance(X, np.ndarray) or sparse.isspmatrix(X):
            # Keep a reference to X
            reference_to_old_X = X
            # Let us not impose fortran ordering or float64 so far: it is
            # not useful for the cross-validation loop and will be done
            # by the model fitting itself
            X = check_array(X, 'csc', copy=False)
            if sparse.isspmatrix(X):
                if (hasattr(reference_to_old_X, "data") and
                   not np.may_share_memory(reference_to_old_X.data, X.data)):
                    # X is a sparse matrix and has been copied
                    copy_X = False
            elif not np.may_share_memory(reference_to_old_X, X):
                # X has been copied
                copy_X = False
            del reference_to_old_X
        else:
            X = check_array(X, 'csc', dtype=np.float64, order='F', copy=copy_X)
            copy_X = False
        if X.shape[0] != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (X.shape[0], y.shape[0]))
        # All LinearModelCV parameters except 'cv' are acceptable
        path_params = self.get_params()
        if 'l1_ratio' in path_params:
            l1_ratios = np.atleast_1d(path_params['l1_ratio'])
            # For the first path, we need to set l1_ratio
            path_params['l1_ratio'] = l1_ratios[0]
        else:
            l1_ratios = [1, ]
        path_params.pop('cv', None)
        path_params.pop('n_jobs', None)
        alphas = self.alphas
        n_l1_ratio = len(l1_ratios)
        if alphas is None:
            # One automatically-computed alpha grid per l1_ratio.
            alphas = []
            for l1_ratio in l1_ratios:
                alphas.append(_alpha_grid(
                    X, y, l1_ratio=l1_ratio,
                    fit_intercept=self.fit_intercept,
                    eps=self.eps, n_alphas=self.n_alphas,
                    normalize=self.normalize,
                    copy_X=self.copy_X))
        else:
            # Making sure alphas is properly ordered.
            alphas = np.tile(np.sort(alphas)[::-1], (n_l1_ratio, 1))
        # We want n_alphas to be the number of alphas used for each l1_ratio.
        n_alphas = len(alphas[0])
        path_params.update({'n_alphas': n_alphas})
        path_params['copy_X'] = copy_X
        # We are not computing in parallel, we can modify X
        # inplace in the folds
        if not (self.n_jobs == 1 or self.n_jobs is None):
            path_params['copy_X'] = False
        # init cross-validation generator
        cv = check_cv(self.cv, X)
        # Compute path for all folds and compute MSE to get the best alpha
        folds = list(cv)
        best_mse = np.inf
        # We do a double for loop folded in one, in order to be able to
        # iterate in parallel on l1_ratio and folds
        jobs = (delayed(_path_residuals)(X, y, train, test, self.path,
                                         path_params, alphas=this_alphas,
                                         l1_ratio=this_l1_ratio, X_order='F',
                                         dtype=np.float64)
                for this_l1_ratio, this_alphas in zip(l1_ratios, alphas)
                for train, test in folds)
        mse_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             backend="threading")(jobs)
        # Reshape to (n_l1_ratio, n_folds, n_alphas), then average over folds.
        mse_paths = np.reshape(mse_paths, (n_l1_ratio, len(folds), -1))
        mean_mse = np.mean(mse_paths, axis=1)
        self.mse_path_ = np.squeeze(np.rollaxis(mse_paths, 2, 1))
        # Pick the (l1_ratio, alpha) pair with the lowest mean CV MSE.
        for l1_ratio, l1_alphas, mse_alphas in zip(l1_ratios, alphas,
                                                   mean_mse):
            i_best_alpha = np.argmin(mse_alphas)
            this_best_mse = mse_alphas[i_best_alpha]
            if this_best_mse < best_mse:
                best_alpha = l1_alphas[i_best_alpha]
                best_l1_ratio = l1_ratio
                best_mse = this_best_mse
        self.l1_ratio_ = best_l1_ratio
        self.alpha_ = best_alpha
        if self.alphas is None:
            self.alphas_ = np.asarray(alphas)
            if n_l1_ratio == 1:
                self.alphas_ = self.alphas_[0]
        # Remove duplicate alphas in case alphas is provided.
        else:
            self.alphas_ = np.asarray(alphas[0])
        # Refit the model with the parameters selected
        common_params = dict((name, value)
                             for name, value in self.get_params().items()
                             if name in model.get_params())
        model.set_params(**common_params)
        model.alpha = best_alpha
        model.l1_ratio = best_l1_ratio
        model.copy_X = copy_X
        model.precompute = False
        model.fit(X, y)
        # Lasso-like subclasses have no l1_ratio parameter; hide the
        # transient attribute set above.
        if not hasattr(self, 'l1_ratio'):
            del self.l1_ratio_
        # Expose the refit model's results on this estimator.
        self.coef_ = model.coef_
        self.intercept_ = model.intercept_
        self.dual_gap_ = model.dual_gap_
        self.n_iter_ = model.n_iter_
        return self
class LassoCV(LinearModelCV, RegressorMixin):
    """Lasso linear model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    The optimization objective for Lasso is::

        (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1

    Read more in the :ref:`User Guide <lasso>`.

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, optional
        Number of alphas along the regularization path.

    alphas : numpy array, optional
        List of alphas where to compute the models.
        If ``None`` alphas are set automatically.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : int, optional
        The maximum number of iterations.

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific cross-validation objects can be passed, see the
        :mod:`sklearn.cross_validation` module for the list of possible
        objects.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.

    positive : bool, optional
        If positive, restrict regression coefficients to be positive.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    fit_intercept : boolean, default True
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation.

    coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).

    intercept_ : float | array, shape (n_targets,)
        Independent term in decision function.

    mse_path_ : array, shape (n_alphas, n_folds)
        Mean square error for the test set on each fold, varying alpha.

    alphas_ : numpy array, shape (n_alphas,)
        The grid of alphas used for fitting.

    dual_gap_ : ndarray, shape ()
        The dual gap at the end of the optimization for the optimal alpha
        (``alpha_``).

    n_iter_ : int
        Number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    See also
    --------
    lars_path
    lasso_path
    LassoLars
    Lasso
    LassoLarsCV
    """
    path = staticmethod(lasso_path)

    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, precompute='auto', max_iter=1000, tol=1e-4,
                 copy_X=True, cv=None, verbose=False, n_jobs=1,
                 positive=False, random_state=None, selection='cyclic'):
        # LassoCV only pins ``path`` to lasso_path; all the hyper-parameters
        # are forwarded unchanged to LinearModelCV.
        super(LassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas, cv=cv,
            fit_intercept=fit_intercept, normalize=normalize,
            precompute=precompute, max_iter=max_iter, tol=tol,
            copy_X=copy_X, verbose=verbose, n_jobs=n_jobs,
            positive=positive, random_state=random_state,
            selection=selection)
class ElasticNetCV(LinearModelCV, RegressorMixin):
    """Elastic Net model with iterative fitting along a regularization path

    The best model is selected by cross-validation.

    Read more in the :ref:`User Guide <elastic_net>`.

    Parameters
    ----------
    l1_ratio : float, optional
        float between 0 and 1 passed to ElasticNet (scaling between
        l1 and l2 penalties). For ``l1_ratio = 0``
        the penalty is an L2 penalty. For ``l1_ratio = 1`` it is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1 and L2.
        This parameter can be a list, in which case the different
        values are tested by cross-validation and the one giving the best
        prediction score is used. Note that a good choice of list of
        values for l1_ratio is often to put more values close to 1
        (i.e. Lasso) and less close to 0 (i.e. Ridge), as in ``[.1, .5, .7,
        .9, .95, .99, 1]``.

    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.

    n_alphas : int, optional
        Number of alphas along the regularization path, used for each l1_ratio.

    alphas : numpy array, optional
        List of alphas where to compute the models.
        If None alphas are set automatically.

    precompute : True | False | 'auto' | array-like
        Whether to use a precomputed Gram matrix to speed up
        calculations. If set to ``'auto'`` let us decide. The Gram
        matrix can also be passed as argument.

    max_iter : int, optional
        The maximum number of iterations.

    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.

    cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific cross-validation objects can be passed, see the
        :mod:`sklearn.cross_validation` module for the list of possible
        objects.

    verbose : bool or integer
        Amount of verbosity.

    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs.

    positive : bool, optional
        When set to ``True``, forces the coefficients to be positive.

    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.

    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.

    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.

    Attributes
    ----------
    alpha_ : float
        The amount of penalization chosen by cross validation.

    l1_ratio_ : float
        The compromise between l1 and l2 penalization chosen by
        cross validation.

    coef_ : array, shape (n_features,) | (n_targets, n_features)
        Parameter vector (w in the cost function formula).

    intercept_ : float | array, shape (n_targets, n_features)
        Independent term in the decision function.

    mse_path_ : array, shape (n_l1_ratio, n_alpha, n_folds)
        Mean square error for the test set on each fold, varying l1_ratio and
        alpha.

    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio.

    n_iter_ : int
        Number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Notes
    -----
    See examples/linear_model/lasso_path_with_crossvalidation.py
    for an example.

    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.

    The parameter l1_ratio corresponds to alpha in the glmnet R package
    while alpha corresponds to the lambda parameter in glmnet.
    More specifically, the optimization objective is::

        1 / (2 * n_samples) * ||y - Xw||^2_2 +
        + alpha * l1_ratio * ||w||_1
        + 0.5 * alpha * (1 - l1_ratio) * ||w||^2_2

    If you are interested in controlling the L1 and L2 penalty
    separately, keep in mind that this is equivalent to::

        a * L1 + b * L2

    for::

        alpha = a + b and l1_ratio = a / (a + b).

    See also
    --------
    enet_path
    ElasticNet
    """
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False, precompute='auto',
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, positive=False, random_state=None,
                 selection='cyclic'):
        # Mixing / path-grid parameters.
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        # Solver parameters.
        self.precompute = precompute
        self.max_iter = max_iter
        self.tol = tol
        self.positive = positive
        self.random_state = random_state
        self.selection = selection
        # Data-handling parameters.
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        # Cross-validation parameters.
        self.cv = cv
        self.verbose = verbose
        self.n_jobs = n_jobs
###############################################################################
# Multi Task ElasticNet and Lasso models (with joint feature selection)
class MultiTaskElasticNet(Lasso):
    r"""Multi-task ElasticNet model trained with L1/L2 mixed-norm as regularizer
    The optimization objective for MultiTaskElasticNet is::
        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2
    Where::
        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}
    i.e. the sum of norm of each row.
    Read more in the :ref:`User Guide <multi_task_lasso>`.
    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0
    l1_ratio : float
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it
        is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.
    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.
    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula). If a 1D y is \
        passed in at fit (non multi-task usage), ``coef_`` is then a 1D array
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.
    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNet(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNet(alpha=0.1, copy_X=True, fit_intercept=True,
            l1_ratio=0.5, max_iter=1000, normalize=False, random_state=None,
            selection='cyclic', tol=0.0001, warm_start=False)
    >>> print(clf.coef_)
    [[ 0.45663524  0.45612256]
     [ 0.45663524  0.45612256]]
    >>> print(clf.intercept_)
    [ 0.0872422  0.0872422]
    See also
    --------
    ElasticNet, MultiTaskLasso
    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """
    def __init__(self, alpha=1.0, l1_ratio=0.5, fit_intercept=True,
                 normalize=False, copy_X=True, max_iter=1000, tol=1e-4,
                 warm_start=False, random_state=None, selection='cyclic'):
        # scikit-learn convention: __init__ only stores the given
        # hyper-parameters; all validation and work happens in fit().
        self.l1_ratio = l1_ratio
        self.alpha = alpha
        # coef_ is the warm-start state; None means "start from zeros".
        self.coef_ = None
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.copy_X = copy_X
        self.tol = tol
        self.warm_start = warm_start
        self.random_state = random_state
        self.selection = selection
    def fit(self, X, y):
        """Fit multi-task ElasticNet/Lasso model with coordinate descent.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features)
            Data
        y : ndarray, shape (n_samples, n_tasks)
            Target

        Returns
        -------
        self : object
            The fitted estimator.

        Notes
        -----
        Coordinate descent is an algorithm that considers each column of
        data at a time hence it will automatically convert the X input
        as a Fortran-contiguous numpy array if necessary.
        To avoid memory re-allocation it is advised to allocate the
        initial data in memory directly using that format.
        """
        # X and y must be of type float64.  Fortran (column-major) order lets
        # the Cython solver walk each feature column contiguously.
        X = check_array(X, dtype=np.float64, order='F',
                        copy=self.copy_X and self.fit_intercept)
        y = np.asarray(y, dtype=np.float64)
        # This fit() is shared with the MultiTaskLasso subclass; pick the
        # right mono-output estimator name for the error message below.
        # (MultiTaskLasso sets self.l1_ratio in __init__, so the hasattr
        # check distinguishes based on the presence of the attribute only
        # for subclasses that deliberately omit it.)
        if hasattr(self, 'l1_ratio'):
            model_str = 'ElasticNet'
        else:
            model_str = 'Lasso'
        if y.ndim == 1:
            raise ValueError("For mono-task outputs, use %s" % model_str)
        n_samples, n_features = X.shape
        _, n_tasks = y.shape
        if n_samples != y.shape[0]:
            raise ValueError("X and y have inconsistent dimensions (%d != %d)"
                             % (n_samples, y.shape[0]))
        # Center (and optionally scale) in place; the means/std are kept to
        # recover the intercept after the solver runs on centered data.
        X, y, X_mean, y_mean, X_std = center_data(
            X, y, self.fit_intercept, self.normalize, copy=False)
        # Fresh start unless warm_start is requested and a previous solution
        # exists.
        if not self.warm_start or self.coef_ is None:
            self.coef_ = np.zeros((n_tasks, n_features), dtype=np.float64,
                                  order='F')
        # The Cython solver works on the unscaled objective, so the penalty
        # strengths are pre-multiplied by n_samples here.
        l1_reg = self.alpha * self.l1_ratio * n_samples
        l2_reg = self.alpha * (1.0 - self.l1_ratio) * n_samples
        self.coef_ = np.asfortranarray(self.coef_)  # coef contiguous in memory
        if self.selection not in ['random', 'cyclic']:
            raise ValueError("selection should be either random or cyclic.")
        random = (self.selection == 'random')
        self.coef_, self.dual_gap_, self.eps_, self.n_iter_ = \
            cd_fast.enet_coordinate_descent_multi_task(
                self.coef_, l1_reg, l2_reg, X, y, self.max_iter, self.tol,
                check_random_state(self.random_state), random)
        # Translate the solution on centered data back to the original scale.
        self._set_intercept(X_mean, y_mean, X_std)
        # The solver stops at max_iter even if the duality gap is still above
        # the tolerance; warn rather than fail in that case.
        if self.dual_gap_ > self.eps_:
            warnings.warn('Objective did not converge, you might want'
                          ' to increase the number of iterations')
        # return self for chaining fit and predict calls
        return self
class MultiTaskLasso(MultiTaskElasticNet):
    r"""Multi-task Lasso model trained with an L1/L2 mixed-norm regularizer.

    The optimization objective for MultiTaskLasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of the norms of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    alpha : float, optional
        Constant that multiplies the L1/L2 term. Defaults to 1.0
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    warm_start : bool, optional
        When set to ``True``, reuse the solution of the previous call to fit as
        initialization, otherwise, just erase the previous solution.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    coef_ : array, shape (n_tasks, n_features)
        parameter vector (W in the cost function formula)
    intercept_ : array, shape (n_tasks,)
        independent term in decision function.
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskLasso(alpha=0.1)
    >>> clf.fit([[0,0], [1, 1], [2, 2]], [[0, 0], [1, 1], [2, 2]])
    MultiTaskLasso(alpha=0.1, copy_X=True, fit_intercept=True, max_iter=1000,
            normalize=False, random_state=None, selection='cyclic', tol=0.0001,
            warm_start=False)
    >>> print(clf.coef_)
    [[ 0.89393398  0.        ]
     [ 0.89393398  0.        ]]
    >>> print(clf.intercept_)
    [ 0.10606602  0.10606602]

    See also
    --------
    Lasso, MultiTaskElasticNet

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """

    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=1000, tol=1e-4, warm_start=False,
                 random_state=None, selection='cyclic'):
        # Store hyper-parameters verbatim (scikit-learn convention: no
        # validation or computation in __init__).
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.max_iter = max_iter
        self.tol = tol
        self.warm_start = warm_start
        self.random_state = random_state
        self.selection = selection
        # A multi-task Lasso is a multi-task ElasticNet with a pure L1/L2
        # penalty: fixing the mixing parameter at 1.0 lets the parent class
        # fit() be reused unchanged.
        self.l1_ratio = 1.0
        # Warm-start state for coordinate descent; None means "start from
        # zeros" (set by the inherited fit()).
        self.coef_ = None
class MultiTaskElasticNetCV(LinearModelCV, RegressorMixin):
    r"""Multi-task L1/L2 ElasticNet with built-in cross-validation.

    The optimization objective for MultiTaskElasticNet is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2
        + alpha * l1_ratio * ||W||_21
        + 0.5 * alpha * (1 - l1_ratio) * ||W||_Fro^2

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of the norms of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    alphas : array-like, optional
        List of alphas where to compute the models.
        If not provided, set automatically.
    n_alphas : int, optional
        Number of alphas along the regularization path
    l1_ratio : float or array of floats
        The ElasticNet mixing parameter, with 0 < l1_ratio <= 1.
        For l1_ratio = 0 the penalty is an L1/L2 penalty. For l1_ratio = 1 it
        is an L1 penalty.
        For ``0 < l1_ratio < 1``, the penalty is a combination of L1/L2 and L2.
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        The maximum number of iterations
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific cross-validation objects can be passed, see the
        :mod:`sklearn.cross_validation` module for the list of possible
        objects.
    verbose : bool or integer
        Amount of verbosity.
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs. Note that this is used only if multiple values for
        l1_ratio are given.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.
    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
    alpha_ : float
        The amount of penalization chosen by cross validation
    mse_path_ : array, shape (n_alphas, n_folds) or \
                (n_l1_ratio, n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha
    alphas_ : numpy array, shape (n_alphas,) or (n_l1_ratio, n_alphas)
        The grid of alphas used for fitting, for each l1_ratio
    l1_ratio_ : float
        best l1_ratio obtained by cross-validation.
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    Examples
    --------
    >>> from sklearn import linear_model
    >>> clf = linear_model.MultiTaskElasticNetCV()
    >>> clf.fit([[0,0], [1, 1], [2, 2]],
    ...         [[0, 0], [1, 1], [2, 2]])
    ... #doctest: +NORMALIZE_WHITESPACE
    MultiTaskElasticNetCV(alphas=None, copy_X=True, cv=None, eps=0.001,
           fit_intercept=True, l1_ratio=0.5, max_iter=1000, n_alphas=100,
           n_jobs=1, normalize=False, random_state=None, selection='cyclic',
           tol=0.0001, verbose=0)
    >>> print(clf.coef_)
    [[ 0.52875032  0.46958558]
     [ 0.52875032  0.46958558]]
    >>> print(clf.intercept_)
    [ 0.00166409  0.00166409]

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskLassoCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """

    # The generic LinearModelCV.fit() machinery calls this static method to
    # compute the regularization path on each fold.
    path = staticmethod(enet_path)

    def __init__(self, l1_ratio=0.5, eps=1e-3, n_alphas=100, alphas=None,
                 fit_intercept=True, normalize=False,
                 max_iter=1000, tol=1e-4, cv=None, copy_X=True,
                 verbose=0, n_jobs=1, random_state=None, selection='cyclic'):
        # Hyper-parameters are stored verbatim (scikit-learn convention);
        # alpha-grid construction and the cross-validation loop happen in
        # the inherited fit().
        self.l1_ratio = l1_ratio
        self.eps = eps
        self.n_alphas = n_alphas
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.max_iter = max_iter
        self.tol = tol
        self.cv = cv
        self.copy_X = copy_X
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.selection = selection
class MultiTaskLassoCV(LinearModelCV, RegressorMixin):
    r"""Multi-task L1/L2 Lasso with built-in cross-validation.

    The optimization objective for MultiTaskLasso is::

        (1 / (2 * n_samples)) * ||Y - XW||^Fro_2 + alpha * ||W||_21

    Where::

        ||W||_21 = \sum_i \sqrt{\sum_j w_{ij}^2}

    i.e. the sum of the norms of each row.

    Read more in the :ref:`User Guide <multi_task_lasso>`.

    Parameters
    ----------
    eps : float, optional
        Length of the path. ``eps=1e-3`` means that
        ``alpha_min / alpha_max = 1e-3``.
    alphas : array-like, optional
        List of alphas where to compute the models.
        If not provided, set automatically.
    n_alphas : int, optional
        Number of alphas along the regularization path
    fit_intercept : boolean
        whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If ``True``, the regressors X will be normalized before regression.
    copy_X : boolean, optional, default True
        If ``True``, X will be copied; else, it may be overwritten.
    max_iter : int, optional
        The maximum number of iterations.
    tol : float, optional
        The tolerance for the optimization: if the updates are
        smaller than ``tol``, the optimization code checks the
        dual gap for optimality and continues until it is smaller
        than ``tol``.
    cv : integer or cross-validation generator, optional
        If an integer is passed, it is the number of fold (default 3).
        Specific cross-validation objects can be passed, see the
        :mod:`sklearn.cross_validation` module for the list of possible
        objects.
    verbose : bool or integer
        Amount of verbosity.
    n_jobs : integer, optional
        Number of CPUs to use during the cross validation. If ``-1``, use
        all the CPUs. Note that this is used only if multiple values for
        l1_ratio are given.
    selection : str, default 'cyclic'
        If set to 'random', a random coefficient is updated every iteration
        rather than looping over features sequentially by default. This
        (setting to 'random') often leads to significantly faster convergence
        especially when tol is higher than 1e-4.
    random_state : int, RandomState instance, or None (default)
        The seed of the pseudo random number generator that selects
        a random feature to update. Useful only when selection is set to
        'random'.

    Attributes
    ----------
    intercept_ : array, shape (n_tasks,)
        Independent term in decision function.
    coef_ : array, shape (n_tasks, n_features)
        Parameter vector (W in the cost function formula).
    alpha_ : float
        The amount of penalization chosen by cross validation
    mse_path_ : array, shape (n_alphas, n_folds)
        mean square error for the test set on each fold, varying alpha
    alphas_ : numpy array, shape (n_alphas,)
        The grid of alphas used for fitting.
    n_iter_ : int
        number of iterations run by the coordinate descent solver to reach
        the specified tolerance for the optimal alpha.

    See also
    --------
    MultiTaskElasticNet
    ElasticNetCV
    MultiTaskElasticNetCV

    Notes
    -----
    The algorithm used to fit the model is coordinate descent.
    To avoid unnecessary memory duplication the X argument of the fit method
    should be directly passed as a Fortran-contiguous numpy array.
    """

    # The pure-L1 path function; LinearModelCV.fit() uses it per fold.
    path = staticmethod(lasso_path)

    def __init__(self, eps=1e-3, n_alphas=100, alphas=None, fit_intercept=True,
                 normalize=False, max_iter=1000, tol=1e-4, copy_X=True,
                 cv=None, verbose=False, n_jobs=1, random_state=None,
                 selection='cyclic'):
        # Parameter storage is delegated to the shared LinearModelCV
        # initializer; the Lasso-specific behavior comes entirely from the
        # `path` attribute above.
        super(MultiTaskLassoCV, self).__init__(
            eps=eps, n_alphas=n_alphas, alphas=alphas,
            fit_intercept=fit_intercept, normalize=normalize,
            max_iter=max_iter, tol=tol, copy_X=copy_X,
            cv=cv, verbose=verbose, n_jobs=n_jobs,
            random_state=random_state, selection=selection)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.