repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
manashmndl/scikit-learn | sklearn/tests/test_grid_search.py | 68 | 28778 | """
Testing for grid search module (sklearn.grid_search)
"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV,
ParameterGrid, ParameterSampler,
ChangedBehaviorWarning)
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
# Neither of the following two estimators inherit from BaseEstimator,
# to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
    """Minimal stand-in estimator used to exercise the search machinery.

    Deliberately does not inherit from BaseEstimator, so the search
    classes are tested against a plain duck-typed estimator.
    """

    def __init__(self, foo_param=0):
        self.foo_param = foo_param

    def fit(self, X, Y):
        # X and Y must have the same number of samples.
        assert_true(len(X) == len(Y))
        return self

    def predict(self, T):
        # Only the sample count matters for these tests.
        return T.shape[0]

    predict_proba = predict
    decision_function = predict
    transform = predict

    def score(self, X=None, Y=None):
        # Perfect score iff foo_param exceeds 1, giving the grid a clear
        # winner (with ties between all foo_param > 1 candidates).
        return 1. if self.foo_param > 1 else 0.

    def get_params(self, deep=False):
        return {'foo_param': self.foo_param}

    def set_params(self, **params):
        self.foo_param = params['foo_param']
        return self
class LinearSVCNoScore(LinearSVC):
    """A LinearSVC classifier that has no score method."""
    @property
    def score(self):
        # Accessing .score raises AttributeError so that
        # hasattr(estimator, 'score') is False, mimicking an estimator
        # without a score method.
        raise AttributeError


# Small deterministic 2-class dataset shared by several tests below.
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
    """Check that iterating a grid and indexing it yield the same points."""
    indexed_points = [grid[idx] for idx in range(len(grid))]
    assert_equal(list(grid), indexed_points)
def test_parameter_grid():
    # Test basic properties of ParameterGrid.
    params1 = {"foo": [1, 2, 3]}
    grid1 = ParameterGrid(params1)
    assert_true(isinstance(grid1, Iterable))
    assert_true(isinstance(grid1, Sized))
    assert_equal(len(grid1), 3)
    assert_grid_iter_equals_getitem(grid1)

    # Two parameters: the grid is the cartesian product (2 * 3 = 6 points).
    params2 = {"foo": [4, 2],
               "bar": ["ham", "spam", "eggs"]}
    grid2 = ParameterGrid(params2)
    assert_equal(len(grid2), 6)

    # loop to assert we can iterate over the grid multiple times
    for i in xrange(2):
        # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
        points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
        assert_equal(points,
                     set(("bar", x, "foo", y)
                         for x, y in product(params2["bar"], params2["foo"])))
    assert_grid_iter_equals_getitem(grid2)

    # Special case: empty grid (useful to get default estimator settings)
    empty = ParameterGrid({})
    assert_equal(len(empty), 1)
    assert_equal(list(empty), [{}])
    assert_grid_iter_equals_getitem(empty)
    assert_raises(IndexError, lambda: empty[1])

    # A list of grids: lengths add up, and the empty sub-grid contributes {}.
    has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
    assert_equal(len(has_empty), 4)
    assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
    assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
    # Test that the best estimator contains the right value for foo_param
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
    # make sure it selects the smallest parameter in case of ties
    old_stdout = sys.stdout
    sys.stdout = StringIO()  # swallow the verbose output produced by fit
    grid_search.fit(X, y)
    sys.stdout = old_stdout
    # foo_param 2 and 3 tie with a perfect score; the smallest must win.
    assert_equal(grid_search.best_estimator_.foo_param, 2)

    # grid_scores_ entries are ordered like the candidate list.
    for i, foo_i in enumerate([1, 2, 3]):
        assert_true(grid_search.grid_scores_[i][0]
                    == {'foo_param': foo_i})
    # Smoke test the score etc:
    grid_search.score(X, y)
    grid_search.predict_proba(X)
    grid_search.decision_function(X)
    grid_search.transform(X)

    # Test exception handling on scoring
    grid_search.scoring = 'sklearn'  # not a valid scoring string
    assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_grid_search_no_score():
    # Test grid-search on classifier that has no score function.
    clf = LinearSVC(random_state=0)
    X, y = make_blobs(random_state=0, centers=2)
    Cs = [.1, 1, 10]
    clf_no_score = LinearSVCNoScore(random_state=0)
    grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
    grid_search.fit(X, y)

    grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
                                        scoring='accuracy')
    # smoketest grid search
    grid_search_no_score.fit(X, y)

    # check that best params are equal
    assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
    # check that we can call score and that it gives the correct result
    assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))

    # giving no scoring function raises an error
    grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
    assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
                         [[1]])
def test_grid_search_score_method():
    """Check search.score() against the four scoring/score-method combos."""
    X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
                               random_state=0)
    clf = LinearSVC(random_state=0)
    grid = {'C': [.1]}

    search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
    search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
    search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
                                              scoring='roc_auc').fit(X, y)
    search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)

    # Check warning only occurs in situation where behavior changed:
    # estimator requires score method to compete with scoring parameter
    score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y)
    score_accuracy = assert_warns(ChangedBehaviorWarning,
                                  search_accuracy.score, X, y)
    score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score,
                                            X, y)
    score_auc = assert_warns(ChangedBehaviorWarning,
                             search_auc.score, X, y)
    # ensure the test is sane: flip_y=.2 prevents a perfect score
    assert_true(score_auc < 1.0)
    assert_true(score_accuracy < 1.0)
    assert_not_equal(score_auc, score_accuracy)
    assert_almost_equal(score_accuracy, score_no_scoring)
    assert_almost_equal(score_auc, score_no_score_auc)
def test_trivial_grid_scores():
    # Test search over a "grid" with only one point.
    # Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {'foo_param': [1]})
    grid_search.fit(X, y)
    assert_true(hasattr(grid_search, "grid_scores_"))

    random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
    random_search.fit(X, y)
    assert_true(hasattr(random_search, "grid_scores_"))


def test_no_refit():
    # Test that grid search can be used for model selection only
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
    grid_search.fit(X, y)
    # best_params_ must be available even though no final model was refit.
    assert_true(hasattr(grid_search, "best_params_"))


def test_grid_search_error():
    # Test that grid search will capture errors on data with different
    # length
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    # 180 samples in X but 200 labels in y -> ValueError.
    assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_iid():
    # test the iid parameter
    # noise-free simple 2d-data
    X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
                      cluster_std=0.1, shuffle=False, n_samples=80)
    # split dataset into two folds that are not iid
    # first one contains data of all 4 blobs, second only from two.
    # NOTE(review): np.bool is deprecated in recent NumPy; np.bool_ or plain
    # bool would be needed there -- verify against the targeted NumPy version.
    mask = np.ones(X.shape[0], dtype=np.bool)
    mask[np.where(y == 1)[0][::2]] = 0
    mask[np.where(y == 2)[0][::2]] = 0
    # this leads to perfect classification on one fold and a score of 1/3 on
    # the other
    svm = SVC(kernel='linear')
    # create "cv" for splits
    cv = [[mask, ~mask], [~mask, mask]]
    # once with iid=True (default)
    grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv)
    grid_search.fit(X, y)
    first = grid_search.grid_scores_[0]
    assert_equal(first.parameters['C'], 1)
    assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
    # for first split, 1/4 of dataset is in test, for second 3/4.
    # take weighted average
    assert_almost_equal(first.mean_validation_score,
                        1 * 1. / 4. + 1. / 3. * 3. / 4.)

    # once with iid=False
    grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv,
                               iid=False)
    grid_search.fit(X, y)
    first = grid_search.grid_scores_[0]
    assert_equal(first.parameters['C'], 1)
    # scores are the same as above
    assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.])
    # averaged score is just mean of scores
    assert_almost_equal(first.mean_validation_score,
                        np.mean(first.cv_validation_scores))
def test_grid_search_one_grid_point():
    # A single-point grid must be equivalent to fitting the estimator
    # directly with those parameters.
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
    param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}

    clf = SVC()
    cv = GridSearchCV(clf, param_dict)
    cv.fit(X_, y_)

    clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
    clf.fit(X_, y_)

    assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)


def test_grid_search_bad_param_grid():
    # Malformed param grids must be rejected at construction time:
    # a scalar instead of a sequence ...
    param_dict = {"C": 1.0}
    clf = SVC()
    assert_raises(ValueError, GridSearchCV, clf, param_dict)

    # ... an empty sequence ...
    param_dict = {"C": []}
    clf = SVC()
    assert_raises(ValueError, GridSearchCV, clf, param_dict)

    # ... or a 2d array of values.
    param_dict = {"C": np.ones(6).reshape(3, 2)}
    clf = SVC()
    assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
    # Test that grid search works with both dense and sparse matrices
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)

    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    cv.fit(X_[:180], y_[:180])
    y_pred = cv.predict(X_[180:])
    C = cv.best_estimator_.C

    X_ = sp.csr_matrix(X_)
    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    cv.fit(X_[:180].tocoo(), y_[:180])
    y_pred2 = cv.predict(X_[180:])
    C2 = cv.best_estimator_.C

    # dense and sparse fits should mostly agree and pick the same C
    assert_true(np.mean(y_pred == y_pred2) >= .9)
    assert_equal(C, C2)


def test_grid_search_sparse_scoring():
    # Same check as above but with an explicit scoring function.
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)

    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
    cv.fit(X_[:180], y_[:180])
    y_pred = cv.predict(X_[180:])
    C = cv.best_estimator_.C

    X_ = sp.csr_matrix(X_)
    clf = LinearSVC()
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
    cv.fit(X_[:180], y_[:180])
    y_pred2 = cv.predict(X_[180:])
    C2 = cv.best_estimator_.C

    assert_array_equal(y_pred, y_pred2)
    assert_equal(C, C2)
    # Smoke test the score
    # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
    #                            cv.score(X_[:180], y[:180]))

    # test loss where greater is worse
    def f1_loss(y_true_, y_pred_):
        return -f1_score(y_true_, y_pred_)
    F1Loss = make_scorer(f1_loss, greater_is_better=False)
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
    cv.fit(X_[:180], y_[:180])
    y_pred3 = cv.predict(X_[180:])
    C3 = cv.best_estimator_.C

    # the negated loss must select the same model as the plain f1 score
    assert_equal(C, C3)
    assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
    # Test that grid search works when the input features are given in the
    # form of a precomputed kernel matrix
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)

    # compute the training kernel matrix corresponding to the linear kernel
    K_train = np.dot(X_[:180], X_[:180].T)
    y_train = y_[:180]

    clf = SVC(kernel='precomputed')
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    cv.fit(K_train, y_train)

    assert_true(cv.best_score_ >= 0)

    # compute the test kernel matrix
    K_test = np.dot(X_[180:], X_[:180].T)
    y_test = y_[180:]

    y_pred = cv.predict(K_test)

    assert_true(np.mean(y_pred == y_test) >= 0)

    # test error is raised when the precomputed kernel is not array-like
    # or sparse
    assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)


def test_grid_search_precomputed_kernel_error_nonsquare():
    # Test that grid search returns an error with a non-square precomputed
    # training kernel matrix
    K_train = np.zeros((10, 20))
    y_train = np.ones((10, ))
    clf = SVC(kernel='precomputed')
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    assert_raises(ValueError, cv.fit, K_train, y_train)


def test_grid_search_precomputed_kernel_error_kernel_function():
    # Test that grid search returns an error when using a kernel_function
    X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
    kernel_function = lambda x1, x2: np.dot(x1, x2.T)
    clf = SVC(kernel=kernel_function)
    cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
    assert_raises(ValueError, cv.fit, X_, y_)
class BrokenClassifier(BaseEstimator):
    """Broken classifier that cannot be fit twice"""

    def __init__(self, parameter=None):
        self.parameter = parameter

    def fit(self, X, y):
        # Fails on any second fit of the same instance: has_been_fit_
        # persists after the first call. (Deliberately does not return
        # self -- this fixture only needs to detect double fitting.)
        assert_true(not hasattr(self, 'has_been_fit_'))
        self.has_been_fit_ = True

    def predict(self, X):
        return np.zeros(X.shape[0])
def test_refit():
    # Regression test for bug in refitting
    # Simulates re-fitting a broken estimator; this used to break with
    # sparse SVMs.
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)

    clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
                       scoring="precision", refit=True)
    clf.fit(X, y)


def test_gridsearch_nd():
    # Search must accept multi-dimensional X (4d) and y (3d) untouched.
    X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
    y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
    # the checking classifier verifies the trailing dimensions survive CV
    check_X = lambda x: x.shape[1:] == (5, 3, 2)
    check_y = lambda x: x.shape[1:] == (7, 11)
    clf = CheckingClassifier(check_X=check_X, check_y=check_y)
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
    grid_search.fit(X_4d, y_3d).score(X, y)
    assert_true(hasattr(grid_search, "grid_scores_"))


def test_X_as_list():
    # Pass X as list in GridSearchCV
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)

    clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
    cv = KFold(n=len(X), n_folds=3)
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
    grid_search.fit(X.tolist(), y).score(X, y)
    assert_true(hasattr(grid_search, "grid_scores_"))


def test_y_as_list():
    # Pass y as list in GridSearchCV
    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)

    clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
    cv = KFold(n=len(X), n_folds=3)
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
    grid_search.fit(X, y.tolist()).score(X, y)
    assert_true(hasattr(grid_search, "grid_scores_"))
def test_pandas_input():
    # check cross_val_score doesn't destroy pandas dataframe
    types = [(MockDataFrame, MockDataFrame)]
    try:
        from pandas import Series, DataFrame
        types.append((DataFrame, Series))
    except ImportError:
        # pandas is optional; the mock types are still exercised
        pass

    X = np.arange(100).reshape(10, 10)
    y = np.array([0] * 5 + [1] * 5)

    for InputFeatureType, TargetType in types:
        # X dataframe, y series
        X_df, y_ser = InputFeatureType(X), TargetType(y)
        # the checking classifier asserts the frame/series types reach fit
        check_df = lambda x: isinstance(x, InputFeatureType)
        check_series = lambda x: isinstance(x, TargetType)
        clf = CheckingClassifier(check_X=check_df, check_y=check_series)

        grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
        grid_search.fit(X_df, y_ser).score(X_df, y_ser)
        grid_search.predict(X_df)
        assert_true(hasattr(grid_search, "grid_scores_"))
def test_unsupervised_grid_search():
    # test grid-search with unsupervised estimator
    X, y = make_blobs(random_state=0)
    km = KMeans(random_state=0)
    grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
                               scoring='adjusted_rand_score')
    grid_search.fit(X, y)
    # ARI can find the right number :)
    assert_equal(grid_search.best_params_["n_clusters"], 3)

    # Now without a score, and without y
    grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
    grid_search.fit(X)
    # KMeans' own score keeps improving with more clusters, so 4 wins.
    assert_equal(grid_search.best_params_["n_clusters"], 4)


def test_gridsearch_no_predict():
    # test grid-search with an estimator without predict.
    # slight duplication of a test from KDE
    def custom_scoring(estimator, X):
        # reward exactly one bandwidth so the winner is unambiguous
        return 42 if estimator.bandwidth == .1 else 0
    X, _ = make_blobs(cluster_std=.1, random_state=1,
                      centers=[[0, 1], [1, 0], [0, 0]])
    search = GridSearchCV(KernelDensity(),
                          param_grid=dict(bandwidth=[.01, .1, 1]),
                          scoring=custom_scoring)
    search.fit(X)
    assert_equal(search.best_params_['bandwidth'], .1)
    assert_equal(search.best_score_, 42)
def test_param_sampler():
    # test basic properties of param sampler
    param_distributions = {"kernel": ["rbf", "linear"],
                           "C": uniform(0, 1)}
    sampler = ParameterSampler(param_distributions=param_distributions,
                               n_iter=10, random_state=0)
    samples = [x for x in sampler]
    assert_equal(len(samples), 10)
    # each sample draws from the list and from the scipy distribution
    for sample in samples:
        assert_true(sample["kernel"] in ["rbf", "linear"])
        assert_true(0 <= sample["C"] <= 1)
def test_randomized_search_grid_scores():
    # Make a dataset with a lot of noise to get various kind of prediction
    # errors across CV folds and parameter settings
    X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
                               random_state=0)

    # XXX: as of today (scipy 0.12) it's not possible to set the random seed
    # of scipy.stats distributions: the assertions in this test should thus
    # not depend on the randomization
    params = dict(C=expon(scale=10),
                  gamma=expon(scale=0.1))
    n_cv_iter = 3
    n_search_iter = 30
    search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter,
                                param_distributions=params, iid=False)
    search.fit(X, y)
    assert_equal(len(search.grid_scores_), n_search_iter)

    # Check consistency of the structure of each cv_score item
    for cv_score in search.grid_scores_:
        assert_equal(len(cv_score.cv_validation_scores), n_cv_iter)
        # Because we set iid to False, the mean_validation score is the
        # mean of the fold mean scores instead of the aggregate sample-wise
        # mean score
        assert_almost_equal(np.mean(cv_score.cv_validation_scores),
                            cv_score.mean_validation_score)
        assert_equal(list(sorted(cv_score.parameters.keys())),
                     list(sorted(params.keys())))

    # Check the consistency with the best_score_ and best_params_ attributes
    sorted_grid_scores = list(sorted(search.grid_scores_,
                              key=lambda x: x.mean_validation_score))
    best_score = sorted_grid_scores[-1].mean_validation_score
    assert_equal(search.best_score_, best_score)

    tied_best_params = [s.parameters for s in sorted_grid_scores
                        if s.mean_validation_score == best_score]
    assert_true(search.best_params_ in tied_best_params,
                "best_params_={0} is not part of the"
                " tied best models: {1}".format(
                    search.best_params_, tied_best_params))
def test_grid_search_score_consistency():
    # test that correct scores are used
    clf = LinearSVC(random_state=0)
    X, y = make_blobs(random_state=0, centers=2)
    Cs = [.1, 1, 10]
    for score in ['f1', 'roc_auc']:
        grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score)
        grid_search.fit(X, y)
        cv = StratifiedKFold(n_folds=3, y=y)
        for C, scores in zip(Cs, grid_search.grid_scores_):
            clf.set_params(C=C)
            scores = scores[2]  # get the separate runs from grid scores
            i = 0
            for train, test in cv:
                clf.fit(X[train], y[train])
                if score == "f1":
                    correct_score = f1_score(y[test], clf.predict(X[test]))
                elif score == "roc_auc":
                    dec = clf.decision_function(X[test])
                    correct_score = roc_auc_score(y[test], dec)
                # the fold score stored by the search must match a manual
                # refit + metric computation on the same split
                assert_almost_equal(correct_score, scores[i])
                i += 1
def test_pickle():
    # Test that a fit search can be pickled
    clf = MockClassifier()
    grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
    grid_search.fit(X, y)
    pickle.dumps(grid_search)  # smoke test

    random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
                                       refit=True, n_iter=3)
    random_search.fit(X, y)
    pickle.dumps(random_search)  # smoke test


def test_grid_search_with_multioutput_data():
    # Test search with multi-output estimator

    X, y = make_multilabel_classification(return_indicator=True,
                                          random_state=0)

    est_parameters = {"max_depth": [1, 2, 3, 4]}
    cv = KFold(y.shape[0], random_state=0)

    estimators = [DecisionTreeRegressor(random_state=0),
                  DecisionTreeClassifier(random_state=0)]

    # Test with grid search cv
    for est in estimators:
        grid_search = GridSearchCV(est, est_parameters, cv=cv)
        grid_search.fit(X, y)
        for parameters, _, cv_validation_scores in grid_search.grid_scores_:
            est.set_params(**parameters)

            for i, (train, test) in enumerate(cv):
                est.fit(X[train], y[train])
                correct_score = est.score(X[test], y[test])
                # stored fold score must equal a manual refit on that fold
                assert_almost_equal(correct_score,
                                    cv_validation_scores[i])

    # Test with a randomized search
    for est in estimators:
        random_search = RandomizedSearchCV(est, est_parameters,
                                           cv=cv, n_iter=3)
        random_search.fit(X, y)
        for parameters, _, cv_validation_scores in random_search.grid_scores_:
            est.set_params(**parameters)

            for i, (train, test) in enumerate(cv):
                est.fit(X[train], y[train])
                correct_score = est.score(X[test], y[test])
                assert_almost_equal(correct_score,
                                    cv_validation_scores[i])
def test_predict_proba_disabled():
    # Test predict_proba when disabled on estimator.
    X = np.arange(20).reshape(5, -1)
    y = [0, 0, 1, 1, 1]
    clf = SVC(probability=False)
    gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
    # the search must not expose predict_proba when the estimator doesn't
    assert_false(hasattr(gs, "predict_proba"))


def test_grid_search_allows_nans():
    # Test GridSearchCV with Imputer
    X = np.arange(20, dtype=np.float64).reshape(5, -1)
    X[2, :] = np.nan
    y = [0, 0, 1, 1, 1]
    # the imputer in the pipeline removes NaNs before the classifier sees X
    p = Pipeline([
        ('imputer', Imputer(strategy='mean', missing_values='NaN')),
        ('classifier', MockClassifier()),
    ])
    GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
    """Classifier that raises a ValueError on fit()"""

    # Parameter value that triggers the deliberate fit failure.
    FAILING_PARAMETER = 2

    def __init__(self, parameter=None):
        self.parameter = parameter

    def fit(self, X, y=None):
        # Only fail for one specific parameter value so a grid can mix
        # succeeding and failing candidates.
        if self.parameter == FailingClassifier.FAILING_PARAMETER:
            raise ValueError("Failing classifier failed as required")

    def predict(self, X):
        return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
    # GridSearchCV with on_error != 'raise'
    # Ensures that a warning is raised and score reset where appropriate.

    X, y = make_classification(n_samples=20, n_features=10, random_state=0)

    clf = FailingClassifier()

    # refit=False because we only want to check that errors caused by fits
    # to individual folds will be caught and warnings raised instead. If
    # refit was done, then an exception would be raised on refit and not
    # caught by grid_search (expected behavior), and this would cause an
    # error in this test.
    gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
                      refit=False, error_score=0.0)

    assert_warns(FitFailedWarning, gs.fit, X, y)

    # Ensure that grid scores were set to zero as required for those fits
    # that are expected to fail.
    assert all(np.all(this_point.cv_validation_scores == 0.0)
               for this_point in gs.grid_scores_
               if this_point.parameters['parameter'] ==
               FailingClassifier.FAILING_PARAMETER)

    # Same check with error_score=NaN instead of 0.0.
    gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
                      refit=False, error_score=float('nan'))
    assert_warns(FitFailedWarning, gs.fit, X, y)
    assert all(np.all(np.isnan(this_point.cv_validation_scores))
               for this_point in gs.grid_scores_
               if this_point.parameters['parameter'] ==
               FailingClassifier.FAILING_PARAMETER)


def test_grid_search_failing_classifier_raise():
    # GridSearchCV with on_error == 'raise' raises the error

    X, y = make_classification(n_samples=20, n_features=10, random_state=0)

    clf = FailingClassifier()

    # refit=False because we want to test the behaviour of the grid search part
    gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
                      refit=False, error_score='raise')

    # FailingClassifier issues a ValueError so this is what we look for.
    assert_raises(ValueError, gs.fit, X, y)


def test_parameters_sampler_replacement():
    # raise error if n_iter too large
    params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
    sampler = ParameterSampler(params, n_iter=7)
    assert_raises(ValueError, list, sampler)
    # degenerates to GridSearchCV if n_iter the same as grid_size
    sampler = ParameterSampler(params, n_iter=6)
    samples = list(sampler)
    assert_equal(len(samples), 6)
    for values in ParameterGrid(params):
        assert_true(values in samples)

    # test sampling without replacement in a large grid
    params = {'a': range(10), 'b': range(10), 'c': range(10)}
    sampler = ParameterSampler(params, n_iter=99, random_state=42)
    samples = list(sampler)
    assert_equal(len(samples), 99)
    # all 99 sampled points must be distinct
    hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
                        for p in samples]
    assert_equal(len(set(hashable_samples)), 99)

    # doesn't go into infinite loops
    params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
    sampler = ParameterSampler(params_distribution, n_iter=7)
    samples = list(sampler)
    assert_equal(len(samples), 7)
| bsd-3-clause |
tridesclous/tridesclous | setup.py | 1 | 1874 | from setuptools import setup
import os
# Read the package version from tridesclous/version.py without importing
# the package (which would pull in its heavy runtime dependencies).
# Fix: use a context manager so the file handle is closed deterministically
# instead of leaking from exec(open(...).read()).
d = {}
with open("tridesclous/version.py") as version_file:
    exec(version_file.read(), None, d)
version = d['version']

# Hard runtime dependencies.
install_requires = [
    'numpy',
    'scipy',
    'pandas',
    'openpyxl',
    'scikit-learn>=0.22.2',
    'matplotlib',
    'seaborn',
    'neo>=0.8',
    'tqdm',
    # 'PyQt5', make conda buggy
    'pyqtgraph',
    'joblib',
    'numba',
    'hdbscan',
]

# Optional feature sets, installable as e.g. 'tridesclous[opencl]'.
extras_require = {
    'online': ['pyacq', ],
    'opencl': ['pyopencl'],
}

long_description = ""

setup(
    name="tridesclous",
    version=version,
    packages=['tridesclous', 'tridesclous.gui', 'tridesclous.gui.icons',
              'tridesclous.online', 'tridesclous.scripts', 'tridesclous.tests'],
    install_requires=install_requires,
    extras_require=extras_require,
    author="C. Pouzat, S.Garcia",
    author_email="",
    description="offline/online spike sorting with french touch that light the barbecue",
    long_description=long_description,
    # console entry point: the 'tdc' command line tool
    entry_points={
        'console_scripts': ['tdc=tridesclous.scripts.tdc:main'],
        #~ 'gui_scripts': ['tdcgui=tridesclous.scripts.tdc:open_mainwindow'],
    },
    license="MIT",
    # NOTE(review): 'trisdesclous' below looks like a typo for
    # 'tridesclous' -- verify before changing (the URL may redirect).
    url='https://github.com/tridesclous/trisdesclous',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Topic :: Scientific/Engineering']
)
| mit |
marqh/iris | docs/iris/example_code/General/custom_file_loading.py | 5 | 12531 | """
Loading a cube from a custom file format
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This example shows how a custom text file can be loaded using the standard Iris
load mechanism.
The first stage in the process is to define an Iris :class:`FormatSpecification
<iris.io.format_picker.FormatSpecification>` for the file format. To create a
format specification we need to define the following:
* format_name - Some text that describes the format specification we are
creating
* file_element - FileElement object describing the element which identifies
this FormatSpecification.
Possible values are:
``iris.io.format_picker.MagicNumber(n, o)``
The n bytes from the file at offset o.
``iris.io.format_picker.FileExtension()``
The file's extension.
``iris.io.format_picker.LeadingLine()``
The first line of the file.
* file_element_value - The value that the file_element should take if a file
matches this FormatSpecification
* handler (optional) - A generator function that will be called when the file
specification has been identified. This function is provided by the user and
provides the means to parse the whole file. If no handler function is
provided, then identification is still possible without any handling.
The handler function must define the following arguments:
* list of filenames to process
* callback function - An optional function to filter/alter the Iris cubes
returned
The handler function must be defined as generator which yields each cube as
they are produced.
* priority (optional) - Integer giving a priority for considering this
specification where higher priority means sooner consideration
In the following example, the function :func:`load_NAME_III` has been defined
to handle the loading of the raw data from the custom file format. This
function is called from :func:`NAME_to_cube` which uses this data to create and
yield Iris cubes.
In the ``main()`` function the filenames are loaded via the ``iris.load_cube``
function which automatically invokes the ``FormatSpecification`` we defined.
The cube returned from the load function is then used to produce a plot.
"""
import datetime
import matplotlib.pyplot as plt
import numpy as np
from cf_units import Unit, CALENDAR_GREGORIAN
import iris
import iris.coords as icoords
import iris.coord_systems as icoord_systems
import iris.fileformats
import iris.io.format_picker as format_picker
import iris.plot as iplt
# Timestamp format used in NAME III headers and column headings,
# e.g. '0000UTC 01/01/2000'.
UTC_format = '%H%M%Z %d/%m/%Y'

# Header fields that must be cast from string to float / int / datetime.
FLOAT_HEADERS = ['X grid origin', 'Y grid origin',
                 'X grid resolution', 'Y grid resolution']
INT_HEADERS = ['X grid size', 'Y grid size', 'Number of fields']
DATE_HEADERS = ['Run time', 'Start of release', 'End of release']
# Names assigned to the 7 column-definition rows of a NAME III file.
COLUMN_NAMES = ['species_category', 'species', 'cell_measure', 'quantity',
                'unit', 'z_level', 'time']
def load_NAME_III(filename):
    """
    Loads the Met Office's NAME III grid output files returning headers, column
    definitions and data arrays as 3 separate lists.

    Returns a (headers, column_headings, data_arrays) triple where headers is
    a dict, column_headings a dict of per-column lists, and data_arrays a list
    of 2d numpy float32 arrays (one per field).
    """
    # Loading a file gives a generator of lines which can be progressed using
    # the next() function. This will come in handy as we wish to progress
    # through the file line by line.
    with open(filename) as file_handle:
        # Define a dictionary which can hold the header metadata for this file.
        headers = {}

        # Skip the NAME header of the file which looks something like
        # 'NAME III (version X.X.X)'.
        next(file_handle)

        # Read the next 16 lines of header information, putting the form
        # "header name: header value" into a dictionary.
        for _ in range(16):
            header_name, header_value = next(file_handle).split(':')

            # Strip off any spurious space characters in the header name and
            # value.
            header_name = header_name.strip()
            header_value = header_value.strip()

            # Cast some headers into floats or integers if they match a given
            # header name.
            if header_name in FLOAT_HEADERS:
                header_value = float(header_value)
            elif header_name in INT_HEADERS:
                header_value = int(header_value)
            elif header_name in DATE_HEADERS:
                # convert the time to python datetimes
                header_value = datetime.datetime.strptime(header_value,
                                                          UTC_format)

            headers[header_name] = header_value

        # Skip the next blank line in the file.
        next(file_handle)

        # Read the next 7 lines of column definitions.
        column_headings = {}
        for column_header_name in COLUMN_NAMES:
            # each line is comma separated with a trailing comma, hence [:-1]
            column_headings[column_header_name] = [
                col.strip() for col in next(file_handle).split(',')
            ][:-1]

        # Convert the time to python datetimes.
        new_time_column_header = []
        for i, t in enumerate(column_headings['time']):
            # The first 4 columns aren't time at all, so don't convert them to
            # datetimes.
            if i >= 4:
                t = datetime.datetime.strptime(t, UTC_format)
            new_time_column_header.append(t)
        column_headings['time'] = new_time_column_header

        # Skip the blank line after the column headers.
        next(file_handle)

        # Make a list of data arrays to hold the data for each column.
        data_shape = (headers['Y grid size'], headers['X grid size'])
        data_arrays = [np.zeros(data_shape, dtype=np.float32)
                       for i in range(headers['Number of fields'])]

        # Iterate over the remaining lines which represent the data in a column
        # form.
        for line in file_handle:
            # Split the line by comma, removing the last empty column caused by
            # the trailing comma.
            vals = line.split(',')[:-1]

            # Cast the x and y grid positions to floats and convert them to
            # zero based indices (the numbers are 1 based grid positions where
            # 0.5 represents half a grid point.)
            x = int(float(vals[0]) - 1.5)
            y = int(float(vals[1]) - 1.5)

            # Populate the data arrays (i.e. all columns but the leading 4).
            for i, data_array in enumerate(data_arrays):
                data_array[y, x] = float(vals[i + 4])

    return headers, column_headings, data_arrays
def NAME_to_cube(filenames, callback):
    """
    Returns a generator of cubes given a list of filenames and a callback.

    Each yielded cube wraps one field (data array) from a NAME III file: it
    is named after the species/quantity headings, given flight-level and time
    auxiliary coordinates, and regular latitude/longitude dimension
    coordinates built from the file's grid headers.
    """
    for filename in filenames:
        header, column_headings, data_arrays = load_NAME_III(filename)
        for i, data_array in enumerate(data_arrays):
            # Turn the dictionary of column headers with a list of header
            # information for each field into a dictionary of headers for just
            # this field. Ignore the first 4 columns of grid position (data
            # was located with the data array).
            field_headings = dict((k, v[i + 4])
                                  for k, v in column_headings.items())
            # Make a cube to hold this field's gridded data.
            cube = iris.cube.Cube(data_array)
            # Define the name from the species and quantity headings,
            # e.g. "VOLCANIC_ASH_AIR_CONCENTRATION".
            name = ('%s %s' % (field_headings['species'],
                               field_headings['quantity']))
            name = name.upper().replace(' ', '_')
            cube.rename(name)
            # Some units are badly encoded in the file, fix this by putting a
            # space in between. (If 'gs' is not found, then the string will be
            # returned unchanged.)
            cube.units = field_headings['unit'].replace('gs', 'g s')
            # Define and add the singular coordinates of the field (flight
            # level, time etc.).
            cube.add_aux_coord(icoords.AuxCoord(field_headings['z_level'],
                                                long_name='flight_level',
                                                units='1'))
            # Define the time unit and use it to serialise the datetime for
            # the time coordinate.
            time_unit = Unit('hours since epoch', calendar=CALENDAR_GREGORIAN)
            time_coord = icoords.AuxCoord(
                time_unit.date2num(field_headings['time']),
                standard_name='time',
                units=time_unit)
            cube.add_aux_coord(time_coord)
            # Build a coordinate system which can be referenced by latitude
            # and longitude coordinates (spherical Earth, radius in metres).
            lat_lon_coord_system = icoord_systems.GeogCS(6371229)
            # Build regular latitude and longitude coordinates which have
            # bounds. NOTE(review): the first point is offset one resolution
            # step from the origin header — presumably the origin names a
            # cell edge; confirm against the NAME III format description.
            start = header['X grid origin'] + header['X grid resolution']
            step = header['X grid resolution']
            count = header['X grid size']
            pts = start + np.arange(count, dtype=np.float32) * step
            lon_coord = icoords.DimCoord(pts, standard_name='longitude',
                                         units='degrees',
                                         coord_system=lat_lon_coord_system)
            lon_coord.guess_bounds()
            start = header['Y grid origin'] + header['Y grid resolution']
            step = header['Y grid resolution']
            count = header['Y grid size']
            pts = start + np.arange(count, dtype=np.float32) * step
            lat_coord = icoords.DimCoord(pts, standard_name='latitude',
                                         units='degrees',
                                         coord_system=lat_lon_coord_system)
            lat_coord.guess_bounds()
            # Add the latitude and longitude coordinates to the cube, with
            # mappings to data dimensions (y is dim 0, x is dim 1).
            cube.add_dim_coord(lat_coord, 0)
            cube.add_dim_coord(lon_coord, 1)
            # Implement standard iris callback capability. Although callbacks
            # are not used in this example, the standard mechanism for a
            # custom loader to implement a callback is shown:
            cube = iris.io.run_callback(callback, cube,
                                        [header, field_headings, data_array],
                                        filename)
            # Yield the cube created (the loop will continue when the next()
            # element is requested).
            yield cube
# Create a format_picker specification of the NAME file format giving it a
# priority greater than the built in NAME loader.
_NAME_III_spec = format_picker.FormatSpecification(
    'Name III',
    format_picker.LeadingLine(),
    lambda line: line.startswith(b"NAME III"),
    NAME_to_cube,
    priority=6)
# Register the NAME loader with iris so that iris.load* dispatches to
# NAME_to_cube whenever a file's leading line starts with b"NAME III".
iris.fileformats.FORMAT_AGENT.add_spec(_NAME_III_spec)
# ---------------------------------------------
# | Using the new loader |
# ---------------------------------------------
def main():
    """Load the NAME sample output via the custom loader registered above,
    then contour-plot the volcanic ash concentration with a legend and a
    time-stamped title."""
    fname = iris.sample_data_path('NAME_output.txt')
    # Constrain the load to the single cube of interest: ash concentration
    # for the lowest flight-level band.
    boundary_volc_ash_constraint = iris.Constraint(
        'VOLCANIC_ASH_AIR_CONCENTRATION',
        flight_level='From FL000 - FL200')
    # Callback shown as None to illustrate where a cube-level callback
    # function would be used if required.
    cube = iris.load_cube(fname, boundary_volc_ash_constraint, callback=None)
    # Draw contour levels for the data (the top level is just a catch-all).
    levels = (0.0002, 0.002, 0.004, 1e10)
    cs = iplt.contourf(cube, levels=levels,
                       colors=('#80ffff', '#939598', '#e00404'),
                       )
    # Draw a black outline at the lowest contour to highlight affected areas.
    iplt.contour(cube, levels=(levels[0], 100),
                 colors='black')
    # Set an extent and a background image for the map.
    ax = plt.gca()
    ax.set_extent((-90, 20, 20, 75))
    ax.stock_img('ne_shaded')
    # Make a legend, with custom labels, for the coloured contour set.
    artists, _ = cs.legend_elements()
    labels = [
        r'$%s < x \leq %s$' % (levels[0], levels[1]),
        r'$%s < x \leq %s$' % (levels[1], levels[2]),
        r'$x > %s$' % levels[2]
    ]
    ax.legend(artists, labels, title='Ash concentration / g m-3',
              loc='upper left')
    time = cube.coord('time')
    # Format the forecast validity time from the time coordinate's first
    # point for use in the plot title.
    time_date = time.units.num2date(time.points[0]).strftime(UTC_format)
    plt.title('Volcanic ash concentration forecast\nvalid at %s' % time_date)
    iplt.show()
# Standard script entry point: only run the demo when executed directly.
if __name__ == '__main__':
    main()
| lgpl-3.0 |
mkukielka/oddt | oddt/scoring/functions/PLECscore.py | 1 | 14458 | from __future__ import print_function
import sys
from os.path import dirname, isfile, join as path_join
from functools import partial
import json
import warnings
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
from sklearn.metrics import r2_score
from sklearn import __version__ as sklearn_version
from sklearn.linear_model import SGDRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
from oddt.metrics import rmse, standard_deviation_error
from oddt.scoring import scorer
from oddt.fingerprints import PLEC, MAX_HASH_VALUE
from oddt.scoring.descriptors import universal_descriptor
class PLECscore(scorer):
    def __init__(self, protein=None, n_jobs=-1, version='linear',
                 depth_protein=5, depth_ligand=1, size=65536):
        """PLECscore - a novel scoring function based on PLEC fingerprints. The
        underlying model can be one of:
            * linear regression
            * neural network (dense, 200x200x200)
            * random forest (100 trees)
        The scoring function is trained on PDBbind v2016 database and even with
        linear model outperforms other machine-learning ones in terms of Pearson
        correlation coefficient on "core set". For details see PLEC publication.
        PLECscore predicts binding affinity (pKi/d).
        .. versionadded:: 0.6
        Parameters
        ----------
        protein : oddt.toolkit.Molecule object
            Receptor for the scored ligands
        n_jobs: int (default=-1)
            Number of cores to use for scoring and training. By default (-1)
            all cores are allocated.
        version: str (default='linear')
            A version of scoring function ('linear', 'nn' or 'rf') - which
            model should be used for the scoring function.
        depth_protein: int (default=5)
            The depth of ECFP environments generated on the protein side of
            interaction. By default 6 (0 to 5) environments are generated.
        depth_ligand: int (default=1)
            The depth of ECFP environments generated on the ligand side of
            interaction. By default 2 (0 to 1) environments are generated.
        size: int (default=65536)
            The final size of a folded PLEC fingerprint. This setting is not
            used to limit the data encoded in PLEC fingerprint (for that
            tune the depths), but only the final length. Setting it to too
            low value will lead to many collisions.
        """
        self.protein = protein
        self.n_jobs = n_jobs
        self.version = version
        self.depth_protein = depth_protein
        self.depth_ligand = depth_ligand
        self.size = size
        # Sparse, counted PLEC fingerprint generator bound to the chosen
        # depths and folded size; waters (HOH) are ignored.
        plec_func = partial(PLEC,
                            depth_ligand=depth_ligand,
                            depth_protein=depth_protein,
                            size=size,
                            count_bits=True,
                            sparse=True,
                            ignore_hoh=True)
        descriptors = universal_descriptor(plec_func, protein=protein,
                                           shape=size, sparse=True)
        if version == 'linear':
            # avoid deprecation warnings
            kwargs = {'fit_intercept': False,
                      'loss': 'huber',
                      'penalty': 'elasticnet',
                      'random_state': 0,
                      'verbose': 0,
                      'alpha': 1e-4,
                      'epsilon': 1e-1,
                      }
            # NOTE(review): lexicographic string comparison of version
            # numbers is fragile in general (e.g. '0.100' < '0.19'); it
            # happens to work for the 0.19 boundary used here.
            if sklearn_version >= '0.19':
                kwargs['max_iter'] = 100
            else:
                kwargs['n_iter'] = 100
            model = SGDRegressor(**kwargs)
        elif version == 'nn':
            model = MLPRegressor((200, 200, 200),
                                 batch_size=10,
                                 random_state=0,
                                 verbose=0,
                                 solver='lbfgs')
        elif version == 'rf':
            model = RandomForestRegressor(n_estimators=100,
                                          n_jobs=n_jobs,
                                          verbose=0,
                                          oob_score=True,
                                          random_state=0)
        else:
            raise ValueError('The version "%s" is not supported by PLECscore'
                             % version)
        super(PLECscore, self).__init__(model, descriptors,
                                        score_title='PLEC%s_p%i_l%i_s%i' %
                                        (version, depth_protein, depth_ligand,
                                         size))
    def gen_training_data(self,
                          pdbbind_dir,
                          pdbbind_versions=(2016,),
                          home_dir=None,
                          use_proteins=True):
        """Generate the PDBbind descriptor CSV (with unfolded PLEC
        fingerprints) used for training, written under ``home_dir``."""
        if home_dir is None:
            home_dir = path_join(dirname(__file__), 'PLECscore')
        filename = path_join(home_dir, 'plecscore_descs_p%i_l%i.csv.gz' %
                             (self.depth_protein, self.depth_ligand))
        # The CSV will contain unfolded FP, so temporarily widen the
        # fingerprint to the maximum hash range.
        self.descriptor_generator.func.keywords['size'] = MAX_HASH_VALUE
        self.descriptor_generator.shape = MAX_HASH_VALUE
        super(PLECscore, self)._gen_pdbbind_desc(
            pdbbind_dir=pdbbind_dir,
            pdbbind_versions=pdbbind_versions,
            desc_path=filename,
            include_general_set=True,
            use_proteins=use_proteins,
        )
        # reset to the original size
        self.descriptor_generator.func.keywords['size'] = self.size
        self.descriptor_generator.shape = self.size
    def gen_json(self, home_dir=None, pdbbind_version=2016):
        """Serialise the fitted model's parameters to a JSON file and return
        its path.

        NOTE(review): only SGDRegressor and MLPRegressor are handled below;
        for the 'rf' version ``attributes`` is never bound and this method
        raises NameError — confirm whether 'rf' export is intentionally
        unsupported.
        """
        if not home_dir:
            home_dir = path_join(dirname(__file__), 'PLECscore')
        if isinstance(self.model, SGDRegressor):
            attributes = ['coef_', 'intercept_', 't_']
        elif isinstance(self.model, MLPRegressor):
            attributes = ['loss_', 'coefs_', 'intercepts_', 'n_iter_',
                          'n_layers_', 'n_outputs_', 'out_activation_']
        out = {}
        for attr_name in attributes:
            attr = getattr(self.model, attr_name)
            # convert numpy arrays to list for json
            if isinstance(attr, np.ndarray):
                attr = attr.tolist()
            elif (isinstance(attr, (list, tuple)) and
                  isinstance(attr[0], np.ndarray)):
                attr = [x.tolist() for x in attr]
            out[attr_name] = attr
        json_path = path_join(home_dir, 'plecscore_%s_p%i_l%i_s%i_pdbbind%i.json' %
                              (self.version, self.depth_protein,
                               self.depth_ligand, self.size, pdbbind_version))
        with open(json_path, 'w') as json_f:
            json.dump(out, json_f, indent=2)
        return json_path
    def train(self, home_dir=None, sf_pickle=None, pdbbind_version=2016,
              ignore_json=False):
        """Fit the model (or load pretrained weights from JSON when
        available), print evaluation metrics to stderr, pickle the scoring
        function and return the pickle filename."""
        if not home_dir:
            home_dir = path_join(dirname(__file__), 'PLECscore')
        desc_path = path_join(home_dir, 'plecscore_descs_p%i_l%i.csv.gz' %
                              (self.depth_protein, self.depth_ligand))
        json_path = path_join(
            home_dir, 'plecscore_%s_p%i_l%i_s%i_pdbbind%i.json' %
            (self.version, self.depth_protein,
             self.depth_ligand, self.size, pdbbind_version))
        if (self.version in ['linear'] and  # TODO: support other models
                isfile(json_path) and
                not ignore_json):
            print('Loading pretrained PLECscore %s with depths P%i L%i on '
                  'PDBBind v%i'
                  % (self.version, self.depth_protein, self.depth_ligand,
                     pdbbind_version), file=sys.stderr)
            with open(json_path) as json_f:
                json_data = json.load(json_f)
            # Restore fitted attributes; lists of lists become lists of
            # ndarrays (e.g. per-layer weights), flat lists become ndarrays.
            for k, v in json_data.items():
                if isinstance(v, list):
                    if isinstance(v[0], list):
                        v = [np.array(x) for x in v]
                    else:
                        v = np.array(v)
                setattr(self.model, k, v)
        else:
            # blacklist core set 2013 and astex
            pdbids_blacklist = [
                '3ao4', '3i3b', '1uto', '1ps3', '1qi0', '3g2z', '3dxg', '3l7b',
                '3mfv', '3b3s', '3kgp', '3fk1', '3fcq', '3lka', '3udh', '4gqq',
                '3imc', '2xdl', '2ymd', '1lbk', '1bcu', '3zsx', '1f8d', '3muz',
                '2v00', '1loq', '3n7a', '2r23', '3nq3', '2hb1', '2w66', '1n2v',
                '3kwa', '3g2n', '4de2', '3ozt', '3b3w', '3cft', '3f3a', '2qmj',
                '3f80', '1a30', '1w3k', '3ivg', '2jdy', '3u9q', '3pxf', '2wbg',
                '1u33', '2x0y', '3mss', '1vso', '1q8t', '3acw', '3bpc', '3vd4',
                '3cj2', '2brb', '1p1q', '2vo5', '3d4z', '2gss', '2yge', '3gy4',
                '3zso', '3ov1', '1w4o', '1zea', '2zxd', '3ueu', '2qft', '1gpk',
                '1f8b', '2jdm', '3su5', '2wca', '3n86', '2x97', '1n1m', '1o5b',
                '2y5h', '3ehy', '4des', '3ebp', '1q8u', '4de1', '3huc', '3l4w',
                '2vl4', '3coy', '3f3c', '1os0', '3owj', '3bkk', '1yc1', '1hnn',
                '3vh9', '3bfu', '1w3l', '3k5v', '2qbr', '1lol', '10gs', '2j78',
                '1r5y', '2weg', '3uo4', '3jvs', '2yfe', '1sln', '2iwx', '2jdu',
                '4djv', '2xhm', '2xnb', '3s8o', '2zcr', '3oe5', '3gbb', '2d3u',
                '3uex', '4dew', '1xd0', '1z95', '2vot', '1oyt', '2ole', '3gcs',
                '1kel', '2vvn', '3kv2', '3pww', '3su2', '1f8c', '2xys', '3l4u',
                '2xb8', '2d1o', '2zjw', '3f3e', '2g70', '2zwz', '1u1b', '4g8m',
                '1o3f', '2x8z', '3cyx', '2cet', '3ag9', '2pq9', '3l3n', '1nvq',
                '2cbj', '2v7a', '1h23', '2qbp', '3b68', '2xbv', '2fvd', '2vw5',
                '3ejr', '3f17', '3nox', '1hfs', '1jyq', '2pcp', '3ge7', '2wtv',
                '2zcq', '2obf', '3e93', '2p4y', '3dd0', '3nw9', '3uri', '3gnw',
                '3su3', '2xy9', '1sqa', '3fv1', '2yki', '3g0w', '3pe2', '1e66',
                '1igj', '4tmn', '2zx6', '3myg', '4gid', '3utu', '1lor', '1mq6',
                '2x00', '2j62', '4djr', '1gm8', '1gpk', '1hnn', '1hp0', '1hq2',
                '1hvy', '1hwi', '1hww', '1ia1', '1j3j', '1jd0', '1jje', '1ke5',
                '1kzk', '1l2s', '1l7f', '1lpz', '1m2z', '1mmv', '1mzc', '1n1m',
                '1n2v', '1n46', '1nav', '1of1', '1of6', '1opk', '1oq5', '1owe',
                '1oyt', '1p2y', '1p62', '1pmn', '1q1g', '1q41', '1q4g', '1r1h',
                '1r55', '1r58', '1r9o', '1s19', '1s3v', '1sg0', '1sj0', '1sq5',
                '1sqn', '1t40', '1t46', '1t9b', '1tow', '1tt1', '1u1c', '1uml',
                '1unl', '1uou', '1v0p', '1v48', '1v4s', '1vcj', '1w1p', '1w2g',
                '1xm6', '1xoq', '1xoz', '1y6b', '1ygc', '1yqy', '1yv3', '1yvf',
                '1ywr', '1z95', '2bm2', '2br1', '2bsm']
            # use remote csv if it's not present
            if not isfile(desc_path):
                branch = 'master'  # define branch/commit
                desc_url = ('https://raw.githubusercontent.com/oddt/oddt/%s'
                            '/oddt/scoring/functions/PLECscore/'
                            'plecscore_descs_p%i_l%i.csv.gz' %
                            (branch, self.depth_protein, self.depth_ligand))
                warnings.warn('The CSV for PLEC P%i L%i is missing. Trying to '
                              'get it from ODDT GitHub.' % (self.depth_protein,
                                                            self.depth_ligand))
                # download and save CSV
                pd.read_csv(desc_url, index_col='pdbid').to_csv(
                    desc_path, compression='gzip')
            # set PLEC size to unfolded
            super(PLECscore, self)._load_pdbbind_desc(
                desc_path,
                train_set=('general', 'refined'),
                pdbbind_version=pdbbind_version,
                train_blacklist=pdbids_blacklist,
                fold_size=self.size,
            )
            print('Training PLECscore %s with depths P%i L%i on PDBBind v%i'
                  % (self.version, self.depth_protein, self.depth_ligand,
                     pdbbind_version), file=sys.stderr)
            self.model.fit(self.train_descs, self.train_target)
            # Report fit quality on the held-out and training sets (plus
            # out-of-bag predictions for the random forest).
            sets = [
                ('Test', self.model.predict(self.test_descs), self.test_target),
                ('Train', self.model.predict(self.train_descs), self.train_target)]
            if self.version == 'rf':
                sets.append(('OOB', self.model.oob_prediction_, self.train_target))
            for name, pred, target in sets:
                print('%s set:' % name,
                      'R2_score: %.4f' % r2_score(target, pred),
                      'Rp: %.4f' % pearsonr(target, pred)[0],
                      'RMSE: %.4f' % rmse(target, pred),
                      'SD: %.4f' % standard_deviation_error(target, pred),
                      sep='\t', file=sys.stderr)
        if sf_pickle is None:
            return self.save('PLEC%s_p%i_l%i_pdbbind%i_s%i.pickle'
                             % (self.version, self.depth_protein,
                                self.depth_ligand, pdbbind_version, self.size))
        else:
            return self.save(sf_pickle)
    @classmethod
    def load(self, filename=None, version='linear', pdbbind_version=2016,
             depth_protein=5, depth_ligand=1, size=65536):
        """Load a pickled PLECscore; when no pickle is found, train and
        pickle a new scoring function first."""
        if filename is None:
            # FIXME: it would be cool to have templates of names for a class
            fname = ('PLEC%s_p%i_l%i_pdbbind%i_s%i.pickle' %
                     (version, depth_protein, depth_ligand,
                      pdbbind_version, size))
            # Look in the working directory first, then next to this module;
            # the for/else falls through to training when neither exists.
            for f in [fname, path_join(dirname(__file__), fname)]:
                if isfile(f):
                    filename = f
                    break
            else:
                print('No pickle, training new scoring function.',
                      file=sys.stderr)
                sf = PLECscore(version=version)
                filename = sf.train(sf_pickle=filename,
                                    pdbbind_version=pdbbind_version)
        return scorer.load(filename)
| bsd-3-clause |
hvanhovell/spark | python/pyspark/sql/functions.py | 2 | 143764 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A collections of builtin functions
"""
import sys
import functools
import warnings
if sys.version < "3":
from itertools import imap as map
if sys.version >= '3':
basestring = str
from pyspark import since, SparkContext
from pyspark.rdd import ignore_unicode_prefix, PythonEvalType
from pyspark.sql.column import Column, _to_java_column, _to_seq, _create_column_from_literal, \
_create_column_from_name
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import StringType, DataType
# Keep UserDefinedFunction import for backwards compatible import; moved in SPARK-22409
from pyspark.sql.udf import UserDefinedFunction, _create_udf
from pyspark.sql.utils import to_str
# Note to developers: all of PySpark functions here take string as column names whenever possible.
# Namely, if columns are referred as arguments, they can be always both Column or string,
# even though there might be few exceptions for legacy or inevitable reasons.
# If you are fixing other language APIs together, also please note that Scala side is not the case
# since it requires to make every single overridden definition.
def _create_function(name, doc=""):
    """Build a PySpark wrapper that forwards to the JVM function ``name``.

    The generated wrapper passes a :class:`Column`'s underlying Java column
    through to the JVM and forwards any other value as-is.
    """
    def wrapper(col):
        context = SparkContext._active_spark_context
        java_func = getattr(context._jvm.functions, name)
        if isinstance(col, Column):
            java_col = java_func(col._jc)
        else:
            java_col = java_func(col)
        return Column(java_col)
    wrapper.__name__ = name
    wrapper.__doc__ = doc
    return wrapper
def _create_function_over_column(name, doc=""):
    """Like `_create_function`, but the generated wrapper coerces its
    argument through ``_to_java_column`` so plain column-name strings are
    accepted as well as :class:`Column` objects.
    """
    def wrapper(col):
        jvm_functions = SparkContext._active_spark_context._jvm.functions
        return Column(getattr(jvm_functions, name)(_to_java_column(col)))
    wrapper.__name__ = name
    wrapper.__doc__ = doc
    return wrapper
def _wrap_deprecated_function(func, message):
    """Return a wrapper around ``func`` that emits ``message`` as a
    DeprecationWarning before delegating the call."""
    @functools.wraps(func)
    def deprecated(col):
        warnings.warn(message, DeprecationWarning)
        return func(col)
    return deprecated
def _create_binary_mathfunction(name, doc=""):
    """Create a binary math-function wrapper by JVM name.

    Each of the wrapper's two arguments may be a :class:`Column`, a
    column-name string, or — for legacy reasons — any value that can be
    implicitly converted to ``float``.

    :param name: name of the function on the JVM side (``sc._jvm.functions``)
    :param doc: docstring attached to the generated wrapper
    :return: the generated wrapper, renamed to ``name``
    """
    def _to_math_arg(col):
        # Normalize one argument: Columns contribute their Java column,
        # strings are resolved as column names, and anything else is coerced
        # to float (kept for backwards compatibility).
        if isinstance(col, Column):
            return col._jc
        elif isinstance(col, basestring):
            return _create_column_from_name(col)
        return float(col)
    def _(col1, col2):
        sc = SparkContext._active_spark_context
        jc = getattr(sc._jvm.functions, name)(_to_math_arg(col1),
                                              _to_math_arg(col2))
        return Column(jc)
    _.__name__ = name
    _.__doc__ = doc
    return _
def _create_window_function(name, doc=''):
    """Create a zero-argument window-function wrapper by JVM name."""
    def wrapper():
        jvm_functions = SparkContext._active_spark_context._jvm.functions
        return Column(getattr(jvm_functions, name)())
    wrapper.__name__ = name
    wrapper.__doc__ = 'Window function: ' + doc
    return wrapper
def _options_to_str(options):
    """Return a copy of ``options`` with every value passed through
    ``to_str``."""
    converted = {}
    for key, value in options.items():
        converted[key] = to_str(value)
    return converted
# Docstring templates for the generated wrapper functions below. Each dict
# maps a JVM function name to the docstring its Python wrapper receives.
_lit_doc = """
    Creates a :class:`Column` of literal value.
    >>> df.select(lit(5).alias('height')).withColumn('spark_user', lit(True)).take(1)
    [Row(height=5, spark_user=True)]
    """
_functions = {
    'lit': _lit_doc,
    'col': 'Returns a :class:`Column` based on the given column name.',
    'column': 'Returns a :class:`Column` based on the given column name.',
    'asc': 'Returns a sort expression based on the ascending order of the given column name.',
    'desc': 'Returns a sort expression based on the descending order of the given column name.',
}
_functions_over_column = {
    'sqrt': 'Computes the square root of the specified float value.',
    'abs': 'Computes the absolute value.',
    'max': 'Aggregate function: returns the maximum value of the expression in a group.',
    'min': 'Aggregate function: returns the minimum value of the expression in a group.',
    'count': 'Aggregate function: returns the number of items in a group.',
    'sum': 'Aggregate function: returns the sum of all values in the expression.',
    'avg': 'Aggregate function: returns the average of the values in a group.',
    'mean': 'Aggregate function: returns the average of the values in a group.',
    'sumDistinct': 'Aggregate function: returns the sum of distinct values in the expression.',
}
_functions_1_4_over_column = {
    # unary math functions
    'acos': ':return: inverse cosine of `col`, as if computed by `java.lang.Math.acos()`',
    'asin': ':return: inverse sine of `col`, as if computed by `java.lang.Math.asin()`',
    'atan': ':return: inverse tangent of `col`, as if computed by `java.lang.Math.atan()`',
    'cbrt': 'Computes the cube-root of the given value.',
    'ceil': 'Computes the ceiling of the given value.',
    'cos': """:param col: angle in radians
           :return: cosine of the angle, as if computed by `java.lang.Math.cos()`.""",
    'cosh': """:param col: hyperbolic angle
           :return: hyperbolic cosine of the angle, as if computed by `java.lang.Math.cosh()`""",
    'exp': 'Computes the exponential of the given value.',
    'expm1': 'Computes the exponential of the given value minus one.',
    'floor': 'Computes the floor of the given value.',
    'log': 'Computes the natural logarithm of the given value.',
    'log10': 'Computes the logarithm of the given value in Base 10.',
    'log1p': 'Computes the natural logarithm of the given value plus one.',
    'rint': 'Returns the double value that is closest in value to the argument and' +
            ' is equal to a mathematical integer.',
    'signum': 'Computes the signum of the given value.',
    'sin': """:param col: angle in radians
           :return: sine of the angle, as if computed by `java.lang.Math.sin()`""",
    'sinh': """:param col: hyperbolic angle
           :return: hyperbolic sine of the given value,
                    as if computed by `java.lang.Math.sinh()`""",
    'tan': """:param col: angle in radians
           :return: tangent of the given value, as if computed by `java.lang.Math.tan()`""",
    'tanh': """:param col: hyperbolic angle
           :return: hyperbolic tangent of the given value,
                    as if computed by `java.lang.Math.tanh()`""",
    'toDegrees': '.. note:: Deprecated in 2.1, use :func:`degrees` instead.',
    'toRadians': '.. note:: Deprecated in 2.1, use :func:`radians` instead.',
    'bitwiseNOT': 'Computes bitwise not.',
}
_functions_2_4 = {
    'asc_nulls_first': 'Returns a sort expression based on the ascending order of the given' +
                       ' column name, and null values return before non-null values.',
    'asc_nulls_last': 'Returns a sort expression based on the ascending order of the given' +
                      ' column name, and null values appear after non-null values.',
    'desc_nulls_first': 'Returns a sort expression based on the descending order of the given' +
                        ' column name, and null values appear before non-null values.',
    'desc_nulls_last': 'Returns a sort expression based on the descending order of the given' +
                       ' column name, and null values appear after non-null values',
}
_collect_list_doc = """
    Aggregate function: returns a list of objects with duplicates.
    .. note:: The function is non-deterministic because the order of collected results depends
        on order of rows which may be non-deterministic after a shuffle.
    >>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
    >>> df2.agg(collect_list('age')).collect()
    [Row(collect_list(age)=[2, 5, 5])]
    """
_collect_set_doc = """
    Aggregate function: returns a set of objects with duplicate elements eliminated.
    .. note:: The function is non-deterministic because the order of collected results depends
        on order of rows which may be non-deterministic after a shuffle.
    >>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
    >>> df2.agg(collect_set('age')).collect()
    [Row(collect_set(age)=[5, 2])]
    """
_functions_1_6_over_column = {
    # unary math functions
    'stddev': 'Aggregate function: alias for stddev_samp.',
    'stddev_samp': 'Aggregate function: returns the unbiased sample standard deviation of' +
                   ' the expression in a group.',
    'stddev_pop': 'Aggregate function: returns population standard deviation of' +
                  ' the expression in a group.',
    'variance': 'Aggregate function: alias for var_samp.',
    'var_samp': 'Aggregate function: returns the unbiased sample variance of' +
                ' the values in a group.',
    'var_pop': 'Aggregate function: returns the population variance of the values in a group.',
    'skewness': 'Aggregate function: returns the skewness of the values in a group.',
    'kurtosis': 'Aggregate function: returns the kurtosis of the values in a group.',
    'collect_list': _collect_list_doc,
    'collect_set': _collect_set_doc
}
_functions_2_1_over_column = {
    # unary math functions
    'degrees': """
               Converts an angle measured in radians to an approximately equivalent angle
               measured in degrees.
               :param col: angle in radians
               :return: angle in degrees, as if computed by `java.lang.Math.toDegrees()`
               """,
    'radians': """
               Converts an angle measured in degrees to an approximately equivalent angle
               measured in radians.
               :param col: angle in degrees
               :return: angle in radians, as if computed by `java.lang.Math.toRadians()`
               """,
}
# math functions that take two arguments as input
_binary_mathfunctions = {
    'atan2': """
             :param col1: coordinate on y-axis
             :param col2: coordinate on x-axis
             :return: the `theta` component of the point
                (`r`, `theta`)
                in polar coordinates that corresponds to the point
                (`x`, `y`) in Cartesian coordinates,
                as if computed by `java.lang.Math.atan2()`
             """,
    'hypot': 'Computes ``sqrt(a^2 + b^2)`` without intermediate overflow or underflow.',
    'pow': 'Returns the value of the first argument raised to the power of the second argument.',
}
_window_functions = {
    'row_number':
        """returns a sequential number starting at 1 within a window partition.""",
    'dense_rank':
        """returns the rank of rows within a window partition, without any gaps.
        The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
        sequence when there are ties. That is, if you were ranking a competition using dense_rank
        and had three people tie for second place, you would say that all three were in second
        place and that the next person came in third. Rank would give me sequential numbers, making
        the person that came in third place (after the ties) would register as coming in fifth.
        This is equivalent to the DENSE_RANK function in SQL.""",
    'rank':
        """returns the rank of rows within a window partition.
        The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
        sequence when there are ties. That is, if you were ranking a competition using dense_rank
        and had three people tie for second place, you would say that all three were in second
        place and that the next person came in third. Rank would give me sequential numbers, making
        the person that came in third place (after the ties) would register as coming in fifth.
        This is equivalent to the RANK function in SQL.""",
    'cume_dist':
        """returns the cumulative distribution of values within a window partition,
        i.e. the fraction of rows that are below the current row.""",
    'percent_rank':
        """returns the relative rank (i.e. percentile) of rows within a window partition.""",
}
# Wraps deprecated functions (keys) with the messages (values).
_functions_deprecated = {
}
# Generate the public module-level functions from the tables above, tagging
# each with the Spark version it first appeared in.
for _name, _doc in _functions.items():
    globals()[_name] = since(1.3)(_create_function(_name, _doc))
for _name, _doc in _functions_over_column.items():
    globals()[_name] = since(1.3)(_create_function_over_column(_name, _doc))
for _name, _doc in _functions_1_4_over_column.items():
    globals()[_name] = since(1.4)(_create_function_over_column(_name, _doc))
for _name, _doc in _binary_mathfunctions.items():
    globals()[_name] = since(1.4)(_create_binary_mathfunction(_name, _doc))
for _name, _doc in _window_functions.items():
    globals()[_name] = since(1.6)(_create_window_function(_name, _doc))
for _name, _doc in _functions_1_6_over_column.items():
    globals()[_name] = since(1.6)(_create_function_over_column(_name, _doc))
for _name, _doc in _functions_2_1_over_column.items():
    globals()[_name] = since(2.1)(_create_function_over_column(_name, _doc))
for _name, _message in _functions_deprecated.items():
    globals()[_name] = _wrap_deprecated_function(globals()[_name], _message)
for _name, _doc in _functions_2_4.items():
    globals()[_name] = since(2.4)(_create_function(_name, _doc))
# Clean up the loop variables so they don't leak into the module namespace.
del _name, _doc
@since(2.1)
def approx_count_distinct(col, rsd=None):
    """Aggregate function: returns a new :class:`Column` for approximate distinct count of
    column `col`.
    :param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more
        efficient to use :func:`countDistinct`
    >>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect()
    [Row(distinct_ages=2)]
    """
    # Only forward rsd to the JVM when the caller supplied one, so the JVM
    # default applies otherwise.
    java_col = _to_java_column(col)
    functions = SparkContext._active_spark_context._jvm.functions
    if rsd is None:
        jc = functions.approx_count_distinct(java_col)
    else:
        jc = functions.approx_count_distinct(java_col, rsd)
    return Column(jc)
@since(1.6)
def broadcast(df):
    """Marks a DataFrame as small enough for use in broadcast joins."""
    # Wrap the hinted JVM DataFrame back into a Python DataFrame.
    jdf = SparkContext._active_spark_context._jvm.functions.broadcast(df._jdf)
    return DataFrame(jdf, df.sql_ctx)
@since(1.4)
def coalesce(*cols):
    """Returns the first column that is not null.
    >>> cDf = spark.createDataFrame([(None, None), (1, None), (None, 2)], ("a", "b"))
    >>> cDf.show()
    +----+----+
    |   a|   b|
    +----+----+
    |null|null|
    |   1|null|
    |null|   2|
    +----+----+
    >>> cDf.select(coalesce(cDf["a"], cDf["b"])).show()
    +--------------+
    |coalesce(a, b)|
    +--------------+
    |          null|
    |             1|
    |             2|
    +--------------+
    >>> cDf.select('*', coalesce(cDf["a"], lit(0.0))).show()
    +----+----+----------------+
    |   a|   b|coalesce(a, 0.0)|
    +----+----+----------------+
    |null|null|             0.0|
    |   1|null|             1.0|
    |null|   2|             0.0|
    +----+----+----------------+
    """
    # Convert the Python columns to a JVM Seq before delegating.
    sc = SparkContext._active_spark_context
    java_cols = _to_seq(sc, cols, _to_java_column)
    return Column(sc._jvm.functions.coalesce(java_cols))
@since(1.6)
def corr(col1, col2):
    """Returns a new :class:`Column` for the Pearson Correlation Coefficient for ``col1``
    and ``col2``.
    >>> a = range(20)
    >>> b = [2 * x for x in range(20)]
    >>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
    >>> df.agg(corr("a", "b").alias('c')).collect()
    [Row(c=1.0)]
    """
    # Delegate to the JVM implementation.
    jvm_functions = SparkContext._active_spark_context._jvm.functions
    jc = jvm_functions.corr(_to_java_column(col1), _to_java_column(col2))
    return Column(jc)
@since(2.0)
def covar_pop(col1, col2):
    """Returns a new :class:`Column` for the population covariance of ``col1`` and ``col2``.
    >>> a = [1] * 10
    >>> b = [1] * 10
    >>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
    >>> df.agg(covar_pop("a", "b").alias('c')).collect()
    [Row(c=0.0)]
    """
    # Delegate to the JVM implementation.
    jvm_functions = SparkContext._active_spark_context._jvm.functions
    jc = jvm_functions.covar_pop(_to_java_column(col1), _to_java_column(col2))
    return Column(jc)
@since(2.0)
def covar_samp(col1, col2):
    """Returns a new :class:`Column` for the sample covariance of ``col1`` and ``col2``.
    >>> a = [1] * 10
    >>> b = [1] * 10
    >>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
    >>> df.agg(covar_samp("a", "b").alias('c')).collect()
    [Row(c=0.0)]
    """
    # Delegate to the JVM implementation.
    jvm_functions = SparkContext._active_spark_context._jvm.functions
    jc = jvm_functions.covar_samp(_to_java_column(col1), _to_java_column(col2))
    return Column(jc)
@since(1.3)
def countDistinct(col, *cols):
    """Returns a new :class:`Column` for distinct count of ``col`` or ``cols``.
    >>> df.agg(countDistinct(df.age, df.name).alias('c')).collect()
    [Row(c=2)]
    >>> df.agg(countDistinct("age", "name").alias('c')).collect()
    [Row(c=2)]
    """
    # The JVM API takes the first column separately from the remaining Seq.
    sc = SparkContext._active_spark_context
    first_col = _to_java_column(col)
    remaining = _to_seq(sc, cols, _to_java_column)
    return Column(sc._jvm.functions.countDistinct(first_col, remaining))
@since(1.3)
def first(col, ignorenulls=False):
    """Aggregate function: returns the first value in a group.
    The function by default returns the first values it sees. It will return the first non-null
    value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
    .. note:: The function is non-deterministic because its results depends on order of rows which
        may be non-deterministic after a shuffle.
    """
    # Delegate to the JVM implementation, forwarding the null-handling flag.
    jvm_functions = SparkContext._active_spark_context._jvm.functions
    return Column(jvm_functions.first(_to_java_column(col), ignorenulls))
@since(2.0)
def grouping(col):
    """
    Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated
    or not, returns 1 for aggregated or 0 for not aggregated in the result set.
    >>> df.cube("name").agg(grouping("name"), sum("age")).orderBy("name").show()
    +-----+--------------+--------+
    | name|grouping(name)|sum(age)|
    +-----+--------------+--------+
    | null|             1|       7|
    |Alice|             0|       2|
    |  Bob|             0|       5|
    +-----+--------------+--------+
    """
    # Delegate to the JVM implementation.
    jvm_functions = SparkContext._active_spark_context._jvm.functions
    return Column(jvm_functions.grouping(_to_java_column(col)))
@since(2.0)
def grouping_id(*cols):
"""
Aggregate function: returns the level of grouping, equals to
(grouping(c1) << (n-1)) + (grouping(c2) << (n-2)) + ... + grouping(cn)
.. note:: The list of columns should match with grouping columns exactly, or empty (means all
the grouping columns).
>>> df.cube("name").agg(grouping_id(), sum("age")).orderBy("name").show()
+-----+-------------+--------+
| name|grouping_id()|sum(age)|
+-----+-------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+-------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping_id(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.6)
def input_file_name():
    """Creates a string column holding the file name of the current Spark task.
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.input_file_name()
    return Column(jc)
@since(1.6)
def isnan(col):
    """An expression that is true if and only if the column is NaN.
    >>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
    >>> df.select(isnan("a").alias("r1"), isnan(df.a).alias("r2")).collect()
    [Row(r1=False, r2=False), Row(r1=True, r2=True)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.isnan(_to_java_column(col))
    return Column(jc)
@since(1.6)
def isnull(col):
    """An expression that is true if and only if the column is null.
    >>> df = spark.createDataFrame([(1, None), (None, 2)], ("a", "b"))
    >>> df.select(isnull("a").alias("r1"), isnull(df.a).alias("r2")).collect()
    [Row(r1=False, r2=False), Row(r1=True, r2=True)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.isnull(_to_java_column(col))
    return Column(jc)
@since(1.3)
def last(col, ignorenulls=False):
    """Aggregate function: returns the last value in a group.
    By default the last value seen is returned. When ignoreNulls is set to true, the
    last non-null value is returned instead; if every value is null, null is returned.
    .. note:: The function is non-deterministic because its results depends on order of rows
        which may be non-deterministic after a shuffle.
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.last(_to_java_column(col), ignorenulls))
@since(1.6)
def monotonically_increasing_id():
    """A column that generates monotonically increasing 64-bit integers.
    The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive.
    The current implementation puts the partition ID in the upper 31 bits, and the record number
    within each partition in the lower 33 bits. The assumption is that the data frame has
    less than 1 billion partitions, and each partition has less than 8 billion records.
    .. note:: The function is non-deterministic because its result depends on partition IDs.
    As an example, consider a :class:`DataFrame` with two partitions, each with 3 records.
    This expression would return the following IDs:
    0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594.
    >>> df0 = sc.parallelize(range(2), 2).mapPartitions(lambda x: [(1,), (2,), (3,)]).toDF(['col1'])
    >>> df0.select(monotonically_increasing_id().alias('id')).collect()
    [Row(id=0), Row(id=1), Row(id=2), Row(id=8589934592), Row(id=8589934593), Row(id=8589934594)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.monotonically_increasing_id()
    return Column(jc)
@since(1.6)
def nanvl(col1, col2):
    """Returns col1 if it is not NaN, or col2 if col1 is NaN.
    Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`).
    >>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
    >>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect()
    [Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2))
    return Column(jc)
@ignore_unicode_prefix
@since(1.4)
def rand(seed=None):
    """Generates a random column with independent and identically distributed (i.i.d.) samples
    from U[0.0, 1.0].
    .. note:: The function is non-deterministic in general case.
    >>> df.withColumn('rand', rand(seed=42) * 3).collect()
    [Row(age=2, name=u'Alice', rand=2.4052597283576684),
     Row(age=5, name=u'Bob', rand=2.3913904055683974)]
    """
    ctx = SparkContext._active_spark_context
    # Only forward the seed when the caller supplied one; the JVM side picks its own otherwise.
    jc = ctx._jvm.functions.rand() if seed is None else ctx._jvm.functions.rand(seed)
    return Column(jc)
@ignore_unicode_prefix
@since(1.4)
def randn(seed=None):
    """Generates a column with independent and identically distributed (i.i.d.) samples from
    the standard normal distribution.
    .. note:: The function is non-deterministic in general case.
    >>> df.withColumn('randn', randn(seed=42)).collect()
    [Row(age=2, name=u'Alice', randn=1.1027054481455365),
    Row(age=5, name=u'Bob', randn=0.7400395449950132)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.randn() if seed is None else ctx._jvm.functions.randn(seed)
    return Column(jc)
@since(1.5)
def round(col, scale=0):
    """
    Round the given value to `scale` decimal places using HALF_UP rounding mode if `scale` >= 0
    or at integral part when `scale` < 0.
    >>> spark.createDataFrame([(2.5,)], ['a']).select(round('a', 0).alias('r')).collect()
    [Row(r=3.0)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.round(_to_java_column(col), scale)
    return Column(jc)
@since(2.0)
def bround(col, scale=0):
    """
    Round the given value to `scale` decimal places using HALF_EVEN rounding mode if `scale` >= 0
    or at integral part when `scale` < 0.
    >>> spark.createDataFrame([(2.5,)], ['a']).select(bround('a', 0).alias('r')).collect()
    [Row(r=2.0)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.bround(_to_java_column(col), scale)
    return Column(jc)
@since(1.5)
def shiftLeft(col, numBits):
    """Shift the given value numBits left.
    >>> spark.createDataFrame([(21,)], ['a']).select(shiftLeft('a', 1).alias('r')).collect()
    [Row(r=42)]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.shiftLeft(_to_java_column(col), numBits))
@since(1.5)
def shiftRight(col, numBits):
    """(Signed) shift the given value numBits right.
    >>> spark.createDataFrame([(42,)], ['a']).select(shiftRight('a', 1).alias('r')).collect()
    [Row(r=21)]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.shiftRight(_to_java_column(col), numBits))
@since(1.5)
def shiftRightUnsigned(col, numBits):
    """Unsigned shift the given value numBits right.
    >>> df = spark.createDataFrame([(-42,)], ['a'])
    >>> df.select(shiftRightUnsigned('a', 1).alias('r')).collect()
    [Row(r=9223372036854775787)]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.shiftRightUnsigned(_to_java_column(col), numBits))
@since(1.6)
def spark_partition_id():
    """A column for partition ID.
    .. note:: This is indeterministic because it depends on data partitioning and task scheduling.
    >>> df.repartition(1).select(spark_partition_id().alias("pid")).collect()
    [Row(pid=0), Row(pid=0)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.spark_partition_id()
    return Column(jc)
@since(1.5)
def expr(str):
    """Parses the expression string into the column that it represents
    >>> df.select(expr("length(name)")).collect()
    [Row(length(name)=5), Row(length(name)=3)]
    """
    # NOTE: the parameter name `str` shadows the builtin but is part of the public signature.
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.expr(str)
    return Column(jc)
@ignore_unicode_prefix
@since(1.4)
def struct(*cols):
    """Creates a new struct column.
    :param cols: list of column names (string) or list of :class:`Column` expressions
    >>> df.select(struct('age', 'name').alias("struct")).collect()
    [Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
    >>> df.select(struct([df.age, df.name]).alias("struct")).collect()
    [Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
    """
    ctx = SparkContext._active_spark_context
    # A single list/set argument is treated as the full column collection.
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        cols = cols[0]
    return Column(ctx._jvm.functions.struct(_to_seq(ctx, cols, _to_java_column)))
@since(1.5)
def greatest(*cols):
    """
    Returns the greatest value of the list of column names, skipping null values.
    This function takes at least 2 parameters. It will return null iff all parameters are null.
    >>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
    >>> df.select(greatest(df.a, df.b, df.c).alias("greatest")).collect()
    [Row(greatest=4)]
    """
    if len(cols) < 2:
        raise ValueError("greatest should take at least two columns")
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.greatest(_to_seq(ctx, cols, _to_java_column))
    return Column(jc)
@since(1.5)
def least(*cols):
    """
    Returns the least value of the list of column names, skipping null values.
    This function takes at least 2 parameters. It will return null iff all parameters are null.
    >>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
    >>> df.select(least(df.a, df.b, df.c).alias("least")).collect()
    [Row(least=1)]
    """
    if len(cols) < 2:
        raise ValueError("least should take at least two columns")
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.least(_to_seq(ctx, cols, _to_java_column))
    return Column(jc)
@since(1.4)
def when(condition, value):
    """Evaluates a list of conditions and returns one of multiple possible result expressions.
    If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
    :param condition: a boolean :class:`Column` expression.
    :param value: a literal value, or a :class:`Column` expression.
    >>> df.select(when(df['age'] == 2, 3).otherwise(4).alias("age")).collect()
    [Row(age=3), Row(age=4)]
    >>> df.select(when(df.age == 2, df.age + 1).alias("age")).collect()
    [Row(age=3), Row(age=None)]
    """
    if not isinstance(condition, Column):
        raise TypeError("condition should be a Column")
    ctx = SparkContext._active_spark_context
    # Column values are unwrapped to their JVM counterpart; literals pass through as-is.
    jvalue = value._jc if isinstance(value, Column) else value
    return Column(ctx._jvm.functions.when(condition._jc, jvalue))
@since(1.5)
def log(arg1, arg2=None):
    """Returns the first argument-based logarithm of the second argument.
    If there is only one argument, then this takes the natural logarithm of the argument.
    >>> df.select(log(10.0, df.age).alias('ten')).rdd.map(lambda l: str(l.ten)[:7]).collect()
    ['0.30102', '0.69897']
    >>> df.select(log(df.age).alias('e')).rdd.map(lambda l: str(l.e)[:7]).collect()
    ['0.69314', '1.60943']
    """
    ctx = SparkContext._active_spark_context
    # One argument: natural log of arg1. Two arguments: log of arg2 in base arg1.
    if arg2 is None:
        jc = ctx._jvm.functions.log(_to_java_column(arg1))
    else:
        jc = ctx._jvm.functions.log(arg1, _to_java_column(arg2))
    return Column(jc)
@since(1.5)
def log2(col):
    """Returns the base-2 logarithm of the argument.
    >>> spark.createDataFrame([(4,)], ['a']).select(log2('a').alias('log2')).collect()
    [Row(log2=2.0)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.log2(_to_java_column(col))
    return Column(jc)
@since(1.5)
@ignore_unicode_prefix
def conv(col, fromBase, toBase):
    """
    Convert a number in a string column from one base to another.
    >>> df = spark.createDataFrame([("010101",)], ['n'])
    >>> df.select(conv(df.n, 2, 16).alias('hex')).collect()
    [Row(hex=u'15')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.conv(_to_java_column(col), fromBase, toBase)
    return Column(jc)
@since(1.5)
def factorial(col):
    """
    Computes the factorial of the given value.
    >>> df = spark.createDataFrame([(5,)], ['n'])
    >>> df.select(factorial(df.n).alias('f')).collect()
    [Row(f=120)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.factorial(_to_java_column(col))
    return Column(jc)
# --------------- Window functions ------------------------
@since(1.4)
def lag(col, offset=1, default=None):
    """
    Window function: returns the value that is `offset` rows before the current row, and
    `defaultValue` if there is less than `offset` rows before the current row. For example,
    an `offset` of one will return the previous row at any given point in the window partition.
    This is equivalent to the LAG function in SQL.
    :param col: name of column or expression
    :param offset: number of row to extend
    :param default: default value
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.lag(_to_java_column(col), offset, default)
    return Column(jc)
@since(1.4)
def lead(col, offset=1, default=None):
    """
    Window function: returns the value that is `offset` rows after the current row, and
    `defaultValue` if there is less than `offset` rows after the current row. For example,
    an `offset` of one will return the next row at any given point in the window partition.
    This is equivalent to the LEAD function in SQL.
    :param col: name of column or expression
    :param offset: number of row to extend
    :param default: default value
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.lead(_to_java_column(col), offset, default)
    return Column(jc)
@since(1.4)
def ntile(n):
    """
    Window function: returns the ntile group id (from 1 to `n` inclusive)
    in an ordered window partition. For example, if `n` is 4, the first
    quarter of the rows will get value 1, the second quarter will get 2,
    the third quarter will get 3, and the last quarter will get 4.
    This is equivalent to the NTILE function in SQL.
    :param n: an integer
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.ntile(int(n))
    return Column(jc)
# ---------------------- Date/Timestamp functions ------------------------------
@since(1.5)
def current_date():
    """
    Returns the current date as a :class:`DateType` column.
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.current_date()
    return Column(jc)
@since(1.5)
def current_timestamp():
    """
    Returns the current timestamp as a :class:`TimestampType` column.
    """
    # Fix: this was the only function in the module missing its @since decorator
    # (compare `current_date` directly above, also @since(1.5)).
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.current_timestamp())
@ignore_unicode_prefix
@since(1.5)
def date_format(date, format):
    """
    Converts a date/timestamp/string to a value of string in the format specified by the date
    format given by the second argument.
    A pattern could be for instance `dd.MM.yyyy` and could return a string like '18.03.1993'. All
    pattern letters of the Java class `java.time.format.DateTimeFormatter` can be used.
    .. note:: Use when ever possible specialized functions like `year`. These benefit from a
        specialized implementation.
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(date_format('dt', 'MM/dd/yyy').alias('date')).collect()
    [Row(date=u'04/08/2015')]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.date_format(_to_java_column(date), format)
    return Column(jc)
@since(1.5)
def year(col):
    """
    Return the year component of a given date as an integer.
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(year('dt').alias('year')).collect()
    [Row(year=2015)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.year(_to_java_column(col))
    return Column(jc)
@since(1.5)
def quarter(col):
    """
    Return the quarter of a given date as an integer.
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(quarter('dt').alias('quarter')).collect()
    [Row(quarter=2)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.quarter(_to_java_column(col))
    return Column(jc)
@since(1.5)
def month(col):
    """
    Return the month component of a given date as an integer.
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(month('dt').alias('month')).collect()
    [Row(month=4)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.month(_to_java_column(col))
    return Column(jc)
@since(2.3)
def dayofweek(col):
    """
    Return the day of the week of a given date as an integer.
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(dayofweek('dt').alias('day')).collect()
    [Row(day=4)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.dayofweek(_to_java_column(col))
    return Column(jc)
@since(1.5)
def dayofmonth(col):
    """
    Return the day of the month of a given date as an integer.
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(dayofmonth('dt').alias('day')).collect()
    [Row(day=8)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.dayofmonth(_to_java_column(col))
    return Column(jc)
@since(1.5)
def dayofyear(col):
    """
    Return the day of the year of a given date as an integer.
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(dayofyear('dt').alias('day')).collect()
    [Row(day=98)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.dayofyear(_to_java_column(col))
    return Column(jc)
@since(1.5)
def hour(col):
    """
    Return the hours component of a given timestamp as an integer.
    >>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
    >>> df.select(hour('ts').alias('hour')).collect()
    [Row(hour=13)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.hour(_to_java_column(col))
    return Column(jc)
@since(1.5)
def minute(col):
    """
    Return the minutes component of a given timestamp as an integer.
    >>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
    >>> df.select(minute('ts').alias('minute')).collect()
    [Row(minute=8)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.minute(_to_java_column(col))
    return Column(jc)
@since(1.5)
def second(col):
    """
    Return the seconds component of a given timestamp as an integer.
    >>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
    >>> df.select(second('ts').alias('second')).collect()
    [Row(second=15)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.second(_to_java_column(col))
    return Column(jc)
@since(1.5)
def weekofyear(col):
    """
    Return the week number of a given date as an integer.
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(weekofyear(df.dt).alias('week')).collect()
    [Row(week=15)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.weekofyear(_to_java_column(col))
    return Column(jc)
@since(1.5)
def date_add(start, days):
    """
    Returns the date that is `days` days after `start`
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(date_add(df.dt, 1).alias('next_date')).collect()
    [Row(next_date=datetime.date(2015, 4, 9))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.date_add(_to_java_column(start), days)
    return Column(jc)
@since(1.5)
def date_sub(start, days):
    """
    Returns the date that is `days` days before `start`
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(date_sub(df.dt, 1).alias('prev_date')).collect()
    [Row(prev_date=datetime.date(2015, 4, 7))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.date_sub(_to_java_column(start), days)
    return Column(jc)
@since(1.5)
def datediff(end, start):
    """
    Returns the number of days from `start` to `end`.
    >>> df = spark.createDataFrame([('2015-04-08','2015-05-10')], ['d1', 'd2'])
    >>> df.select(datediff(df.d2, df.d1).alias('diff')).collect()
    [Row(diff=32)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.datediff(_to_java_column(end), _to_java_column(start))
    return Column(jc)
@since(1.5)
def add_months(start, months):
    """
    Returns the date that is `months` months after `start`
    >>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> df.select(add_months(df.dt, 1).alias('next_month')).collect()
    [Row(next_month=datetime.date(2015, 5, 8))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.add_months(_to_java_column(start), months)
    return Column(jc)
@since(1.5)
def months_between(date1, date2, roundOff=True):
    """
    Returns number of months between dates date1 and date2.
    If date1 is later than date2, then the result is positive.
    If date1 and date2 are on the same day of month, or both are the last day of month,
    returns an integer (time of day will be ignored).
    The result is rounded off to 8 digits unless `roundOff` is set to `False`.
    >>> df = spark.createDataFrame([('1997-02-28 10:30:00', '1996-10-30')], ['date1', 'date2'])
    >>> df.select(months_between(df.date1, df.date2).alias('months')).collect()
    [Row(months=3.94959677)]
    >>> df.select(months_between(df.date1, df.date2, False).alias('months')).collect()
    [Row(months=3.9495967741935485)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.months_between(
        _to_java_column(date1), _to_java_column(date2), roundOff)
    return Column(jc)
@since(2.2)
def to_date(col, format=None):
    """Converts a :class:`Column` of :class:`pyspark.sql.types.StringType` or
    :class:`pyspark.sql.types.TimestampType` into :class:`pyspark.sql.types.DateType`
    using the optionally specified format. Specify formats according to
    `DateTimeFormatter <https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html>`_. # noqa
    By default, it follows casting rules to :class:`pyspark.sql.types.DateType` if the format
    is omitted (equivalent to ``col.cast("date")``).
    >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
    >>> df.select(to_date(df.t).alias('date')).collect()
    [Row(date=datetime.date(1997, 2, 28))]
    >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
    >>> df.select(to_date(df.t, 'yyyy-MM-dd HH:mm:ss').alias('date')).collect()
    [Row(date=datetime.date(1997, 2, 28))]
    """
    ctx = SparkContext._active_spark_context
    # Omitting `format` defers to Spark's cast-to-date rules.
    if format is None:
        return Column(ctx._jvm.functions.to_date(_to_java_column(col)))
    return Column(ctx._jvm.functions.to_date(_to_java_column(col), format))
@since(2.2)
def to_timestamp(col, format=None):
    """Converts a :class:`Column` of :class:`pyspark.sql.types.StringType` or
    :class:`pyspark.sql.types.TimestampType` into :class:`pyspark.sql.types.TimestampType`
    using the optionally specified format. Specify formats according to
    `DateTimeFormatter <https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html>`_. # noqa
    By default, it follows casting rules to :class:`pyspark.sql.types.TimestampType` if the format
    is omitted (equivalent to ``col.cast("timestamp")``).
    >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
    >>> df.select(to_timestamp(df.t).alias('dt')).collect()
    [Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
    >>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
    >>> df.select(to_timestamp(df.t, 'yyyy-MM-dd HH:mm:ss').alias('dt')).collect()
    [Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
    """
    # Fix: the docstring previously claimed the result is DateType; the function casts to
    # timestamp (see the ``col.cast("timestamp")`` text and the datetime.datetime doctests).
    sc = SparkContext._active_spark_context
    if format is None:
        jc = sc._jvm.functions.to_timestamp(_to_java_column(col))
    else:
        jc = sc._jvm.functions.to_timestamp(_to_java_column(col), format)
    return Column(jc)
@since(1.5)
def trunc(date, format):
    """
    Returns date truncated to the unit specified by the format.
    :param format: 'year', 'yyyy', 'yy' or 'month', 'mon', 'mm'
    >>> df = spark.createDataFrame([('1997-02-28',)], ['d'])
    >>> df.select(trunc(df.d, 'year').alias('year')).collect()
    [Row(year=datetime.date(1997, 1, 1))]
    >>> df.select(trunc(df.d, 'mon').alias('month')).collect()
    [Row(month=datetime.date(1997, 2, 1))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.trunc(_to_java_column(date), format)
    return Column(jc)
@since(2.3)
def date_trunc(format, timestamp):
    """
    Returns timestamp truncated to the unit specified by the format.
    :param format: 'year', 'yyyy', 'yy', 'month', 'mon', 'mm',
        'day', 'dd', 'hour', 'minute', 'second', 'week', 'quarter'
    >>> df = spark.createDataFrame([('1997-02-28 05:02:11',)], ['t'])
    >>> df.select(date_trunc('year', df.t).alias('year')).collect()
    [Row(year=datetime.datetime(1997, 1, 1, 0, 0))]
    >>> df.select(date_trunc('mon', df.t).alias('month')).collect()
    [Row(month=datetime.datetime(1997, 2, 1, 0, 0))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.date_trunc(format, _to_java_column(timestamp))
    return Column(jc)
@since(1.5)
def next_day(date, dayOfWeek):
    """
    Returns the first date which is later than the value of the date column.
    Day of the week parameter is case insensitive, and accepts:
    "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun".
    >>> df = spark.createDataFrame([('2015-07-27',)], ['d'])
    >>> df.select(next_day(df.d, 'Sun').alias('date')).collect()
    [Row(date=datetime.date(2015, 8, 2))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.next_day(_to_java_column(date), dayOfWeek)
    return Column(jc)
@since(1.5)
def last_day(date):
    """
    Returns the last day of the month which the given date belongs to.
    >>> df = spark.createDataFrame([('1997-02-10',)], ['d'])
    >>> df.select(last_day(df.d).alias('date')).collect()
    [Row(date=datetime.date(1997, 2, 28))]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.last_day(_to_java_column(date))
    return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def from_unixtime(timestamp, format="uuuu-MM-dd HH:mm:ss"):
    """
    Converts the number of seconds from unix epoch (1970-01-01 00:00:00 UTC) to a string
    representing the timestamp of that moment in the current system time zone in the given
    format.
    >>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
    >>> time_df = spark.createDataFrame([(1428476400,)], ['unix_time'])
    >>> time_df.select(from_unixtime('unix_time').alias('ts')).collect()
    [Row(ts=u'2015-04-08 00:00:00')]
    >>> spark.conf.unset("spark.sql.session.timeZone")
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.from_unixtime(_to_java_column(timestamp), format)
    return Column(jc)
@since(1.5)
def unix_timestamp(timestamp=None, format='uuuu-MM-dd HH:mm:ss'):
    """
    Convert time string with given pattern ('uuuu-MM-dd HH:mm:ss', by default)
    to Unix time stamp (in seconds), using the default timezone and the default
    locale, return null if fail.
    if `timestamp` is None, then it returns current timestamp.
    >>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
    >>> time_df = spark.createDataFrame([('2015-04-08',)], ['dt'])
    >>> time_df.select(unix_timestamp('dt', 'yyyy-MM-dd').alias('unix_time')).collect()
    [Row(unix_time=1428476400)]
    >>> spark.conf.unset("spark.sql.session.timeZone")
    """
    ctx = SparkContext._active_spark_context
    # With no timestamp column, the zero-argument JVM variant yields the current time.
    if timestamp is None:
        jc = ctx._jvm.functions.unix_timestamp()
    else:
        jc = ctx._jvm.functions.unix_timestamp(_to_java_column(timestamp), format)
    return Column(jc)
@since(1.5)
def from_utc_timestamp(timestamp, tz):
    """
    This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
    takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in UTC, and
    renders that timestamp as a timestamp in the given time zone.
    However, timestamp in Spark represents number of microseconds from the Unix epoch, which is not
    timezone-agnostic. So in Spark this function just shift the timestamp value from UTC timezone to
    the given timezone.
    This function may return confusing result if the input is a string with timezone, e.g.
    '2018-03-13T06:18:23+00:00'. The reason is that, Spark firstly cast the string to timestamp
    according to the timezone in the string, and finally display the result by converting the
    timestamp to string according to the session local timezone.
    :param timestamp: the column that contains timestamps
    :param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc
    .. versionchanged:: 2.4
        `tz` can take a :class:`Column` containing timezone ID strings.
    >>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
    >>> df.select(from_utc_timestamp(df.ts, "PST").alias('local_time')).collect()
    [Row(local_time=datetime.datetime(1997, 2, 28, 2, 30))]
    >>> df.select(from_utc_timestamp(df.ts, df.tz).alias('local_time')).collect()
    [Row(local_time=datetime.datetime(1997, 2, 28, 19, 30))]
    .. note:: Deprecated in 3.0. See SPARK-25496
    """
    warnings.warn("Deprecated in 3.0. See SPARK-25496", DeprecationWarning)
    ctx = SparkContext._active_spark_context
    # A Column tz (per-row timezone) must be unwrapped; a plain string passes through.
    jtz = _to_java_column(tz) if isinstance(tz, Column) else tz
    return Column(ctx._jvm.functions.from_utc_timestamp(_to_java_column(timestamp), jtz))
@since(1.5)
def to_utc_timestamp(timestamp, tz):
    """
    This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
    takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in the given
    timezone, and renders that timestamp as a timestamp in UTC.
    However, timestamp in Spark represents number of microseconds from the Unix epoch, which is not
    timezone-agnostic. So in Spark this function just shift the timestamp value from the given
    timezone to UTC timezone.
    This function may return confusing result if the input is a string with timezone, e.g.
    '2018-03-13T06:18:23+00:00'. The reason is that, Spark firstly cast the string to timestamp
    according to the timezone in the string, and finally display the result by converting the
    timestamp to string according to the session local timezone.
    :param timestamp: the column that contains timestamps
    :param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc
    .. versionchanged:: 2.4
        `tz` can take a :class:`Column` containing timezone ID strings.
    >>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
    >>> df.select(to_utc_timestamp(df.ts, "PST").alias('utc_time')).collect()
    [Row(utc_time=datetime.datetime(1997, 2, 28, 18, 30))]
    >>> df.select(to_utc_timestamp(df.ts, df.tz).alias('utc_time')).collect()
    [Row(utc_time=datetime.datetime(1997, 2, 28, 1, 30))]
    .. note:: Deprecated in 3.0. See SPARK-25496
    """
    warnings.warn("Deprecated in 3.0. See SPARK-25496", DeprecationWarning)
    ctx = SparkContext._active_spark_context
    # A Column tz (per-row timezone) must be unwrapped; a plain string passes through.
    jtz = _to_java_column(tz) if isinstance(tz, Column) else tz
    return Column(ctx._jvm.functions.to_utc_timestamp(_to_java_column(timestamp), jtz))
@since(2.0)
@ignore_unicode_prefix
def window(timeColumn, windowDuration, slideDuration=None, startTime=None):
    """Bucketize rows into one or more time windows given a timestamp specifying column. Window
    starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window
    [12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in
    the order of months are not supported.
    The time column must be of :class:`pyspark.sql.types.TimestampType`.
    Durations are provided as strings, e.g. '1 second', '1 day 12 hours', '2 minutes'. Valid
    interval strings are 'week', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond'.
    If the ``slideDuration`` is not provided, the windows will be tumbling windows.
    The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start
    window intervals. For example, in order to have hourly tumbling windows that start 15 minutes
    past the hour, e.g. 12:15-13:15, 13:15-14:15... provide `startTime` as `15 minutes`.
    The output column will be a struct called 'window' by default with the nested columns 'start'
    and 'end', where 'start' and 'end' will be of :class:`pyspark.sql.types.TimestampType`.
    >>> df = spark.createDataFrame([("2016-03-11 09:00:07", 1)]).toDF("date", "val")
    >>> w = df.groupBy(window("date", "5 seconds")).agg(sum("val").alias("sum"))
    >>> w.select(w.window.start.cast("string").alias("start"),
    ...          w.window.end.cast("string").alias("end"), "sum").collect()
    [Row(start=u'2016-03-11 09:00:05', end=u'2016-03-11 09:00:10', sum=1)]
    """
    def require_string(field, fieldName):
        # Durations/offsets must be non-empty interval strings like '5 seconds'.
        if not field or type(field) is not str:
            raise TypeError("%s should be provided as a string" % fieldName)
    ctx = SparkContext._active_spark_context
    jcol = _to_java_column(timeColumn)
    require_string(windowDuration, "windowDuration")
    # Dispatch on which optional arguments were supplied; a startTime without a
    # slideDuration means tumbling windows (slide == window) offset by startTime.
    if slideDuration and startTime:
        require_string(slideDuration, "slideDuration")
        require_string(startTime, "startTime")
        jwin = ctx._jvm.functions.window(jcol, windowDuration, slideDuration, startTime)
    elif slideDuration:
        require_string(slideDuration, "slideDuration")
        jwin = ctx._jvm.functions.window(jcol, windowDuration, slideDuration)
    elif startTime:
        require_string(startTime, "startTime")
        jwin = ctx._jvm.functions.window(jcol, windowDuration, windowDuration, startTime)
    else:
        jwin = ctx._jvm.functions.window(jcol, windowDuration)
    return Column(jwin)
# ---------------------------- misc functions ----------------------------------
@since(1.5)
@ignore_unicode_prefix
def crc32(col):
    """
    Computes the CRC32 cyclic redundancy check value of a binary column,
    returning the result as a bigint.

    >>> spark.createDataFrame([('ABC',)], ['a']).select(crc32('a').alias('crc32')).collect()
    [Row(crc32=2743272264)]
    """
    # Thin wrapper: forward the column to the JVM-side implementation.
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.crc32(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def md5(col):
    """Computes the MD5 digest of the column, returned as a 32-character hex string.

    >>> spark.createDataFrame([('ABC',)], ['a']).select(md5('a').alias('hash')).collect()
    [Row(hash=u'902fbdd2b1df0c4f70b4a5d23525e932')]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.md5(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def sha1(col):
    """Computes the SHA-1 digest of the column and returns it as a hex string.

    >>> spark.createDataFrame([('ABC',)], ['a']).select(sha1('a').alias('hash')).collect()
    [Row(hash=u'3c01bdbb26f358bab27f267924aa2c9a03fcfdb8')]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.sha1(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def sha2(col, numBits):
    """Returns the hex string result of SHA-2 family of hash functions (SHA-224, SHA-256, SHA-384,
    and SHA-512). The numBits indicates the desired bit length of the result, which must have a
    value of 224, 256, 384, 512, or 0 (which is equivalent to 256).

    :param col: name of column or expression to hash
    :param numBits: desired bit length of the result (224, 256, 384, 512, or 0 for 256)
    :raises ValueError: if ``numBits`` is not one of the supported bit lengths

    >>> digests = df.select(sha2(df.name, 256).alias('s')).collect()
    >>> digests[0]
    Row(s=u'3bc51062973c458d5a6f2d8d64a023246354ad7e064b1e4e009ec8a0699a3043')
    >>> digests[1]
    Row(s=u'cd9fb1e148ccd8442e5aa74904cc73bf6fb54d1d54d333bd596aa9bb4bb4e961')
    """
    # Validate eagerly on the Python side so a bad bit length fails with a
    # clear ValueError instead of an opaque JVM-side analysis exception
    # raised only when the query is evaluated.
    if numBits not in {0, 224, 256, 384, 512}:
        raise ValueError("numBits should be one of {224, 256, 384, 512, 0}")
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.sha2(_to_java_column(col), numBits)
    return Column(jc)
@since(2.0)
def hash(*cols):
    """Computes a hash code over the given columns and returns it as an int column.

    >>> spark.createDataFrame([('ABC',)], ['a']).select(hash('a').alias('hash')).collect()
    [Row(hash=-757602832)]
    """
    # `_to_seq` needs the active context to build a JVM-side column sequence.
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.hash(_to_seq(ctx, cols, _to_java_column)))
@since(3.0)
def xxhash64(*cols):
    """Computes a hash code over the given columns with the 64-bit xxHash variant,
    returning the result as a long column.

    >>> spark.createDataFrame([('ABC',)], ['a']).select(xxhash64('a').alias('hash')).collect()
    [Row(hash=4105715581806190027)]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.xxhash64(_to_seq(ctx, cols, _to_java_column)))
# ---------------------- String/Binary functions ------------------------------
# Mapping of SQL function name -> one-line docstring, for simple string
# functions whose Python wrappers are identical single-column pass-throughs.
_string_functions = {
    'upper': 'Converts a string expression to upper case.',
    'lower': 'Converts a string expression to lower case.',
    'ascii': 'Computes the numeric value of the first character of the string column.',
    'base64': 'Computes the BASE64 encoding of a binary column and returns it as a string column.',
    'unbase64': 'Decodes a BASE64 encoded string column and returns it as a binary column.',
    'ltrim': 'Trim the spaces from left end for the specified string value.',
    'rtrim': 'Trim the spaces from right end for the specified string value.',
    'trim': 'Trim the spaces from both ends for the specified string column.',
}
# Generate a module-level wrapper for each entry: each forwards a single
# column to the JVM function of the same name, with the docstring above.
for _name, _doc in _string_functions.items():
    globals()[_name] = since(1.5)(_create_function_over_column(_name, _doc))
# Remove the loop variables so they don't leak into the module namespace.
del _name, _doc
@since(1.5)
@ignore_unicode_prefix
def concat_ws(sep, *cols):
    """
    Joins multiple input string columns into one string column, separated
    by ``sep``.

    >>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
    >>> df.select(concat_ws('-', df.s, df.d).alias('s')).collect()
    [Row(s=u'abcd-123')]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.concat_ws(sep, _to_seq(ctx, cols, _to_java_column)))
@since(1.5)
def decode(col, charset):
    """
    Decodes the first argument from a binary to a string using the provided character set
    (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.decode(_to_java_column(col), charset))
@since(1.5)
def encode(col, charset):
    """
    Encodes the first argument from a string to a binary using the provided character set
    (one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.encode(_to_java_column(col), charset))
@ignore_unicode_prefix
@since(1.5)
def format_number(col, d):
    """
    Formats the number X to a format like '#,--#,--#.--', rounded to d decimal places
    with HALF_EVEN round mode, and returns the result as a string.

    :param col: the column name of the numeric value to be formatted
    :param d: the N decimal places

    >>> spark.createDataFrame([(5,)], ['a']).select(format_number('a', 4).alias('v')).collect()
    [Row(v=u'5.0000')]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.format_number(_to_java_column(col), d))
@ignore_unicode_prefix
@since(1.5)
def format_string(format, *cols):
    """
    Formats the arguments in printf-style and returns the result as a string column.

    :param format: string that can contain embedded format tags and used as result column's value
    :param cols: list of column names (string) or list of :class:`Column` expressions to
        be used in formatting

    >>> df = spark.createDataFrame([(5, "hello")], ['a', 'b'])
    >>> df.select(format_string('%d %s', df.a, df.b).alias('v')).collect()
    [Row(v=u'5 hello')]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.format_string(format, _to_seq(ctx, cols, _to_java_column)))
@since(1.5)
def instr(str, substr):
    """
    Locates the position of the first occurrence of the substr column in the
    given string. Returns null if either of the arguments are null.

    .. note:: The position is not zero based, but 1 based index. Returns 0 if substr
        could not be found in str.

    >>> df = spark.createDataFrame([('abcd',)], ['s',])
    >>> df.select(instr(df.s, 'b').alias('s')).collect()
    [Row(s=2)]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.instr(_to_java_column(str), substr))
@since(1.5)
@ignore_unicode_prefix
def substring(str, pos, len):
    """
    Returns the substring that starts at `pos` and is of length `len` when str is String type,
    or the slice of byte array that starts at `pos` in byte and is of length `len`
    when str is Binary type.

    .. note:: The position is not zero based, but 1 based index.

    >>> df = spark.createDataFrame([('abcd',)], ['s',])
    >>> df.select(substring(df.s, 1, 2).alias('s')).collect()
    [Row(s=u'ab')]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.substring(_to_java_column(str), pos, len))
@since(1.5)
@ignore_unicode_prefix
def substring_index(str, delim, count):
    """
    Returns the substring from string str before count occurrences of the delimiter delim.
    If count is positive, everything to the left of the final delimiter (counting from the
    left) is returned. If count is negative, everything to the right of the final delimiter
    (counting from the right) is returned. substring_index performs a case-sensitive match
    when searching for delim.

    >>> df = spark.createDataFrame([('a.b.c.d',)], ['s'])
    >>> df.select(substring_index(df.s, '.', 2).alias('s')).collect()
    [Row(s=u'a.b')]
    >>> df.select(substring_index(df.s, '.', -3).alias('s')).collect()
    [Row(s=u'b.c.d')]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.substring_index(_to_java_column(str), delim, count))
@ignore_unicode_prefix
@since(1.5)
def levenshtein(left, right):
    """Computes the Levenshtein (edit) distance between the two given string columns.

    >>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r'])
    >>> df0.select(levenshtein('l', 'r').alias('d')).collect()
    [Row(d=3)]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.levenshtein(_to_java_column(left), _to_java_column(right)))
@since(1.5)
def locate(substr, str, pos=1):
    """
    Locate the position of the first occurrence of substr in a string column, after position pos.
    .. note:: The position is not zero based, but 1 based index. Returns 0 if substr
        could not be found in str.
    :param substr: a string
    :param str: a Column of :class:`pyspark.sql.types.StringType`
    :param pos: start position (1 based)
    >>> df = spark.createDataFrame([('abcd',)], ['s',])
    >>> df.select(locate('b', df.s, 1).alias('s')).collect()
    [Row(s=2)]
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.locate(substr, _to_java_column(str), pos))
@since(1.5)
@ignore_unicode_prefix
def lpad(col, len, pad):
    """
    Pads the string column on the left with `pad` up to a total width of `len`.

    >>> df = spark.createDataFrame([('abcd',)], ['s',])
    >>> df.select(lpad(df.s, 6, '#').alias('s')).collect()
    [Row(s=u'##abcd')]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.lpad(_to_java_column(col), len, pad))
@since(1.5)
@ignore_unicode_prefix
def rpad(col, len, pad):
    """
    Pads the string column on the right with `pad` up to a total width of `len`.

    >>> df = spark.createDataFrame([('abcd',)], ['s',])
    >>> df.select(rpad(df.s, 6, '#').alias('s')).collect()
    [Row(s=u'abcd##')]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.rpad(_to_java_column(col), len, pad))
@since(1.5)
@ignore_unicode_prefix
def repeat(col, n):
    """
    Returns a new string column with the input string column repeated n times.

    >>> df = spark.createDataFrame([('ab',)], ['s',])
    >>> df.select(repeat(df.s, 3).alias('s')).collect()
    [Row(s=u'ababab')]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.repeat(_to_java_column(col), n))
@since(1.5)
@ignore_unicode_prefix
def split(str, pattern, limit=-1):
    """
    Splits str around matches of the given pattern.

    :param str: a string expression to split
    :param pattern: a string representing a regular expression. The regex string should be
        a Java regular expression.
    :param limit: an integer which controls the number of times `pattern` is applied.

        * ``limit > 0``: The resulting array's length will not be more than `limit`, and the
                         resulting array's last entry will contain all input beyond the last
                         matched pattern.
        * ``limit <= 0``: `pattern` will be applied as many times as possible, and the resulting
                          array can be of any size.

    .. versionchanged:: 3.0
       `split` now takes an optional `limit` field. If not provided, default limit value is -1.

    >>> df = spark.createDataFrame([('oneAtwoBthreeC',)], ['s',])
    >>> df.select(split(df.s, '[ABC]', 2).alias('s')).collect()
    [Row(s=[u'one', u'twoBthreeC'])]
    >>> df.select(split(df.s, '[ABC]', -1).alias('s')).collect()
    [Row(s=[u'one', u'two', u'three', u''])]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.split(_to_java_column(str), pattern, limit))
@ignore_unicode_prefix
@since(1.5)
def regexp_extract(str, pattern, idx):
    r"""Extract a specific group matched by a Java regex, from the specified string column.
    If the regex did not match, or the specified group did not match, an empty string is returned.

    >>> df = spark.createDataFrame([('100-200',)], ['str'])
    >>> df.select(regexp_extract('str', r'(\d+)-(\d+)', 1).alias('d')).collect()
    [Row(d=u'100')]
    >>> df = spark.createDataFrame([('foo',)], ['str'])
    >>> df.select(regexp_extract('str', r'(\d+)', 1).alias('d')).collect()
    [Row(d=u'')]
    >>> df = spark.createDataFrame([('aaaac',)], ['str'])
    >>> df.select(regexp_extract('str', '(a+)(b)?(c)', 2).alias('d')).collect()
    [Row(d=u'')]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.regexp_extract(_to_java_column(str), pattern, idx))
@ignore_unicode_prefix
@since(1.5)
def regexp_replace(str, pattern, replacement):
    r"""Replace all substrings of the specified string value that match regexp with rep.

    >>> df = spark.createDataFrame([('100-200',)], ['str'])
    >>> df.select(regexp_replace('str', r'(\d+)', '--').alias('d')).collect()
    [Row(d=u'-----')]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.regexp_replace(_to_java_column(str), pattern, replacement))
@ignore_unicode_prefix
@since(1.5)
def initcap(col):
    """Translates the first letter of each word to upper case in the sentence.

    >>> spark.createDataFrame([('ab cd',)], ['a']).select(initcap("a").alias('v')).collect()
    [Row(v=u'Ab Cd')]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.initcap(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def soundex(col):
    """
    Returns the SoundEx encoding for a string.

    >>> df = spark.createDataFrame([("Peters",),("Uhrbach",)], ['name'])
    >>> df.select(soundex(df.name).alias("soundex")).collect()
    [Row(soundex=u'P362'), Row(soundex=u'U612')]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.soundex(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def bin(col):
    """Returns the string representation of the binary value of the given column.

    >>> df.select(bin(df.age).alias('c')).collect()
    [Row(c=u'10'), Row(c=u'101')]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.bin(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def hex(col):
    """Computes hex value of the given column, which could be :class:`pyspark.sql.types.StringType`,
    :class:`pyspark.sql.types.BinaryType`, :class:`pyspark.sql.types.IntegerType` or
    :class:`pyspark.sql.types.LongType`.

    >>> spark.createDataFrame([('ABC', 3)], ['a', 'b']).select(hex('a'), hex('b')).collect()
    [Row(hex(a)=u'414243', hex(b)=u'3')]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.hex(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def unhex(col):
    """Inverse of hex. Interprets each pair of characters as a hexadecimal number
    and converts to the byte representation of number.

    >>> spark.createDataFrame([('414243',)], ['a']).select(unhex('a')).collect()
    [Row(unhex(a)=bytearray(b'ABC'))]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.unhex(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def length(col):
    """Computes the character length of string data or number of bytes of binary data.
    The length of character data includes the trailing spaces. The length of binary data
    includes binary zeros.

    >>> spark.createDataFrame([('ABC ',)], ['a']).select(length('a').alias('length')).collect()
    [Row(length=4)]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.length(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def translate(srcCol, matching, replace):
    """Translates any character in `srcCol` that appears in `matching` to the
    corresponding character in `replace`. Characters in `replace` correspond
    positionally to the characters in `matching`; the translation occurs whenever
    a character in the string matches a character in `matching`.

    >>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
    ...     .alias('r')).collect()
    [Row(r=u'1a2s3ae')]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.translate(_to_java_column(srcCol), matching, replace))
# ---------------------- Collection functions ------------------------------
@ignore_unicode_prefix
@since(2.0)
def create_map(*cols):
    """Creates a new map column.

    :param cols: list of column names (string) or list of :class:`Column` expressions that are
        grouped as key-value pairs, e.g. (key1, value1, key2, value2, ...).

    >>> df.select(create_map('name', 'age').alias("map")).collect()
    [Row(map={u'Alice': 2}), Row(map={u'Bob': 5})]
    >>> df.select(create_map([df.name, df.age]).alias("map")).collect()
    [Row(map={u'Alice': 2}), Row(map={u'Bob': 5})]
    """
    # A single list/set argument is unpacked as the full sequence of columns.
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        cols = cols[0]
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.map(_to_seq(ctx, cols, _to_java_column)))
@since(2.4)
def map_from_arrays(col1, col2):
    """Creates a new map from two arrays.

    :param col1: name of column containing a set of keys. All elements should not be null
    :param col2: name of column containing a set of values

    >>> df = spark.createDataFrame([([2, 5], ['a', 'b'])], ['k', 'v'])
    >>> df.select(map_from_arrays(df.k, df.v).alias("map")).show()
    +----------------+
    |             map|
    +----------------+
    |[2 -> a, 5 -> b]|
    +----------------+
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.map_from_arrays(_to_java_column(col1), _to_java_column(col2)))
@since(1.4)
def array(*cols):
    """Creates a new array column.

    :param cols: list of column names (string) or list of :class:`Column` expressions that have
        the same data type.

    >>> df.select(array('age', 'age').alias("arr")).collect()
    [Row(arr=[2, 2]), Row(arr=[5, 5])]
    >>> df.select(array([df.age, df.age]).alias("arr")).collect()
    [Row(arr=[2, 2]), Row(arr=[5, 5])]
    """
    # A single list/set argument is unpacked as the full sequence of columns.
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        cols = cols[0]
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.array(_to_seq(ctx, cols, _to_java_column)))
@since(1.5)
def array_contains(col, value):
    """
    Collection function: returns null if the array is null, true if the array contains the
    given value, and false otherwise.

    :param col: name of column containing array
    :param value: value to check for in array

    >>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
    >>> df.select(array_contains(df.data, "a")).collect()
    [Row(array_contains(data, a)=True), Row(array_contains(data, a)=False)]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.array_contains(_to_java_column(col), value))
@since(2.4)
def arrays_overlap(a1, a2):
    """
    Collection function: returns true if the arrays contain any common non-null element; if not,
    returns null if both the arrays are non-empty and any of them contains a null element; returns
    false otherwise.

    >>> df = spark.createDataFrame([(["a", "b"], ["b", "c"]), (["a"], ["b", "c"])], ['x', 'y'])
    >>> df.select(arrays_overlap(df.x, df.y).alias("overlap")).collect()
    [Row(overlap=True), Row(overlap=False)]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.arrays_overlap(_to_java_column(a1), _to_java_column(a2)))
@since(2.4)
def slice(x, start, length):
    """
    Collection function: returns an array containing all the elements in `x` from index `start`
    (array indices start at 1, or from the end if `start` is negative) with the specified `length`.

    >>> df = spark.createDataFrame([([1, 2, 3],), ([4, 5],)], ['x'])
    >>> df.select(slice(df.x, 2, 2).alias("sliced")).collect()
    [Row(sliced=[2, 3]), Row(sliced=[5])]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.slice(_to_java_column(x), start, length))
@ignore_unicode_prefix
@since(2.4)
def array_join(col, delimiter, null_replacement=None):
    """
    Concatenates the elements of `column` using the `delimiter`. Null values are replaced with
    `null_replacement` if set, otherwise they are ignored.

    >>> df = spark.createDataFrame([(["a", "b", "c"],), (["a", None],)], ['data'])
    >>> df.select(array_join(df.data, ",").alias("joined")).collect()
    [Row(joined=u'a,b,c'), Row(joined=u'a')]
    >>> df.select(array_join(df.data, ",", "NULL").alias("joined")).collect()
    [Row(joined=u'a,b,c'), Row(joined=u'a,NULL')]
    """
    # The JVM side exposes two overloads; pick by whether a null replacement
    # was supplied.
    jfns = SparkContext._active_spark_context._jvm.functions
    if null_replacement is not None:
        return Column(jfns.array_join(_to_java_column(col), delimiter, null_replacement))
    return Column(jfns.array_join(_to_java_column(col), delimiter))
@since(1.5)
@ignore_unicode_prefix
def concat(*cols):
    """
    Concatenates multiple input columns together into a single column.
    The function works with strings, binary and compatible array columns.

    >>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
    >>> df.select(concat(df.s, df.d).alias('s')).collect()
    [Row(s=u'abcd123')]
    >>> df = spark.createDataFrame([([1, 2], [3, 4], [5]), ([1, 2], None, [3])], ['a', 'b', 'c'])
    >>> df.select(concat(df.a, df.b, df.c).alias("arr")).collect()
    [Row(arr=[1, 2, 3, 4, 5]), Row(arr=None)]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.concat(_to_seq(ctx, cols, _to_java_column)))
@since(2.4)
def array_position(col, value):
    """
    Collection function: Locates the position of the first occurrence of the given value
    in the given array. Returns null if either of the arguments are null.

    .. note:: The position is not zero based, but 1 based index. Returns 0 if the given
        value could not be found in the array.

    >>> df = spark.createDataFrame([(["c", "b", "a"],), ([],)], ['data'])
    >>> df.select(array_position(df.data, "a")).collect()
    [Row(array_position(data, a)=3), Row(array_position(data, a)=0)]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.array_position(_to_java_column(col), value))
@ignore_unicode_prefix
@since(2.4)
def element_at(col, extraction):
    """
    Collection function: Returns element of array at given index in extraction if col is array.
    Returns value for the given key in extraction if col is map.

    :param col: name of column containing array or map
    :param extraction: index to check for in array or key to check for in map

    .. note:: The position is not zero based, but 1 based index.

    >>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
    >>> df.select(element_at(df.data, 1)).collect()
    [Row(element_at(data, 1)=u'a'), Row(element_at(data, 1)=None)]
    >>> df = spark.createDataFrame([({"a": 1.0, "b": 2.0},), ({},)], ['data'])
    >>> df.select(element_at(df.data, lit("a"))).collect()
    [Row(element_at(data, a)=1.0), Row(element_at(data, a)=None)]
    """
    # Wrap the extraction in a literal column before handing it to the JVM.
    jvm = SparkContext._active_spark_context._jvm
    jcol = _to_java_column(col)
    jext = lit(extraction)._jc  # noqa: F821 'lit' is dynamically defined.
    return Column(jvm.functions.element_at(jcol, jext))
@since(2.4)
def array_remove(col, element):
    """
    Collection function: Remove all elements that equal to element from the given array.

    :param col: name of column containing array
    :param element: element to be removed from the array

    >>> df = spark.createDataFrame([([1, 2, 3, 1, 1],), ([],)], ['data'])
    >>> df.select(array_remove(df.data, 1)).collect()
    [Row(array_remove(data, 1)=[2, 3]), Row(array_remove(data, 1)=[])]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.array_remove(_to_java_column(col), element))
@since(2.4)
def array_distinct(col):
    """
    Collection function: removes duplicate values from the array.

    :param col: name of column or expression

    >>> df = spark.createDataFrame([([1, 2, 3, 2],), ([4, 5, 5, 4],)], ['data'])
    >>> df.select(array_distinct(df.data)).collect()
    [Row(array_distinct(data)=[1, 2, 3]), Row(array_distinct(data)=[4, 5])]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.array_distinct(_to_java_column(col)))
@ignore_unicode_prefix
@since(2.4)
def array_intersect(col1, col2):
    """
    Collection function: returns an array of the elements in the intersection of col1 and col2,
    without duplicates.

    :param col1: name of column containing array
    :param col2: name of column containing array

    >>> from pyspark.sql import Row
    >>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
    >>> df.select(array_intersect(df.c1, df.c2)).collect()
    [Row(array_intersect(c1, c2)=[u'a', u'c'])]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.array_intersect(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(2.4)
def array_union(col1, col2):
    """
    Collection function: returns an array of the elements in the union of col1 and col2,
    without duplicates.

    :param col1: name of column containing array
    :param col2: name of column containing array

    >>> from pyspark.sql import Row
    >>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
    >>> df.select(array_union(df.c1, df.c2)).collect()
    [Row(array_union(c1, c2)=[u'b', u'a', u'c', u'd', u'f'])]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.array_union(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(2.4)
def array_except(col1, col2):
    """
    Collection function: returns an array of the elements in col1 but not in col2,
    without duplicates.

    :param col1: name of column containing array
    :param col2: name of column containing array

    >>> from pyspark.sql import Row
    >>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
    >>> df.select(array_except(df.c1, df.c2)).collect()
    [Row(array_except(c1, c2)=[u'b'])]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.array_except(_to_java_column(col1), _to_java_column(col2)))
@since(1.4)
def explode(col):
    """
    Returns a new row for each element in the given array or map.
    Uses the default column name `col` for elements in the array and
    `key` and `value` for elements in the map unless specified otherwise.

    >>> from pyspark.sql import Row
    >>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
    >>> eDF.select(explode(eDF.intlist).alias("anInt")).collect()
    [Row(anInt=1), Row(anInt=2), Row(anInt=3)]

    >>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
    +---+-----+
    |key|value|
    +---+-----+
    |  a|    b|
    +---+-----+
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.explode(_to_java_column(col)))
@since(2.1)
def posexplode(col):
    """
    Returns a new row for each element with position in the given array or map.
    Uses the default column name `pos` for position, and `col` for elements in the
    array and `key` and `value` for elements in the map unless specified otherwise.

    >>> from pyspark.sql import Row
    >>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
    >>> eDF.select(posexplode(eDF.intlist)).collect()
    [Row(pos=0, col=1), Row(pos=1, col=2), Row(pos=2, col=3)]

    >>> eDF.select(posexplode(eDF.mapfield)).show()
    +---+---+-----+
    |pos|key|value|
    +---+---+-----+
    |  0|  a|    b|
    +---+---+-----+
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.posexplode(_to_java_column(col)))
@since(2.3)
def explode_outer(col):
    """
    Returns a new row for each element in the given array or map.
    Unlike explode, if the array/map is null or empty then null is produced.
    Uses the default column name `col` for elements in the array and
    `key` and `value` for elements in the map unless specified otherwise.

    >>> df = spark.createDataFrame(
    ...     [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
    ...     ("id", "an_array", "a_map")
    ... )
    >>> df.select("id", "an_array", explode_outer("a_map")).show()
    +---+----------+----+-----+
    | id|  an_array| key|value|
    +---+----------+----+-----+
    |  1|[foo, bar]|   x|  1.0|
    |  2|        []|null| null|
    |  3|      null|null| null|
    +---+----------+----+-----+

    >>> df.select("id", "a_map", explode_outer("an_array")).show()
    +---+----------+----+
    | id|     a_map| col|
    +---+----------+----+
    |  1|[x -> 1.0]| foo|
    |  1|[x -> 1.0]| bar|
    |  2|        []|null|
    |  3|      null|null|
    +---+----------+----+
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.explode_outer(_to_java_column(col)))
@since(2.3)
def posexplode_outer(col):
    """
    Returns a new row for each element with position in the given array or map.
    Unlike posexplode, if the array/map is null or empty then the row (null, null) is produced.
    Uses the default column name `pos` for position, and `col` for elements in the
    array and `key` and `value` for elements in the map unless specified otherwise.

    >>> df = spark.createDataFrame(
    ...     [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
    ...     ("id", "an_array", "a_map")
    ... )
    >>> df.select("id", "an_array", posexplode_outer("a_map")).show()
    +---+----------+----+----+-----+
    | id|  an_array| pos| key|value|
    +---+----------+----+----+-----+
    |  1|[foo, bar]|   0|   x|  1.0|
    |  2|        []|null|null| null|
    |  3|      null|null|null| null|
    +---+----------+----+----+-----+

    >>> df.select("id", "a_map", posexplode_outer("an_array")).show()
    +---+----------+----+----+
    | id|     a_map| pos| col|
    +---+----------+----+----+
    |  1|[x -> 1.0]|   0| foo|
    |  1|[x -> 1.0]|   1| bar|
    |  2|        []|null|null|
    |  3|      null|null|null|
    +---+----------+----+----+
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.posexplode_outer(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.6)
def get_json_object(col, path):
    """
    Extracts json object from a json string based on json path specified, and returns json string
    of the extracted json object. It will return null if the input json string is invalid.

    :param col: string column in json format
    :param path: path to the json object to extract

    >>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
    >>> df = spark.createDataFrame(data, ("key", "jstring"))
    >>> df.select(df.key, get_json_object(df.jstring, '$.f1').alias("c0"), \\
    ...                   get_json_object(df.jstring, '$.f2').alias("c1") ).collect()
    [Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
    """
    jvm = SparkContext._active_spark_context._jvm
    return Column(jvm.functions.get_json_object(_to_java_column(col), path))
@ignore_unicode_prefix
@since(1.6)
def json_tuple(col, *fields):
    """Creates a new row for a json column according to the given field names.

    :param col: string column in json format
    :param fields: list of fields to extract

    >>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
    >>> df = spark.createDataFrame(data, ("key", "jstring"))
    >>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect()
    [Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
    """
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.json_tuple(_to_java_column(col), _to_seq(ctx, fields)))
@ignore_unicode_prefix
@since(2.1)
def from_json(col, schema, options=None):
    """
    Parses a column containing a JSON string into a :class:`MapType` with :class:`StringType`
    as keys type, :class:`StructType` or :class:`ArrayType` with
    the specified schema. Returns `null`, in the case of an unparseable string.

    :param col: string column in json format
    :param schema: a StructType or ArrayType of StructType to use when parsing the json column.
    :param options: options to control parsing. accepts the same options as the json datasource

    .. note:: Since Spark 2.3, the DDL-formatted string or a JSON format string is also
        supported for ``schema``.

    >>> from pyspark.sql.types import *
    >>> data = [(1, '''{"a": 1}''')]
    >>> schema = StructType([StructField("a", IntegerType())])
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(from_json(df.value, schema).alias("json")).collect()
    [Row(json=Row(a=1))]
    >>> df.select(from_json(df.value, "a INT").alias("json")).collect()
    [Row(json=Row(a=1))]
    >>> df.select(from_json(df.value, "MAP<STRING,INT>").alias("json")).collect()
    [Row(json={u'a': 1})]
    >>> data = [(1, '''[{"a": 1}]''')]
    >>> schema = ArrayType(StructType([StructField("a", IntegerType())]))
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(from_json(df.value, schema).alias("json")).collect()
    [Row(json=[Row(a=1)])]
    >>> schema = schema_of_json(lit('''{"a": 0}'''))
    >>> df.select(from_json(df.value, schema).alias("json")).collect()
    [Row(json=Row(a=None))]
    >>> data = [(1, '''[1, 2, 3]''')]
    >>> schema = ArrayType(IntegerType())
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(from_json(df.value, schema).alias("json")).collect()
    [Row(json=[1, 2, 3])]
    """
    # Use a None sentinel instead of a mutable `{}` default: a shared default
    # dict could be mutated by one caller and leak into every later call.
    if options is None:
        options = {}
    sc = SparkContext._active_spark_context
    # Accept three schema spellings: a DataType (serialized to its JSON
    # representation), a Column (e.g. from schema_of_json), or a DDL string
    # passed through as-is.
    if isinstance(schema, DataType):
        schema = schema.json()
    elif isinstance(schema, Column):
        schema = _to_java_column(schema)
    jc = sc._jvm.functions.from_json(_to_java_column(col), schema, _options_to_str(options))
    return Column(jc)
@ignore_unicode_prefix
@since(2.1)
def to_json(col, options=None):
    """
    Converts a column containing a :class:`StructType`, :class:`ArrayType` or a :class:`MapType`
    into a JSON string. Throws an exception, in the case of an unsupported type.

    :param col: name of column containing a struct, an array or a map.
    :param options: options to control converting. accepts the same options as the JSON datasource.
                    Additionally the function supports the `pretty` option which enables
                    pretty JSON generation.

    >>> from pyspark.sql import Row
    >>> from pyspark.sql.types import *
    >>> data = [(1, Row(name='Alice', age=2))]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(to_json(df.value).alias("json")).collect()
    [Row(json=u'{"age":2,"name":"Alice"}')]
    >>> data = [(1, [Row(name='Alice', age=2), Row(name='Bob', age=3)])]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(to_json(df.value).alias("json")).collect()
    [Row(json=u'[{"age":2,"name":"Alice"},{"age":3,"name":"Bob"}]')]
    >>> data = [(1, {"name": "Alice"})]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(to_json(df.value).alias("json")).collect()
    [Row(json=u'{"name":"Alice"}')]
    >>> data = [(1, [{"name": "Alice"}, {"name": "Bob"}])]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(to_json(df.value).alias("json")).collect()
    [Row(json=u'[{"name":"Alice"},{"name":"Bob"}]')]
    >>> data = [(1, ["Alice", "Bob"])]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(to_json(df.value).alias("json")).collect()
    [Row(json=u'["Alice","Bob"]')]
    """
    # Use a None sentinel instead of a mutable `{}` default: a shared default
    # dict could be mutated by one caller and leak into every later call.
    if options is None:
        options = {}
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.to_json(_to_java_column(col), _options_to_str(options))
    return Column(jc)
@ignore_unicode_prefix
@since(2.4)
def schema_of_json(json, options=None):
    """
    Parses a JSON string and infers its schema in DDL format.

    :param json: a JSON string or a string literal containing a JSON string.
    :param options: options to control parsing. accepts the same options as the JSON datasource.
        ``None`` (the default) means no options.

    .. versionchanged:: 3.0
       It accepts `options` parameter to control schema inferring.

    >>> df = spark.range(1)
    >>> df.select(schema_of_json(lit('{"a": 0}')).alias("json")).collect()
    [Row(json=u'struct<a:bigint>')]
    >>> schema = schema_of_json('{a: 1}', {'allowUnquotedFieldNames':'true'})
    >>> df.select(schema.alias("json")).collect()
    [Row(json=u'struct<a:bigint>')]

    :raises TypeError: if ``json`` is neither a string nor a :class:`Column`.
    """
    # Avoid the mutable-default-argument pitfall; behavior is unchanged.
    options = options or {}
    if isinstance(json, basestring):
        # A plain Python string is wrapped as a literal column.
        col = _create_column_from_literal(json)
    elif isinstance(json, Column):
        col = _to_java_column(json)
    else:
        raise TypeError("schema argument should be a column or string")
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.schema_of_json(col, _options_to_str(options))
    return Column(jc)
@ignore_unicode_prefix
@since(3.0)
def schema_of_csv(csv, options=None):
    """
    Parses a CSV string and infers its schema in DDL format.

    :param csv: a CSV string or a string literal containing a CSV string.
    :param options: options to control parsing. accepts the same options as the CSV datasource.
        ``None`` (the default) means no options.

    >>> df = spark.range(1)
    >>> df.select(schema_of_csv(lit('1|a'), {'sep':'|'}).alias("csv")).collect()
    [Row(csv=u'struct<_c0:int,_c1:string>')]
    >>> df.select(schema_of_csv('1|a', {'sep':'|'}).alias("csv")).collect()
    [Row(csv=u'struct<_c0:int,_c1:string>')]

    :raises TypeError: if ``csv`` is neither a string nor a :class:`Column`.
    """
    # Avoid the mutable-default-argument pitfall; behavior is unchanged.
    options = options or {}
    if isinstance(csv, basestring):
        # A plain Python string is wrapped as a literal column.
        col = _create_column_from_literal(csv)
    elif isinstance(csv, Column):
        col = _to_java_column(csv)
    else:
        raise TypeError("schema argument should be a column or string")
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.schema_of_csv(col, _options_to_str(options))
    return Column(jc)
@ignore_unicode_prefix
@since(3.0)
def to_csv(col, options=None):
    """
    Converts a column containing a :class:`StructType` into a CSV string.
    Throws an exception, in the case of an unsupported type.

    :param col: name of column containing a struct.
    :param options: options to control converting. accepts the same options as the CSV datasource.
        ``None`` (the default) means no options.

    >>> from pyspark.sql import Row
    >>> data = [(1, Row(name='Alice', age=2))]
    >>> df = spark.createDataFrame(data, ("key", "value"))
    >>> df.select(to_csv(df.value).alias("csv")).collect()
    [Row(csv=u'2,Alice')]
    """
    # Avoid the mutable-default-argument pitfall; behavior is unchanged.
    options = options or {}
    sc = SparkContext._active_spark_context
    jc = sc._jvm.functions.to_csv(_to_java_column(col), _options_to_str(options))
    return Column(jc)
@since(1.5)
def size(col):
    """
    Collection function: length of the array or map stored in the column.

    :param col: name of column or expression

    >>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data'])
    >>> df.select(size(df.data)).collect()
    [Row(size(data)=3), Row(size(data)=1), Row(size(data)=0)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.size(_to_java_column(col))
    return Column(jc)
@since(2.4)
def array_min(col):
    """
    Collection function: minimum value contained in the array.

    :param col: name of column or expression

    >>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
    >>> df.select(array_min(df.data).alias('min')).collect()
    [Row(min=1), Row(min=-1)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.array_min(_to_java_column(col))
    return Column(jc)
@since(2.4)
def array_max(col):
    """
    Collection function: maximum value contained in the array.

    :param col: name of column or expression

    >>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
    >>> df.select(array_max(df.data).alias('max')).collect()
    [Row(max=3), Row(max=10)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.array_max(_to_java_column(col))
    return Column(jc)
@since(1.5)
def sort_array(col, asc=True):
    """
    Collection function: sorts the input array according to the natural ordering
    of its elements, ascending or descending. Null elements are placed at the
    beginning of the result in ascending order, and at the end in descending
    order.

    :param col: name of column or expression

    >>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
    >>> df.select(sort_array(df.data).alias('r')).collect()
    [Row(r=[None, 1, 2, 3]), Row(r=[1]), Row(r=[])]
    >>> df.select(sort_array(df.data, asc=False).alias('r')).collect()
    [Row(r=[3, 2, 1, None]), Row(r=[1]), Row(r=[])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.sort_array(_to_java_column(col), asc)
    return Column(jc)
@since(2.4)
def array_sort(col):
    """
    Collection function: sorts the input array in ascending order. Elements must
    be orderable; null elements are placed at the end of the result.

    :param col: name of column or expression

    >>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
    >>> df.select(array_sort(df.data).alias('r')).collect()
    [Row(r=[1, 2, 3, None]), Row(r=[1]), Row(r=[])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.array_sort(_to_java_column(col))
    return Column(jc)
@since(2.4)
def shuffle(col):
    """
    Collection function: produces a random permutation of the given array.

    .. note:: The function is non-deterministic.

    :param col: name of column or expression

    >>> df = spark.createDataFrame([([1, 20, 3, 5],), ([1, 20, None, 3],)], ['data'])
    >>> df.select(shuffle(df.data).alias('s')).collect()  # doctest: +SKIP
    [Row(s=[3, 1, 5, 20]), Row(s=[20, None, 3, 1])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.shuffle(_to_java_column(col))
    return Column(jc)
@since(1.5)
@ignore_unicode_prefix
def reverse(col):
    """
    Collection function: reverses a string, or returns an array with its
    elements in reverse order.

    :param col: name of column or expression

    >>> df = spark.createDataFrame([('Spark SQL',)], ['data'])
    >>> df.select(reverse(df.data).alias('s')).collect()
    [Row(s=u'LQS krapS')]
    >>> df = spark.createDataFrame([([2, 1, 3],) ,([1],) ,([],)], ['data'])
    >>> df.select(reverse(df.data).alias('r')).collect()
    [Row(r=[3, 1, 2]), Row(r=[1]), Row(r=[])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.reverse(_to_java_column(col))
    return Column(jc)
@since(2.4)
def flatten(col):
    """
    Collection function: merges an array of arrays into a single array. For
    nesting deeper than two levels, only one level of nesting is removed.

    :param col: name of column or expression

    >>> df = spark.createDataFrame([([[1, 2, 3], [4, 5], [6]],), ([None, [4, 5]],)], ['data'])
    >>> df.select(flatten(df.data).alias('r')).collect()
    [Row(r=[1, 2, 3, 4, 5, 6]), Row(r=None)]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.flatten(_to_java_column(col))
    return Column(jc)
@since(2.3)
def map_keys(col):
    """
    Collection function: unordered array holding the keys of the map.

    :param col: name of column or expression

    >>> from pyspark.sql.functions import map_keys
    >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
    >>> df.select(map_keys("data").alias("keys")).show()
    +------+
    |  keys|
    +------+
    |[1, 2]|
    +------+
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.map_keys(_to_java_column(col))
    return Column(jc)
@since(2.3)
def map_values(col):
    """
    Collection function: unordered array holding the values of the map.

    :param col: name of column or expression

    >>> from pyspark.sql.functions import map_values
    >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
    >>> df.select(map_values("data").alias("values")).show()
    +------+
    |values|
    +------+
    |[a, b]|
    +------+
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.map_values(_to_java_column(col))
    return Column(jc)
@since(3.0)
def map_entries(col):
    """
    Collection function: unordered array of all entries in the given map.

    :param col: name of column or expression

    >>> from pyspark.sql.functions import map_entries
    >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
    >>> df.select(map_entries("data").alias("entries")).show()
    +----------------+
    |         entries|
    +----------------+
    |[[1, a], [2, b]]|
    +----------------+
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.map_entries(_to_java_column(col))
    return Column(jc)
@since(2.4)
def map_from_entries(col):
    """
    Collection function: builds a map out of the given array of entries.

    :param col: name of column or expression

    >>> from pyspark.sql.functions import map_from_entries
    >>> df = spark.sql("SELECT array(struct(1, 'a'), struct(2, 'b')) as data")
    >>> df.select(map_from_entries("data").alias("map")).show()
    +----------------+
    |             map|
    +----------------+
    |[1 -> a, 2 -> b]|
    +----------------+
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.map_from_entries(_to_java_column(col))
    return Column(jc)
@ignore_unicode_prefix
@since(2.4)
def array_repeat(col, count):
    """
    Collection function: builds an array that repeats the column value
    ``count`` times.

    >>> df = spark.createDataFrame([('ab',)], ['data'])
    >>> df.select(array_repeat(df.data, 3).alias('r')).collect()
    [Row(r=[u'ab', u'ab', u'ab'])]
    """
    ctx = SparkContext._active_spark_context
    # ``count`` may be either a Column expression or a plain Python int.
    if isinstance(count, Column):
        count = _to_java_column(count)
    return Column(ctx._jvm.functions.array_repeat(_to_java_column(col), count))
@since(2.4)
def arrays_zip(*cols):
    """
    Collection function: merges arrays into an array of structs, where the N-th
    struct holds the N-th values of all input arrays.

    :param cols: columns of arrays to be merged.

    >>> from pyspark.sql.functions import arrays_zip
    >>> df = spark.createDataFrame([(([1, 2, 3], [2, 3, 4]))], ['vals1', 'vals2'])
    >>> df.select(arrays_zip(df.vals1, df.vals2).alias('zipped')).collect()
    [Row(zipped=[Row(vals1=1, vals2=2), Row(vals1=2, vals2=3), Row(vals1=3, vals2=4)])]
    """
    ctx = SparkContext._active_spark_context
    jc = ctx._jvm.functions.arrays_zip(_to_seq(ctx, cols, _to_java_column))
    return Column(jc)
@since(2.4)
def map_concat(*cols):
    """Returns the union of all the given maps.

    :param cols: list of column names (string) or list of :class:`Column` expressions

    >>> from pyspark.sql.functions import map_concat
    >>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as map1, map(3, 'c', 1, 'd') as map2")
    >>> df.select(map_concat("map1", "map2").alias("map3")).show(truncate=False)
    +------------------------+
    |map3                    |
    +------------------------+
    |[1 -> d, 2 -> b, 3 -> c]|
    +------------------------+
    """
    ctx = SparkContext._active_spark_context
    # A single list/set argument is treated as the whole collection of columns.
    if len(cols) == 1 and isinstance(cols[0], (list, set)):
        cols = cols[0]
    return Column(ctx._jvm.functions.map_concat(_to_seq(ctx, cols, _to_java_column)))
@since(2.4)
def sequence(start, stop, step=None):
    """
    Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
    When `step` is omitted, the increment is 1 if `start` is less than or equal
    to `stop`, and -1 otherwise.

    >>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
    >>> df1.select(sequence('C1', 'C2').alias('r')).collect()
    [Row(r=[-2, -1, 0, 1, 2])]
    >>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
    >>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
    [Row(r=[4, 2, 0, -2, -4])]
    """
    ctx = SparkContext._active_spark_context
    # Forward to the 2-arg or 3-arg JVM overload depending on ``step``.
    args = [_to_java_column(start), _to_java_column(stop)]
    if step is not None:
        args.append(_to_java_column(step))
    return Column(ctx._jvm.functions.sequence(*args))
@ignore_unicode_prefix
@since(3.0)
def from_csv(col, schema, options=None):
    """
    Parses a column containing a CSV string to a row with the specified schema.
    Returns `null`, in the case of an unparseable string.

    :param col: string column in CSV format
    :param schema: a string with schema in DDL format to use when parsing the CSV column.
    :param options: options to control parsing. accepts the same options as the CSV datasource.
        ``None`` (the default) means no options.

    >>> data = [("1,2,3",)]
    >>> df = spark.createDataFrame(data, ("value",))
    >>> df.select(from_csv(df.value, "a INT, b INT, c INT").alias("csv")).collect()
    [Row(csv=Row(a=1, b=2, c=3))]
    >>> value = data[0][0]
    >>> df.select(from_csv(df.value, schema_of_csv(value)).alias("csv")).collect()
    [Row(csv=Row(_c0=1, _c1=2, _c2=3))]
    >>> data = [("   abc",)]
    >>> df = spark.createDataFrame(data, ("value",))
    >>> options = {'ignoreLeadingWhiteSpace': True}
    >>> df.select(from_csv(df.value, "s string", options).alias("csv")).collect()
    [Row(csv=Row(s=u'abc'))]

    :raises TypeError: if ``schema`` is neither a string nor a :class:`Column`.
    """
    # Avoid the mutable-default-argument pitfall; behavior is unchanged.
    options = options or {}
    sc = SparkContext._active_spark_context
    if isinstance(schema, basestring):
        # A DDL string schema is shipped to the JVM as a literal column.
        schema = _create_column_from_literal(schema)
    elif isinstance(schema, Column):
        schema = _to_java_column(schema)
    else:
        raise TypeError("schema argument should be a column or string")
    jc = sc._jvm.functions.from_csv(_to_java_column(col), schema, _options_to_str(options))
    return Column(jc)
# ---------------------------- User Defined Function ----------------------------------
class PandasUDFType(object):
    """Pandas UDF Types. See :meth:`pyspark.sql.functions.pandas_udf`.
    """
    # One or more pandas.Series -> a pandas.Series of the same length.
    SCALAR = PythonEvalType.SQL_SCALAR_PANDAS_UDF
    # Like SCALAR, but the function takes an iterator of batches and yields
    # output batches (useful for per-partition state, e.g. loading a model).
    SCALAR_ITER = PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF
    # pandas.DataFrame -> pandas.DataFrame; used with GroupedData.apply.
    GROUPED_MAP = PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF
    # Presumably the cogrouped counterpart of GROUPED_MAP -- not documented in
    # this module's docstrings; verify against the cogroup API before relying on it.
    COGROUPED_MAP = PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF
    # One or more pandas.Series -> a scalar; used with GroupedData.agg / Window.
    GROUPED_AGG = PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF
    # Iterator of pandas.DataFrames -> iterator of pandas.DataFrames; used with
    # DataFrame.mapInPandas.
    MAP_ITER = PythonEvalType.SQL_MAP_PANDAS_ITER_UDF
@since(1.3)
def udf(f=None, returnType=StringType()):
    """Creates a user defined function (UDF).

    .. note:: The user-defined functions are considered deterministic by default. Due to
        optimization, duplicate invocations may be eliminated or the function may even be invoked
        more times than it is present in the query. If your function is not deterministic, call
        `asNondeterministic` on the user defined function. E.g.:

    >>> from pyspark.sql.types import IntegerType
    >>> import random
    >>> random_udf = udf(lambda: int(random.random() * 100), IntegerType()).asNondeterministic()

    .. note:: The user-defined functions do not support conditional expressions or short circuiting
        in boolean expressions and it ends up with being executed all internally. If the functions
        can fail on special rows, the workaround is to incorporate the condition into the functions.

    .. note:: The user-defined functions do not take keyword arguments on the calling side.

    :param f: python function if used as a standalone function
    :param returnType: the return type of the user-defined function. The value can be either a
        :class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.

    >>> from pyspark.sql.types import IntegerType
    >>> slen = udf(lambda s: len(s), IntegerType())
    >>> @udf
    ... def to_upper(s):
    ...     if s is not None:
    ...         return s.upper()
    ...
    >>> @udf(returnType=IntegerType())
    ... def add_one(x):
    ...     if x is not None:
    ...         return x + 1
    ...
    >>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age"))
    >>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show()
    +----------+--------------+------------+
    |slen(name)|to_upper(name)|add_one(age)|
    +----------+--------------+------------+
    |         8|      JOHN DOE|          22|
    +----------+--------------+------------+
    """
    # The following table shows most of Python data and SQL type conversions in normal UDFs that
    # are not yet visible to the user. Some of behaviors are buggy and might be changed in the near
    # future. The table might have to be eventually documented externally.
    # Please see SPARK-28131's PR to see the codes in order to generate the table below.
    #
    # +-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+  # noqa
    # |SQL Type \ Python Value(Type)|None(NoneType)|True(bool)|1(int)|         a(str)|    1970-01-01(date)|1970-01-01 00:00:00(datetime)|1.0(float)|array('i', [1])(array)|[1](list)|         (1,)(tuple)|bytearray(b'ABC')(bytearray)|  1(Decimal)|{'a': 1}(dict)|Row(kwargs=1)(Row)|Row(namedtuple=1)(Row)|  # noqa
    # +-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+  # noqa
    # |                      boolean|          None|      True|  None|           None|                None|                         None|      None|                  None|     None|                None|                        None|        None|          None|                 X|                     X|  # noqa
    # |                      tinyint|          None|      None|     1|           None|                None|                         None|      None|                  None|     None|                None|                        None|        None|          None|                 X|                     X|  # noqa
    # |                     smallint|          None|      None|     1|           None|                None|                         None|      None|                  None|     None|                None|                        None|        None|          None|                 X|                     X|  # noqa
    # |                          int|          None|      None|     1|           None|                None|                         None|      None|                  None|     None|                None|                        None|        None|          None|                 X|                     X|  # noqa
    # |                       bigint|          None|      None|     1|           None|                None|                         None|      None|                  None|     None|                None|                        None|        None|          None|                 X|                     X|  # noqa
    # |                       string|          None|    'true'|   '1'|            'a'|'java.util.Gregor...|         'java.util.Gregor...|     '1.0'|         '[I@66cbb73a'|    '[1]'|'[Ljava.lang.Obje...|               '[B@5a51eb1a'|         '1'|       '{a=1}'|                 X|                     X|  # noqa
    # |                         date|          None|         X|     X|              X|datetime.date(197...|         datetime.date(197...|         X|                     X|        X|                   X|                           X|           X|             X|                 X|                     X|  # noqa
    # |                    timestamp|          None|         X|     X|              X|                   X|         datetime.datetime...|         X|                     X|        X|                   X|                           X|           X|             X|                 X|                     X|  # noqa
    # |                        float|          None|      None|  None|           None|                None|                         None|       1.0|                  None|     None|                None|                        None|        None|          None|                 X|                     X|  # noqa
    # |                       double|          None|      None|  None|           None|                None|                         None|       1.0|                  None|     None|                None|                        None|        None|          None|                 X|                     X|  # noqa
    # |                   array<int>|          None|      None|  None|           None|                None|                         None|      None|                   [1]|      [1]|                 [1]|                [65, 66, 67]|        None|          None|                 X|                     X|  # noqa
    # |                       binary|          None|      None|  None|bytearray(b'a')|                None|                         None|      None|                  None|     None|                None|           bytearray(b'ABC')|        None|          None|                 X|                     X|  # noqa
    # |                decimal(10,0)|          None|      None|  None|           None|                None|                         None|      None|                  None|     None|                None|                        None|Decimal('1')|          None|                 X|                     X|  # noqa
    # |              map<string,int>|          None|      None|  None|           None|                None|                         None|      None|                  None|     None|                None|                        None|        None|      {'a': 1}|                 X|                     X|  # noqa
    # |               struct<_1:int>|          None|         X|     X|              X|                   X|                            X|         X|                     X|Row(_1=1)|           Row(_1=1)|                           X|           X|  Row(_1=None)|         Row(_1=1)|             Row(_1=1)|  # noqa
    # +-----------------------------+--------------+----------+------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+----------------------------+------------+--------------+------------------+----------------------+  # noqa
    #
    # Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be
    # used in `returnType`.
    # Note: The values inside of the table are generated by `repr`.
    # Note: 'X' means it throws an exception during the conversion.
    # Note: Python 3.7.3 is used.

    # decorator @udf, @udf(), @udf(dataType())
    # ``basestring`` (not ``str``) so that a Python 2 unicode DDL type string
    # passed as the decorator argument is recognized as a returnType too,
    # consistent with the basestring checks elsewhere in this module
    # (schema_of_json, schema_of_csv, from_csv).
    if f is None or isinstance(f, (basestring, DataType)):
        # If DataType has been passed as a positional argument
        # for decorator use it as a returnType
        return_type = f or returnType
        return functools.partial(_create_udf, returnType=return_type,
                                 evalType=PythonEvalType.SQL_BATCHED_UDF)
    else:
        return _create_udf(f=f, returnType=returnType,
                           evalType=PythonEvalType.SQL_BATCHED_UDF)
@since(2.3)
def pandas_udf(f=None, returnType=None, functionType=None):
"""
Creates a vectorized user defined function (UDF).
:param f: user-defined function. A python function if used as a standalone function
:param returnType: the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
:param functionType: an enum value in :class:`pyspark.sql.functions.PandasUDFType`.
Default: SCALAR.
The function type of the UDF can be one of the following:
1. SCALAR
A scalar UDF defines a transformation: One or more `pandas.Series` -> A `pandas.Series`.
The length of the returned `pandas.Series` must be of the same as the input `pandas.Series`.
If the return type is :class:`StructType`, the returned value should be a `pandas.DataFrame`.
:class:`MapType`, nested :class:`StructType` are currently not supported as output types.
Scalar UDFs can be used with :meth:`pyspark.sql.DataFrame.withColumn` and
:meth:`pyspark.sql.DataFrame.select`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> from pyspark.sql.types import IntegerType, StringType
>>> slen = pandas_udf(lambda s: s.str.len(), IntegerType()) # doctest: +SKIP
>>> @pandas_udf(StringType()) # doctest: +SKIP
... def to_upper(s):
... return s.str.upper()
...
>>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP
... def add_one(x):
... return x + 1
...
>>> df = spark.createDataFrame([(1, "John Doe", 21)],
... ("id", "name", "age")) # doctest: +SKIP
>>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")) \\
... .show() # doctest: +SKIP
+----------+--------------+------------+
|slen(name)|to_upper(name)|add_one(age)|
+----------+--------------+------------+
| 8| JOHN DOE| 22|
+----------+--------------+------------+
>>> @pandas_udf("first string, last string") # doctest: +SKIP
... def split_expand(n):
... return n.str.split(expand=True)
>>> df.select(split_expand("name")).show() # doctest: +SKIP
+------------------+
|split_expand(name)|
+------------------+
| [John, Doe]|
+------------------+
.. note:: The length of `pandas.Series` within a scalar UDF is not that of the whole input
column, but is the length of an internal batch used for each call to the function.
Therefore, this can be used, for example, to ensure the length of each returned
`pandas.Series`, and can not be used as the column length.
2. SCALAR_ITER
A scalar iterator UDF is semantically the same as the scalar Pandas UDF above except that the
wrapped Python function takes an iterator of batches as input instead of a single batch and,
instead of returning a single output batch, it yields output batches or explicitly returns an
generator or an iterator of output batches.
It is useful when the UDF execution requires initializing some state, e.g., loading a machine
learning model file to apply inference to every input batch.
.. note:: It is not guaranteed that one invocation of a scalar iterator UDF will process all
batches from one partition, although it is currently implemented this way.
Your code shall not rely on this behavior because it might change in the future for
further optimization, e.g., one invocation processes multiple partitions.
Scalar iterator UDFs are used with :meth:`pyspark.sql.DataFrame.withColumn` and
:meth:`pyspark.sql.DataFrame.select`.
>>> import pandas as pd # doctest: +SKIP
>>> from pyspark.sql.functions import col, pandas_udf, struct, PandasUDFType
>>> pdf = pd.DataFrame([1, 2, 3], columns=["x"]) # doctest: +SKIP
>>> df = spark.createDataFrame(pdf) # doctest: +SKIP
When the UDF is called with a single column that is not `StructType`, the input to the
underlying function is an iterator of `pd.Series`.
>>> @pandas_udf("long", PandasUDFType.SCALAR_ITER) # doctest: +SKIP
... def plus_one(batch_iter):
... for x in batch_iter:
... yield x + 1
...
>>> df.select(plus_one(col("x"))).show() # doctest: +SKIP
+-----------+
|plus_one(x)|
+-----------+
| 2|
| 3|
| 4|
+-----------+
When the UDF is called with more than one columns, the input to the underlying function is an
iterator of `pd.Series` tuple.
>>> @pandas_udf("long", PandasUDFType.SCALAR_ITER) # doctest: +SKIP
... def multiply_two_cols(batch_iter):
... for a, b in batch_iter:
... yield a * b
...
>>> df.select(multiply_two_cols(col("x"), col("x"))).show() # doctest: +SKIP
+-----------------------+
|multiply_two_cols(x, x)|
+-----------------------+
| 1|
| 4|
| 9|
+-----------------------+
When the UDF is called with a single column that is `StructType`, the input to the underlying
function is an iterator of `pd.DataFrame`.
>>> @pandas_udf("long", PandasUDFType.SCALAR_ITER) # doctest: +SKIP
... def multiply_two_nested_cols(pdf_iter):
... for pdf in pdf_iter:
... yield pdf["a"] * pdf["b"]
...
>>> df.select(
... multiply_two_nested_cols(
... struct(col("x").alias("a"), col("x").alias("b"))
... ).alias("y")
... ).show() # doctest: +SKIP
+---+
| y|
+---+
| 1|
| 4|
| 9|
+---+
In the UDF, you can initialize some states before processing batches, wrap your code with
`try ... finally ...` or use context managers to ensure the release of resources at the end
or in case of early termination.
>>> y_bc = spark.sparkContext.broadcast(1) # doctest: +SKIP
>>> @pandas_udf("long", PandasUDFType.SCALAR_ITER) # doctest: +SKIP
... def plus_y(batch_iter):
... y = y_bc.value # initialize some state
... try:
... for x in batch_iter:
... yield x + y
... finally:
... pass # release resources here, if any
...
>>> df.select(plus_y(col("x"))).show() # doctest: +SKIP
+---------+
|plus_y(x)|
+---------+
| 2|
| 3|
| 4|
+---------+
3. GROUPED_MAP
A grouped map UDF defines transformation: A `pandas.DataFrame` -> A `pandas.DataFrame`
The returnType should be a :class:`StructType` describing the schema of the returned
`pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match
the field names in the defined returnType schema if specified as strings, or match the
field data types by position if not strings, e.g. integer indices.
The length of the returned `pandas.DataFrame` can be arbitrary.
Grouped map UDFs are used with :meth:`pyspark.sql.GroupedData.apply`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").apply(normalize).show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
Alternatively, the user can define a function that takes two arguments.
In this case, the grouping key(s) will be passed as the first argument and the data will
be passed as the second argument. The grouping key(s) will be passed as a tuple of numpy
data types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in
as a `pandas.DataFrame` containing all columns from the original Spark DataFrame.
This is useful when the user does not want to hardcode grouping key(s) in the function.
>>> import pandas as pd # doctest: +SKIP
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def mean_udf(key, pdf):
... # key is a tuple of one numpy.int64, which is the value
... # of 'id' for the current group
... return pd.DataFrame([key + (pdf.v.mean(),)])
>>> df.groupby('id').apply(mean_udf).show() # doctest: +SKIP
+---+---+
| id| v|
+---+---+
| 1|1.5|
| 2|6.0|
+---+---+
>>> @pandas_udf(
... "id long, `ceil(v / 2)` long, v double",
... PandasUDFType.GROUPED_MAP) # doctest: +SKIP
>>> def sum_udf(key, pdf):
... # key is a tuple of two numpy.int64s, which is the values
... # of 'id' and 'ceil(df.v / 2)' for the current group
... return pd.DataFrame([key + (pdf.v.sum(),)])
>>> df.groupby(df.id, ceil(df.v / 2)).apply(sum_udf).show() # doctest: +SKIP
+---+-----------+----+
| id|ceil(v / 2)| v|
+---+-----------+----+
| 2| 5|10.0|
| 1| 1| 3.0|
| 2| 3| 5.0|
| 2| 2| 3.0|
+---+-----------+----+
.. note:: If returning a new `pandas.DataFrame` constructed with a dictionary, it is
recommended to explicitly index the columns by name to ensure the positions are correct,
or alternatively use an `OrderedDict`.
For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or
`pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`.
.. seealso:: :meth:`pyspark.sql.GroupedData.apply`
4. GROUPED_AGG
A grouped aggregate UDF defines a transformation: One or more `pandas.Series` -> A scalar
The `returnType` should be a primitive data type, e.g., :class:`DoubleType`.
The returned scalar can be either a python primitive type, e.g., `int` or `float`
or a numpy data type, e.g., `numpy.int64` or `numpy.float64`.
:class:`MapType` and :class:`StructType` are currently not supported as output types.
Group aggregate UDFs are used with :meth:`pyspark.sql.GroupedData.agg` and
:class:`pyspark.sql.Window`
This example shows using grouped aggregated UDFs with groupby:
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def mean_udf(v):
... return v.mean()
>>> df.groupby("id").agg(mean_udf(df['v'])).show() # doctest: +SKIP
+---+-----------+
| id|mean_udf(v)|
+---+-----------+
| 1| 1.5|
| 2| 6.0|
+---+-----------+
This example shows using grouped aggregated UDFs as window functions.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> from pyspark.sql import Window
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def mean_udf(v):
... return v.mean()
>>> w = (Window.partitionBy('id')
... .orderBy('v')
... .rowsBetween(-1, 0))
>>> df.withColumn('mean_v', mean_udf(df['v']).over(w)).show() # doctest: +SKIP
+---+----+------+
| id| v|mean_v|
+---+----+------+
| 1| 1.0| 1.0|
| 1| 2.0| 1.5|
| 2| 3.0| 3.0|
| 2| 5.0| 4.0|
| 2|10.0| 7.5|
+---+----+------+
.. note:: For performance reasons, the input series to window functions are not copied.
Therefore, mutating the input series is not allowed and will cause incorrect results.
For the same reason, users should also not rely on the index of the input series.
.. seealso:: :meth:`pyspark.sql.GroupedData.agg` and :class:`pyspark.sql.Window`
5. MAP_ITER
A map iterator Pandas UDFs are used to transform data with an iterator of batches.
It can be used with :meth:`pyspark.sql.DataFrame.mapInPandas`.
It can return the output of arbitrary length in contrast to the scalar Pandas UDF.
It maps an iterator of batches in the current :class:`DataFrame` using a Pandas user-defined
function and returns the result as a :class:`DataFrame`.
The user-defined function should take an iterator of `pandas.DataFrame`\\s and return another
iterator of `pandas.DataFrame`\\s. All columns are passed together as an
iterator of `pandas.DataFrame`\\s to the user-defined function and the returned iterator of
`pandas.DataFrame`\\s are combined as a :class:`DataFrame`.
>>> df = spark.createDataFrame([(1, 21), (2, 30)],
... ("id", "age")) # doctest: +SKIP
>>> @pandas_udf(df.schema, PandasUDFType.MAP_ITER) # doctest: +SKIP
... def filter_func(batch_iter):
... for pdf in batch_iter:
... yield pdf[pdf.id == 1]
>>> df.mapInPandas(filter_func).show() # doctest: +SKIP
+---+---+
| id|age|
+---+---+
| 1| 21|
+---+---+
.. note:: The user-defined functions are considered deterministic by default. Due to
optimization, duplicate invocations may be eliminated or the function may even be invoked
more times than it is present in the query. If your function is not deterministic, call
`asNondeterministic` on the user defined function. E.g.:
>>> @pandas_udf('double', PandasUDFType.SCALAR) # doctest: +SKIP
... def random(v):
... import numpy as np
... import pandas as pd
... return pd.Series(np.random.randn(len(v))
>>> random = random.asNondeterministic() # doctest: +SKIP
.. note:: The user-defined functions do not support conditional expressions or short circuiting
in boolean expressions and it ends up with being executed all internally. If the functions
can fail on special rows, the workaround is to incorporate the condition into the functions.
.. note:: The user-defined functions do not take keyword arguments on the calling side.
.. note:: The data type of returned `pandas.Series` from the user-defined functions should be
matched with defined returnType (see :meth:`types.to_arrow_type` and
:meth:`types.from_arrow_type`). When there is mismatch between them, Spark might do
conversion on returned data. The conversion is not guaranteed to be correct and results
should be checked for accuracy by users.
"""
# The following table shows most of Pandas data and SQL type conversions in Pandas UDFs that
# are not yet visible to the user. Some of behaviors are buggy and might be changed in the near
# future. The table might have to be eventually documented externally.
# Please see SPARK-28132's PR to see the codes in order to generate the table below.
#
# +-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+-----------+--------------------------------+ # noqa
# |SQL Type \ Pandas Value(Type)|None(object(NoneType))| True(bool)| 1(int8)| 1(int16)| 1(int32)| 1(int64)| 1(uint8)| 1(uint16)| 1(uint32)| 1(uint64)| 1.0(float16)| 1.0(float32)| 1.0(float64)|1970-01-01 00:00:00(datetime64[ns])|1970-01-01 00:00:00-05:00(datetime64[ns, US/Eastern])|a(object(string))| 1(object(Decimal))|[1 2 3](object(array[int32]))| 1.0(float128)|(1+0j)(complex64)|(1+0j)(complex128)|A(category)|1 days 00:00:00(timedelta64[ns])| # noqa
# +-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+-----------+--------------------------------+ # noqa
# | boolean| None| True| True| True| True| True| True| True| True| True| True| True| True| X| X| X| X| X| X| X| X| X| X| # noqa
# | tinyint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| 0| X| # noqa
# | smallint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| X| X| # noqa
# | int| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| X| X| X| 1| X| X| X| X| X| X| # noqa
# | bigint| None| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 1| 0| 18000000000000| X| 1| X| X| X| X| X| X| # noqa
# | float| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X| X| X| X| X| X| X| X| # noqa
# | double| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X| X| X| X| X| X| X| X| # noqa
# | date| None| X| X| X|datetime.date(197...| X| X| X| X| X| X| X| X| datetime.date(197...| datetime.date(197...| X|datetime.date(197...| X| X| X| X| X| X| # noqa
# | timestamp| None| X| X| X| X|datetime.datetime...| X| X| X| X| X| X| X| datetime.datetime...| datetime.datetime...| X|datetime.datetime...| X| X| X| X| X| X| # noqa
# | string| None| ''| ''| ''| '\x01'| '\x01'| ''| ''| '\x01'| '\x01'| ''| ''| ''| X| X| 'a'| X| X| ''| X| ''| X| X| # noqa
# | decimal(10,0)| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| Decimal('1')| X| X| X| X| X| X| # noqa
# | array<int>| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| [1, 2, 3]| X| X| X| X| X| # noqa
# | map<string,int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# | struct<_1:int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# | binary| None|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')| bytearray(b'\x01')| bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'\x01')|bytearray(b'')|bytearray(b'')|bytearray(b'')| bytearray(b'')| bytearray(b'')| bytearray(b'a')| X| X|bytearray(b'')| bytearray(b'')| bytearray(b'')| X| bytearray(b'')| # noqa
# +-----------------------------+----------------------+------------------+------------------+------------------+--------------------+--------------------+------------------+------------------+------------------+------------------+--------------+--------------+--------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+--------------+-----------------+------------------+-----------+--------------------------------+ # noqa
#
# Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be
# used in `returnType`.
# Note: The values inside of the table are generated by `repr`.
# Note: Python 3.7.3, Pandas 0.24.2 and PyArrow 0.13.0 are used.
# Note: Timezone is KST.
# Note: 'X' means it throws an exception during the conversion.
# decorator @pandas_udf(returnType, functionType)
is_decorator = f is None or isinstance(f, (str, DataType))
if is_decorator:
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
if functionType is not None:
# @pandas_udf(dataType, functionType=functionType)
# @pandas_udf(returnType=dataType, functionType=functionType)
eval_type = functionType
elif returnType is not None and isinstance(returnType, int):
# @pandas_udf(dataType, functionType)
eval_type = returnType
else:
# @pandas_udf(dataType) or @pandas_udf(returnType=dataType)
eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF
else:
return_type = returnType
if functionType is not None:
eval_type = functionType
else:
eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF
if return_type is None:
raise ValueError("Invalid returnType: returnType can not be None")
if eval_type not in [PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
PythonEvalType.SQL_MAP_PANDAS_ITER_UDF,
PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF]:
raise ValueError("Invalid functionType: "
"functionType must be one the values from PandasUDFType")
if is_decorator:
return functools.partial(_create_udf, returnType=return_type, evalType=eval_type)
else:
return _create_udf(f=f, returnType=return_type, evalType=eval_type)
blacklist = ['map', 'since', 'ignore_unicode_prefix']
__all__ = [k for k, v in globals().items()
if not k.startswith('_') and k[0].islower() and callable(v) and k not in blacklist]
__all__ += ["PandasUDFType"]
__all__.sort()
def _test():
import doctest
from pyspark.sql import Row, SparkSession
import pyspark.sql.functions
globs = pyspark.sql.functions.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.functions tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = spark.createDataFrame([Row(name='Alice', age=2), Row(name='Bob', age=5)])
spark.conf.set("spark.sql.legacy.utcTimestampFunc.enabled", "true")
(failure_count, test_count) = doctest.testmod(
pyspark.sql.functions, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
spark.conf.unset("spark.sql.legacy.utcTimestampFunc.enabled")
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
illusionww/docker-jupyter-nn-tools | jupyter_notebook_config.py | 2 | 21300 | # Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Configurable configuration
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# LoggingConfigurable configuration
#------------------------------------------------------------------------------
# A parent class for Configurables that log.
#
# Subclasses have a log trait, and the default behavior is to get the logger
# from the currently running Application.
#------------------------------------------------------------------------------
# SingletonConfigurable configuration
#------------------------------------------------------------------------------
# A configurable that only allows one instance.
#
# This class is for classes that should only have one instance of itself or
# *any* subclass. To create and retrieve such a class use the
# :meth:`SingletonConfigurable.instance` method.
#------------------------------------------------------------------------------
# Application configuration
#------------------------------------------------------------------------------
# This is an application.
# The date format used by logging formatters for %(asctime)s
# c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
# The Logging format template
# c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Set the log level by value or name.
# c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp configuration
#------------------------------------------------------------------------------
# Base class for Jupyter applications
# Answer yes to any prompts.
# c.JupyterApp.answer_yes = False
# Full path of a config file.
# c.JupyterApp.config_file = u''
# Specify a config file to load.
# c.JupyterApp.config_file_name = u''
# Generate default config file.
# c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp configuration
#------------------------------------------------------------------------------
# Set the Access-Control-Allow-Credentials: true header
# c.NotebookApp.allow_credentials = False
# Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
# c.NotebookApp.allow_origin = ''
# Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
# c.NotebookApp.allow_origin_pat = ''
# DEPRECATED use base_url
# c.NotebookApp.base_project_url = '/'
# The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
# c.NotebookApp.base_url = '/'
# Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
# c.NotebookApp.browser = u''
# The full path to an SSL/TLS certificate file.
# c.NotebookApp.certfile = u'/jupyter/certs/server.crt'
# The full path to a certificate authority certifificate for SSL/TLS client
# authentication.
# c.NotebookApp.client_ca = u''
# The config manager class to use
# c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
# The notebook manager class to use.
# c.NotebookApp.contents_manager_class = 'notebook.services.contents.filemanager.FileContentsManager'
# Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
# c.NotebookApp.cookie_options = {}
# The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
# c.NotebookApp.cookie_secret = ''
# The file where the cookie secret is stored.
# c.NotebookApp.cookie_secret_file = u''
# The default URL to redirect to from `/`
# c.NotebookApp.default_url = '/tree'
# Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
# c.NotebookApp.enable_mathjax = True
# extra paths to look for Javascript notebook extensions
# c.NotebookApp.extra_nbextensions_path = []
# Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
# c.NotebookApp.extra_static_paths = []
# Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
# c.NotebookApp.extra_template_paths = []
#
# c.NotebookApp.file_to_run = ''
# Use minified JS file or not, mainly use during dev to avoid JS recompilation
# c.NotebookApp.ignore_minified_js = False
# (bytes/sec) Maximum rate at which messages can be sent on iopub before they
# are limited.
# c.NotebookApp.iopub_data_rate_limit = 0
# (msg/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
# c.NotebookApp.iopub_msg_rate_limit = 0
# The IP address the notebook server will listen on.
c.NotebookApp.ip = '*'
# Supply extra arguments that will be passed to Jinja environment.
# c.NotebookApp.jinja_environment_options = {}
# Extra variables to supply to jinja templates when rendering.
# c.NotebookApp.jinja_template_vars = {}
# The kernel manager class to use.
# c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
# The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
# c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
# The full path to a private key file for usage with SSL/TLS.
# c.NotebookApp.keyfile = u'/jupyter/certs/server.key'
# The login handler class to use.
# c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
# The logout handler class to use.
# c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
# The url for MathJax.js.
# c.NotebookApp.mathjax_url = ''
# Dict of Python modules to load as notebook server extensions.Entry values can
# be used to enable and disable the loading ofthe extensions.
# c.NotebookApp.nbserver_extensions = {}
# The directory to use for notebooks and kernels.
# c.NotebookApp.notebook_dir = u''
# Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
# c.NotebookApp.open_browser = False
# Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
# c.NotebookApp.password = u''
# The port the notebook server will listen on.
# c.NotebookApp.port = 8888
# The number of additional ports to try if the specified port is not available.
# c.NotebookApp.port_retries = 50
# DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'inline'
# (sec) Time window used to check the message and data rate limits.
# c.NotebookApp.rate_limit_window = 1.0
# Reraise exceptions encountered loading server extensions?
# c.NotebookApp.reraise_server_extension_failures = False
# DEPRECATED use the nbserver_extensions dict instead
# c.NotebookApp.server_extensions = []
# The session manager class to use.
# c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
# Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
# c.NotebookApp.ssl_options = {}
# Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
# c.NotebookApp.tornado_settings = {}
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headerssent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
# c.NotebookApp.trust_xheaders = False
# DEPRECATED, use tornado_settings
# c.NotebookApp.webapp_settings = {}
# The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
# c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin configuration
#------------------------------------------------------------------------------
# Mixin for configurable classes that work with connection files
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.ConnectionFileMixin.connection_file = ''
# set the control (ROUTER) port [default: random]
# c.ConnectionFileMixin.control_port = 0
# set the heartbeat port [default: random]
# c.ConnectionFileMixin.hb_port = 0
# set the iopub (PUB) port [default: random]
# c.ConnectionFileMixin.iopub_port = 0
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.ConnectionFileMixin.ip = u''
# set the shell (ROUTER) port [default: random]
# c.ConnectionFileMixin.shell_port = 0
# set the stdin (ROUTER) port [default: random]
# c.ConnectionFileMixin.stdin_port = 0
#
# c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = True
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
# c.KernelManager.kernel_cmd = []
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
# c.Session.check_pid = True
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# Debug output in the Session
# c.Session.debug = False
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# execution key, for signing messages.
# c.Session.key = ''
# path to file containing execution key.
# c.Session.keyfile = ''
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# The UUID identifying this session.
# c.Session.session = u''
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# Username for the Session. Default is your system username.
# c.Session.username = u'exuser'
#------------------------------------------------------------------------------
# MultiKernelManager configuration
#------------------------------------------------------------------------------
# A class for managing multiple kernels.
# The name of the default kernel to start
# c.MultiKernelManager.default_kernel_name = 'python2'
# The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
# c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager configuration
#------------------------------------------------------------------------------
# A KernelManager that handles notebook mapping and HTTP error handling
#
# c.MappingKernelManager.root_dir = u''
#------------------------------------------------------------------------------
# ContentsManager configuration
#------------------------------------------------------------------------------
# Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
#
# c.ContentsManager.checkpoints = None
#
# c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
#
# c.ContentsManager.checkpoints_kwargs = {}
# Glob patterns to hide in file and directory listings.
# c.ContentsManager.hide_globs = [u'__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
# Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
# c.ContentsManager.pre_save_hook = None
# The base name used when creating untitled directories.
# c.ContentsManager.untitled_directory = 'Untitled Folder'
# The base name used when creating untitled files.
# c.ContentsManager.untitled_file = 'untitled'
# The base name used when creating untitled notebooks.
# c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin configuration
#------------------------------------------------------------------------------
# Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note ---- Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against against which API-style paths are to be resolved.
#
# log : logging.Logger
# By default notebooks are saved on disk on a temporary file and then if
# succefully written, it replaces the old ones. This procedure, namely
# 'atomic_writing', causes some bugs on file system whitout operation order
# enforcement (like some networked fs). If set to False, the new notebook is
# written directly on the old one which could fail (eg: full filesystem or quota
# )
# c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager configuration
#------------------------------------------------------------------------------
# Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written - model: the model
# representing the file - contents_manager: this ContentsManager instance
# c.FileContentsManager.post_save_hook = None
#
# c.FileContentsManager.root_dir = u''
# DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
# c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary configuration
#------------------------------------------------------------------------------
# A class for computing and verifying notebook signatures.
# The hashing algorithm used to sign notebooks.
# c.NotebookNotary.algorithm = 'sha256'
# The number of notebook signatures to cache. When the number of signatures
# exceeds this value, the oldest 25% of signatures will be culled.
# c.NotebookNotary.cache_size = 65535
# The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter runtime directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
# c.NotebookNotary.db_file = u''
# The secret key with which notebooks are signed.
# c.NotebookNotary.secret = ''
# The file where the secret key is stored.
# c.NotebookNotary.secret_file = u''
#------------------------------------------------------------------------------
# KernelSpecManager configuration
#------------------------------------------------------------------------------
# If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
# c.KernelSpecManager.ensure_native_kernel = True
# The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
# c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
# Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
# c.KernelSpecManager.whitelist = set([])
| mit |
abhishekkrthakur/scikit-learn | sklearn/cluster/tests/test_spectral.py | 11 | 7958 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
    """Exercise eigen_solver='amg': it must work when pyamg is installed
    and raise ValueError from spectral_embedding when it is not."""
    centers = np.array([[0., 0., 0.],
                        [10., 10., 10.],
                        [20., 20., 20.]])
    X, true_labels = make_blobs(n_samples=100, centers=centers,
                                cluster_std=1., random_state=42)
    # Turn pairwise distances into a (sparse) similarity matrix.
    dist = pairwise_distances(X)
    similarity = sparse.coo_matrix(np.max(dist) - dist)
    try:
        from pyamg import smoothed_aggregation_solver  # noqa: F401
        have_amg = True
    except ImportError:
        have_amg = False
    if have_amg:
        labels = spectral_clustering(similarity, n_clusters=len(centers),
                                     random_state=0, eigen_solver="amg")
        # We don't care too much that it's good, just that it *worked*;
        # still require some lower bound on performance.
        assert_greater(np.mean(labels == true_labels), .3)
    else:
        assert_raises(ValueError, spectral_embedding, similarity,
                      n_components=len(centers),
                      random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
    """spectral_clustering must reject an unrecognized eigen_solver."""
    centers = np.array([[0., 0., 0.],
                        [10., 10., 10.],
                        [20., 20., 20.]])
    X, _ = make_blobs(n_samples=100, centers=centers,
                      cluster_std=1., random_state=42)
    # Similarity matrix derived from pairwise distances.
    dist = pairwise_distances(X)
    similarity = sparse.coo_matrix(np.max(dist) - dist)
    assert_raises(ValueError, spectral_clustering, similarity, n_clusters=2,
                  random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
    """spectral_clustering must reject an unrecognized assign_labels."""
    centers = np.array([[0., 0., 0.],
                        [10., 10., 10.],
                        [20., 20., 20.]])
    X, _ = make_blobs(n_samples=100, centers=centers,
                      cluster_std=1., random_state=42)
    # Similarity matrix derived from pairwise distances.
    dist = pairwise_distances(X)
    similarity = sparse.coo_matrix(np.max(dist) - dist)
    assert_raises(ValueError, spectral_clustering, similarity, n_clusters=2,
                  random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
    """A precomputed sparse affinity gives a perfect split on easy blobs."""
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01)
    affinity = rbf_kernel(X, gamma=1)
    # Zero-out near-zero similarities so the matrix is genuinely sparse.
    affinity = sparse.coo_matrix(np.maximum(affinity - 1e-4, 0))
    labels = SpectralClustering(random_state=0, n_clusters=2,
                                affinity='precomputed').fit(affinity).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
    """Exercise every supported affinity: nearest-neighbors, rbf (default),
    all metrics from kernel_metrics(), and user-supplied callables; an
    unknown affinity string must raise ValueError."""
    # Note: in the following, random_state has been selected to have
    # a dataset that yields a stable eigen decomposition both when built
    # on OSX and Linux
    X, y = make_blobs(n_samples=20, random_state=0,
                      centers=[[1, 1], [-1, -1]], cluster_std=0.01
                      )
    # nearest neighbors affinity
    sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
                            random_state=0)
    # kNN graph on tight blobs is disconnected, hence the warning.
    assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
    assert_equal(adjusted_rand_score(y, sp.labels_), 1)
    # Default rbf affinity with an explicit gamma.
    sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal(adjusted_rand_score(y, labels), 1)
    # Random data: only check that fitting runs and output shapes match.
    X = check_random_state(10).rand(10, 5) * 10
    kernels_available = kernel_metrics()
    for kern in kernels_available:
        # Additive chi^2 gives a negative similarity matrix which
        # doesn't make sense for spectral clustering
        if kern != 'additive_chi2':
            sp = SpectralClustering(n_clusters=2, affinity=kern,
                                    random_state=0)
            labels = sp.fit(X).labels_
            assert_equal((X.shape[0],), labels.shape)
    # A constant callable affinity is degenerate but must still run.
    sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
                            random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)

    def histogram(x, y, **kwargs):
        """Histogram kernel implemented as a callable."""
        assert_equal(kwargs, {})  # no kernel_params that we didn't ask for
        return np.minimum(x, y).sum()

    sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
    labels = sp.fit(X).labels_
    assert_equal((X.shape[0],), labels.shape)
    # raise error on unknown affinity
    sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
    assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
    """discretize() recovers labels from a noisy class-assignment matrix.

    Builds a one-hot indicator matrix from random ground-truth labels,
    perturbs it with Gaussian noise, and checks that ``discretize``
    recovers labels with adjusted Rand index > 0.8.

    Parameters
    ----------
    seed : int
        Seed for the RandomState driving label and noise generation.
    """
    random_state = np.random.RandomState(seed)
    for n_samples in [50, 100, 150, 500]:
        for n_class in range(2, 10):
            # Random class labels in [0, n_class] inclusive.
            # FIX: random_integers() was deprecated and has been removed
            # from NumPy; randint's upper bound is exclusive, hence + 1.
            y_true = random_state.randint(0, n_class + 1, n_samples)
            # FIX: the np.float alias was removed in NumPy 1.24.
            y_true = np.array(y_true, float)
            # Noisy one-hot class-assignment matrix.
            y_indicator = sparse.coo_matrix((np.ones(n_samples),
                                             (np.arange(n_samples),
                                              y_true)),
                                            shape=(n_samples,
                                                   n_class + 1))
            y_true_noisy = (y_indicator.toarray()
                            + 0.1 * random_state.randn(n_samples,
                                                       n_class + 1))
            y_pred = discretize(y_true_noisy, random_state)
            assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
roxyboy/scikit-learn | sklearn/linear_model/tests/test_ransac.py | 216 | 13290 | import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises_regexp
from scipy import sparse
from sklearn.utils.testing import assert_less
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.linear_model.ransac import _dynamic_max_trials
# Synthetic regression data shared by all RANSAC tests: 400 points on the
# line y = 0.2 * x + 20, with three rows corrupted to act as outliers.
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Corrupt known rows so every test has a fixed outlier set to reject.
outliers = np.array((10, 30, 200))
for row, faulty in zip(outliers, [(1000, 1000), (-1000, -1000), (-100, -50)]):
    data[row, :] = faulty
# Re-extract design matrix (column vector) and target from the data table.
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
    """RANSAC flags exactly the three corrupted rows as outliers."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    ransac.fit(X, y)
    # Reference mask: everything inlier except the known outlier rows.
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_is_data_valid():
    """fit() raises ValueError when is_data_valid rejects every subset."""
    def always_invalid(X, y):
        # Each candidate subset has exactly min_samples rows.
        assert_equal(X.shape[0], 2)
        assert_equal(y.shape[0], 2)
        return False

    X_rand = np.random.rand(10, 2)
    y_rand = np.random.rand(10, 1)
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5,
                             is_data_valid=always_invalid,
                             random_state=0)
    assert_raises(ValueError, ransac.fit, X_rand, y_rand)
def test_ransac_is_model_valid():
    """fit() raises ValueError when is_model_valid rejects every model."""
    def always_invalid(estimator, X, y):
        # Called once per candidate model on its min_samples subset.
        assert_equal(X.shape[0], 2)
        assert_equal(y.shape[0], 2)
        return False

    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5,
                             is_model_valid=always_invalid,
                             random_state=0)
    assert_raises(ValueError, ransac.fit, X, y)
def test_ransac_max_trials():
    """max_trials=0 is invalid; n_trials_ only exists after fitting."""
    invalid = RANSACRegressor(LinearRegression(), min_samples=2,
                              residual_threshold=5, max_trials=0,
                              random_state=0)
    assert_raises(ValueError, invalid.fit, X, y)

    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, max_trials=11,
                             random_state=0)
    # Attribute must not exist before fit().
    assert getattr(ransac, 'n_trials_', None) is None
    ransac.fit(X, y)
    assert_equal(ransac.n_trials_, 2)
def test_ransac_stop_n_inliers():
    """Search stops after one trial once stop_n_inliers is reached."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, stop_n_inliers=2,
                             random_state=0)
    ransac.fit(X, y)
    assert_equal(ransac.n_trials_, 1)
def test_ransac_stop_score():
    """Search stops after one trial once stop_score is exceeded."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, stop_score=0,
                             random_state=0)
    ransac.fit(X, y)
    assert_equal(ransac.n_trials_, 1)
def test_ransac_score():
    """score() is perfect on the inliers and degraded on the outliers."""
    X_local = np.arange(100)[:, None]
    y_local = np.zeros((100, ))
    # Rows 0 and 1 are the only points off the y == 0 consensus line.
    y_local[0] = 1
    y_local[1] = 100
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=0.5, random_state=0)
    ransac.fit(X_local, y_local)
    assert_equal(ransac.score(X_local[2:], y_local[2:]), 1)
    assert_less(ransac.score(X_local[:2], y_local[:2]), 1)
def test_ransac_predict():
    """predict() follows the consensus model, ignoring the two outliers."""
    X_local = np.arange(100)[:, None]
    y_local = np.zeros((100, ))
    # Rows 0 and 1 are the only points off the y == 0 consensus line.
    y_local[0] = 1
    y_local[1] = 100
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=0.5, random_state=0)
    ransac.fit(X_local, y_local)
    assert_equal(ransac.predict(X_local), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
    """residual_threshold=0.0 yields no inliers; fit must raise ValueError.

    The error message is expected to mention both the parameter name and
    its value.
    """
    base_estimator = LinearRegression()
    ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
                                       residual_threshold=0.0, random_state=0)
    # FIX: use a raw string for the regex -- "\." in a plain literal is an
    # invalid escape sequence (DeprecationWarning on Python 3.6+).
    assert_raises_regexp(ValueError,
                         r"No inliers.*residual_threshold.*0\.0",
                         ransac_estimator.fit, X, y)
def test_ransac_sparse_coo():
    """RANSAC accepts a COO-sparse design matrix and finds the outliers."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    ransac.fit(sparse.coo_matrix(X), y)
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_sparse_csr():
    """RANSAC accepts a CSR-sparse design matrix and finds the outliers."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    ransac.fit(sparse.csr_matrix(X), y)
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_sparse_csc():
    """RANSAC accepts a CSC-sparse design matrix and finds the outliers."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    ransac.fit(sparse.csc_matrix(X), y)
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_none_estimator():
    """base_estimator=None behaves like an explicit LinearRegression."""
    explicit = RANSACRegressor(LinearRegression(), min_samples=2,
                               residual_threshold=5, random_state=0)
    defaulted = RANSACRegressor(None, 2, 5, random_state=0)
    explicit.fit(X, y)
    defaulted.fit(X, y)
    assert_array_almost_equal(explicit.predict(X), defaulted.predict(X))
def test_ransac_min_n_samples():
    """min_samples accepts absolute ints, fractions, and a default;
    negative, over-unity-float, and larger-than-n values must raise."""
    base_estimator = LinearRegression()

    def make(**kwargs):
        # All variants share the same threshold and random seed.
        return RANSACRegressor(base_estimator, residual_threshold=5,
                               random_state=0, **kwargs)

    as_int = make(min_samples=2)
    as_fraction = make(min_samples=2. / X.shape[0])
    negative = make(min_samples=-1)
    bad_float = make(min_samples=5.2)
    as_float = make(min_samples=2.0)
    defaulted = make()
    too_many = make(min_samples=X.shape[0] + 1)

    # Equivalent specifications must yield identical predictions.
    as_int.fit(X, y)
    as_fraction.fit(X, y)
    as_float.fit(X, y)
    defaulted.fit(X, y)
    assert_array_almost_equal(as_int.predict(X), as_fraction.predict(X))
    assert_array_almost_equal(as_int.predict(X), as_float.predict(X))
    assert_array_almost_equal(as_int.predict(X), defaulted.predict(X))
    # Invalid specifications are rejected at fit time.
    assert_raises(ValueError, negative.fit, X, y)
    assert_raises(ValueError, bad_float.fit, X, y)
    assert_raises(ValueError, too_many.fit, X, y)
def test_ransac_multi_dimensional_targets():
    """Outlier detection also works with a 2-D (multi-output) target."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             residual_threshold=5, random_state=0)
    # Stack the 1-D target into three identical output columns.
    yyy = np.column_stack([y, y, y])
    ransac.fit(X, yyy)
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_residual_metric():
    """Custom residual_metric callables work for 1-D and n-D targets.

    With identical stacked target columns, the L1 and squared-L2 metrics
    must select the same consensus set as the default metric.
    """
    # FIX: define with def instead of assigning lambdas (PEP 8, E731).
    def residual_metric1(dy):
        # L1: sum of absolute residuals over output dimensions.
        return np.sum(np.abs(dy), axis=1)

    def residual_metric2(dy):
        # Squared L2: sum of squared residuals over output dimensions.
        return np.sum(dy ** 2, axis=1)

    yyy = np.column_stack([y, y, y])
    base_estimator = LinearRegression()
    ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0)
    ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        residual_metric=residual_metric1)
    ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
                                        residual_threshold=5, random_state=0,
                                        residual_metric=residual_metric2)
    # multi-dimensional targets
    ransac_estimator0.fit(X, yyy)
    ransac_estimator1.fit(X, yyy)
    ransac_estimator2.fit(X, yyy)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator1.predict(X))
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))
    # one-dimensional target
    ransac_estimator0.fit(X, y)
    ransac_estimator2.fit(X, y)
    assert_array_almost_equal(ransac_estimator0.predict(X),
                              ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
    """With no explicit residual_threshold the outliers are still found."""
    ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                             random_state=0)
    ransac.fit(X, y)
    expected_mask = np.ones_like(ransac.inlier_mask_).astype(np.bool_)
    expected_mask[outliers] = False
    assert_equal(ransac.inlier_mask_, expected_mask)
def test_ransac_dynamic_max_trials():
    """_dynamic_max_trials reproduces the reference trial counts.

    Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
    Hartley, R.~I. and Zisserman, A., 2004, Multiple View Geometry in
    Computer Vision, Second Edition, Cambridge University Press,
    ISBN: 0521540518.
    """
    # (n_inliers, n_samples, min_samples, probability, expected trials)
    table = [
        (100, 100, 2, 0.99, 1),         # e = 0%, min_samples = 2
        (95, 100, 2, 0.99, 2),          # e = 5%, min_samples = 2
        (90, 100, 2, 0.99, 3),          # e = 10%, min_samples = 2
        (70, 100, 2, 0.99, 7),          # e = 30%, min_samples = 2
        (50, 100, 2, 0.99, 17),         # e = 50%, min_samples = 2
        (95, 100, 8, 0.99, 5),          # e = 5%, min_samples = 8
        (90, 100, 8, 0.99, 9),          # e = 10%, min_samples = 8
        (70, 100, 8, 0.99, 78),         # e = 30%, min_samples = 8
        (50, 100, 8, 0.99, 1177),       # e = 50%, min_samples = 8
        (1, 100, 10, 0, 0),             # probability 0 -> no trials
        (1, 100, 10, 1, float('inf')),  # probability 1 -> unbounded
    ]
    for n_inliers, n_samples, min_samples, prob, expected in table:
        assert_equal(_dynamic_max_trials(n_inliers, n_samples,
                                         min_samples, prob), expected)
    # stop_probability outside [0, 1] is rejected at fit time.
    for bad_probability in (-0.1, 1.1):
        ransac = RANSACRegressor(LinearRegression(), min_samples=2,
                                 stop_probability=bad_probability)
        assert_raises(ValueError, ransac.fit, X, y)
| bsd-3-clause |
jrbourbeau/cr-composition | plotting/plot_yearly_BDT_scores.py | 1 | 4376 | #!/usr/bin/env python
from __future__ import division, print_function
import argparse
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import multiprocessing as mp
from sklearn.externals import joblib
import comptools as comp
import comptools.analysis.plotting as plotting
color_dict = comp.analysis.get_color_dict()
def get_BDT_scores(config):
    """Return BDT decision-function scores for all data events of a config.

    Parameters
    ----------
    config : str
        Detector configuration name (e.g. 'IC86.2012').

    Returns
    -------
    numpy.ndarray
        One decision-function score per surviving data event.
    """
    df_data = comp.load_dataframe(datatype='data', config=config)
    feature_list, feature_labels = comp.get_training_features()
    # BUG FIX: the original called dropna(..., inplace=True) on the copy
    # returned by df_data.loc[:, feature_list], which left df_data itself
    # unchanged. Drop rows with missing feature values on the frame.
    df_data = df_data.dropna(axis=0, how='any', subset=feature_list)
    # Load saved pipeline (trained on the IC86.2012 baseline).
    model_file = os.path.join(comp.paths.project_home, 'models/GBDT_IC86.2012.pkl')
    pipeline = joblib.load(model_file)['pipeline']
    # Get BDT scores for each data event.
    X_data = comp.dataframe_functions.dataframe_to_array(df_data, feature_list)
    classifier_scores = pipeline.decision_function(X_data)
    print('{} complete!'.format(config))
    return classifier_scores
if __name__ == '__main__':
    # Compare yearly BDT score distributions against the IC86.2012 baseline.
    parser = argparse.ArgumentParser(
        description='Extracts and saves desired information from simulation/data .i3 files')
    parser.add_argument('-c', '--config', dest='config', nargs='*',
                        choices=comp.datafunctions.get_data_configs(),
                        help='Detector configuration')
    args = parser.parse_args()
    # Energy distribution comparison plot
    # One worker per requested configuration; scores computed in parallel.
    score_pool = mp.Pool(processes=len(args.config))
    scores = score_pool.map(get_BDT_scores, args.config)
    config_scores_dict = dict(zip(args.config, scores))
    # fig, ax = plt.subplots()
    min_score, max_score = -3, 3
    score_bins = np.linspace(min_score, max_score, 25)
    # for idx, config in enumerate(args.config):
    #     score_counts = np.histogram(config_scores_dict[config], bins=score_bins)[0]
    #     score_freq = score_counts/np.sum(score_counts)
    #     score_freq_err = np.sqrt(score_counts)/np.sum(score_counts)
    #     plotting.plot_steps(score_bins, score_freq, yerr=score_freq_err,
    #                         color='C{}'.format(idx), label=config, alpha=0.8,
    #                         ax=ax)
    #
    # ax.set_ylabel('Frequency')
    # ax.set_xlabel('BDT score')
    # ax.set_ylim(0)
    # ax.set_xlim(min_score, max_score)
    # ax.grid()
    # ax.legend()
    # Two stacked panels: distributions (top) and ratios to 2012 (bottom).
    gs = gridspec.GridSpec(2, 1, height_ratios=[1,1], hspace=0.1)
    ax1 = plt.subplot(gs[0])
    ax2 = plt.subplot(gs[1], sharex=ax1)
    # Top panel: normalized score histogram (with Poisson errors) per year.
    for idx, config in enumerate(args.config):
        score_counts = np.histogram(config_scores_dict[config], bins=score_bins)[0]
        score_freq = score_counts/np.sum(score_counts)
        score_freq_err = np.sqrt(score_counts)/np.sum(score_counts)
        plotting.plot_steps(score_bins, score_freq, yerr=score_freq_err,
                            color='C{}'.format(idx), label=config, alpha=0.8,
                            ax=ax1)
    ax1.set_ylabel('Frequency')
    ax1.tick_params(labelbottom='off')
    ax1.grid()
    ax1.legend()
    # Bottom panel: ratio of each year's distribution to IC86.2012.
    for idx, config in enumerate(args.config):
        if config == 'IC86.2012': continue
        score_counts = np.histogram(config_scores_dict[config], bins=score_bins)[0]
        score_freq = score_counts/np.sum(score_counts)
        score_freq_err = np.sqrt(score_counts)/np.sum(score_counts)
        score_counts_2012 = np.histogram(config_scores_dict['IC86.2012'], bins=score_bins)[0]
        score_freq_2012 = score_counts_2012/np.sum(score_counts_2012)
        score_freq_err_2012 = np.sqrt(score_counts_2012)/np.sum(score_counts_2012)
        ratio, ratio_err = comp.analysis.ratio_error(score_freq, score_freq_err,
                                                     score_freq_2012, score_freq_err_2012)
        plotting.plot_steps(score_bins, ratio, yerr=ratio_err,
                            color='C{}'.format(idx), label=config, alpha=0.8,
                            ax=ax2)
    # Unity line marks agreement with the 2012 baseline.
    ax2.axhline(1, marker='None', linestyle='-.', color='k', lw=1.5)
    ax2.set_ylabel('$\mathrm{f/f_{2012}}$')
    # ax2.set_ylabel('Ratio with IC86.2012')
    ax2.set_xlabel('BDT score')
    # ax2.set_ylim(0)
    ax2.set_xlim(min_score, max_score)
    ax2.grid()
    score_outfile = os.path.join(comp.paths.figures_dir,
                                 'yearly_data_comparisons', 'BDT_scores.png')
    comp.check_output_dir(score_outfile)
    plt.savefig(score_outfile)
| mit |
marcocaccin/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 6 | 24655 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.datasets import make_regression
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
# 200 shuffled samples of the diabetes regression data, shared by the tests
# below. NOTE: `rng` stays at module scope -- later tests keep drawing from it.
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
rng = np.random.RandomState(0)
ind = np.arange(X_diabetes.shape[0])
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]

# Iris classification data, stored sparse to exercise sparse code paths.
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target

# Filters used to run each helper on dense and CSR representations.
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
    """Every solver reaches a sane score in both data regimes; cholesky
    and sag are also exercised with sample weights."""
    # TODO: for this test to be robust, we should use a dataset instead
    # of np.random.
    rng = np.random.RandomState(0)
    alpha = 1.0

    def _check(solver, n_samples, n_features, min_score, check_shape):
        # Draw y before X to keep the RNG stream identical per case.
        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)
        model = Ridge(alpha=alpha, solver=solver)
        model.fit(X, y)
        if check_shape:
            assert_equal(model.coef_.shape, (X.shape[1], ))
        assert_greater(model.score(X, y), min_score)
        if solver in ("cholesky", "sag"):
            # Currently the only solvers to support sample_weight.
            model.fit(X, y, sample_weight=np.ones(n_samples))
            assert_greater(model.score(X, y), min_score)

    for solver in ("svd", "sparse_cg", "cholesky", "lsqr", "sag"):
        # More samples than features, then more features than samples.
        _check(solver, 6, 5, 0.47, check_shape=True)
        _check(solver, 5, 10, 0.9, check_shape=False)
def test_primal_dual_relationship():
    """Primal coefficients equal X^T times the dual (kernel) coefficients."""
    y = y_diabetes.reshape(-1, 1)
    primal = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
    gram = np.dot(X_diabetes, X_diabetes.T)
    dual = _solve_cholesky_kernel(gram, y, alpha=[1e-2])
    recovered = np.dot(X_diabetes.T, dual).T
    assert_array_almost_equal(primal, recovered)
def test_ridge_singular():
    """Ridge with alpha=0 still fits a rank-deficient (duplicated) design."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 6, 6
    # Duplicate each half so the design matrix is singular.
    y_half = rng.randn(n_samples // 2)
    y = np.concatenate((y_half, y_half))
    X_half = rng.randn(n_samples // 2, n_features)
    X = np.concatenate((X_half, X_half), axis=0)
    ridge = Ridge(alpha=0)
    ridge.fit(X, y)
    assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
    """Sample weights must behave like rescaling rows by sqrt(weight).

    Also checks the weighted, intercept-fitting solution against one
    Newton step on the penalized least-squares objective (a quadratic is
    solved exactly in a single step).
    """
    rng = np.random.RandomState(0)
    for solver in ("cholesky", ):
        for n_samples, n_features in ((6, 5), (5, 10)):
            for alpha in (1.0, 1e-2):
                y = rng.randn(n_samples)
                X = rng.randn(n_samples, n_features)
                sample_weight = 1 + rng.rand(n_samples)
                coefs = ridge_regression(X, y,
                                         alpha=alpha,
                                         sample_weight=sample_weight,
                                         solver=solver)
                # Sample weight can be implemented via a simple rescaling
                # for the square loss.
                coefs2 = ridge_regression(
                    X * np.sqrt(sample_weight)[:, np.newaxis],
                    y * np.sqrt(sample_weight),
                    alpha=alpha, solver=solver)
                assert_array_almost_equal(coefs, coefs2)
                # Test for fit_intercept = True
                est = Ridge(alpha=alpha, solver=solver)
                est.fit(X, y, sample_weight=sample_weight)
                # Check using Newton's Method
                # Quadratic function should be solved in a single step.
                # Initialize
                sample_weight = np.sqrt(sample_weight)
                # Prepend an all-ones column so the intercept is column 0.
                X_weighted = sample_weight[:, np.newaxis] * (
                    np.column_stack((np.ones(n_samples), X)))
                y_weighted = y * sample_weight
                # Gradient is (X*coef-y)*X + alpha*coef_[1:]
                # Remove coef since it is initialized to zero.
                grad = -np.dot(y_weighted, X_weighted)
                # Hessian is (X.T*X) + alpha*I except that the first
                # diagonal element should be zero, since there is no
                # penalization of intercept.
                diag = alpha * np.ones(n_features + 1)
                diag[0] = 0.
                hess = np.dot(X_weighted.T, X_weighted)
                hess.flat[::n_features + 2] += diag
                coef_ = - np.dot(linalg.inv(hess), grad)
                assert_almost_equal(coef_[0], est.intercept_)
                assert_array_almost_equal(coef_[1:], est.coef_)
def test_ridge_shapes():
    """coef_ and intercept_ shapes track the target's dimensionality."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 5, 10
    X = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples)
    Y1 = y[:, np.newaxis]   # single target as a 2-D column
    Y = np.c_[y, 1 + y]     # two targets
    ridge = Ridge()
    for target, coef_shape, intercept_shape in (
            (y, (n_features,), ()),
            (Y1, (1, n_features), (1, )),
            (Y, (2, n_features), (2, ))):
        ridge.fit(X, target)
        assert_equal(ridge.coef_.shape, coef_shape)
        assert_equal(ridge.intercept_.shape, intercept_shape)
def test_ridge_intercept():
    """Multi-target fit reproduces single-target intercepts (GH issue #708)."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 5, 10
    X = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples)
    Y = np.c_[y, 1. + y]
    ridge = Ridge()
    ridge.fit(X, y)
    single_intercept = ridge.intercept_
    # Second column is y + 1, so its intercept shifts by exactly 1.
    ridge.fit(X, Y)
    assert_almost_equal(ridge.intercept_[0], single_intercept)
    assert_almost_equal(ridge.intercept_[1], single_intercept + 1.)
def test_toy_ridge_object():
    """Unpenalized Ridge interpolates a toy line; attribute types and
    shapes track the target's dimensionality."""
    # TODO: test also n_samples > n_features
    X = np.array([[1], [2]])
    Y = np.array([1, 2])
    model = Ridge(alpha=0.0)
    model.fit(X, Y)
    probe = [[1], [2], [3], [4]]
    assert_almost_equal(model.predict(probe), [1., 2, 3, 4])
    # 1-D target: flat coef_, scalar intercept_.
    assert_equal(len(model.coef_.shape), 1)
    assert_equal(type(model.intercept_), np.float64)
    # 2-D target: 2-D coef_, array intercept_.
    Y = np.vstack((Y, Y)).T
    model.fit(X, Y)
    probe = [[1], [2], [3], [4]]
    assert_equal(len(model.coef_.shape), 2)
    assert_equal(type(model.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
    """With alpha=0, Ridge and ordinary least squares coincide."""
    rng = np.random.RandomState(0)
    # We need more samples than features for a unique OLS solution.
    n_samples, n_features = 5, 4
    y = rng.randn(n_samples)
    X = rng.randn(n_samples, n_features)
    ridge = Ridge(alpha=0., fit_intercept=False)
    ols = LinearRegression(fit_intercept=False)
    # The original test fit and compared twice; the loop preserves that
    # (also exercising refit stability).
    for _ in range(2):
        ridge.fit(X, y)
        ols.fit(X, y)
        assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
    """A vector alpha applies one penalty per target, matching per-target fits."""
    rng = np.random.RandomState(42)
    n_samples, n_features, n_targets = 20, 10, 5
    X = rng.randn(n_samples, n_features)
    y = rng.randn(n_samples, n_targets)
    penalties = np.arange(n_targets)
    # Reference: fit each target separately with its own alpha.
    coef_cholesky = np.array([
        Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
        for alpha, target in zip(penalties, y.T)])
    # Every solver must reproduce the per-target reference in a joint fit.
    for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky', 'sag']:
        joint_coef = Ridge(alpha=penalties, solver=solver,
                           tol=1e-8).fit(X, y).coef_
        assert_array_almost_equal(coef_cholesky, joint_coef)
    # Mismatched numbers of targets and penalties must raise.
    ridge = Ridge(alpha=penalties[:-1])
    assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
    """Check efficient leave-one-out (GCV) Ridge against brute force.

    ``filter_`` converts the data to the representation under test
    (identity for dense, CSR for sparse). Returns a list holding the
    best alpha so dense and sparse runs can be compared by the caller.
    """
    n_samples = X_diabetes.shape[0]
    ret = []
    ridge_gcv = _RidgeGCV(fit_intercept=False)
    ridge = Ridge(alpha=1.0, fit_intercept=False)
    # generalized cross-validation (efficient leave-one-out)
    decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
    errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
    values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
    # brute-force leave-one-out: remove one example at a time
    errors2 = []
    values2 = []
    for i in range(n_samples):
        sel = np.arange(n_samples) != i
        X_new = X_diabetes[sel]
        y_new = y_diabetes[sel]
        ridge.fit(X_new, y_new)
        value = ridge.predict([X_diabetes[i]])[0]
        error = (y_diabetes[i] - value) ** 2
        errors2.append(error)
        values2.append(value)
    # check that efficient and brute-force LOO give same results
    assert_almost_equal(errors, errors2)
    assert_almost_equal(values, values2)
    # generalized cross-validation (efficient leave-one-out,
    # SVD variation)
    decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
    errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
    values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
    # check that efficient and SVD efficient LOO give same results
    assert_almost_equal(errors, errors3)
    assert_almost_equal(values, values3)
    # check best alpha
    ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
    alpha_ = ridge_gcv.alpha_
    ret.append(alpha_)
    # check that we get same best alpha with custom loss_func
    f = ignore_warnings
    scoring = make_scorer(mean_squared_error, greater_is_better=False)
    ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
    f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
    assert_equal(ridge_gcv2.alpha_, alpha_)
    # check that we get same best alpha with custom score_func
    func = lambda x, y: -mean_squared_error(x, y)
    scoring = make_scorer(func)
    ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
    f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
    assert_equal(ridge_gcv3.alpha_, alpha_)
    # check that we get same best alpha with a scorer
    scorer = get_scorer('mean_squared_error')
    ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
    ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
    assert_equal(ridge_gcv4.alpha_, alpha_)
    # check that we get same best alpha with sample weights
    ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
                  sample_weight=np.ones(n_samples))
    assert_equal(ridge_gcv.alpha_, alpha_)
    # simulate several responses
    Y = np.vstack((y_diabetes, y_diabetes)).T
    ridge_gcv.fit(filter_(X_diabetes), Y)
    Y_pred = ridge_gcv.predict(filter_(X_diabetes))
    ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
    y_pred = ridge_gcv.predict(filter_(X_diabetes))
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
                              Y_pred, decimal=5)
    return ret
def _test_ridge_cv(filter_):
    """RidgeCV fits and predicts with the default CV and an explicit KFold."""
    n_samples = X_diabetes.shape[0]
    ridge_cv = RidgeCV()
    for cv in (None, KFold(n_samples, 5)):
        if cv is not None:
            ridge_cv.set_params(cv=cv)
        ridge_cv.fit(filter_(X_diabetes), y_diabetes)
        ridge_cv.predict(filter_(X_diabetes))
        # 1-D target: flat coef_, scalar intercept_.
        assert_equal(len(ridge_cv.coef_.shape), 1)
        assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
    """Fit Ridge without intercept; return the rounded training score."""
    model = Ridge(fit_intercept=False)
    model.fit(filter_(X_diabetes), y_diabetes)
    return np.round(model.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
    """Stacked identical targets predict like the single-target fit."""
    # Simulate several (identical) responses.
    Y = np.vstack((y_diabetes, y_diabetes)).T
    n_features = X_diabetes.shape[1]
    model = Ridge(fit_intercept=False)
    model.fit(filter_(X_diabetes), Y)
    assert_equal(model.coef_.shape, (2, n_features))
    Y_pred = model.predict(filter_(X_diabetes))
    model.fit(filter_(X_diabetes), y_diabetes)
    y_pred = model.predict(filter_(X_diabetes))
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
                              Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
    """Ridge classifiers reach ~80% accuracy on iris, with and without CV."""
    n_classes = np.unique(y_iris).shape[0]
    n_features = X_iris.shape[1]
    for clf in (RidgeClassifier(), RidgeClassifierCV()):
        clf.fit(filter_(X_iris), y_iris)
        assert_equal(clf.coef_.shape, (n_classes, n_features))
        predictions = clf.predict(filter_(X_iris))
        assert_greater(np.mean(y_iris == predictions), .79)
    # Same check with an explicit KFold CV object.
    cv = KFold(X_iris.shape[0], 5)
    clf = RidgeClassifierCV(cv=cv)
    clf.fit(filter_(X_iris), y_iris)
    predictions = clf.predict(filter_(X_iris))
    assert_true(np.mean(y_iris == predictions) >= 0.8)
def _test_tolerance(filter_):
    """A tighter tolerance never scores worse than a looser one."""
    scores = []
    for tol in (1e-5, 1e-3):
        model = Ridge(tol=tol, fit_intercept=False)
        model.fit(filter_(X_diabetes), y_diabetes)
        scores.append(model.score(filter_(X_diabetes), y_diabetes))
    assert_true(scores[0] >= scores[1])
# ignore warning that solvers are changed to SAG for
# temporary fix
@ignore_warnings
def test_dense_sparse():
    """Every helper must agree between dense and CSR representations."""
    checks = (_test_ridge_loo,
              _test_ridge_cv,
              _test_ridge_diabetes,
              _test_multi_ridge_diabetes,
              _test_ridge_classifiers,
              _test_tolerance)
    for check in checks:
        dense_result = check(DENSE_FILTER)
        sparse_result = check(SPARSE_FILTER)
        # Helpers that return values must agree across representations.
        if dense_result is not None and sparse_result is not None:
            assert_array_almost_equal(dense_result, sparse_result, decimal=3)
def test_ridge_cv_sparse_svd():
    # gcv_mode="svd" has no sparse support: fitting must raise TypeError.
    X = sp.csr_matrix(X_diabetes)
    ridge = RidgeCV(gcv_mode="svd")
    # Pass y as well: without it the TypeError came from the missing
    # positional argument, not from the sparse/SVD incompatibility the
    # test is meant to exercise.
    assert_raises(TypeError, ridge.fit, X, y_diabetes)
def test_ridge_sparse_svd():
    # The 'svd' solver does not accept sparse input: fit must raise.
    X = sp.csc_matrix(rng.rand(100, 10))
    y = rng.rand(100)
    svd_ridge = Ridge(solver='svd', fit_intercept=False)
    assert_raises(TypeError, svd_ridge.fit, X, y)
def test_class_weights():
    # Test class weights.
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                  [1.0, 1.0], [1.0, 0.0]])
    y = [1, 1, 1, -1, -1]
    unweighted = RidgeClassifier(class_weight=None)
    unweighted.fit(X, y)
    assert_array_equal(unweighted.predict([[0.2, -1.0]]), np.array([1]))
    # With a tiny weight on class 1 the hyperplane rotates clock-wise and
    # the prediction on the probe point flips.
    downweighted = RidgeClassifier(class_weight={1: 0.001})
    downweighted.fit(X, y)
    assert_array_equal(downweighted.predict([[0.2, -1.0]]), np.array([-1]))
    # class_weight='balanced' must cope with negative labels.
    balanced = RidgeClassifier(class_weight='balanced')
    balanced.fit(X, y)
    assert_array_equal(balanced.predict([[0.2, -1.0]]), np.array([1]))
    # When every class has the same number of samples, 'balanced' and None
    # must produce the same model.
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
    y = [1, 1, -1, -1]
    plain = RidgeClassifier(class_weight=None)
    plain.fit(X, y)
    balanced = RidgeClassifier(class_weight='balanced')
    balanced.fit(X, y)
    assert_equal(len(balanced.classes_), 2)
    assert_array_almost_equal(plain.coef_, balanced.coef_)
    assert_array_almost_equal(plain.intercept_, balanced.intercept_)
def test_class_weight_vs_sample_weight():
    """Check class_weights resemble sample_weights behavior."""
    for cls in (RidgeClassifier, RidgeClassifierCV):
        # Iris is balanced, so 'balanced' weighting must be a no-op.
        plain = cls()
        plain.fit(iris.data, iris.target)
        balanced = cls(class_weight='balanced')
        balanced.fit(iris.data, iris.target)
        assert_almost_equal(plain.coef_, balanced.coef_)
        # Inflating class 1 via sample weights must match the equivalent
        # user-defined class weights.
        sample_weight = np.ones(iris.target.shape)
        sample_weight[iris.target == 1] *= 100
        class_weight = {0: 1., 1: 100., 2: 1.}
        by_sample = cls()
        by_sample.fit(iris.data, iris.target, sample_weight)
        by_class = cls(class_weight=class_weight)
        by_class.fit(iris.data, iris.target)
        assert_almost_equal(by_sample.coef_, by_class.coef_)
        # sample_weight and class_weight must combine multiplicatively.
        squared = cls()
        squared.fit(iris.data, iris.target, sample_weight ** 2)
        combined = cls(class_weight=class_weight)
        combined.fit(iris.data, iris.target, sample_weight)
        assert_almost_equal(squared.coef_, combined.coef_)
def test_class_weights_cv():
    # Test class weights for cross validated ridge classifier.
    X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                  [1.0, 1.0], [1.0, 0.0]])
    y = [1, 1, 1, -1, -1]
    unweighted = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
    unweighted.fit(X, y)
    # Heavily down-weighting class 1 must change the decision on this point.
    weighted = RidgeClassifierCV(class_weight={1: 0.001},
                                 alphas=[.01, .1, 1, 10])
    weighted.fit(X, y)
    assert_array_equal(weighted.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
    # Test _RidgeCV's store_cv_values attribute.
    # Fixed: the original read `rng = rng = np.random.RandomState(42)`,
    # a duplicated assignment left over from an edit.
    rng = np.random.RandomState(42)
    n_samples = 8
    n_features = 5
    x = rng.randn(n_samples, n_features)
    alphas = [1e-1, 1e0, 1e1]
    n_alphas = len(alphas)
    r = RidgeCV(alphas=alphas, store_cv_values=True)
    # with len(y.shape) == 1: one column of CV values per alpha
    y = rng.randn(n_samples)
    r.fit(x, y)
    assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
    # with len(y.shape) == 2: an extra axis for the response dimension
    n_responses = 3
    y = rng.randn(n_samples, n_responses)
    r.fit(x, y)
    assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
    rng = np.random.RandomState(0)
    alphas = (0.1, 1.0, 10.0)
    # Exercise both the n_samples > n_features regime and the opposite,
    # since they take different code paths.
    for n_samples, n_features in ((6, 5), (5, 10)):
        y = rng.randn(n_samples)
        X = rng.randn(n_samples, n_features)
        sample_weight = 1 + rng.rand(n_samples)
        cv = KFold(n_samples, 5)
        ridgecv = RidgeCV(alphas=alphas, cv=cv)
        ridgecv.fit(X, y, sample_weight=sample_weight)
        # The same search done through GridSearchCV must select the same
        # alpha and coefficients.
        grid = GridSearchCV(Ridge(), {'alpha': alphas},
                            fit_params={'sample_weight': sample_weight},
                            cv=cv)
        grid.fit(X, y)
        assert_equal(ridgecv.alpha_, grid.best_estimator_.alpha)
        assert_array_almost_equal(ridgecv.coef_, grid.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
    # Sample weights must be either scalar or 1D
    rng = np.random.RandomState(42)
    for n_samples, n_features in zip([2, 3], [3, 2]):
        X = rng.randn(n_samples, n_features)
        y = rng.randn(n_samples)
        weights_1d = rng.randn(n_samples) ** 2 + 1
        ridge = Ridge(alpha=1)
        # 1D arrays and plain scalars are accepted.
        ridge.fit(X, y, weights_1d)
        ridge.fit(X, y, 1.)
        ridge.fit(X, y, 2.)
        # Column and row vectors (2D) must both be rejected.
        def fit_column_weights():
            ridge.fit(X, y, weights_1d[:, np.newaxis])
        def fit_row_weights():
            ridge.fit(X, y, weights_1d[np.newaxis, :])
        assert_raise_message(ValueError,
                             "Sample weights must be 1D array or scalar",
                             fit_column_weights)
        assert_raise_message(ValueError,
                             "Sample weights must be 1D array or scalar",
                             fit_row_weights)
def test_sparse_design_with_sample_weights():
    # Sample weights must give identical fits on sparse and dense designs.
    rng = np.random.RandomState(42)
    converters = [sp.coo_matrix,
                  sp.csr_matrix,
                  sp.csc_matrix,
                  sp.lil_matrix,
                  sp.dok_matrix]
    sparse_ridge = Ridge(alpha=1., fit_intercept=False)
    dense_ridge = Ridge(alpha=1., fit_intercept=False)
    for n_samples, n_features in zip([2, 3], [3, 2]):
        X = rng.randn(n_samples, n_features)
        y = rng.randn(n_samples)
        weights = rng.randn(n_samples) ** 2 + 1
        for to_sparse in converters:
            sparse_ridge.fit(to_sparse(X), y, sample_weight=weights)
            dense_ridge.fit(X, y, sample_weight=weights)
            assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
                                      decimal=6)
def test_raises_value_error_if_solver_not_supported():
    # Tests whether a ValueError is raised if a non-identified solver
    # is passed to ridge_regression
    wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
    def fit_with_wrong_solver():
        ridge_regression(np.eye(3), np.ones(3), alpha=1., solver=wrong_solver)
    assert_raise_message(ValueError,
                         "Solver %s not understood" % wrong_solver,
                         fit_with_wrong_solver)
def test_sparse_cg_max_iter():
    # Even a single sparse_cg iteration must yield a full coefficient vector.
    estimator = Ridge(solver="sparse_cg", max_iter=1)
    estimator.fit(X_diabetes, y_diabetes)
    assert_equal(estimator.coef_.shape[0], X_diabetes.shape[1])
@ignore_warnings
def test_n_iter():
    # Test that self.n_iter_ is correct.
    n_targets = 2
    X, y = X_diabetes, y_diabetes
    Y = np.tile(y, (n_targets, 1)).T
    # Iterative solvers report the iteration count once per target ...
    for max_iter in range(1, 4):
        for solver in ('sag', 'lsqr'):
            estimator = Ridge(solver=solver, max_iter=max_iter, tol=1e-12)
            estimator.fit(X, Y)
            assert_array_equal(estimator.n_iter_, np.tile(max_iter, n_targets))
    # ... while direct solvers report None.
    for solver in ('sparse_cg', 'svd', 'cholesky'):
        estimator = Ridge(solver=solver, max_iter=1, tol=1e-1)
        estimator.fit(X, Y)
        assert_equal(estimator.n_iter_, None)
def test_ridge_fit_intercept_sparse():
    X, y = make_regression(n_samples=1000, n_features=2, n_informative=2,
                           bias=10., random_state=42)
    X_csr = sp.csr_matrix(X)
    # 'sag' supports a fitted intercept on sparse input and must match the
    # dense fit.
    dense = Ridge(alpha=1., tol=1.e-15, solver='sag', fit_intercept=True)
    sparse = Ridge(alpha=1., tol=1.e-15, solver='sag', fit_intercept=True)
    dense.fit(X, y)
    sparse.fit(X_csr, y)
    assert_almost_equal(dense.intercept_, sparse.intercept_)
    assert_array_almost_equal(dense.coef_, sparse.coef_)
    # A solver without sparse-intercept support is switched internally,
    # warning the user but still matching the dense fit.
    sparse = Ridge(alpha=1., tol=1.e-15, solver='lsqr', fit_intercept=True)
    assert_warns(UserWarning, sparse.fit, X_csr, y)
    assert_almost_equal(dense.intercept_, sparse.intercept_)
    assert_array_almost_equal(dense.coef_, sparse.coef_)
| bsd-3-clause |
spreg-git/pysal | pysal/contrib/spint/tests/test_gravity_stats.py | 8 | 12472 | """
Tests for statistics for gravity-style spatial interaction models
"""
__author__ = 'toshan'
import unittest
import numpy as np
import pandas as pd
import gravity as grav
import mle_stats as stats
class SingleParameter(unittest.TestCase):
    """Unit tests statistics when there is a single parameters estimated"""
    def setUp(self):
        # 25-zone single-origin flow system: observed flows, origin/destination
        # ids, distances (Dij) and destination populations.
        self.f = np.array([0, 6469, 7629, 20036, 4690,
                           6194, 11688, 2243, 8857, 7248,
                           3559, 9221, 10099, 22866, 3388,
                           9986, 46618, 11639, 1380, 5261,
                           5985, 6731, 2704, 12250, 16132])
        self.o = np.repeat(1, 25)
        self.d = np.array(range(1, 26))
        self.dij = np.array([0, 576, 946, 597, 373,
                             559, 707, 1208, 602, 692,
                             681, 1934, 332, 595, 906,
                             425, 755, 672, 1587, 526,
                             484, 2141, 2182, 410, 540])
        self.pop = np.array([1596000, 2071000, 3376000, 6978000, 1345000,
                             2064000, 2378000, 1239000, 4435000, 1999000,
                             1274000, 7042000, 834000, 1268000, 1965000,
                             1046000, 12131000, 4824000, 969000, 2401000,
                             2410000, 2847000, 1425000, 1089000, 2909000])
        self.dt = pd.DataFrame({'origins': self.o,
                                'destinations': self.d,
                                'pop': self.pop,
                                'Dij': self.dij,
                                'flows': self.f})
    def test_single_parameter(self):
        # Production-constrained gravity model with a power distance decay;
        # the dictionaries below hold the expected reference values.
        model = grav.ProductionConstrained(self.dt, 'origins', 'destinations', 'flows',
                                           ['pop'], 'Dij', 'pow')
        # expected system-level statistics
        ss = {'obs_mean_trip_len': 736.52834197296534,
              'pred_mean_trip_len': 734.40974204773784,
              'OD_pairs': 24,
              'predicted_flows': 242873.00000000003,
              'avg_dist_trav': 737.0,
              'num_destinations': 24,
              'observed_flows': 242873,
              'avg_dist': 851.0,
              'num_origins': 1}
        # expected parameter-level statistics (one entry per estimated term)
        ps = {'beta': {'LL_zero_val': -3.057415839736517,
                       'relative_likelihood_stat': 24833.721614296166,
                       'standard_error': 0.0052734418614330883},
              'all_params': {'zero_vals_LL': -3.1780538303479453,
                             'mle_vals_LL': -3.0062909275101761},
              'pop': {'LL_zero_val': -3.1773474269437778,
                      'relative_likelihood_stat': 83090.010373874276,
                      'standard_error': 0.0027673052892085684}}
        # expected goodness-of-fit statistics
        fs = {'r_squared': 0.60516003720997413,
              'srmse': 0.57873206718148507}
        # expected entropy-based statistics
        es = {'pred_obs_deviance': 0.1327,
              'entropy_ratio': 0.5642,
              'maximum_entropy': 3.1781,
              'max_pred_deviance': 0.1718,
              'variance_obs_entropy': 2.55421e-06,
              'predicted_entropy': 3.0063,
              't_stat_entropy': 66.7614,
              'max_obs_deviance': 0.3045,
              'observed_entropy': 2.8736,
              'variance_pred_entropy': 1.39664e-06}
        # each stats.* helper attaches its results to the model in place
        sys_stats = stats.sys_stats(model)
        self.assertAlmostEqual(model.system_stats['obs_mean_trip_len'], ss['obs_mean_trip_len'], 4)
        self.assertAlmostEqual(model.system_stats['pred_mean_trip_len'], ss['pred_mean_trip_len'], 4)
        self.assertAlmostEqual(model.system_stats['OD_pairs'], ss['OD_pairs'])
        self.assertAlmostEqual(model.system_stats['predicted_flows'], ss['predicted_flows'])
        self.assertAlmostEqual(model.system_stats['avg_dist_trav'], ss['avg_dist_trav'])
        self.assertAlmostEqual(model.system_stats['num_destinations'], ss['num_destinations'])
        self.assertAlmostEqual(model.system_stats['observed_flows'], ss['observed_flows'])
        self.assertAlmostEqual(model.system_stats['avg_dist'], ss['avg_dist'], 4)
        self.assertAlmostEqual(model.system_stats['num_origins'], ss['num_origins'])
        param_stats = stats.param_stats(model)
        self.assertAlmostEqual(model.parameter_stats['beta']['LL_zero_val'], ps['beta']['LL_zero_val'], 4)
        self.assertAlmostEqual(model.parameter_stats['beta']['relative_likelihood_stat'],
                               ps['beta']['relative_likelihood_stat'], 4)
        self.assertAlmostEqual(model.parameter_stats['beta']['standard_error'], ps['beta']['standard_error'], 4)
        self.assertAlmostEqual(model.parameter_stats['pop']['LL_zero_val'], ps['pop']['LL_zero_val'], 4)
        self.assertAlmostEqual(model.parameter_stats['pop']['relative_likelihood_stat'],
                               ps['pop']['relative_likelihood_stat'], 4)
        self.assertAlmostEqual(model.parameter_stats['pop']['standard_error'], ps['pop']['standard_error'], 4)
        self.assertAlmostEqual(model.parameter_stats['all_params']['zero_vals_LL'], ps['all_params']['zero_vals_LL'], 4)
        self.assertAlmostEqual(model.parameter_stats['all_params']['mle_vals_LL'], ps['all_params']['mle_vals_LL'], 4)
        fit_stats = stats.fit_stats(model)
        self.assertAlmostEqual(model.fit_stats['r_squared'], fs['r_squared'], 4)
        self.assertAlmostEqual(model.fit_stats['srmse'], fs['srmse'], 4)
        ent_stats = stats.ent_stats(model)
        self.assertAlmostEqual(model.entropy_stats['pred_obs_deviance'], es['pred_obs_deviance'], 4)
        self.assertAlmostEqual(model.entropy_stats['entropy_ratio'], es['entropy_ratio'], 4)
        self.assertAlmostEqual(model.entropy_stats['maximum_entropy'], es['maximum_entropy'], 4)
        self.assertAlmostEqual(model.entropy_stats['max_pred_deviance'], es['max_pred_deviance'], 4)
        self.assertAlmostEqual(model.entropy_stats['variance_obs_entropy'], es['variance_obs_entropy'], 4)
        self.assertAlmostEqual(model.entropy_stats['predicted_entropy'], es['predicted_entropy'], 4)
        self.assertAlmostEqual(model.entropy_stats['t_stat_entropy'], es['t_stat_entropy'], 4)
        self.assertAlmostEqual(model.entropy_stats['max_obs_deviance'], es['max_obs_deviance'], 4)
        self.assertAlmostEqual(model.entropy_stats['observed_entropy'], es['observed_entropy'], 4)
        self.assertAlmostEqual(model.entropy_stats['variance_pred_entropy'], es['variance_pred_entropy'], 4)
class MultipleParameter(unittest.TestCase):
    """Unit tests statistics when there are multiple parameters estimated"""
    def setUp(self):
        # 9x9 origin-destination flow matrix (row-major) plus matching
        # origin/destination ids and a symmetric distance matrix.
        self.f = np.array([0, 180048, 79223, 26887, 198144, 17995, 35563, 30528, 110792,
                           283049, 0, 300345, 67280, 718673, 55094, 93434, 87987, 268458,
                           87267, 237229, 0, 281791, 551483, 230788, 178517, 172711, 394481,
                           29877, 60681, 286580, 0, 143860, 49892, 185618, 181868, 274629,
                           130830, 382565, 346407, 92308, 0, 252189, 192223, 89389, 279739,
                           21434, 53772, 287340, 49828, 316650, 0, 141679, 27409, 87938,
                           30287, 64645, 161645, 144980, 199466, 121366, 0, 134229, 289880,
                           21450, 43749, 97808, 113683, 89806, 25574, 158006, 0, 437255,
                           72114, 133122, 229764, 165405, 266305, 66324, 252039, 342948, 0])
        self.o = np.repeat(np.array(range(1, 10)), 9)
        self.d = np.tile(np.array(range(1, 10)), 9)
        self.dij = np.array([0, 219, 1009, 1514, 974, 1268, 1795, 2420, 3174,
                             219, 0, 831, 1336, 755, 1049, 1576, 2242, 2996,
                             1009, 831, 0, 505, 1019, 662, 933, 1451, 2205,
                             1514, 1336, 505, 0, 1370, 888, 654, 946, 1700,
                             974, 755, 1019, 1370, 0, 482, 1144, 2278, 2862,
                             1268, 1049, 662, 888, 482, 0, 662, 1795, 2380,
                             1795, 1576, 933, 654, 1144, 662, 0, 1287, 1779,
                             2420, 2242, 1451, 946, 2278, 1795, 1287, 0, 754,
                             3147, 2996, 2205, 1700, 2862, 2380, 1779, 754, 0])
        self.dt = pd.DataFrame({'Origin': self.o,
                                'Destination': self.d,
                                'flows': self.f,
                                'Dij': self.dij})
    def test_multiple_parameter(self):
        # Doubly-constrained gravity model with exponential distance decay;
        # the dictionaries below hold the expected reference values.
        model = grav.DoublyConstrained(self.dt, 'Origin', 'Destination', 'flows', 'Dij', 'exp')
        # expected system-level statistics
        ss = {'obs_mean_trip_len': 1250.9555521611339,
              'pred_mean_trip_len': 1250.9555521684863,
              'OD_pairs': 72, 'predicted_flows': 12314322.0,
              'avg_dist_trav': 1251.0, 'num_destinations': 9,
              'observed_flows': 12314322, 'avg_dist': 1414.0,
              'num_origins': 9}
        # expected parameter-level statistics
        ps = {'beta': {'LL_zero_val': -4.1172103581711941,
                       'relative_likelihood_stat': 2053596.3814015209,
                       'standard_error': 4.9177433418433932e-07},
              'all_params': {'zero_vals_LL': -4.1172102183395936,
                             'mle_vals_LL': -4.0338279201692675}}
        # expected goodness-of-fit statistics
        fs = {'r_squared': 0.89682406680906979,
              'srmse': 0.24804939821988789}
        # expected entropy-based statistics
        es = {'pred_obs_deviance': 0.0314,
              'entropy_ratio': 0.8855,
              'maximum_entropy': 4.2767,
              'max_pred_deviance': 0.2429,
              'variance_obs_entropy': 3.667e-08,
              'predicted_entropy': 4.0338,
              't_stat_entropy': 117.1593,
              'max_obs_deviance': 0.2743,
              'observed_entropy': 4.0024,
              'variance_pred_entropy': 3.516e-08}
        # each stats.* helper attaches its results to the model in place
        sys_stats = stats.sys_stats(model)
        self.assertAlmostEqual(model.system_stats['obs_mean_trip_len'], ss['obs_mean_trip_len'], 4)
        self.assertAlmostEqual(model.system_stats['pred_mean_trip_len'], ss['pred_mean_trip_len'], 4)
        self.assertAlmostEqual(model.system_stats['OD_pairs'], ss['OD_pairs'])
        self.assertAlmostEqual(model.system_stats['predicted_flows'], ss['predicted_flows'])
        self.assertAlmostEqual(model.system_stats['avg_dist_trav'], ss['avg_dist_trav'])
        self.assertAlmostEqual(model.system_stats['num_destinations'], ss['num_destinations'])
        self.assertAlmostEqual(model.system_stats['observed_flows'], ss['observed_flows'])
        self.assertAlmostEqual(model.system_stats['avg_dist'], ss['avg_dist'], 4)
        self.assertAlmostEqual(model.system_stats['num_origins'], ss['num_origins'])
        param_stats = stats.param_stats(model)
        self.assertAlmostEqual(model.parameter_stats['beta']['LL_zero_val'], ps['beta']['LL_zero_val'], 4)
        self.assertAlmostEqual(model.parameter_stats['beta']['relative_likelihood_stat'],
                               ps['beta']['relative_likelihood_stat'], 4)
        self.assertAlmostEqual(model.parameter_stats['beta']['standard_error'], ps['beta']['standard_error'], 4)
        self.assertAlmostEqual(model.parameter_stats['all_params']['zero_vals_LL'], ps['all_params']['zero_vals_LL'], 4)
        self.assertAlmostEqual(model.parameter_stats['all_params']['mle_vals_LL'], ps['all_params']['mle_vals_LL'], 4)
        fit_stats = stats.fit_stats(model)
        self.assertAlmostEqual(model.fit_stats['r_squared'], fs['r_squared'], 4)
        self.assertAlmostEqual(model.fit_stats['srmse'], fs['srmse'], 4)
        ent_stats = stats.ent_stats(model)
        self.assertAlmostEqual(model.entropy_stats['pred_obs_deviance'], es['pred_obs_deviance'], 4)
        self.assertAlmostEqual(model.entropy_stats['entropy_ratio'], es['entropy_ratio'], 4)
        self.assertAlmostEqual(model.entropy_stats['maximum_entropy'], es['maximum_entropy'], 4)
        self.assertAlmostEqual(model.entropy_stats['max_pred_deviance'], es['max_pred_deviance'], 4)
        self.assertAlmostEqual(model.entropy_stats['variance_obs_entropy'], es['variance_obs_entropy'], 4)
        self.assertAlmostEqual(model.entropy_stats['predicted_entropy'], es['predicted_entropy'], 4)
        self.assertAlmostEqual(model.entropy_stats['t_stat_entropy'], es['t_stat_entropy'], 4)
        self.assertAlmostEqual(model.entropy_stats['max_obs_deviance'], es['max_obs_deviance'], 4)
        self.assertAlmostEqual(model.entropy_stats['observed_entropy'], es['observed_entropy'], 4)
        self.assertAlmostEqual(model.entropy_stats['variance_pred_entropy'], es['variance_pred_entropy'], 4)
if __name__ == '__main__':
unittest.main() | bsd-3-clause |
RobertABT/heightmap | build/matplotlib/examples/pylab_examples/text_rotation_relative_to_line.py | 9 | 1229 | #!/usr/bin/env python
"""
Text objects in matplotlib are normally rotated with respect to the
screen coordinate system (i.e., 45 degrees rotation plots text along a
line that is in between horizontal and vertical no matter how the axes
are changed). However, at times one wants to rotate text with respect
to something on the plot. In this case, the correct angle won't be
the angle of that object in the plot coordinate system, but the angle
that that object APPEARS in the screen coordinate system. This angle
is found by transforming the angle from the plot to the screen
coordinate system, as shown in the example below.
"""
from pylab import *

# Plot diagonal line (45 degrees)
h = plot( r_[:10], r_[:10] )

# set limits so that it no longer looks on screen to be 45 degrees
xlim([-10,20])

# Locations to plot text
l1 = array((1,1))
l2 = array((5,5))

# Rotate angle
angle = 45
# transform_angles maps the data-space angle to the on-screen angle so the
# text visually follows the line even after the axes limits changed.
trans_angle = gca().transData.transform_angles(array((45,)),
                                               l2.reshape((1,2)))[0]

# Plot text
th1 = text(l1[0],l1[1],'text not rotated correctly',fontsize=16,
           rotation=angle)
# Fixed label: this one uses the transformed angle and therefore IS
# rotated correctly (the original copy-pasted the first label).
th2 = text(l2[0],l2[1],'text rotated correctly',fontsize=16,
           rotation=trans_angle)

show()
| mit |
EVEprosper/ProsperWarehouse | setup.py | 1 | 3216 | """Wheel for ProsperWarehouse project"""
from os import path, listdir
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
HERE = path.abspath(path.dirname(__file__))
def hack_find_packages(include_str):
    """patches setuptools.find_packages issue

    setuptools.find_packages(path='') doesn't work as intended

    Returns:
        (:obj:`list` :obj:`str`) <include_str> itself followed by
        <include_str>.<pkg> for every package found under it

    """
    return [include_str] + \
           [include_str + '.' + subpackage
            for subpackage in find_packages(include_str)]
def include_all_subfiles(*args):
    """Slurps up all files in a directory (non recursive) for data_files section

    Note:
        Not recursive, only includes flat files

    Args:
        *args (str): directory paths, relative to the project root (HERE)

    Returns:
        (:obj:`list` :obj:`str`) list of all non-directories in each path

    """
    file_list = []
    for path_included in args:
        local_path = path.join(HERE, path_included)
        # renamed loop var: `file` shadowed the (Python 2) builtin
        for entry in listdir(local_path):
            entry_abspath = path.join(local_path, entry)
            if path.isdir(entry_abspath):  # do not include sub folders
                continue
            file_list.append(path_included + '/' + entry)
    return file_list
class PyTest(TestCommand):
    """PyTest cmdclass hook for test-at-buildtime functionality
    http://doc.pytest.org/en/latest/goodpractices.html#manual-integration
    """
    # exposes `python setup.py test --pytest-args="..."` on the command line
    user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]
    def initialize_options(self):
        # Seed the default pytest target (the `test` directory) before
        # distutils parses the command line.
        TestCommand.initialize_options(self)
        self.pytest_args = ['test'] #load defaults here
    def run_tests(self):
        import shlex
        #import here, cause outside the eggs aren't loaded
        import pytest
        pytest_commands = []
        try: #read commandline
            # --pytest-args arrives as a single string; split it into an
            # argv-style list.
            pytest_commands = shlex.split(self.pytest_args)
        except AttributeError: #use defaults
            # self.pytest_args is already a list (the default above), which
            # shlex.split cannot handle.
            pytest_commands = self.pytest_args
        errno = pytest.main(pytest_commands)
        # propagate pytest's exit status so build/CI sees failures
        exit(errno)
# Package metadata and dependency pins for the ProsperWarehouse wheel.
setup(
    name='ProsperWarehouse',
    author='John Purcell',
    author_email='prospermarketshow@gmail.com',
    url='https://github.com/EVEprosper/ProsperWarehouse',
    # NOTE(review): download_url still points at the v0.0.3 tag while
    # version below says 0.0.4 -- confirm and bump the tag reference.
    download_url='https://github.com/EVEprosper/ProsperWarehouse/tarball/v0.0.3',
    version='0.0.4',
    license='MIT',
    classifiers=[
        'Programming Language :: Python :: 3.5'
    ],
    keywords='prosper eveonline api database',
    packages=hack_find_packages('prosper'),
    include_package_data=True,
    #data_files=[
    #    #TODO: license + README
    #    #Can't use data_files with gemfury upload (need `bdist_wheel`)
    #    ('SQL', include_all_subfiles('SQL')),
    #    ('docs', include_all_subfiles('docs'))
    #],
    package_data={
        'prosper':[
            'table_configs/table_config.cfg'
        ]
    },
    install_requires=[
        'configparser==3.5.0',
        'mysql-connector==2.1.4',
        'numpy==1.11.1',
        'pandas==0.18.1',
        'plumbum==1.6.2',
        'python-dateutil==2.5.3',
        'pytz==2016.6.1',
        'six==1.10.0',
        'requests==2.11.1',
        'ProsperCommon==0.3.3'
    ]
)
| mit |
PSRCode/lttng-ci-1 | scripts/system-tests/parse-results.py | 2 | 3858 | #! /usr/bin/python3
from subprocess import call
from collections import defaultdict
import csv
import numpy as np
import pandas as pd
import sys
def test_case(df):
    """Aggregate per-run benchmark rows and yield one lava-test-case result
    dict per (tracer, thread-count) group and per metric."""
    # Duration is in usec
    # usecperiter = Duration / (average number of iterations per thread)
    df['usecperiter'] = (df['nbthreads'] * df['duration']) / df['nbiter']
    group_keys = ['nbthreads', 'tracer', 'testcase', 'sleeptime']
    grouped = df.groupby(group_keys)
    # (output column, source column, aggregation) for the summary frame.
    aggregations = [('periter_mean', 'usecperiter', 'mean'),
                    ('periter_stdev', 'usecperiter', 'std'),
                    ('nbiter_mean', 'nbiter', 'mean'),
                    ('nbiter_stdev', 'nbiter', 'std'),
                    ('duration_mean', 'duration', 'mean'),
                    ('duration_stdev', 'duration', 'std')]
    summary = None
    for out_col, src_col, how in aggregations:
        part = pd.DataFrame(
            {out_col: getattr(grouped[src_col], how)()}).reset_index()
        summary = part if summary is None else summary.merge(part)
    # if there is any NaN or None value in the DF we raise an exeception
    if summary.isnull().values.any():
        raise Exception('NaN value found in dataframe')
    # (test-case suffix, summary column, units) for each emitted result.
    reports = [('peritermean', 'periter_mean', 'usec/iter'),
               ('periterstdev', 'periter_stdev', 'usec/iter'),
               ('nbitermean', 'nbiter_mean', 'iterations'),
               ('nbiterstdev', 'nbiter_stdev', 'iterations'),
               ('durationmean', 'duration_mean', 'usec'),
               ('durationstdev', 'duration_stdev', 'usec')]
    for _, row in summary.iterrows():
        base = [row['tracer'], str(row['nbthreads']) + 'thr']
        for suffix, column, units in reports:
            yield {'name': '_'.join(base + [suffix]),
                   'result': 'pass',
                   'units': units,
                   'measurement': str(row[column])}
def main():
    """Read the benchmark CSV named on the command line, report every metric
    through lava-test-case, and dump all measurements to
    processed_results.csv (one column per test-case name)."""
    results_file = sys.argv[1]
    df = pd.read_csv(results_file)
    # A plain dict suffices: the original used defaultdict() with no
    # default factory, which behaves exactly like {}.
    results = {}
    for res in test_case(df):
        call(
            ['lava-test-case',
                res['name'],
                '--result', res['result'],
                '--measurement', res['measurement'],
                '--units', res['units']])
        # Save the results to write to the CSV file
        results[res['name']] = res['measurement']
    # Write the dictionary to a csv file where each key is a column
    with open('processed_results.csv', 'w') as output_csv:
        dict_csv_write = csv.DictWriter(output_csv, results.keys())
        dict_csv_write.writeheader()
        dict_csv_write.writerow(results)


if __name__ == '__main__':
    main()
| gpl-2.0 |
judithfan/pix2svg | generative/tests/compare_test/train.py | 1 | 11171 | from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import shutil
import numpy as np
from tqdm import tqdm
import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from sklearn.metrics import mean_squared_error
from model import PredictorFC6, PredictorCONV42, PredictorPOOL1
from dataset import VisualDataset
def save_checkpoint(state, is_best, folder='./', filename='checkpoint.pth.tar'):
    """Serialize a training checkpoint to <folder>/<filename>.

    When ``is_best`` is true the checkpoint is additionally copied to
    ``model_best.pth.tar`` in the same folder.
    """
    if not os.path.isdir(folder):
        # makedirs (vs. the original mkdir) also creates missing parent
        # directories, so nested --out-dir paths work.
        os.makedirs(folder)
    torch.save(state, os.path.join(folder, filename))
    if is_best:
        shutil.copyfile(os.path.join(folder, filename),
                        os.path.join(folder, 'model_best.pth.tar'))
def load_checkpoint(file_path, use_cuda=False):
    """Rebuild the predictor saved by save_checkpoint and restore its
    weights; on CPU the tensors are remapped off the GPU."""
    if use_cuda:
        checkpoint = torch.load(file_path)
    else:
        checkpoint = torch.load(
            file_path, map_location=lambda storage, location: storage)
    vgg_layer = checkpoint['vgg_layer']
    assert vgg_layer in ['conv42', 'fc6', 'pool1']
    # dispatch table replaces the original if/elif chain
    constructors = {'conv42': PredictorCONV42,
                    'fc6': PredictorFC6,
                    'pool1': PredictorPOOL1}
    model = constructors[vgg_layer]()
    model.load_state_dict(checkpoint['state_dict'])
    model.vgg_layer = vgg_layer
    return model
def cross_entropy(input, soft_targets):
    """Soft-label cross entropy: per row, -sum(targets * log_softmax(logits)),
    averaged over the batch."""
    log_probs = F.log_softmax(input, dim=1)
    per_example = torch.sum(-soft_targets * log_probs, dim=1)
    return torch.mean(per_example)
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0    # most recent value
        self.avg = 0    # running average: sum / count
        self.sum = 0    # weighted sum of values seen so far
        self.count = 0  # total weight seen so far

    def update(self, val, n=1):
        """Record ``val`` observed with weight ``n`` (e.g. a batch size)."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('vgg_layer', type=str, help='conv42|fc6|pool1')
parser.add_argument('--loss-scale', type=float, default=10000., help='multiplier for loss [default: 10000.]')
parser.add_argument('--out-dir', type=str, default='./trained_models',
help='where to save checkpoints [./trained_models]')
parser.add_argument('--batch-size', type=int, default=10,
help='number of examples in a mini-batch [default: 10]')
parser.add_argument('--lr', type=float, default=1e-4, help='learning rate [default: 1e-4]')
parser.add_argument('--epochs', type=int, default=100, help='number of epochs [default: 100]')
parser.add_argument('--cuda', action='store_true', default=False)
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
assert args.vgg_layer in ['conv42', 'fc6', 'pool1']
train_dataset = VisualDataset(layer=args.vgg_layer, split='train')
val_dataset = VisualDataset(layer=args.vgg_layer, split='val')
test_dataset = VisualDataset(layer=args.vgg_layer, split='test')
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False)
if args.vgg_layer == 'conv42':
model = PredictorCONV42()
elif args.vgg_layer == 'fc6':
model = PredictorFC6()
elif args.vgg_layer == 'pool1':
model = PredictorPOOL1()
if args.cuda:
model.cuda()
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-4)
def train(epoch):
    """Run one training epoch; returns (avg loss, avg MSE) over the epoch.

    Uses the enclosing scope's model, optimizer, args, train_dataset and
    train_loader. Written against the old (pre-0.4) PyTorch Variable API.
    """
    model.train()
    loss_meter = AverageMeter()
    mse_meter = AverageMeter()
    for batch_idx, (sketch, label) in enumerate(train_loader):
        sketch = Variable(sketch)
        label = Variable(label)
        batch_size = len(sketch)
        if args.cuda:
            sketch = sketch.cuda()
            label = label.cuda()
        # set optimizer defaults to 0
        optimizer.zero_grad()
        # Score every photo against the sketch batch; each photo yields one
        # logit column, concatenated into a (batch, n_photos) matrix.
        pred_logits = []
        photo_generator = train_dataset.gen_photos()
        for photo in photo_generator():
            photo = Variable(photo)
            if args.cuda:
                photo = photo.cuda()
            # fc6 features are 2D; conv/pool features are 4D
            if args.vgg_layer == 'fc6':
                photo = photo.repeat(batch_size, 1)
            else:
                photo = photo.repeat(batch_size, 1, 1, 1)
            pred_logit = model(photo, sketch)
            pred_logits.append(pred_logit)
        pred_logits = torch.cat(pred_logits, dim=1)
        pred = F.softmax(pred_logits, dim=1)
        # soft-label cross entropy against the target distribution
        loss = args.loss_scale * cross_entropy(pred_logits, label.float())
        loss_meter.update(loss.data[0], batch_size)
        if batch_idx % 10 == 0:
            # print (target, prediction) pairs sorted by target mass for a
            # quick qualitative check
            pred_npy = pred.cpu().data.numpy()[0]
            label_npy = label.cpu().data.numpy()[0]
            sort_npy = np.argsort(label_npy)[::-1]
            print(zip(label_npy[sort_npy], pred_npy[sort_npy]))
        label_np = label.cpu().data.numpy()
        pred_np = pred.cpu().data.numpy()
        mse = mean_squared_error(label_np, pred_np)
        mse_meter.update(mse, batch_size)
        loss.backward()
        optimizer.step()
        # mean absolute-free gradient magnitude, for divergence monitoring
        mean_grads = torch.mean(torch.cat([param.grad.cpu().data.contiguous().view(-1)
                                           for param in model.parameters()]))
        print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tMSE: {:6f}\t|Grad|: {:6f}'.format(
            epoch, batch_idx * batch_size, len(train_loader.dataset), 100. * batch_idx / len(train_loader),
            loss_meter.avg, mse_meter.avg, mean_grads))
    print('====> Epoch: {}\tLoss: {:.4f}\tMSE: {:.6f}'.format(
        epoch, loss_meter.avg, mse_meter.avg))
    return loss_meter.avg, mse_meter.avg
def validate():
    """Evaluate the current model on the validation split.

    Returns (avg loss, avg MSE); no gradients are computed
    (volatile Variables, old pre-0.4 PyTorch API).
    """
    model.eval()
    loss_meter = AverageMeter()
    mse_meter = AverageMeter()
    pbar = tqdm(total=len(val_loader))
    for batch_idx, (sketch, label) in enumerate(val_loader):
        sketch = Variable(sketch, volatile=True)
        label = Variable(label, requires_grad=False)
        batch_size = len(sketch)
        if args.cuda:
            sketch = sketch.cuda()
            label = label.cuda()
        # score each validation photo against the sketch batch (same scheme
        # as in train())
        pred_logits = []
        photo_generator = val_dataset.gen_photos()
        for photo in photo_generator():
            photo = Variable(photo)
            if args.cuda:
                photo = photo.cuda()
            # fc6 features are 2D; conv/pool features are 4D
            if args.vgg_layer == 'fc6':
                photo = photo.repeat(batch_size, 1)
            else:
                photo = photo.repeat(batch_size, 1, 1, 1)
            pred_logit = model(photo, sketch)
            pred_logits.append(pred_logit)
        pred_logits = torch.cat(pred_logits, dim=1)
        pred = F.softmax(pred_logits, dim=1)
        loss = args.loss_scale * cross_entropy(pred_logits, label.float())
        loss_meter.update(loss.data[0], batch_size)
        label_np = label.cpu().data.numpy()
        pred_np = pred.cpu().data.numpy()
        mse = mean_squared_error(label_np, pred_np)
        mse_meter.update(mse, batch_size)
        pbar.update()
    pbar.close()
    print('====> Val Loss: {:.4f}\tVal MSE: {:.6f}'.format(
        loss_meter.avg, mse_meter.avg))
    return loss_meter.avg, mse_meter.avg
def test():
    """Run one pass over the test set.

    Returns (average loss, average MSE). Mirrors validate() except for the
    data loader.
    """
    model.eval()
    loss_meter = AverageMeter()
    mse_meter = AverageMeter()
    pbar = tqdm(total=len(test_loader))
    for batch_idx, (sketch, label) in enumerate(test_loader):
        # volatile=True: legacy (pre-0.4 PyTorch) inference mode
        sketch = Variable(sketch, volatile=True)
        label = Variable(label, requires_grad=False)
        batch_size = len(sketch)
        if args.cuda:
            sketch = sketch.cuda()
            label = label.cuda()
        pred_logits = []
        # NOTE(review): this uses val_dataset.gen_photos() even though we
        # iterate test_loader -- confirm the photo pool is intentionally
        # shared across splits (possible copy-paste from validate()).
        photo_generator = val_dataset.gen_photos()
        for photo in photo_generator():
            photo = Variable(photo)
            if args.cuda:
                photo = photo.cuda()
            # tile the single photo across the batch; fc6 features are 2-D,
            # conv features are 4-D
            if args.vgg_layer == 'fc6':
                photo = photo.repeat(batch_size, 1)
            else:
                photo = photo.repeat(batch_size, 1, 1, 1)
            pred_logit = model(photo, sketch)
            pred_logits.append(pred_logit)
        pred_logits = torch.cat(pred_logits, dim=1)
        pred = F.softmax(pred_logits, dim=1)
        loss = args.loss_scale * cross_entropy(pred_logits, label.float())
        loss_meter.update(loss.data[0], batch_size)
        label_np = label.cpu().data.numpy()
        pred_np = pred.cpu().data.numpy()
        mse = mean_squared_error(label_np, pred_np)
        mse_meter.update(mse, batch_size)
        pbar.update()
    pbar.close()
    print('====> Test Loss: {:.4f}\tTest MSE: {:.6f}'.format(
        loss_meter.avg, mse_meter.avg))
    return loss_meter.avg, mse_meter.avg
# Track per-epoch train/val/test metrics for the plots generated below.
loss_db = np.zeros((args.epochs, 3))
mse_db = np.zeros((args.epochs, 3))
best_loss = sys.maxint  # Python 2 only; consistent with xrange below
for epoch in xrange(1, args.epochs + 1):
    train_loss, train_mse = train(epoch)
    val_loss, val_mse = validate()
    test_loss, test_mse = test()
    # model selection is done on the validation loss
    is_best = val_loss < best_loss
    best_loss = min(val_loss, best_loss)
    save_checkpoint({
        'state_dict': model.state_dict(),
        'train_loss': train_loss,
        'train_mse': train_mse,
        'val_loss': val_loss,
        'val_mse': val_mse,
        'test_loss': test_loss,
        'test_mse': test_mse,
        'optimizer' : optimizer.state_dict(),
        'vgg_layer': args.vgg_layer,
    }, is_best, folder=args.out_dir)
    # save stuff for plots
    loss_db[epoch - 1, 0] = train_loss
    loss_db[epoch - 1, 1] = val_loss
    loss_db[epoch - 1, 2] = test_loss
    mse_db[epoch - 1, 0] = train_mse
    mse_db[epoch - 1, 1] = val_mse
    mse_db[epoch - 1, 2] = test_mse
# plot the training numbers
import matplotlib.pyplot as plt
plt.switch_backend('Agg')  # headless rendering; must precede any plotting
import seaborn as sns
sns.set_style('whitegrid')
plt.figure()
plt.plot(loss_db[:, 0], '-', label='Train')
plt.plot(loss_db[:, 1], '-', label='Val')
plt.plot(loss_db[:, 2], '-', label='Test')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.tight_layout()
plt.savefig(os.path.join(args.out_dir, 'loss.png'))
plt.figure()
plt.plot(mse_db[:, 0], '-', label='Train')
plt.plot(mse_db[:, 1], '-', label='Val')
plt.plot(mse_db[:, 2], '-', label='Test')
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.legend()
plt.tight_layout()
plt.savefig(os.path.join(args.out_dir, 'mse.png'))
| mit |
dpgoetz/swift | swift/common/middleware/xprofile.py | 36 | 9905 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Profiling middleware for Swift Servers.
The current implementation is based on eventlet aware profiler.(For the
future, more profilers could be added in to collect more data for analysis.)
Profiling all incoming requests and accumulating cpu timing statistics
information for performance tuning and optimization. A mini web UI is also
provided for profiling data analysis. It can be accessed from the URL as
below.
Index page for browse profile data::
http://SERVER_IP:PORT/__profile__
List all profiles to return profile ids in json format::
http://SERVER_IP:PORT/__profile__/
http://SERVER_IP:PORT/__profile__/all
Retrieve specific profile data in different formats::
http://SERVER_IP:PORT/__profile__/PROFILE_ID?format=[default|json|csv|ods]
http://SERVER_IP:PORT/__profile__/current?format=[default|json|csv|ods]
http://SERVER_IP:PORT/__profile__/all?format=[default|json|csv|ods]
Retrieve metrics from specific function in json format::
http://SERVER_IP:PORT/__profile__/PROFILE_ID/NFL?format=json
http://SERVER_IP:PORT/__profile__/current/NFL?format=json
http://SERVER_IP:PORT/__profile__/all/NFL?format=json
NFL is defined by concatenation of file name, function name and the first
line number.
e.g.::
account.py:50(GETorHEAD)
or with full path:
opt/stack/swift/swift/proxy/controllers/account.py:50(GETorHEAD)
A list of URL examples:
http://localhost:8080/__profile__ (proxy server)
http://localhost:6000/__profile__/all (object server)
http://localhost:6001/__profile__/current (container server)
http://localhost:6002/__profile__/12345?format=json (account server)
The profiling middleware can be configured in paste file for WSGI servers such
as proxy, account, container and object servers. Please refer to the sample
configuration files in etc directory.
The profiling data is provided with four formats such as binary(by default),
json, csv and odf spreadsheet which requires installing odfpy library.
sudo pip install odfpy
There's also a simple visualization capability which is enabled by using
matplotlib toolkit. It is also required to be installed if you want to use
it to visualize statistic data.
sudo apt-get install python-matplotlib
"""
import os
import sys
import time
from eventlet import greenthread, GreenPool, patcher
import eventlet.green.profile as eprofile
from swift import gettext_ as _
from swift.common.utils import get_logger, config_true_value
from swift.common.swob import Request
from x_profile.exceptions import NotFoundException, MethodNotAllowed,\
ProfileException
from x_profile.html_viewer import HTMLViewer
from x_profile.profile_model import ProfileLog
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
# Name of the "text" type on the current interpreter (str on Py3,
# unicode on Py2), used by bytes_() below.
if PY3:  # pragma: no cover
    text_type = str
else:
    text_type = unicode  # noqa: F821 -- Python 2 builtin
def bytes_(s, encoding='utf-8', errors='strict'):
    """Encode *s* to bytes when it is a text string; return any other
    value unchanged."""
    if not isinstance(s, text_type):
        return s
    return s.encode(encoding, errors)  # pragma: no cover
# parse_qs moved between stdlib locations across Python versions; fall
# back from the Py3 location to Py2 urlparse, then to the deprecated
# cgi module.
try:
    from urllib.parse import parse_qs
except ImportError:
    try:
        from urlparse import parse_qs
    except ImportError:  # pragma: no cover
        from cgi import parse_qs

# Default location for dumped profile data when the config gives none.
DEFAULT_PROFILE_PREFIX = '/tmp/log/swift/profile/default.profile'

# unwind the iterator; it may call start_response, do lots of work, etc
PROFILE_EXEC_EAGER = """
app_iter = self.app(environ, start_response)
app_iter_ = list(app_iter)
if hasattr(app_iter, 'close'):
    app_iter.close()
"""

# don't unwind the iterator (don't consume resources)
PROFILE_EXEC_LAZY = """
app_iter_ = self.app(environ, start_response)
"""

thread = patcher.original('thread')  # non-monkeypatched module needed

# This monkey patch code fix the problem of eventlet profile tool
# which can not accumulate profiling results across multiple calls
# of runcalls and runctx.
def new_setup(self):
    """Replacement for the eventlet Profile setup hook.

    Records a _has_setup flag so new_runctx/new_runcall can set up the
    profiler once and then keep accumulating timings across calls.
    """
    self._has_setup = True
    self.cur = None
    self.timings = {}
    self.current_tasklet = greenthread.getcurrent()
    self.thread_id = thread.get_ident()
    self.simulate_call("profiler")


def new_runctx(self, cmd, globals, locals):
    """Replacement runctx: reuse the existing setup, then fold the
    collected timings back in after the profiled code runs."""
    if not getattr(self, '_has_setup', False):
        self._setup()
    try:
        return self.base.runctx(self, cmd, globals, locals)
    finally:
        self.TallyTimings()


def new_runcall(self, func, *args, **kw):
    """Replacement runcall: reuse the existing setup, then fold the
    collected timings back in after the profiled call returns."""
    if not getattr(self, '_has_setup', False):
        self._setup()
    try:
        return self.base.runcall(self, func, *args, **kw)
    finally:
        self.TallyTimings()
class ProfileMiddleware(object):
    """WSGI middleware that profiles every request through the wrapped
    app and serves the collected statistics from a small web UI mounted
    under ``/<path>`` (``/__profile__`` by default)."""

    def __init__(self, app, conf):
        self.app = app
        self.logger = get_logger(conf, log_route='profile')
        self.log_filename_prefix = conf.get('log_filename_prefix',
                                            DEFAULT_PROFILE_PREFIX)
        dirname = os.path.dirname(self.log_filename_prefix)
        # Notes: this effort may fail due to permission denied.
        # it is better to be created and authorized to current
        # user in advance.
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        self.dump_interval = float(conf.get('dump_interval', 5.0))
        self.dump_timestamp = config_true_value(conf.get(
            'dump_timestamp', 'no'))
        self.flush_at_shutdown = config_true_value(conf.get(
            'flush_at_shutdown', 'no'))
        # path component (no slashes) under which the UI is served
        self.path = conf.get('path', '__profile__').replace('/', '')
        # whether to eagerly consume the app iterator while profiling
        self.unwind = config_true_value(conf.get('unwind', 'no'))
        self.profile_module = conf.get('profile_module',
                                       'eventlet.green.profile')
        self.profiler = get_profiler(self.profile_module)
        self.profile_log = ProfileLog(self.log_filename_prefix,
                                      self.dump_timestamp)
        self.viewer = HTMLViewer(self.path, self.profile_module,
                                 self.profile_log)
        self.dump_pool = GreenPool(1000)
        self.last_dump_at = None

    def __del__(self):
        # Optionally drop this process's on-disk profile data at shutdown.
        if self.flush_at_shutdown:
            self.profile_log.clear(str(os.getpid()))

    def _combine_body_qs(self, request):
        """Merge query-string parameters with parameters found in the
        request body (note: reads wsgi.input to exhaustion)."""
        wsgi_input = request.environ['wsgi.input']
        query_dict = request.params
        qs_in_body = wsgi_input.read()
        query_dict.update(parse_qs(qs_in_body, keep_blank_values=True,
                                   strict_parsing=False))
        return query_dict

    def dump_checkpoint(self):
        """Spawn an asynchronous dump of profile data, at most once per
        ``dump_interval`` seconds."""
        current_time = time.time()
        if self.last_dump_at is None or self.last_dump_at +\
                self.dump_interval < current_time:
            self.dump_pool.spawn_n(self.profile_log.dump_profile,
                                   self.profiler, os.getpid())
            self.last_dump_at = current_time

    def __call__(self, environ, start_response):
        request = Request(environ)
        path_entry = request.path_info.split('/')
        # hijack favicon request sent by browser so that it doesn't
        # invoke profiling hook and contaminate the data.
        if path_entry[1] == 'favicon.ico':
            start_response('200 OK', [])
            return ''
        elif path_entry[1] == self.path:
            # Serve the profiling web UI instead of the wrapped app.
            try:
                self.dump_checkpoint()
                query_dict = self._combine_body_qs(request)
                content, headers = self.viewer.render(request.url,
                                                      request.method,
                                                      path_entry,
                                                      query_dict,
                                                      self.renew_profile)
                start_response('200 OK', headers)
                return [bytes_(content)]
            except MethodNotAllowed as mx:
                start_response('405 Method Not Allowed', [])
                return '%s' % mx
            except NotFoundException as nx:
                start_response('404 Not Found', [])
                return '%s' % nx
            except ProfileException as pf:
                start_response('500 Internal Server Error', [])
                return '%s' % pf
            except Exception as ex:
                start_response('500 Internal Server Error', [])
                return _('Error on render profiling results: %s') % ex
        else:
            # Run the wrapped app under the profiler; the exec'd template
            # stores the app iterator in _locals['app_iter_'].
            _locals = locals()
            code = self.unwind and PROFILE_EXEC_EAGER or\
                PROFILE_EXEC_LAZY
            self.profiler.runctx(code, globals(), _locals)
            app_iter = _locals['app_iter_']
            self.dump_checkpoint()
            return app_iter

    def renew_profile(self):
        # Replace the profiler object, discarding in-memory statistics.
        self.profiler = get_profiler(self.profile_module)
def get_profiler(profile_module):
    """Return a fresh ``Profile`` instance from *profile_module*.

    For the eventlet green profiler, first install the monkey patches
    above so results accumulate across runcall/runctx invocations.
    """
    if profile_module == 'eventlet.green.profile':
        eprofile.Profile._setup = new_setup
        eprofile.Profile.runctx = new_runctx
        eprofile.Profile.runcall = new_runcall
    # hacked method to import profile module supported in python 2.6
    __import__(profile_module)
    return sys.modules[profile_module].Profile()
def filter_factory(global_conf, **local_conf):
    """Paste-deploy filter factory for the profiling middleware.

    Local (per-filter) settings override global ones.
    """
    conf = dict(global_conf)
    conf.update(local_conf)

    def profile_filter(app):
        return ProfileMiddleware(app, conf)

    return profile_filter
| apache-2.0 |
tomlof/scikit-learn | sklearn/decomposition/tests/test_nmf.py | 28 | 17934 | import numpy as np
import scipy.sparse as sp
import numbers
from scipy import linalg
from sklearn.decomposition import NMF, non_negative_factorization
from sklearn.decomposition import nmf # For testing internals
from scipy.sparse import csc_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raise_message, assert_no_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.extmath import squared_norm, fast_dot
from sklearn.base import clone
from sklearn.exceptions import ConvergenceWarning
def test_initialize_nn_output():
    """NMF initialization must return non-negative factors for every
    init scheme."""
    # Test that initialization does not return negative values
    rng = np.random.mtrand.RandomState(42)
    data = np.abs(rng.randn(10, 10))
    for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):
        W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
        assert_false((W < 0).any() or (H < 0).any())


def test_parameter_checking():
    """Invalid constructor arguments must raise ValueError with a
    helpful message."""
    A = np.ones((2, 2))
    name = 'spam'
    msg = "Invalid solver parameter: got 'spam' instead of one of"
    assert_raise_message(ValueError, msg, NMF(solver=name).fit, A)
    msg = "Invalid init parameter: got 'spam' instead of one of"
    assert_raise_message(ValueError, msg, NMF(init=name).fit, A)
    msg = "Invalid beta_loss parameter: got 'spam' instead of one"
    assert_raise_message(ValueError, msg, NMF(solver='mu',
                                              beta_loss=name).fit, A)
    msg = "Invalid beta_loss parameter: solver 'cd' does not handle "
    msg += "beta_loss = 1.0"
    assert_raise_message(ValueError, msg, NMF(solver='cd',
                                              beta_loss=1.0).fit, A)

    # negative input must be rejected by fit, init, and transform alike
    msg = "Negative values in data passed to"
    assert_raise_message(ValueError, msg, NMF().fit, -A)
    assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A,
                         2, 'nndsvd')
    clf = NMF(2, tol=0.1).fit(A)
    assert_raise_message(ValueError, msg, clf.transform, -A)
def test_initialize_close():
    # Test NNDSVD error
    # Test that _initialize_nmf error is less than the standard deviation of
    # the entries in the matrix.
    rng = np.random.mtrand.RandomState(42)
    A = np.abs(rng.randn(10, 10))
    W, H = nmf._initialize_nmf(A, 10, init='nndsvd')
    error = linalg.norm(np.dot(W, H) - A)
    sdev = linalg.norm(A - A.mean())
    assert_true(error <= sdev)


def test_initialize_variants():
    # Test NNDSVD variants correctness
    # Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
    # 'nndsvd' only where the basic version has zeros.
    rng = np.random.mtrand.RandomState(42)
    data = np.abs(rng.randn(10, 10))
    W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd')
    Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda')
    War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar',
                                   random_state=0)
    # nonzero entries of the basic scheme must be preserved by the variants
    for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
        assert_almost_equal(evl[ref != 0], ref[ref != 0])
# ignore UserWarning raised when both solver='mu' and init='nndsvd'
@ignore_warnings(category=UserWarning)
def test_nmf_fit_nn_output():
    """Fitted components and transformed data must be non-negative for
    every solver/init combination."""
    # Test that the decomposition does not contain negative values
    A = np.c_[5 * np.ones(5) - np.arange(1, 6),
              5 * np.ones(5) + np.arange(1, 6)]
    for solver in ('cd', 'mu'):
        for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar', 'random'):
            model = NMF(n_components=2, solver=solver, init=init,
                        random_state=0)
            transf = model.fit_transform(A)
            assert_false((model.components_ < 0).any() or
                         (transf < 0).any())


def test_nmf_fit_close():
    rng = np.random.mtrand.RandomState(42)
    # Test that the fit is not too far away
    for solver in ('cd', 'mu'):
        pnmf = NMF(5, solver=solver, init='nndsvdar', random_state=0,
                   max_iter=600)
        X = np.abs(rng.randn(6, 5))
        assert_less(pnmf.fit(X).reconstruction_err_, 0.1)
def test_nmf_transform():
    # Test that NMF.transform returns close values
    # (fit_transform and a subsequent transform should roughly agree)
    rng = np.random.mtrand.RandomState(42)
    A = np.abs(rng.randn(6, 5))
    for solver in ['cd', 'mu']:
        m = NMF(solver=solver, n_components=3, init='random',
                random_state=0, tol=1e-5)
        ft = m.fit_transform(A)
        t = m.transform(A)
        assert_array_almost_equal(ft, t, decimal=2)


def test_nmf_transform_custom_init():
    # Smoke test that checks if NMF.transform works with custom initialization
    random_state = np.random.RandomState(0)
    A = np.abs(random_state.randn(6, 5))
    n_components = 4
    avg = np.sqrt(A.mean() / n_components)
    H_init = np.abs(avg * random_state.randn(n_components, 5))
    W_init = np.abs(avg * random_state.randn(6, n_components))

    m = NMF(solver='cd', n_components=n_components, init='custom',
            random_state=0)
    m.fit_transform(A, W=W_init, H=H_init)
    m.transform(A)


def test_nmf_inverse_transform():
    # Test that NMF.inverse_transform returns close values
    random_state = np.random.RandomState(0)
    A = np.abs(random_state.randn(6, 4))
    for solver in ('cd', 'mu'):
        m = NMF(solver=solver, n_components=4, init='random', random_state=0,
                max_iter=1000)
        ft = m.fit_transform(A)
        A_new = m.inverse_transform(ft)
        assert_array_almost_equal(A, A_new, decimal=2)


def test_n_components_greater_n_features():
    # Smoke test for the case of more components than features.
    rng = np.random.mtrand.RandomState(42)
    A = np.abs(rng.randn(30, 10))
    NMF(n_components=15, random_state=0, tol=1e-2).fit(A)
def test_nmf_sparse_input():
    """Sparse input must be accepted and yield the same factorization as
    the equivalent dense input.

    Fix: dropped the redundant function-local
    ``from scipy.sparse import csc_matrix`` -- the name is already
    imported at module level.
    """
    rng = np.random.mtrand.RandomState(42)
    A = np.abs(rng.randn(10, 10))
    A[:, 2 * np.arange(5)] = 0
    A_sparse = csc_matrix(A)

    for solver in ('cd', 'mu'):
        est1 = NMF(solver=solver, n_components=5, init='random',
                   random_state=0, tol=1e-2)
        est2 = clone(est1)

        W1 = est1.fit_transform(A)
        W2 = est2.fit_transform(A_sparse)
        H1 = est1.components_
        H2 = est2.components_

        assert_array_almost_equal(W1, W2)
        assert_array_almost_equal(H1, H2)
def test_nmf_sparse_transform():
    # Test that transform works on sparse data. Issue #2124
    rng = np.random.mtrand.RandomState(42)
    A = np.abs(rng.randn(3, 2))
    A[1, 1] = 0
    A = csc_matrix(A)

    for solver in ('cd', 'mu'):
        model = NMF(solver=solver, random_state=0, n_components=2,
                    max_iter=400)
        A_fit_tr = model.fit_transform(A)
        A_tr = model.transform(A)
        assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)


def test_non_negative_factorization_consistency():
    # Test that the function is called in the same way, either directly
    # or through the NMF class
    rng = np.random.mtrand.RandomState(42)
    A = np.abs(rng.randn(10, 10))
    A[:, 2 * np.arange(5)] = 0

    for solver in ('cd', 'mu'):
        W_nmf, H, _ = non_negative_factorization(
            A, solver=solver, random_state=1, tol=1e-2)
        W_nmf_2, _, _ = non_negative_factorization(
            A, H=H, update_H=False, solver=solver, random_state=1, tol=1e-2)

        model_class = NMF(solver=solver, random_state=1, tol=1e-2)
        W_cls = model_class.fit_transform(A)
        W_cls_2 = model_class.transform(A)
        assert_array_almost_equal(W_nmf, W_cls, decimal=10)
        assert_array_almost_equal(W_nmf_2, W_cls_2, decimal=10)
def test_non_negative_factorization_checking():
    """The public non_negative_factorization function must validate its
    arguments with informative errors."""
    A = np.ones((2, 2))
    # Test parameters checking is public function
    nnmf = non_negative_factorization
    assert_no_warnings(nnmf, A, A, A, np.int64(1))
    msg = ("Number of components must be a positive integer; "
           "got (n_components=1.5)")
    assert_raise_message(ValueError, msg, nnmf, A, A, A, 1.5)
    msg = ("Number of components must be a positive integer; "
           "got (n_components='2')")
    assert_raise_message(ValueError, msg, nnmf, A, A, A, '2')
    msg = "Negative values in data passed to NMF (input H)"
    assert_raise_message(ValueError, msg, nnmf, A, A, -A, 2, 'custom')
    msg = "Negative values in data passed to NMF (input W)"
    assert_raise_message(ValueError, msg, nnmf, A, -A, A, 2, 'custom')
    msg = "Array passed to NMF (input H) is full of zeros"
    assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom')
    msg = "Invalid regularization parameter: got 'spam' instead of one of"
    assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom', True,
                         'cd', 2., 1e-4, 200, 0., 0., 'spam')
def _beta_divergence_dense(X, W, H, beta):
    """Compute the beta-divergence of X and W.H for dense array only.

    Used as a reference for testing nmf._beta_divergence.
    """
    if isinstance(X, numbers.Number):
        # promote scalars to 1x1 arrays so one code path handles both
        W = np.array([[W]])
        H = np.array([[H]])
        X = np.array([[X]])

    WH = fast_dot(W, H)

    if beta == 2:
        # Frobenius (squared Euclidean) case
        return squared_norm(X - WH) / 2

    WH_Xnonzero = WH[X != 0]
    X_nonzero = X[X != 0]
    # clip to avoid log(0) / division by zero below
    np.maximum(WH_Xnonzero, 1e-9, out=WH_Xnonzero)

    if beta == 1:
        # generalized Kullback-Leibler divergence
        res = np.sum(X_nonzero * np.log(X_nonzero / WH_Xnonzero))
        res += WH.sum() - X.sum()

    elif beta == 0:
        # Itakura-Saito divergence
        div = X_nonzero / WH_Xnonzero
        res = np.sum(div) - X.size - np.sum(np.log(div))
    else:
        # general beta-divergence formula
        res = (X_nonzero ** beta).sum()
        res += (beta - 1) * (WH ** beta).sum()
        res -= beta * (X_nonzero * (WH_Xnonzero ** (beta - 1))).sum()
        res /= beta * (beta - 1)
    return res
def test_beta_divergence():
    # Compare _beta_divergence with the reference _beta_divergence_dense
    n_samples = 20
    n_features = 10
    n_components = 5
    beta_losses = [0., 0.5, 1., 1.5, 2.]

    # initialization
    rng = np.random.mtrand.RandomState(42)
    X = rng.randn(n_samples, n_features)
    X[X < 0] = 0.
    X_csr = sp.csr_matrix(X)
    W, H = nmf._initialize_nmf(X, n_components, init='random', random_state=42)

    for beta in beta_losses:
        # dense and sparse inputs must both match the dense reference
        ref = _beta_divergence_dense(X, W, H, beta)
        loss = nmf._beta_divergence(X, W, H, beta)
        loss_csr = nmf._beta_divergence(X_csr, W, H, beta)

        assert_almost_equal(ref, loss, decimal=7)
        assert_almost_equal(ref, loss_csr, decimal=7)


def test_special_sparse_dot():
    # Test the function that computes np.dot(W, H), only where X is non zero.
    n_samples = 10
    n_features = 5
    n_components = 3
    rng = np.random.mtrand.RandomState(42)
    X = rng.randn(n_samples, n_features)
    X[X < 0] = 0.
    X_csr = sp.csr_matrix(X)

    W = np.abs(rng.randn(n_samples, n_components))
    H = np.abs(rng.randn(n_components, n_features))

    WH_safe = nmf._special_sparse_dot(W, H, X_csr)
    WH = nmf._special_sparse_dot(W, H, X)

    # test that both results have same values, in X_csr nonzero elements
    ii, jj = X_csr.nonzero()
    WH_safe_data = np.asarray(WH_safe[ii, jj]).ravel()
    assert_array_almost_equal(WH_safe_data, WH[ii, jj], decimal=10)

    # test that WH_safe and X_csr have the same sparse structure
    assert_array_equal(WH_safe.indices, X_csr.indices)
    assert_array_equal(WH_safe.indptr, X_csr.indptr)
    assert_array_equal(WH_safe.shape, X_csr.shape)
@ignore_warnings(category=ConvergenceWarning)
def test_nmf_multiplicative_update_sparse():
    # Compare sparse and dense input in multiplicative update NMF
    # Also test continuity of the results with respect to beta_loss parameter
    n_samples = 20
    n_features = 10
    n_components = 5
    alpha = 0.1
    l1_ratio = 0.5
    n_iter = 20

    # initialization
    rng = np.random.mtrand.RandomState(1337)
    X = rng.randn(n_samples, n_features)
    X = np.abs(X)
    X_csr = sp.csr_matrix(X)
    W0, H0 = nmf._initialize_nmf(X, n_components, init='random',
                                 random_state=42)

    for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):
        # Reference with dense array X
        W, H = W0.copy(), H0.copy()
        W1, H1, _ = non_negative_factorization(
            X, W, H, n_components, init='custom', update_H=True,
            solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
            l1_ratio=l1_ratio, regularization='both', random_state=42)

        # Compare with sparse X
        W, H = W0.copy(), H0.copy()
        W2, H2, _ = non_negative_factorization(
            X_csr, W, H, n_components, init='custom', update_H=True,
            solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
            l1_ratio=l1_ratio, regularization='both', random_state=42)

        assert_array_almost_equal(W1, W2, decimal=7)
        assert_array_almost_equal(H1, H2, decimal=7)

        # Compare with almost same beta_loss, since some values have a specific
        # behavior, but the results should be continuous w.r.t beta_loss
        beta_loss -= 1.e-5
        W, H = W0.copy(), H0.copy()
        W3, H3, _ = non_negative_factorization(
            X_csr, W, H, n_components, init='custom', update_H=True,
            solver='mu', beta_loss=beta_loss, max_iter=n_iter, alpha=alpha,
            l1_ratio=l1_ratio, regularization='both', random_state=42)

        assert_array_almost_equal(W1, W3, decimal=4)
        assert_array_almost_equal(H1, H3, decimal=4)
def test_nmf_negative_beta_loss():
    # Test that an error is raised if beta_loss < 0 and X contains zeros.
    # Test that the output has not NaN values when the input contains zeros.
    n_samples = 6
    n_features = 5
    n_components = 3

    rng = np.random.mtrand.RandomState(42)
    X = rng.randn(n_samples, n_features)
    X[X < 0] = 0
    X_csr = sp.csr_matrix(X)

    def _assert_nmf_no_nan(X, beta_loss):
        # helper: factorize and check W, H are finite
        W, H, _ = non_negative_factorization(
            X, n_components=n_components, solver='mu', beta_loss=beta_loss,
            random_state=0, max_iter=1000)
        assert_false(np.any(np.isnan(W)))
        assert_false(np.any(np.isnan(H)))

    msg = "When beta_loss <= 0 and X contains zeros, the solver may diverge."
    for beta_loss in (-0.6, 0.):
        assert_raise_message(ValueError, msg, _assert_nmf_no_nan, X, beta_loss)
        _assert_nmf_no_nan(X + 1e-9, beta_loss)

    for beta_loss in (0.2, 1., 1.2, 2., 2.5):
        _assert_nmf_no_nan(X, beta_loss)
        _assert_nmf_no_nan(X_csr, beta_loss)
def test_nmf_regularization():
    # Test the effect of L1 and L2 regularizations
    n_samples = 6
    n_features = 5
    n_components = 3
    rng = np.random.mtrand.RandomState(42)
    X = np.abs(rng.randn(n_samples, n_features))

    # L1 regularization should increase the number of zeros
    l1_ratio = 1.
    for solver in ['cd', 'mu']:
        regul = nmf.NMF(n_components=n_components, solver=solver,
                        alpha=0.5, l1_ratio=l1_ratio, random_state=42)
        model = nmf.NMF(n_components=n_components, solver=solver,
                        alpha=0., l1_ratio=l1_ratio, random_state=42)

        W_regul = regul.fit_transform(X)
        W_model = model.fit_transform(X)

        H_regul = regul.components_
        H_model = model.components_

        W_regul_n_zeros = W_regul[W_regul == 0].size
        W_model_n_zeros = W_model[W_model == 0].size
        H_regul_n_zeros = H_regul[H_regul == 0].size
        H_model_n_zeros = H_model[H_model == 0].size

        assert_greater(W_regul_n_zeros, W_model_n_zeros)
        assert_greater(H_regul_n_zeros, H_model_n_zeros)

    # L2 regularization should decrease the mean of the coefficients
    l1_ratio = 0.
    for solver in ['cd', 'mu']:
        regul = nmf.NMF(n_components=n_components, solver=solver,
                        alpha=0.5, l1_ratio=l1_ratio, random_state=42)
        model = nmf.NMF(n_components=n_components, solver=solver,
                        alpha=0., l1_ratio=l1_ratio, random_state=42)

        W_regul = regul.fit_transform(X)
        W_model = model.fit_transform(X)

        H_regul = regul.components_
        H_model = model.components_

        assert_greater(W_model.mean(), W_regul.mean())
        assert_greater(H_model.mean(), H_regul.mean())
@ignore_warnings(category=ConvergenceWarning)
def test_nmf_decreasing():
    # test that the objective function is decreasing at each iteration
    n_samples = 20
    n_features = 15
    n_components = 10
    alpha = 0.1
    l1_ratio = 0.5
    tol = 0.

    # initialization
    rng = np.random.mtrand.RandomState(42)
    X = rng.randn(n_samples, n_features)
    np.abs(X, X)
    W0, H0 = nmf._initialize_nmf(X, n_components, init='random',
                                 random_state=42)

    for beta_loss in (-1.2, 0, 0.2, 1., 2., 2.5):
        for solver in ('cd', 'mu'):
            if solver != 'mu' and beta_loss != 2:
                # not implemented
                continue
            W, H = W0.copy(), H0.copy()
            previous_loss = None
            for _ in range(30):
                # one more iteration starting from the previous results
                W, H, _ = non_negative_factorization(
                    X, W, H, beta_loss=beta_loss, init='custom',
                    n_components=n_components, max_iter=1, alpha=alpha,
                    solver=solver, tol=tol, l1_ratio=l1_ratio, verbose=0,
                    regularization='both', random_state=0, update_H=True)

                loss = nmf._beta_divergence(X, W, H, beta_loss)
                if previous_loss is not None:
                    assert_greater(previous_loss, loss)
                previous_loss = loss
| bsd-3-clause |
hdmetor/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 216 | 8091 | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
# non centered, sparse centers to check the
centers = np.array([
    [0.0, 5.0, 0.0, 0.0, 0.0],
    [0.0, 0.0, 4.0, 0.0, 0.0],
    [1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
# shared blobs dataset (S) and labels used by all tests in this module
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
                            cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
    """Return True when every column of A matches the corresponding
    column of B up to a possible sign flip, with mean squared difference
    at most tol ** 2."""
    tol_sq = tol ** 2
    for col in range(A.shape[1]):
        same_sign = ((A[:, col] - B[:, col]) ** 2).mean() <= tol_sq
        flipped = ((A[:, col] + B[:, col]) ** 2).mean() <= tol_sq
        if not (same_sign or flipped):
            return False
    return True
def test_spectral_embedding_two_components(seed=36):
    # Test spectral embedding with two components
    random_state = np.random.RandomState(seed)
    n_sample = 100
    affinity = np.zeros(shape=[n_sample * 2,
                               n_sample * 2])
    # first component
    affinity[0:n_sample,
             0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
    # second component
    affinity[n_sample::,
             n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
    # connection
    affinity[0, n_sample + 1] = 1
    affinity[n_sample + 1, 0] = 1
    # zero the diagonal, then symmetrize the affinity matrix
    affinity.flat[::2 * n_sample + 1] = 0
    affinity = 0.5 * (affinity + affinity.T)

    true_label = np.zeros(shape=2 * n_sample)
    true_label[0:n_sample] = 1

    se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
                                   random_state=np.random.RandomState(seed))
    embedded_coordinate = se_precomp.fit_transform(affinity)
    # Some numpy versions are touchy with types
    embedded_coordinate = \
        se_precomp.fit_transform(affinity.astype(np.float32))
    # thresholding on the first components using 0.
    label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
    assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
    # Test spectral embedding with precomputed kernel
    # (must match the embedding from the equivalent rbf affinity)
    gamma = 1.0
    se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
                                   random_state=np.random.RandomState(seed))
    se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
                               gamma=gamma,
                               random_state=np.random.RandomState(seed))
    embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
    embed_rbf = se_rbf.fit_transform(S)
    assert_array_almost_equal(
        se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
    assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))


def test_spectral_embedding_callable_affinity(seed=36):
    # Test spectral embedding with callable affinity
    gamma = 0.9
    kern = rbf_kernel(S, gamma=gamma)
    se_callable = SpectralEmbedding(n_components=2,
                                    affinity=(
                                        lambda x: rbf_kernel(x, gamma=gamma)),
                                    gamma=gamma,
                                    random_state=np.random.RandomState(seed))
    se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
                               gamma=gamma,
                               random_state=np.random.RandomState(seed))
    embed_rbf = se_rbf.fit_transform(S)
    embed_callable = se_callable.fit_transform(S)
    assert_array_almost_equal(
        se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
    assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
    assert_true(
        _check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
    # Test spectral embedding with amg solver
    # (skipped when the optional pyamg dependency is missing)
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        raise SkipTest("pyamg not available.")

    se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
                               eigen_solver="amg", n_neighbors=5,
                               random_state=np.random.RandomState(seed))
    se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
                                  eigen_solver="arpack", n_neighbors=5,
                                  random_state=np.random.RandomState(seed))
    embed_amg = se_amg.fit_transform(S)
    embed_arpack = se_arpack.fit_transform(S)
    assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))


def test_pipeline_spectral_clustering(seed=36):
    # Test using pipeline to do spectral clustering
    random_state = np.random.RandomState(seed)
    se_rbf = SpectralEmbedding(n_components=n_clusters,
                               affinity="rbf",
                               random_state=random_state)
    se_knn = SpectralEmbedding(n_components=n_clusters,
                               affinity="nearest_neighbors",
                               n_neighbors=5,
                               random_state=random_state)
    for se in [se_rbf, se_knn]:
        km = KMeans(n_clusters=n_clusters, random_state=random_state)
        km.fit(se.fit_transform(S))
        # clustering in the embedded space must recover the true labels
        assert_array_almost_equal(
            normalized_mutual_info_score(
                km.labels_,
                true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
# Test that SpectralClustering fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
    # An unsupported affinity string must raise ValueError at fit time.
    embedder = SpectralEmbedding(n_components=1, affinity="<unknown>",
                                 random_state=np.random.RandomState(seed))
    assert_raises(ValueError, embedder.fit, S)
def test_connectivity(seed=36):
    # _graph_is_connected must give the same verdict for dense, CSR and
    # CSC representations of the same adjacency matrix.
    disconnected = np.array([[1, 0, 0, 0, 0],
                             [0, 1, 1, 0, 0],
                             [0, 1, 1, 1, 0],
                             [0, 0, 1, 1, 1],
                             [0, 0, 0, 1, 1]])
    connected = np.array([[1, 1, 0, 0, 0],
                          [1, 1, 1, 0, 0],
                          [0, 1, 1, 1, 0],
                          [0, 0, 1, 1, 1],
                          [0, 0, 0, 1, 1]])
    for graph, expected in ((disconnected, False), (connected, True)):
        for convert in (lambda g: g, csr_matrix, csc_matrix):
            assert_equal(_graph_is_connected(convert(graph)), expected)
def test_spectral_embedding_deterministic():
    # Two runs on the exact same affinity matrix must yield the exact
    # same embedding.
    rng = np.random.RandomState(36)
    affinity = rbf_kernel(rng.randn(10, 30))
    assert_array_almost_equal(spectral_embedding(affinity),
                              spectral_embedding(affinity))
| bsd-3-clause |
ftfarias/PySubsim | old/sound/peak_detection_1.py | 1 | 2435 | # from https://gist.github.com/endolith/250860
import sys
from numpy import NaN, Inf, arange, isscalar, asarray, array
def peakdet(v, delta, x=None):
    """Detect local maxima and minima ("peaks") in a vector.

    Port of Eli Billauer's MATLAB ``peakdet``
    (http://billauer.co.il/peakdet.html, explicitly public domain).

    Parameters
    ----------
    v : array-like
        Signal to scan for peaks.
    delta : positive scalar
        A point counts as a maximum if it is later followed by a drop of
        at least ``delta`` (and symmetrically for minima).
    x : array-like, optional
        Positions reported in the output instead of indices; must have
        the same length as ``v``.

    Returns
    -------
    (maxtab, mintab) : pair of numpy arrays
        Each row is ``(position, value)`` of a detected maximum / minimum.
    """
    if x is None:
        x = arange(len(v))
    v = asarray(v)
    # Argument validation mirrors the original script: abort the process.
    if len(v) != len(x):
        sys.exit('Input vectors v and x must have same length')
    if not isscalar(delta):
        sys.exit('Input argument delta must be a scalar')
    if delta <= 0:
        sys.exit('Input argument delta must be positive')
    maxima, minima = [], []
    running_min, running_max = float('inf'), float('-inf')
    min_pos = max_pos = float('nan')
    seeking_max = True
    for i, value in enumerate(v):
        # Track the extrema of the signal seen so far.
        if value > running_max:
            running_max, max_pos = value, x[i]
        if value < running_min:
            running_min, min_pos = value, x[i]
        if seeking_max:
            # A drop of more than delta confirms the candidate maximum.
            if value < running_max - delta:
                maxima.append((max_pos, running_max))
                running_min, min_pos = value, x[i]
                seeking_max = False
        else:
            # A rise of more than delta confirms the candidate minimum.
            if value > running_min + delta:
                minima.append((min_pos, running_min))
                running_max, max_pos = value, x[i]
                seeking_max = True
    return array(maxima), array(minima)
if __name__=="__main__":
from matplotlib.pyplot import plot, scatter, show
series = [0,0,0,2,0,0,0,-2,0,0,0,2,0,0,0,-2,0]
maxtab, mintab = peakdet(series,.3)
plot(series)
scatter(array(maxtab)[:,0], array(maxtab)[:,1], color='blue')
scatter(array(mintab)[:,0], array(mintab)[:,1], color='red')
show() | gpl-3.0 |
pradyu1993/scikit-learn | doc/sphinxext/numpy_ext/docscrape_sphinx.py | 52 | 8004 | import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString
from docscrape import FunctionDoc
from docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
    """Render a parsed NumPy-style docstring as Sphinx/reST markup.

    Each ``_str_*`` method returns a list of reST lines for one docstring
    section; ``__str__`` stitches the sections together in a fixed order.
    """

    def __init__(self, docstring, config=None):
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        NumpyDocString.__init__(self, docstring, config=config)

    # string conversion routines
    def _str_header(self, name, symbol='`'):
        return ['.. rubric:: ' + name, '']

    def _str_field_list(self, name):
        return [':' + name + ':']

    def _str_indent(self, doc, indent=4):
        # Prefix every line of ``doc`` with ``indent`` spaces.
        out = []
        for line in doc:
            out += [' ' * indent + line]
        return out

    def _str_signature(self):
        # NOTE(review): the early return makes everything below
        # unreachable, i.e. signatures are effectively suppressed.
        return ['']
        if self['Signature']:
            return ['``%s``' % self['Signature']] + ['']
        else:
            return ['']

    def _str_summary(self):
        return self['Summary'] + ['']

    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']

    def _str_param_list(self, name):
        # Render a Parameters/Returns/Raises section as a reST field list.
        out = []
        if self[name]:
            out += self._str_field_list(name)
            out += ['']
            for param, param_type, desc in self[name]:
                out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                          param_type)])
                out += ['']
                out += self._str_indent(desc, 8)
                out += ['']
        return out

    @property
    def _obj(self):
        # The documented object, whichever of class/function was attached
        # by the subclass constructors.
        if hasattr(self, '_cls'):
            return self._cls
        elif hasattr(self, '_f'):
            return self._f
        return None

    def _str_member_list(self, name):
        """
        Generate a member listing, autosummary:: table where possible,
        and a table where not.
        """
        out = []
        if self[name]:
            out += ['.. rubric:: %s' % name, '']
            prefix = getattr(self, '_name', '')
            if prefix:
                prefix = '~%s.' % prefix
            autosum = []
            others = []
            for param, param_type, desc in self[name]:
                param = param.strip()
                # Members that actually exist on the object can use
                # autosummary; the rest fall back to a hand-built table.
                if not self._obj or hasattr(self._obj, param):
                    autosum += [" %s%s" % (prefix, param)]
                else:
                    others.append((param, param_type, desc))
            if autosum:
                # GAEL: Toctree commented out below because it creates
                # hundreds of sphinx warnings
                # out += ['.. autosummary::', ' :toctree:', '']
                out += ['.. autosummary::', '']
                out += autosum
            if others:
                maxlen_0 = max([len(x[0]) for x in others])
                maxlen_1 = max([len(x[1]) for x in others])
                hdr = "=" * maxlen_0 + " " + "=" * maxlen_1 + " " + "=" * 10
                fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1)
                n_indent = maxlen_0 + maxlen_1 + 4
                out += [hdr]
                for param, param_type, desc in others:
                    out += [fmt % (param.strip(), param_type)]
                    out += self._str_indent(desc, n_indent)
                out += [hdr]
            out += ['']
        return out

    def _str_section(self, name):
        # Generic free-text section: rubric header plus dedented content.
        out = []
        if self[name]:
            out += self._str_header(name)
            out += ['']
            content = textwrap.dedent("\n".join(self[name])).split("\n")
            out += content
            out += ['']
        return out

    def _str_see_also(self, func_role):
        out = []
        if self['See Also']:
            see_also = super(SphinxDocString, self)._str_see_also(func_role)
            # Drop the base renderer's own header (first two lines) and
            # wrap the body in a seealso directive instead.
            out = ['.. seealso::', '']
            out += self._str_indent(see_also[2:])
        return out

    def _str_warnings(self):
        out = []
        if self['Warnings']:
            out = ['.. warning::', '']
            out += self._str_indent(self['Warnings'])
        return out

    def _str_index(self):
        idx = self['index']
        out = []
        if len(idx) == 0:
            return out
        out += ['.. index:: %s' % idx.get('default', '')]
        # NOTE(review): dict.iteritems() is Python 2 only; this method
        # would raise AttributeError under Python 3.
        for section, references in idx.iteritems():
            if section == 'default':
                continue
            elif section == 'refguide':
                out += [' single: %s' % (', '.join(references))]
            else:
                out += [' %s: %s' % (section, ','.join(references))]
        return out

    def _str_references(self):
        out = []
        if self['References']:
            out += self._str_header('References')
            if isinstance(self['References'], str):
                self['References'] = [self['References']]
            out.extend(self['References'])
            out += ['']
            # Latex collects all references to a separate bibliography,
            # so we need to insert links to it
            # NOTE(review): lexicographic string comparison of versions --
            # breaks for e.g. "0.10" vs "0.6".
            if sphinx.__version__ >= "0.6":
                out += ['.. only:: latex', '']
            else:
                out += ['.. latexonly::', '']
            items = []
            for line in self['References']:
                # Pick up citation labels like ".. [Ref1]".
                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
                if m:
                    items.append(m.group(1))
            out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
        return out

    def _str_examples(self):
        examples_str = "\n".join(self['Examples'])
        # Promote matplotlib examples to a plot:: directive when plots are
        # enabled and the example is not already inside one.
        if (self.use_plots and 'import matplotlib' in examples_str
                and 'plot::' not in examples_str):
            out = []
            out += self._str_header('Examples')
            out += ['.. plot::', '']
            out += self._str_indent(self['Examples'])
            out += ['']
            return out
        else:
            return self._str_section('Examples')

    def __str__(self, indent=0, func_role="obj"):
        # Assemble all sections in canonical numpydoc order.
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Raises'):
            out += self._str_param_list(param_list)
        out += self._str_warnings()
        out += self._str_see_also(func_role)
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_examples()
        for param_list in ('Attributes', 'Methods'):
            out += self._str_member_list(param_list)
        out = self._str_indent(out, indent)
        return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    """Sphinx renderer for a function docstring."""

    def __init__(self, obj, doc=None, config=None):
        # Fix: ``config={}`` was a mutable default argument, shared
        # between all calls; use the None sentinel instead.
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
    """Sphinx renderer for a class docstring."""

    def __init__(self, obj, doc=None, func_doc=None, config=None):
        # Fix: ``config={}`` was a mutable default argument, shared
        # between all calls; use the None sentinel instead.
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        # NOTE(review): ``func_doc`` is accepted but deliberately not
        # forwarded -- ClassDoc is always given func_doc=None, matching
        # the original behaviour.
        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
    """Sphinx renderer for an arbitrary object.

    The raw docstring is supplied explicitly; ``obj`` is only stored so
    that the ``_obj`` property can inspect it for member listings.
    """

    def __init__(self, obj, doc=None, config=None):
        self._f = obj
        SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config=None):
    """Return the appropriate Sphinx*Doc wrapper for ``obj``.

    Parameters
    ----------
    obj : object
        The thing being documented.
    what : str, optional
        One of 'class', 'module', 'function', 'object'; inferred from
        ``obj`` when omitted.
    doc : str, optional
        Docstring override; for plain objects it is fetched via pydoc
        when omitted.
    config : dict, optional
        Renderer options (e.g. ``use_plots``).
    """
    # Fix: ``config={}`` was a mutable default argument, shared between
    # all calls; use the None sentinel instead.
    config = {} if config is None else config
    if what is None:
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'
    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    elif what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    else:
        if doc is None:
            doc = pydoc.getdoc(obj)
        return SphinxObjDoc(obj, doc, config=config)
| bsd-3-clause |
openturns/otmorris | python/src/plot_sensitivity.py | 1 | 3391 | """
Plot Morris elementary effects
"""
import openturns as ot
import numpy as np
import matplotlib
import pylab as plt
import warnings
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['text.latex.preamble'] = [r"\usepackage{amsmath}"]
class PlotEE(object):
    """
    Plot elementary effects
    -----------------------

    Scatter plot of Morris sensitivity indices: mean elementary effect
    (mu) on the x-axis versus its standard deviation (sigma) on the
    y-axis, one annotated point per input variable.

    Parameters
    ----------
    result : :class:`~otmorris.Morris`
        A Morris object.
    output_marginal : int
        Index of the output marginal of interest.
        Default value is 0.
    absolute_mean : bool
        If True, use the mean of absolute elementary effects.
        Default value is True.
    title : str
        Title for the graph.
    """

    def __init__(self,
                 result,
                 output_marginal=0,
                 absolute_mean=True,
                 title="Elementary effects",
                 **kwargs):
        # set figure
        self._fig, self._ax = plt.subplots()
        # Check input object type (duck-typed so Morris need not be
        # imported here).
        if not (hasattr(result, 'getStandardDeviationElementaryEffects') and hasattr(result, 'getClassName')):
            raise TypeError(" `result` should be of class Morris ")
        if absolute_mean:
            mean = result.getMeanAbsoluteElementaryEffects(output_marginal)
        else:
            mean = result.getMeanElementaryEffects(output_marginal)
        sigma = result.getStandardDeviationElementaryEffects(output_marginal)
        dim = len(sigma)
        # Labels X1..Xdim.  NOTE(review): under Python 3 this is a lazy
        # map object; it is only iterated once (enumerate below), so
        # that is safe here.
        input_description = map(lambda x: "X" + str(x + 1), range(dim))
        # Plot effects
        self._ax.plot(mean, sigma, 'bo')
        # Annotate points, offset by 5% of the average spacing so labels
        # do not sit directly on the markers.
        dmu = (plt.np.max(mean) - plt.np.min(mean)) / len(mean)
        dsg = (plt.np.max(sigma) - plt.np.min(sigma)) / len(sigma)
        for i, txt in enumerate(input_description):
            self._ax.annotate(
                txt, (mean[i] + 0.05 * dmu, sigma[i] + 0.05 * dsg))
        self._ax.set_xlabel(r"$\boldsymbol{\mu}$", fontsize=14)
        self._ax.set_ylabel(r"$\boldsymbol{\sigma}$", fontsize=14)
        self._ax.grid(True)
        self._fig.suptitle(title, fontsize=18)

    def show(self, **kwargs):
        """
        Display the graph on screen.

        Parameters
        ----------
        kwargs:
            block: bool, optional
                If true (default), block until the graph is closed.
            These parameters are passed to matplotlib.pyplot.show()
        """
        self._fig.show(**kwargs)

    def save(self, fname, **kwargs):
        """
        Save the graph as file.

        Parameters
        ----------
        fname: str
            A string containing a path to a filename from which file format is deduced.
        kwargs:
            Refer to matplotlib.figure.Figure.savefig documentation for valid keyword arguments.
        """
        self._fig.savefig(fname, **kwargs)

    def getFigure(self):
        """
        Accessor to the underlying figure object.

        Refer to matplotlib.figure.Figure for further information.
        """
        return self._fig

    def getAxes(self):
        """
        Get the list of Axes objects.

        Refer to matplotlib.axes.Axes for further information.
        """
        return self._ax

    def close(self):
        """Close the figure."""
        plt.close(self._fig)
| lgpl-3.0 |
spallavolu/scikit-learn | sklearn/utils/tests/test_seq_dataset.py | 93 | 2471 | # Author: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.seq_dataset import ArrayDataset, CSRDataset
from sklearn.datasets import load_iris
from numpy.testing import assert_array_equal
from nose.tools import assert_equal
iris = load_iris()
X = iris.data.astype(np.float64)
y = iris.target.astype(np.float64)
X_csr = sp.csr_matrix(X)
sample_weight = np.arange(y.size, dtype=np.float64)
def test_seq_dataset():
    # Sequential and random iteration must both yield rows consistent
    # with the source arrays, for the dense and the CSR dataset alike.
    dense = ArrayDataset(X, y, sample_weight, seed=42)
    sparse = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
                        y, sample_weight, seed=42)

    def check_sample(xi_, yi, swi, idx):
        # The yielded row must match row ``idx`` of the fixtures exactly.
        row = sp.csr_matrix((xi_), shape=(1, X.shape[1]))
        assert_array_equal(row.data, X_csr[idx].data)
        assert_array_equal(row.indices, X_csr[idx].indices)
        assert_array_equal(row.indptr, X_csr[idx].indptr)
        assert_equal(yi, y[idx])
        assert_equal(swi, sample_weight[idx])

    for dataset in (dense, sparse):
        for _ in range(5):
            # next sample
            check_sample(*dataset._next_py())
            # random sample
            check_sample(*dataset._random_py())
def test_seq_dataset_shuffle():
    # Dense and CSR datasets seeded identically must visit rows in the
    # same order, before and after shuffling with a common seed.
    dense = ArrayDataset(X, y, sample_weight, seed=42)
    sparse = CSRDataset(X_csr.data, X_csr.indptr, X_csr.indices,
                        y, sample_weight, seed=42)

    # not shuffled: sequential access is the identity order
    for expected in range(5):
        _, _, _, idx1 = dense._next_py()
        _, _, _, idx2 = sparse._next_py()
        assert_equal(idx1, expected)
        assert_equal(idx2, expected)
    # random access draws the same indices from both datasets
    for _ in range(5):
        _, _, _, idx1 = dense._random_py()
        _, _, _, idx2 = sparse._random_py()
        assert_equal(idx1, idx2)

    seed = 77
    dense._shuffle_py(seed)
    sparse._shuffle_py(seed)

    # after an identical shuffle both traversals still agree
    for _ in range(5):
        _, _, _, idx1 = dense._next_py()
        _, _, _, idx2 = sparse._next_py()
        assert_equal(idx1, idx2)
        _, _, _, idx1 = dense._random_py()
        _, _, _, idx2 = sparse._random_py()
        assert_equal(idx1, idx2)
| bsd-3-clause |
kdebrab/pandas | pandas/tests/dtypes/test_common.py | 3 | 23804 | # -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas.core.dtypes.dtypes import (DatetimeTZDtype, PeriodDtype,
CategoricalDtype, IntervalDtype)
import pandas.core.dtypes.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
class TestPandasDtype(object):
    """Tests for ``pandas_dtype``: conversion of assorted inputs
    (strings, numpy dtypes, extension dtypes) to pandas dtype objects."""

    # Passing invalid dtype, both as a string or object, must raise TypeError
    # Per issue GH15520
    @pytest.mark.parametrize('box', [pd.Timestamp, 'pd.Timestamp', list])
    def test_invalid_dtype_error(self, box):
        with tm.assert_raises_regex(TypeError, 'not understood'):
            com.pandas_dtype(box)

    @pytest.mark.parametrize('dtype', [
        object, 'float64', np.object_, np.dtype('object'), 'O',
        np.float64, float, np.dtype('float64')])
    def test_pandas_dtype_valid(self, dtype):
        # Valid inputs round-trip to an equal dtype.
        assert com.pandas_dtype(dtype) == dtype

    @pytest.mark.parametrize('dtype', [
        'M8[ns]', 'm8[ns]', 'object', 'float64', 'int64'])
    def test_numpy_dtype(self, dtype):
        # Plain numpy dtype strings delegate to np.dtype.
        assert com.pandas_dtype(dtype) == np.dtype(dtype)

    def test_numpy_string_dtype(self):
        # do not parse freq-like string as period dtype
        assert com.pandas_dtype('U') == np.dtype('U')
        assert com.pandas_dtype('S') == np.dtype('S')

    @pytest.mark.parametrize('dtype', [
        'datetime64[ns, US/Eastern]',
        'datetime64[ns, Asia/Tokyo]',
        'datetime64[ns, UTC]'])
    def test_datetimetz_dtype(self, dtype):
        # The ``is`` check asserts that equal tz-dtype instances are the
        # same cached object.
        assert com.pandas_dtype(dtype) is DatetimeTZDtype(dtype)
        assert com.pandas_dtype(dtype) == DatetimeTZDtype(dtype)
        assert com.pandas_dtype(dtype) == dtype

    def test_categorical_dtype(self):
        assert com.pandas_dtype('category') == CategoricalDtype()

    @pytest.mark.parametrize('dtype', [
        'period[D]', 'period[3M]', 'period[U]',
        'Period[D]', 'Period[3M]', 'Period[U]'])
    def test_period_dtype(self, dtype):
        # The ``is`` check asserts that equal period dtypes are the same
        # cached object; both spellings ('period'/'Period') are accepted.
        assert com.pandas_dtype(dtype) is PeriodDtype(dtype)
        assert com.pandas_dtype(dtype) == PeriodDtype(dtype)
        assert com.pandas_dtype(dtype) == dtype
# Fixture: one representative dtype per family, cross-producted by the
# parametrized dtype-equality tests below.
dtypes = dict(datetime_tz=com.pandas_dtype('datetime64[ns, US/Eastern]'),
              datetime=com.pandas_dtype('datetime64[ns]'),
              timedelta=com.pandas_dtype('timedelta64[ns]'),
              period=PeriodDtype('D'),
              integer=np.dtype(np.int64),
              float=np.dtype(np.float64),
              object=np.dtype(np.object),
              category=com.pandas_dtype('category'))
# The two stacked parametrize decorators generate the full cross-product
# of dtype-family pairs from the ``dtypes`` fixture.
@pytest.mark.parametrize('name1,dtype1',
                         list(dtypes.items()),
                         ids=lambda x: str(x))
@pytest.mark.parametrize('name2,dtype2',
                         list(dtypes.items()),
                         ids=lambda x: str(x))
def test_dtype_equal(name1, dtype1, name2, dtype2):
    # match equal to self, but not equal to other
    assert com.is_dtype_equal(dtype1, dtype1)
    if name1 != name2:
        assert not com.is_dtype_equal(dtype1, dtype2)
def test_dtype_equal_strict():
    # dtype equality is strict: same kind but a different width, unit,
    # frequency or timezone never compares equal.
    narrower_ints = (np.int8, np.int16, np.int32)
    assert not any(com.is_dtype_equal(np.int64, d) for d in narrower_ints)
    assert not com.is_dtype_equal(np.float64, np.float32)
    # strict w.r.t. PeriodDtype frequency
    assert not com.is_dtype_equal(PeriodDtype('D'), PeriodDtype('2D'))
    # strict w.r.t. datetime64 timezone
    tz_east = com.pandas_dtype('datetime64[ns, US/Eastern]')
    tz_cet = com.pandas_dtype('datetime64[ns, CET]')
    assert not com.is_dtype_equal(tz_east, tz_cet)
    # see gh-15941: no exception should be raised
    assert not com.is_dtype_equal(None, None)
def get_is_dtype_funcs():
    """
    Collect every ``is_*dtype`` introspection function exposed by
    pandas.core.dtypes.common.
    """
    return [getattr(com, attr)
            for attr in dir(com)
            if attr.startswith('is_') and attr.endswith('dtype')]
# Run once per is_*dtype predicate discovered in the common module.
@pytest.mark.parametrize('func',
                         get_is_dtype_funcs(),
                         ids=lambda x: x.__name__)
def test_get_dtype_error_catch(func):
    # see gh-15941
    #
    # No exception should be raised.
    # Every predicate must return False for None rather than raising.
    assert not func(None)
def test_is_object():
    # is_object_dtype: true for the object dtype itself and for
    # object-dtyped arrays, false for everything else (including lists).
    assert com.is_object_dtype(object)
    assert com.is_object_dtype(np.array([], dtype=object))
    for non_object in (int, np.array([], dtype=int), [1, 2, 3]):
        assert not com.is_object_dtype(non_object)
@pytest.mark.parametrize("check_scipy", [
False, pytest.param(True, marks=td.skip_if_no_scipy)
])
def test_is_sparse(check_scipy):
assert com.is_sparse(pd.SparseArray([1, 2, 3]))
assert com.is_sparse(pd.SparseSeries([1, 2, 3]))
assert not com.is_sparse(np.array([1, 2, 3]))
if check_scipy:
import scipy.sparse
assert not com.is_sparse(scipy.sparse.bsr_matrix([1, 2, 3]))
@td.skip_if_no_scipy
def test_is_scipy_sparse():
from scipy.sparse import bsr_matrix
assert com.is_scipy_sparse(bsr_matrix([1, 2, 3]))
assert not com.is_scipy_sparse(pd.SparseArray([1, 2, 3]))
assert not com.is_scipy_sparse(pd.SparseSeries([1, 2, 3]))
def test_is_categorical():
cat = pd.Categorical([1, 2, 3])
assert com.is_categorical(cat)
assert com.is_categorical(pd.Series(cat))
assert com.is_categorical(pd.CategoricalIndex([1, 2, 3]))
assert not com.is_categorical([1, 2, 3])
def test_is_datetimetz():
assert not com.is_datetimetz([1, 2, 3])
assert not com.is_datetimetz(pd.DatetimeIndex([1, 2, 3]))
assert com.is_datetimetz(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
dtype = DatetimeTZDtype("ns", tz="US/Eastern")
s = pd.Series([], dtype=dtype)
assert com.is_datetimetz(s)
def test_is_period():
assert not com.is_period([1, 2, 3])
assert not com.is_period(pd.Index([1, 2, 3]))
assert com.is_period(pd.PeriodIndex(["2017-01-01"], freq="D"))
def test_is_datetime64_dtype():
assert not com.is_datetime64_dtype(object)
assert not com.is_datetime64_dtype([1, 2, 3])
assert not com.is_datetime64_dtype(np.array([], dtype=int))
assert com.is_datetime64_dtype(np.datetime64)
assert com.is_datetime64_dtype(np.array([], dtype=np.datetime64))
def test_is_datetime64tz_dtype():
assert not com.is_datetime64tz_dtype(object)
assert not com.is_datetime64tz_dtype([1, 2, 3])
assert not com.is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3]))
assert com.is_datetime64tz_dtype(pd.DatetimeIndex(
[1, 2, 3], tz="US/Eastern"))
def test_is_timedelta64_dtype():
assert not com.is_timedelta64_dtype(object)
assert not com.is_timedelta64_dtype(None)
assert not com.is_timedelta64_dtype([1, 2, 3])
assert not com.is_timedelta64_dtype(np.array([], dtype=np.datetime64))
assert not com.is_timedelta64_dtype('0 days')
assert not com.is_timedelta64_dtype("0 days 00:00:00")
assert not com.is_timedelta64_dtype(["0 days 00:00:00"])
assert not com.is_timedelta64_dtype("NO DATE")
assert com.is_timedelta64_dtype(np.timedelta64)
assert com.is_timedelta64_dtype(pd.Series([], dtype="timedelta64[ns]"))
assert com.is_timedelta64_dtype(pd.to_timedelta(['0 days', '1 days']))
def test_is_period_dtype():
assert not com.is_period_dtype(object)
assert not com.is_period_dtype([1, 2, 3])
assert not com.is_period_dtype(pd.Period("2017-01-01"))
assert com.is_period_dtype(PeriodDtype(freq="D"))
assert com.is_period_dtype(pd.PeriodIndex([], freq="A"))
def test_is_interval_dtype():
assert not com.is_interval_dtype(object)
assert not com.is_interval_dtype([1, 2, 3])
assert com.is_interval_dtype(IntervalDtype())
interval = pd.Interval(1, 2, closed="right")
assert not com.is_interval_dtype(interval)
assert com.is_interval_dtype(pd.IntervalIndex([interval]))
def test_is_categorical_dtype():
assert not com.is_categorical_dtype(object)
assert not com.is_categorical_dtype([1, 2, 3])
assert com.is_categorical_dtype(CategoricalDtype())
assert com.is_categorical_dtype(pd.Categorical([1, 2, 3]))
assert com.is_categorical_dtype(pd.CategoricalIndex([1, 2, 3]))
def test_is_string_dtype():
assert not com.is_string_dtype(int)
assert not com.is_string_dtype(pd.Series([1, 2]))
assert com.is_string_dtype(str)
assert com.is_string_dtype(object)
assert com.is_string_dtype(np.array(['a', 'b']))
def test_is_period_arraylike():
assert not com.is_period_arraylike([1, 2, 3])
assert not com.is_period_arraylike(pd.Index([1, 2, 3]))
assert com.is_period_arraylike(pd.PeriodIndex(["2017-01-01"], freq="D"))
def test_is_datetime_arraylike():
assert not com.is_datetime_arraylike([1, 2, 3])
assert not com.is_datetime_arraylike(pd.Index([1, 2, 3]))
assert com.is_datetime_arraylike(pd.DatetimeIndex([1, 2, 3]))
def test_is_datetimelike():
assert not com.is_datetimelike([1, 2, 3])
assert not com.is_datetimelike(pd.Index([1, 2, 3]))
assert com.is_datetimelike(pd.DatetimeIndex([1, 2, 3]))
assert com.is_datetimelike(pd.PeriodIndex([], freq="A"))
assert com.is_datetimelike(np.array([], dtype=np.datetime64))
assert com.is_datetimelike(pd.Series([], dtype="timedelta64[ns]"))
assert com.is_datetimelike(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
dtype = DatetimeTZDtype("ns", tz="US/Eastern")
s = pd.Series([], dtype=dtype)
assert com.is_datetimelike(s)
def test_is_integer_dtype():
assert not com.is_integer_dtype(str)
assert not com.is_integer_dtype(float)
assert not com.is_integer_dtype(np.datetime64)
assert not com.is_integer_dtype(np.timedelta64)
assert not com.is_integer_dtype(pd.Index([1, 2.]))
assert not com.is_integer_dtype(np.array(['a', 'b']))
assert not com.is_integer_dtype(np.array([], dtype=np.timedelta64))
assert com.is_integer_dtype(int)
assert com.is_integer_dtype(np.uint64)
assert com.is_integer_dtype(pd.Series([1, 2]))
def test_is_signed_integer_dtype():
assert not com.is_signed_integer_dtype(str)
assert not com.is_signed_integer_dtype(float)
assert not com.is_signed_integer_dtype(np.uint64)
assert not com.is_signed_integer_dtype(np.datetime64)
assert not com.is_signed_integer_dtype(np.timedelta64)
assert not com.is_signed_integer_dtype(pd.Index([1, 2.]))
assert not com.is_signed_integer_dtype(np.array(['a', 'b']))
assert not com.is_signed_integer_dtype(np.array([1, 2], dtype=np.uint32))
assert not com.is_signed_integer_dtype(np.array([], dtype=np.timedelta64))
assert com.is_signed_integer_dtype(int)
assert com.is_signed_integer_dtype(pd.Series([1, 2]))
def test_is_unsigned_integer_dtype():
assert not com.is_unsigned_integer_dtype(str)
assert not com.is_unsigned_integer_dtype(int)
assert not com.is_unsigned_integer_dtype(float)
assert not com.is_unsigned_integer_dtype(pd.Series([1, 2]))
assert not com.is_unsigned_integer_dtype(pd.Index([1, 2.]))
assert not com.is_unsigned_integer_dtype(np.array(['a', 'b']))
assert com.is_unsigned_integer_dtype(np.uint64)
assert com.is_unsigned_integer_dtype(np.array([1, 2], dtype=np.uint32))
def test_is_int64_dtype():
assert not com.is_int64_dtype(str)
assert not com.is_int64_dtype(float)
assert not com.is_int64_dtype(np.int32)
assert not com.is_int64_dtype(np.uint64)
assert not com.is_int64_dtype(pd.Index([1, 2.]))
assert not com.is_int64_dtype(np.array(['a', 'b']))
assert not com.is_int64_dtype(np.array([1, 2], dtype=np.uint32))
assert com.is_int64_dtype(np.int64)
assert com.is_int64_dtype(np.array([1, 2], dtype=np.int64))
def test_is_int_or_datetime_dtype():
assert not com.is_int_or_datetime_dtype(str)
assert not com.is_int_or_datetime_dtype(float)
assert not com.is_int_or_datetime_dtype(pd.Index([1, 2.]))
assert not com.is_int_or_datetime_dtype(np.array(['a', 'b']))
assert com.is_int_or_datetime_dtype(int)
assert com.is_int_or_datetime_dtype(np.uint64)
assert com.is_int_or_datetime_dtype(np.datetime64)
assert com.is_int_or_datetime_dtype(np.timedelta64)
assert com.is_int_or_datetime_dtype(pd.Series([1, 2]))
assert com.is_int_or_datetime_dtype(np.array([], dtype=np.datetime64))
assert com.is_int_or_datetime_dtype(np.array([], dtype=np.timedelta64))
def test_is_datetime64_any_dtype():
assert not com.is_datetime64_any_dtype(int)
assert not com.is_datetime64_any_dtype(str)
assert not com.is_datetime64_any_dtype(np.array([1, 2]))
assert not com.is_datetime64_any_dtype(np.array(['a', 'b']))
assert com.is_datetime64_any_dtype(np.datetime64)
assert com.is_datetime64_any_dtype(np.array([], dtype=np.datetime64))
assert com.is_datetime64_any_dtype(DatetimeTZDtype("ns", "US/Eastern"))
assert com.is_datetime64_any_dtype(pd.DatetimeIndex([1, 2, 3],
dtype=np.datetime64))
def test_is_datetime64_ns_dtype():
assert not com.is_datetime64_ns_dtype(int)
assert not com.is_datetime64_ns_dtype(str)
assert not com.is_datetime64_ns_dtype(np.datetime64)
assert not com.is_datetime64_ns_dtype(np.array([1, 2]))
assert not com.is_datetime64_ns_dtype(np.array(['a', 'b']))
assert not com.is_datetime64_ns_dtype(np.array([], dtype=np.datetime64))
# This datetime array has the wrong unit (ps instead of ns)
assert not com.is_datetime64_ns_dtype(np.array([], dtype="datetime64[ps]"))
assert com.is_datetime64_ns_dtype(DatetimeTZDtype("ns", "US/Eastern"))
assert com.is_datetime64_ns_dtype(pd.DatetimeIndex([1, 2, 3],
dtype=np.datetime64))
def test_is_timedelta64_ns_dtype():
assert not com.is_timedelta64_ns_dtype(np.dtype('m8[ps]'))
assert not com.is_timedelta64_ns_dtype(
np.array([1, 2], dtype=np.timedelta64))
assert com.is_timedelta64_ns_dtype(np.dtype('m8[ns]'))
assert com.is_timedelta64_ns_dtype(np.array([1, 2], dtype='m8[ns]'))
def test_is_datetime_or_timedelta_dtype():
assert not com.is_datetime_or_timedelta_dtype(int)
assert not com.is_datetime_or_timedelta_dtype(str)
assert not com.is_datetime_or_timedelta_dtype(pd.Series([1, 2]))
assert not com.is_datetime_or_timedelta_dtype(np.array(['a', 'b']))
assert com.is_datetime_or_timedelta_dtype(np.datetime64)
assert com.is_datetime_or_timedelta_dtype(np.timedelta64)
assert com.is_datetime_or_timedelta_dtype(
np.array([], dtype=np.timedelta64))
assert com.is_datetime_or_timedelta_dtype(
np.array([], dtype=np.datetime64))
def test_is_numeric_v_string_like():
assert not com.is_numeric_v_string_like(1, 1)
assert not com.is_numeric_v_string_like(1, "foo")
assert not com.is_numeric_v_string_like("foo", "foo")
assert not com.is_numeric_v_string_like(np.array([1]), np.array([2]))
assert not com.is_numeric_v_string_like(
np.array(["foo"]), np.array(["foo"]))
assert com.is_numeric_v_string_like(np.array([1]), "foo")
assert com.is_numeric_v_string_like("foo", np.array([1]))
assert com.is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"]))
assert com.is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2]))
def test_is_datetimelike_v_numeric():
dt = np.datetime64(pd.datetime(2017, 1, 1))
assert not com.is_datetimelike_v_numeric(1, 1)
assert not com.is_datetimelike_v_numeric(dt, dt)
assert not com.is_datetimelike_v_numeric(np.array([1]), np.array([2]))
assert not com.is_datetimelike_v_numeric(np.array([dt]), np.array([dt]))
assert com.is_datetimelike_v_numeric(1, dt)
assert com.is_datetimelike_v_numeric(1, dt)
assert com.is_datetimelike_v_numeric(np.array([dt]), 1)
assert com.is_datetimelike_v_numeric(np.array([1]), dt)
assert com.is_datetimelike_v_numeric(np.array([dt]), np.array([1]))
def test_is_datetimelike_v_object():
obj = object()
dt = np.datetime64(pd.datetime(2017, 1, 1))
assert not com.is_datetimelike_v_object(dt, dt)
assert not com.is_datetimelike_v_object(obj, obj)
assert not com.is_datetimelike_v_object(np.array([dt]), np.array([1]))
assert not com.is_datetimelike_v_object(np.array([dt]), np.array([dt]))
assert not com.is_datetimelike_v_object(np.array([obj]), np.array([obj]))
assert com.is_datetimelike_v_object(dt, obj)
assert com.is_datetimelike_v_object(obj, dt)
assert com.is_datetimelike_v_object(np.array([dt]), obj)
assert com.is_datetimelike_v_object(np.array([obj]), dt)
assert com.is_datetimelike_v_object(np.array([dt]), np.array([obj]))
def test_needs_i8_conversion():
assert not com.needs_i8_conversion(str)
assert not com.needs_i8_conversion(np.int64)
assert not com.needs_i8_conversion(pd.Series([1, 2]))
assert not com.needs_i8_conversion(np.array(['a', 'b']))
assert com.needs_i8_conversion(np.datetime64)
assert com.needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]"))
assert com.needs_i8_conversion(pd.DatetimeIndex(
[1, 2, 3], tz="US/Eastern"))
def test_is_numeric_dtype():
assert not com.is_numeric_dtype(str)
assert not com.is_numeric_dtype(np.datetime64)
assert not com.is_numeric_dtype(np.timedelta64)
assert not com.is_numeric_dtype(np.array(['a', 'b']))
assert not com.is_numeric_dtype(np.array([], dtype=np.timedelta64))
assert com.is_numeric_dtype(int)
assert com.is_numeric_dtype(float)
assert com.is_numeric_dtype(np.uint64)
assert com.is_numeric_dtype(pd.Series([1, 2]))
assert com.is_numeric_dtype(pd.Index([1, 2.]))
def test_is_string_like_dtype():
assert not com.is_string_like_dtype(object)
assert not com.is_string_like_dtype(pd.Series([1, 2]))
assert com.is_string_like_dtype(str)
assert com.is_string_like_dtype(np.array(['a', 'b']))
def test_is_float_dtype():
assert not com.is_float_dtype(str)
assert not com.is_float_dtype(int)
assert not com.is_float_dtype(pd.Series([1, 2]))
assert not com.is_float_dtype(np.array(['a', 'b']))
assert com.is_float_dtype(float)
assert com.is_float_dtype(pd.Index([1, 2.]))
def test_is_bool_dtype():
assert not com.is_bool_dtype(int)
assert not com.is_bool_dtype(str)
assert not com.is_bool_dtype(pd.Series([1, 2]))
assert not com.is_bool_dtype(np.array(['a', 'b']))
assert not com.is_bool_dtype(pd.Index(['a', 'b']))
assert com.is_bool_dtype(bool)
assert com.is_bool_dtype(np.bool)
assert com.is_bool_dtype(np.array([True, False]))
assert com.is_bool_dtype(pd.Index([True, False]))
@pytest.mark.parametrize("check_scipy", [
False, pytest.param(True, marks=td.skip_if_no_scipy)
])
def test_is_extension_type(check_scipy):
assert not com.is_extension_type([1, 2, 3])
assert not com.is_extension_type(np.array([1, 2, 3]))
assert not com.is_extension_type(pd.DatetimeIndex([1, 2, 3]))
cat = pd.Categorical([1, 2, 3])
assert com.is_extension_type(cat)
assert com.is_extension_type(pd.Series(cat))
assert com.is_extension_type(pd.SparseArray([1, 2, 3]))
assert com.is_extension_type(pd.SparseSeries([1, 2, 3]))
assert com.is_extension_type(pd.DatetimeIndex([1, 2, 3], tz="US/Eastern"))
dtype = DatetimeTZDtype("ns", tz="US/Eastern")
s = pd.Series([], dtype=dtype)
assert com.is_extension_type(s)
if check_scipy:
import scipy.sparse
assert not com.is_extension_type(scipy.sparse.bsr_matrix([1, 2, 3]))
def test_is_complex_dtype():
assert not com.is_complex_dtype(int)
assert not com.is_complex_dtype(str)
assert not com.is_complex_dtype(pd.Series([1, 2]))
assert not com.is_complex_dtype(np.array(['a', 'b']))
assert com.is_complex_dtype(np.complex)
assert com.is_complex_dtype(np.array([1 + 1j, 5]))
def test_is_offsetlike():
    # Arrays/Index of DateOffset objects and scalar offsets qualify.
    assert com.is_offsetlike(np.array([pd.DateOffset(month=3),
                                       pd.offsets.Nano()]))
    assert com.is_offsetlike(pd.offsets.MonthEnd())
    assert com.is_offsetlike(pd.Index([pd.DateOffset(second=1)]))
    # Timedelta scalars and numeric arrays do not.
    assert not com.is_offsetlike(pd.Timedelta(1))
    assert not com.is_offsetlike(np.array([1 + 1j, 5]))
    # mixed case: a single non-offset element disqualifies the whole array
    assert not com.is_offsetlike(np.array([pd.DateOffset(), pd.Timestamp(0)]))
# _get_dtype should resolve types, dtype strings, dtype objects, and
# pandas/numpy containers to a concrete dtype instance.
@pytest.mark.parametrize('input_param,result', [
    (int, np.dtype(int)),
    ('int32', np.dtype('int32')),
    (float, np.dtype(float)),
    ('float64', np.dtype('float64')),
    (np.dtype('float64'), np.dtype('float64')),
    (str, np.dtype(str)),
    (pd.Series([1, 2], dtype=np.dtype('int16')), np.dtype('int16')),
    (pd.Series(['a', 'b']), np.dtype(object)),
    (pd.Index([1, 2]), np.dtype('int64')),
    (pd.Index(['a', 'b']), np.dtype(object)),
    ('category', 'category'),
    (pd.Categorical(['a', 'b']).dtype, CategoricalDtype(['a', 'b'])),
    (pd.Categorical(['a', 'b']), CategoricalDtype(['a', 'b'])),
    (pd.CategoricalIndex(['a', 'b']).dtype, CategoricalDtype(['a', 'b'])),
    (pd.CategoricalIndex(['a', 'b']), CategoricalDtype(['a', 'b'])),
    # NOTE: a CategoricalDtype with categories compares equal to the
    # bare CategoricalDtype() sentinel, so these two cases both pass.
    (CategoricalDtype(), CategoricalDtype()),
    (CategoricalDtype(['a', 'b']), CategoricalDtype()),
    (pd.DatetimeIndex([1, 2]), np.dtype('=M8[ns]')),
    (pd.DatetimeIndex([1, 2]).dtype, np.dtype('=M8[ns]')),
    ('<M8[ns]', np.dtype('<M8[ns]')),
    ('datetime64[ns, Europe/London]', DatetimeTZDtype('ns', 'Europe/London')),
    (pd.SparseSeries([1, 2], dtype='int32'), np.dtype('int32')),
    (pd.SparseSeries([1, 2], dtype='int32').dtype, np.dtype('int32')),
    (PeriodDtype(freq='D'), PeriodDtype(freq='D')),
    ('period[D]', PeriodDtype(freq='D')),
    (IntervalDtype(), IntervalDtype()),
])
def test__get_dtype(input_param, result):
    assert com._get_dtype(input_param) == result
@pytest.mark.parametrize('input_param', [None,
                                         1, 1.2,
                                         'random string',
                                         pd.DataFrame([1, 2])])
def test__get_dtype_fails(input_param):
    # python objects
    # _get_dtype only understands dtype-like inputs; arbitrary Python
    # objects and composite containers (DataFrame) must raise TypeError.
    # Idiom: the context-manager form of pytest.raises is preferred over
    # the legacy callable form.
    with pytest.raises(TypeError):
        com._get_dtype(input_param)
# _get_dtype_type should resolve the same inputs as _get_dtype, but down to
# the underlying scalar *type* (np.int64, np.datetime64, ...).  Inputs with
# no single scalar type (None, scalars, DataFrame) map to type(None).
@pytest.mark.parametrize('input_param,result', [
    (int, np.dtype(int).type),
    ('int32', np.int32),
    (float, np.dtype(float).type),
    ('float64', np.float64),
    (np.dtype('float64'), np.float64),
    (str, np.dtype(str).type),
    (pd.Series([1, 2], dtype=np.dtype('int16')), np.int16),
    (pd.Series(['a', 'b']), np.object_),
    (pd.Index([1, 2], dtype='int64'), np.int64),
    (pd.Index(['a', 'b']), np.object_),
    ('category', com.CategoricalDtypeType),
    (pd.Categorical(['a', 'b']).dtype, com.CategoricalDtypeType),
    (pd.Categorical(['a', 'b']), com.CategoricalDtypeType),
    (pd.CategoricalIndex(['a', 'b']).dtype, com.CategoricalDtypeType),
    (pd.CategoricalIndex(['a', 'b']), com.CategoricalDtypeType),
    (pd.DatetimeIndex([1, 2]), np.datetime64),
    (pd.DatetimeIndex([1, 2]).dtype, np.datetime64),
    ('<M8[ns]', np.datetime64),
    (pd.DatetimeIndex([1, 2], tz='Europe/London'), com.DatetimeTZDtypeType),
    (pd.DatetimeIndex([1, 2], tz='Europe/London').dtype,
     com.DatetimeTZDtypeType),
    ('datetime64[ns, Europe/London]', com.DatetimeTZDtypeType),
    (pd.SparseSeries([1, 2], dtype='int32'), np.int32),
    (pd.SparseSeries([1, 2], dtype='int32').dtype, np.int32),
    (PeriodDtype(freq='D'), com.PeriodDtypeType),
    ('period[D]', com.PeriodDtypeType),
    (IntervalDtype(), com.IntervalDtypeType),
    (None, type(None)),
    (1, type(None)),
    (1.2, type(None)),
    (pd.DataFrame([1, 2]), type(None)),  # composite dtype
])
def test__get_dtype_type(input_param, result):
    assert com._get_dtype_type(input_param) == result
| bsd-3-clause |
memmett/PyWENO | examples/discontinuous.py | 1 | 1204 | """PyWENO smooth reconstruction example."""
import numpy as np
import pyweno.weno
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def f(x):
    """Piecewise test function: sin(x) where x <= 0, cos(x) where x > 0."""
    return np.where(x > 0, np.cos(x), np.sin(x))
def F(x):
    """Piecewise antiderivative of f: -cos(x) where x <= 0, sin(x) where x > 0."""
    return np.where(x > 0, np.sin(x), -np.cos(x))
# Cell edges on [-2*pi, 2*pi] (40 cells); exact cell averages of f computed
# from its antiderivative F.
x = np.linspace(-2*np.pi, 2*np.pi, 41)
a = (F(x[1:]) - F(x[:-1]))/(x[1]-x[0])
# F jumps at x=0 (the antiderivative constant differs between the two
# pieces), so the average of the cell containing 0 is patched by hand.
a[20:21] = (F(x[21:22]) - 0.0)/(x[1]-x[0]) # fix middle cell average
# 5th-order WENO reconstruction at the left/right edge of each cell;
# smoothness indicators are requested for the 'left' reconstruction.
l, s = pyweno.weno.reconstruct(a, 5, 'left', return_smoothness=True)
r = pyweno.weno.reconstruct(a, 5, 'right')
plt.title('pyweno.weno reconstruction and smoothness indicators')
plt.subplot(2,1,1)
# top panel: exact function vs reconstructed edge values
x2 = np.linspace(x[0], x[-1], 1001)
plt.plot(x2, f(x2), '-k')
plt.plot(x[:-1], l, 'or')
plt.plot(x[1:], r, 'ob')
plt.ylabel('f')
plt.xlabel('x')
plt.legend(['actual', 'left', 'right'])
plt.subplot(2,1,2)
# bottom panel: the three smoothness indicators, plotted at cell centres
c = 0.5*(x[1:] + x[:-1])
plt.plot(c, s[:,0], 'or')
plt.plot(c, s[:,1], 'ok')
plt.plot(c, s[:,2], 'ob')
plt.ylabel('smoothness')
plt.xlabel('x')
plt.legend(['r=0', 'r=1', 'r=2'])
plt.savefig('discontinuous.png', format='png')
| bsd-3-clause |
Comflics/Exploring-OpenFOAM | laminarVortexShedding/strouhal.py | 3 | 1531 | #!/usr/bin/python
# Comflics: Exploring OpenFOAM
# Compute Strouhal Number of Laminar Vortex Shedding
# S. Huq, 13MAY17
#
import numpy as np
import scipy.signal as signal
import matplotlib.pyplot as plt
# # Read Results
# Load the OpenFOAM forceCoeffs output; columns indexed below are
# presumably [time, Cm, Cd, Cl, ...] -- verify against the case's
# forceCoeffs function-object configuration.
data = np.loadtxt('./postProcessing/forceCoeffs/0/forceCoeffs.dat', skiprows=0)
L = 2 # L = D - Diameter
V = 1 # Velocity
time = data[:,0]
Cd = data[:,2]
Cl = data[:,3]
del data
# # Compute FFT
N = len(time)  # NOTE(review): only used by the commented-out raw-FFT variant
# assumes a uniform sampling interval -- TODO confirm writeInterval is fixed
dt = time[2] - time[1]
# # inaccurate FFT
# freq = np.fft.fftfreq(N, dt)
# Cd_fft = np.fft.fft(Cd)
# Cl_amp = np.fft.fft(Cl)
# plt.plot(freq, Cl_amp) # Figure 2.10
# plt.show()
# # Better stable FFT
# Welch's method averages periodograms over overlapping segments,
# which suppresses noise in the spectrum.
nmax=512 # no. of points in the fft
# freq, Cd_amp = signal.welch(Cd, 1./dt, nperseg=nmax)
freq, Cl_amp = signal.welch(Cl, 1./dt, nperseg=nmax)
plt.plot(freq, Cl_amp) # Figure 2.10
plt.show()
# # Strouhal Number
# Find the index corresponding to max amplitude
Cl_max_fft_idx = np.argmax(abs(Cl_amp))
freq_shed = freq[Cl_max_fft_idx ]
St = freq_shed * L / V
print "Vortex shedding freq: %.3f [Hz]" % (freq_shed)
print "Strouhal Number: %.3f" % (St)
# # Explore Results
# #
# # Figure 2.8
# # See if there atleast 10 cycles of oscillation
# # improves the accuracy;
# plt.plot(time,Cl)
# plt.show()
# # Figure 2.9
# plt.plot(time,Cd)
# plt.show()
# #
# # Exercise
# # Exclude data before onset of the oscillations.
# # approx time = 200 s.
# # Hint: skiprows = 800 - 950
| gpl-2.0 |
ArcherSys/ArcherSys | Lib/site-packages/sphinx/ext/inheritance_diagram.py | 5 | 14183 | # -*- coding: utf-8 -*-
r"""
sphinx.ext.inheritance_diagram
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
.. inheritance-diagram: D E
Produces a graph like the following:
A
/ \
B C
/ \ /
E D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
:copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
import inspect
try:
from hashlib import md5
except ImportError:
from md5 import md5
from six import text_type
from six.moves import builtins
from docutils import nodes
from docutils.parsers.rst import directives
import sphinx
from sphinx.ext.graphviz import render_dot_html, render_dot_latex, \
render_dot_texinfo
from sphinx.pycode import ModuleAnalyzer
from sphinx.util import force_decode
from sphinx.util.compat import Directive
class_sig_re = re.compile(r'''^([\w.]*\.)? # module names
(\w+) \s* $ # class/final module name
''', re.VERBOSE)
class InheritanceException(Exception):
    """Raised when the requested classes/modules cannot be imported or resolved."""
    pass
class InheritanceGraph(object):
    """
    Given a list of classes, determines the set of classes that they inherit
    from all the way to the root "object", and then is able to generate a
    graphviz dot graph from them.
    """
    def __init__(self, class_names, currmodule, show_builtins=False,
                 private_bases=False, parts=0):
        """*class_names* is a list of child classes to show bases from.
        If *show_builtins* is True, then Python builtins will be shown
        in the graph.
        """
        self.class_names = class_names
        classes = self._import_classes(class_names, currmodule)
        self.class_info = self._class_info(classes, show_builtins,
                                           private_bases, parts)
        if not self.class_info:
            raise InheritanceException('No classes found for '
                                       'inheritance diagram')
    def _import_class_or_module(self, name, currmodule):
        """Import a class using its fully-qualified *name*.

        Returns a list of classes: one element for a class name, all
        module-level classes for a module name.
        """
        try:
            # class_sig_re splits "pkg.mod.Class" into dotted prefix + last name
            path, base = class_sig_re.match(name).groups()
        except (AttributeError, ValueError):
            raise InheritanceException('Invalid class or module %r specified '
                                       'for inheritance diagram' % name)
        fullname = (path or '') + base
        path = (path and path.rstrip('.') or '')
        # two possibilities: either it is a module, then import it
        try:
            __import__(fullname)
            todoc = sys.modules[fullname]
        except ImportError:
            # else it is a class, then import the module
            if not path:
                if currmodule:
                    # try the current module
                    path = currmodule
                else:
                    raise InheritanceException(
                        'Could not import class %r specified for '
                        'inheritance diagram' % base)
            try:
                __import__(path)
                todoc = getattr(sys.modules[path], base)
            except (ImportError, AttributeError):
                raise InheritanceException(
                    'Could not import class or module %r specified for '
                    'inheritance diagram' % (path + '.' + base))
        # If a class, just return it
        if inspect.isclass(todoc):
            return [todoc]
        elif inspect.ismodule(todoc):
            # Only classes *defined* in the module, not ones imported into it.
            classes = []
            for cls in todoc.__dict__.values():
                if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
                    classes.append(cls)
            return classes
        raise InheritanceException('%r specified for inheritance diagram is '
                                   'not a class or module' % name)
    def _import_classes(self, class_names, currmodule):
        """Import a list of classes."""
        classes = []
        for name in class_names:
            classes.extend(self._import_class_or_module(name, currmodule))
        return classes
    def _class_info(self, classes, show_builtins, private_bases, parts):
        """Return name and bases for all classes that are ancestors of
        *classes*.
        *parts* gives the number of dotted name parts that is removed from the
        displayed node names.
        """
        all_classes = {}
        # Membership tests against this detect builtin classes to skip.
        py_builtins = vars(builtins).values()
        def recurse(cls):
            # Walk the MRO upwards, recording (nodename, fullname, bases,
            # tooltip) per class; skips builtins/privates per the flags.
            if not show_builtins and cls in py_builtins:
                return
            if not private_bases and cls.__name__.startswith('_'):
                return
            nodename = self.class_name(cls, parts)
            fullname = self.class_name(cls, 0)
            # Use first line of docstring as tooltip, if available
            tooltip = None
            try:
                if cls.__doc__:
                    enc = ModuleAnalyzer.for_module(cls.__module__).encoding
                    doc = cls.__doc__.strip().split("\n")[0]
                    if not isinstance(doc, text_type):
                        doc = force_decode(doc, enc)
                    if doc:
                        tooltip = '"%s"' % doc.replace('"', '\\"')
            except Exception:  # might raise AttributeError for strange classes
                pass
            baselist = []
            all_classes[cls] = (nodename, fullname, baselist, tooltip)
            for base in cls.__bases__:
                if not show_builtins and base in py_builtins:
                    continue
                if not private_bases and base.__name__.startswith('_'):
                    continue
                baselist.append(self.class_name(base, parts))
                if base not in all_classes:
                    recurse(base)
        for cls in classes:
            recurse(cls)
        return list(all_classes.values())
    def class_name(self, cls, parts=0):
        """Given a class object, return a fully-qualified name.
        This works for things I've tested in matplotlib so far, but may not be
        completely general.
        """
        module = cls.__module__
        if module in ('__builtin__', 'builtins'):
            fullname = cls.__name__
        else:
            fullname = '%s.%s' % (module, cls.__name__)
        if parts == 0:
            return fullname
        # Keep only the last *parts* dotted components for display.
        name_parts = fullname.split('.')
        return '.'.join(name_parts[-parts:])
    def get_all_class_names(self):
        """Get all of the class names involved in the graph."""
        return [fullname for (_, fullname, _, _) in self.class_info]
    # These are the default attrs for graphviz
    default_graph_attrs = {
        'rankdir': 'LR',
        'size': '"8.0, 12.0"',
    }
    default_node_attrs = {
        'shape': 'box',
        'fontsize': 10,
        'height': 0.25,
        'fontname': '"Vera Sans, DejaVu Sans, Liberation Sans, '
                    'Arial, Helvetica, sans"',
        'style': '"setlinewidth(0.5)"',
    }
    default_edge_attrs = {
        'arrowsize': 0.5,
        'style': '"setlinewidth(0.5)"',
    }
    def _format_node_attrs(self, attrs):
        # "k1=v1,k2=v2" -- the inline attribute list form of dot.
        return ','.join(['%s=%s' % x for x in attrs.items()])
    def _format_graph_attrs(self, attrs):
        # "k1=v1;\nk2=v2;\n" -- one graph-level statement per attribute.
        return ''.join(['%s=%s;\n' % x for x in attrs.items()])
    def generate_dot(self, name, urls={}, env=None,
                     graph_attrs={}, node_attrs={}, edge_attrs={}):
        """Generate a graphviz dot graph from the classes that were passed in
        to __init__.
        *name* is the name of the graph.
        *urls* is a dictionary mapping class names to HTTP URLs.
        *graph_attrs*, *node_attrs*, *edge_attrs* are dictionaries containing
        key/value pairs to pass on as graphviz properties.

        NOTE(review): the mutable default arguments are shared across calls;
        this is only safe because they are never mutated here.
        """
        g_attrs = self.default_graph_attrs.copy()
        n_attrs = self.default_node_attrs.copy()
        e_attrs = self.default_edge_attrs.copy()
        g_attrs.update(graph_attrs)
        n_attrs.update(node_attrs)
        e_attrs.update(edge_attrs)
        if env:
            # Sphinx config values override everything else.
            g_attrs.update(env.config.inheritance_graph_attrs)
            n_attrs.update(env.config.inheritance_node_attrs)
            e_attrs.update(env.config.inheritance_edge_attrs)
        res = []
        res.append('digraph %s {\n' % name)
        res.append(self._format_graph_attrs(g_attrs))
        # NOTE(review): the loop variable shadows the *name* parameter; the
        # graph header above has already been emitted, so this is harmless.
        for name, fullname, bases, tooltip in sorted(self.class_info):
            # Write the node
            this_node_attrs = n_attrs.copy()
            if fullname in urls:
                this_node_attrs['URL'] = '"%s"' % urls[fullname]
            if tooltip:
                this_node_attrs['tooltip'] = tooltip
            res.append('  "%s" [%s];\n' %
                       (name, self._format_node_attrs(this_node_attrs)))
            # Write the edges
            for base_name in bases:
                res.append('  "%s" -> "%s" [%s];\n' %
                           (base_name, name,
                            self._format_node_attrs(e_attrs)))
        res.append('}\n')
        return ''.join(res)
class inheritance_diagram(nodes.General, nodes.Element):
    """
    A docutils node to use as a placeholder for the inheritance diagram.
    """
    # Carries 'graph', 'content' and 'parts' attributes set by the directive.
    pass
class InheritanceDiagram(Directive):
    """
    Run when the inheritance_diagram directive is first encountered.
    """
    has_content = False
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {
        'parts': directives.nonnegative_int,
        'private-bases': directives.flag,
    }
    def run(self):
        """Build the placeholder node (plus xref children) for the diagram.

        Returns a one-element node list, or a reporter warning node when
        the requested classes cannot be imported.
        """
        node = inheritance_diagram()
        node.document = self.state.document
        env = self.state.document.settings.env
        class_names = self.arguments[0].split()
        class_role = env.get_domain('py').role('class')
        # Store the original content for use as a hash
        node['parts'] = self.options.get('parts', 0)
        node['content'] = ', '.join(class_names)
        # Create a graph starting with the list of classes
        try:
            graph = InheritanceGraph(
                class_names, env.ref_context.get('py:module'),
                parts=node['parts'],
                private_bases='private-bases' in self.options)
        except InheritanceException as err:
            return [node.document.reporter.warning(err.args[0],
                                                   line=self.lineno)]
        # Create xref nodes for each target of the graph's image map and
        # add them to the doc tree so that Sphinx can resolve the
        # references to real URLs later. These nodes will eventually be
        # removed from the doctree after we're done with them.
        for name in graph.get_all_class_names():
            # The role returns (nodes, messages); messages are not needed.
            refnodes, _ = class_role(
                'class', ':class:`%s`' % name, name, 0, self.state)
            node.extend(refnodes)
        # Store the graph object so we can use it to generate the
        # dot file later
        node['graph'] = graph
        return [node]
def get_graph_hash(node):
    """Return a short, content-derived identifier for a diagram node."""
    key = node['content'] + str(node['parts'])
    digest = md5(key.encode('utf-8')).hexdigest()
    return digest[-10:]
def html_visit_inheritance_diagram(self, node):
    """
    Output the graph for HTML.  This will insert a PNG with clickable
    image map.
    """
    graph = node['graph']
    graph_hash = get_graph_hash(node)
    name = 'inheritance%s' % graph_hash
    # Create a mapping from fully-qualified class names to URLs.
    # The xref children were resolved by Sphinx after the directive ran;
    # refuri = external/intersphinx link, refid = anchor in this document.
    urls = {}
    for child in node:
        if child.get('refuri') is not None:
            urls[child['reftitle']] = child.get('refuri')
        elif child.get('refid') is not None:
            urls[child['reftitle']] = '#' + child.get('refid')
    dotcode = graph.generate_dot(name, urls, env=self.builder.env)
    render_dot_html(self, node, dotcode, [], 'inheritance', 'inheritance',
                    alt='Inheritance diagram of ' + node['content'])
    # SkipNode stops docutils from descending into the (already consumed)
    # xref children.
    raise nodes.SkipNode
def latex_visit_inheritance_diagram(self, node):
    """
    Output the graph for LaTeX.  This will insert a PDF.
    """
    graph = node['graph']
    graph_hash = get_graph_hash(node)
    name = 'inheritance%s' % graph_hash
    # Smaller canvas than the HTML default so the figure fits a page.
    dotcode = graph.generate_dot(name, env=self.builder.env,
                                 graph_attrs={'size': '"6.0,6.0"'})
    render_dot_latex(self, node, dotcode, [], 'inheritance')
    raise nodes.SkipNode
def texinfo_visit_inheritance_diagram(self, node):
    """
    Output the graph for Texinfo.  This will insert a PNG.
    """
    graph = node['graph']
    graph_hash = get_graph_hash(node)
    name = 'inheritance%s' % graph_hash
    # Same reduced canvas as the LaTeX output.
    dotcode = graph.generate_dot(name, env=self.builder.env,
                                 graph_attrs={'size': '"6.0,6.0"'})
    render_dot_texinfo(self, node, dotcode, [], 'inheritance')
    raise nodes.SkipNode
def skip(self, node):
    """Visitor for builders that cannot render the diagram: emit nothing."""
    raise nodes.SkipNode
def setup(app):
    """Register the extension: node type, per-builder visitors, directive
    and config values.  Requires the graphviz extension for rendering."""
    app.setup_extension('sphinx.ext.graphviz')
    app.add_node(
        inheritance_diagram,
        latex=(latex_visit_inheritance_diagram, None),
        html=(html_visit_inheritance_diagram, None),
        text=(skip, None),
        man=(skip, None),
        texinfo=(texinfo_visit_inheritance_diagram, None))
    app.add_directive('inheritance-diagram', InheritanceDiagram)
    # Fixed: the stray trailing commas previously turned each of these
    # statements into a throwaway 1-tuple expression.
    app.add_config_value('inheritance_graph_attrs', {}, False)
    app.add_config_value('inheritance_node_attrs', {}, False)
    app.add_config_value('inheritance_edge_attrs', {}, False)
    return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
| mit |
pythonvietnam/scikit-learn | sklearn/neighbors/tests/test_ball_tree.py | 159 | 10196 | import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.ball_tree import (BallTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
# Shared fixtures: a random SPD matrix V (for mahalanobis) and the metric
# families exercised by the query tests below.
rng = np.random.RandomState(10)
V = rng.rand(3, 3)
V = np.dot(V, V.T)  # V V^T is symmetric positive semi-definite
DIMENSION = 3
# Continuous metrics, mapped to their extra constructor kwargs.
METRICS = {'euclidean': {},
           'manhattan': {},
           'minkowski': dict(p=3),
           'chebyshev': {},
           'seuclidean': dict(V=np.random.random(DIMENSION)),
           'wminkowski': dict(p=3, w=np.random.random(DIMENSION)),
           'mahalanobis': dict(V=V)}
# Metrics defined on discrete-valued vectors.
DISCRETE_METRICS = ['hamming',
                    'canberra',
                    'braycurtis']
# Metrics defined on boolean vectors.
BOOLEAN_METRICS = ['matching', 'jaccard', 'dice', 'kulsinski',
                   'rogerstanimoto', 'russellrao', 'sokalmichener',
                   'sokalsneath']
def dist_func(x1, x2, p):
    """Callable p-norm-style metric used to exercise BallTree's pyfunc path."""
    diff = x1 - x2
    return np.sum(diff ** p) ** (1. / p)
def brute_force_neighbors(X, Y, k, metric, **kwargs):
    """Reference k-NN: full pairwise distance matrix, then per-row argsort.

    Returns (dist, ind), each of shape (n_queries, k), matching the
    BallTree.query return convention.
    """
    D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
    ind = np.argsort(D, axis=1)[:, :k]
    # Fancy-index D with the sorted column indices to get sorted distances.
    dist = D[np.arange(Y.shape[0])[:, None], ind]
    return dist, ind
def test_ball_tree_query():
    """Nose-style generator test: BallTree.query vs brute force for every
    continuous metric, k, and traversal strategy."""
    np.random.seed(0)
    X = np.random.random((40, DIMENSION))
    Y = np.random.random((10, DIMENSION))
    def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
        bt = BallTree(X, leaf_size=1, metric=metric, **kwargs)
        dist1, ind1 = bt.query(Y, k, dualtree=dualtree,
                               breadth_first=breadth_first)
        dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
        # don't check indices here: if there are any duplicate distances,
        # the indices may not match. Distances should not have this problem.
        assert_array_almost_equal(dist1, dist2)
    for (metric, kwargs) in METRICS.items():
        for k in (1, 3, 5):
            for dualtree in (True, False):
                for breadth_first in (True, False):
                    yield (check_neighbors,
                           dualtree, breadth_first,
                           k, metric, kwargs)
def test_ball_tree_query_boolean_metrics():
    """BallTree.query vs brute force for the boolean-vector metrics."""
    np.random.seed(0)
    # round(0) makes the data 0/1-valued so boolean metrics apply.
    X = np.random.random((40, 10)).round(0)
    Y = np.random.random((10, 10)).round(0)
    k = 5
    def check_neighbors(metric):
        bt = BallTree(X, leaf_size=1, metric=metric)
        dist1, ind1 = bt.query(Y, k)
        dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
        assert_array_almost_equal(dist1, dist2)
    for metric in BOOLEAN_METRICS:
        yield check_neighbors, metric
def test_ball_tree_query_discrete_metrics():
    """BallTree.query vs brute force for the discrete-valued metrics."""
    np.random.seed(0)
    # Integer-valued data in {0..4} for the discrete metrics.
    X = (4 * np.random.random((40, 10))).round(0)
    Y = (4 * np.random.random((10, 10))).round(0)
    k = 5
    def check_neighbors(metric):
        bt = BallTree(X, leaf_size=1, metric=metric)
        dist1, ind1 = bt.query(Y, k)
        dist2, ind2 = brute_force_neighbors(X, Y, k, metric)
        assert_array_almost_equal(dist1, dist2)
    for metric in DISCRETE_METRICS:
        yield check_neighbors, metric
def test_ball_tree_query_radius(n_samples=100, n_features=10):
    """query_radius must return exactly the points within r of the origin."""
    np.random.seed(0)
    X = 2 * np.random.random(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)
    eps = 1E-15  # roundoff error can cause test to fail
    bt = BallTree(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))
    # Sweep radii between the distances of the first and last sample.
    for r in np.linspace(rad[0], rad[-1], 100):
        ind = bt.query_radius([query_pt], r + eps)[0]
        i = np.where(rad <= r + eps)[0]
        # Sort both index sets: query_radius makes no ordering guarantee.
        ind.sort()
        i.sort()
        assert_array_almost_equal(i, ind)
def test_ball_tree_query_radius_distance(n_samples=100, n_features=10):
    """The distances returned by query_radius must match direct computation."""
    np.random.seed(0)
    X = 2 * np.random.random(size=(n_samples, n_features)) - 1
    query_pt = np.zeros(n_features, dtype=float)
    eps = 1E-15  # roundoff error can cause test to fail
    bt = BallTree(X, leaf_size=5)
    rad = np.sqrt(((X - query_pt) ** 2).sum(1))
    for r in np.linspace(rad[0], rad[-1], 100):
        ind, dist = bt.query_radius([query_pt], r + eps, return_distance=True)
        # One query point -> unwrap the single-element object arrays.
        ind = ind[0]
        dist = dist[0]
        d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
        assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
    """Naive O(n_Y * n_X) reference kernel density evaluation.

    For each row of Y, sums the chosen kernel profile over the euclidean
    distances to every row of X, scaled by the kernel normalization.
    """
    d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
    norm = kernel_norm(h, X.shape[1], kernel)
    # Unnormalized per-pair weights for each supported kernel profile;
    # evaluated lazily so only the requested kernel is computed.
    profiles = {
        'gaussian': lambda: np.exp(-0.5 * (d * d) / (h * h)),
        'tophat': lambda: (d < h),
        'epanechnikov': lambda: (1.0 - (d * d) / (h * h)) * (d < h),
        'exponential': lambda: np.exp(-d / h),
        'linear': lambda: (1 - d / h) * (d < h),
        'cosine': lambda: np.cos(0.5 * np.pi * d / h) * (d < h),
    }
    if kernel not in profiles:
        raise ValueError('kernel not recognized')
    return norm * profiles[kernel]().sum(-1)
def test_ball_tree_kde(n_samples=100, n_features=3):
    """BallTree.kernel_density vs the slow reference, over kernels,
    bandwidths and tolerance settings (nose generator test)."""
    np.random.seed(0)
    X = np.random.random((n_samples, n_features))
    Y = np.random.random((n_samples, n_features))
    bt = BallTree(X, leaf_size=10)
    for kernel in ['gaussian', 'tophat', 'epanechnikov',
                   'exponential', 'linear', 'cosine']:
        for h in [0.01, 0.1, 1]:
            dens_true = compute_kernel_slow(Y, X, kernel, h)
            # NOTE(review): check_results closes over dens_true, which is
            # rebound every iteration; this is only correct because nose
            # runs each yielded case before the generator advances.
            def check_results(kernel, h, atol, rtol, breadth_first):
                dens = bt.kernel_density(Y, h, atol=atol, rtol=rtol,
                                         kernel=kernel,
                                         breadth_first=breadth_first)
                assert_allclose(dens, dens_true,
                                atol=atol, rtol=max(rtol, 1e-7))
            for rtol in [0, 1E-5]:
                for atol in [1E-6, 1E-2]:
                    for breadth_first in (True, False):
                        yield (check_results, kernel, h, atol, rtol,
                               breadth_first)
def test_gaussian_kde(n_samples=1000):
    # Compare gaussian KDE results to scipy.stats.gaussian_kde
    from scipy.stats import gaussian_kde
    np.random.seed(0)
    x_in = np.random.normal(0, 1, n_samples)
    x_out = np.linspace(-5, 5, 30)
    for h in [0.01, 0.1, 1]:
        bt = BallTree(x_in[:, None])
        try:
            # gaussian_kde's bw_method is relative to the data std.
            gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
        except TypeError:
            raise SkipTest("Old version of scipy, doesn't accept "
                           "explicit bandwidth.")
        # kernel_density returns an unnormalized sum; divide by n_samples
        # to get a density comparable with scipy's.
        dens_bt = bt.kernel_density(x_out[:, None], h) / n_samples
        dens_gkde = gkde.evaluate(x_out)
        assert_array_almost_equal(dens_bt, dens_gkde, decimal=3)
def test_ball_tree_two_point(n_samples=100, n_features=3):
    """two_point_correlation vs direct pair counting at 10 radii."""
    np.random.seed(0)
    X = np.random.random((n_samples, n_features))
    Y = np.random.random((n_samples, n_features))
    r = np.linspace(0, 1, 10)
    bt = BallTree(X, leaf_size=10)
    # Reference: count all (Y, X) pairs within each radius.
    D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
    counts_true = [(D <= ri).sum() for ri in r]
    def check_two_point(r, dualtree):
        counts = bt.two_point_correlation(Y, r=r, dualtree=dualtree)
        assert_array_almost_equal(counts, counts_true)
    for dualtree in (True, False):
        yield check_two_point, r, dualtree
def test_ball_tree_pickle():
    """A pickled-and-restored BallTree must answer queries identically,
    including when built with a Python callable metric."""
    np.random.seed(0)
    X = np.random.random((10, 3))
    bt1 = BallTree(X, leaf_size=1)
    # Test if BallTree with callable metric is picklable
    bt1_pyfunc = BallTree(X, metric=dist_func, leaf_size=1, p=2)
    # NOTE(review): query() returns (dist, ind) -- see the other tests --
    # so these names are swapped; the comparisons below stay pairwise
    # consistent, so the test is still valid.
    ind1, dist1 = bt1.query(X)
    ind1_pyfunc, dist1_pyfunc = bt1_pyfunc.query(X)
    def check_pickle_protocol(protocol):
        s = pickle.dumps(bt1, protocol=protocol)
        bt2 = pickle.loads(s)
        s_pyfunc = pickle.dumps(bt1_pyfunc, protocol=protocol)
        bt2_pyfunc = pickle.loads(s_pyfunc)
        ind2, dist2 = bt2.query(X)
        ind2_pyfunc, dist2_pyfunc = bt2_pyfunc.query(X)
        assert_array_almost_equal(ind1, ind2)
        assert_array_almost_equal(dist1, dist2)
        assert_array_almost_equal(ind1_pyfunc, ind2_pyfunc)
        assert_array_almost_equal(dist1_pyfunc, dist2_pyfunc)
    for protocol in (0, 1, 2):
        yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
    """Pushing 2*n_nbrs items per row must retain the n_nbrs smallest,
    in sorted order, with matching indices."""
    heap = NeighborsHeap(n_pts, n_nbrs)
    for row in range(n_pts):
        d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
        i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
        for d, i in zip(d_in, i_in):
            heap.push(row, d, i)
        # Reference: sort the inputs and keep the first n_nbrs.
        ind = np.argsort(d_in)
        d_in = d_in[ind]
        i_in = i_in[ind]
        d_heap, i_heap = heap.get_arrays(sort=True)
        assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
        assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
    """nodeheap_sort must agree with numpy argsort on random values."""
    vals = np.random.random(n_nodes).astype(DTYPE)
    i1 = np.argsort(vals)
    vals2, i2 = nodeheap_sort(vals)
    assert_array_almost_equal(i1, i2)
    assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
    """Row-wise simultaneous_sort of (dist, ind) must match numpy argsort."""
    dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
    ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
    dist2 = dist.copy()
    ind2 = ind.copy()
    # simultaneous sort rows using function
    simultaneous_sort(dist, ind)
    # simultaneous sort rows using numpy
    i = np.argsort(dist2, axis=1)
    row_ind = np.arange(n_rows)[:, None]
    dist2 = dist2[row_ind, i]
    ind2 = ind2[row_ind, i]
    assert_array_almost_equal(dist, dist2)
    assert_array_almost_equal(ind, ind2)
def test_query_haversine():
    """BallTree with the haversine metric vs brute force (2-d lat/lon data)."""
    np.random.seed(0)
    # haversine works on (lat, lon) pairs in radians -> 2 features.
    X = 2 * np.pi * np.random.random((40, 2))
    bt = BallTree(X, leaf_size=1, metric='haversine')
    dist1, ind1 = bt.query(X, k=5)
    dist2, ind2 = brute_force_neighbors(X, X, k=5, metric='haversine')
    assert_array_almost_equal(dist1, dist2)
    assert_array_almost_equal(ind1, ind2)
| bsd-3-clause |
runauto/cuda-convnet2 | shownet.py | 180 | 18206 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
class ShowNetError(Exception):
    """Raised for invalid show-net options, e.g. unknown cost or layer names."""
    pass
class ShowConvNet(ConvNet):
    def __init__(self, op, load_dic):
        # Delegate all construction to the base ConvNet model.
        ConvNet.__init__(self, op, load_dic)
    def init_data_providers(self):
        """Set up data providers; real data is only loaded when a GPU
        forward pass is needed (i.e. when --show-preds is given)."""
        self.need_gpu = self.op.get_value('show_preds')
        class Dummy:
            # Stand-in provider so plotting-only runs never touch the dataset.
            def advance_batch(self):
                pass
        if self.need_gpu:
            ConvNet.init_data_providers(self)
        else:
            self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
    def init_model_lib(self):
        # The GPU model library is only initialized when we actually run the net.
        if self.need_gpu:
            ConvNet.init_model_lib(self)
    def plot_cost(self):
        """Plot training/test error curves for the cost named by --show-cost."""
        if self.show_cost not in self.train_outputs[0][0]:
            raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
#        print self.test_outputs
        # 'outputFilter' is a string expression from the layer config that is
        # eval'd into a callable mapping raw outputs -> per-cost error values.
        train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
        test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
        if self.smooth_test_errors:
            # Running mean over the last len(test_batch_range) test readings.
            test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
        numbatches = len(self.train_batch_range)
        # Stretch the (sparser) test curve so it aligns with the per-batch
        # training curve: repeat each reading testing_freq times, pad with the
        # last value, then truncate to the same length.
        test_errors = n.row_stack(test_errors)
        test_errors = n.tile(test_errors, (1, self.testing_freq))
        test_errors = list(test_errors.flatten())
        test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
        test_errors = test_errors[:len(train_errors)]
        numepochs = len(train_errors) / float(numbatches)
        pl.figure(1)
        x = range(0, len(train_errors))
        pl.plot(x, train_errors, 'k-', label='Training set')
        pl.plot(x, test_errors, 'r-', label='Test set')
        pl.legend()
        # Tick every epoch boundary, but label only every epoch_label_gran-th
        # epoch so the axis stays readable for long runs.
        ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
        epoch_label_gran = int(ceil(numepochs / 20.))
        epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
        ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
        pl.xticks(ticklocs, ticklabels)
        pl.xlabel('Epoch')
#        pl.ylabel(self.show_cost)
        pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
#        print "plotted cost"
    def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
        """Render a grid of filters into one figure.

        filters: array of shape (channels, pixels, num_filters) -- assumed
        square filters, so pixels must be a perfect square (TODO confirm).
        When combine_chans is True the 3 channels are shown as one RGB tile;
        otherwise each channel gets its own grayscale tile.
        """
        MAX_ROWS = 24
        MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
        num_colors = filters.shape[0]
        f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
        filter_end = min(filter_start+MAX_FILTERS, num_filters)
        filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
        filter_pixels = filters.shape[1]  # NOTE(review): unused below
        filter_size = int(sqrt(filters.shape[1]))
        fig = pl.figure(fignum)
        fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
        num_filters = filter_end - filter_start
        # One big canvas with a 1-pixel gutter between tiles.
        if not combine_chans:
            bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
        else:
            bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
        for m in xrange(filter_start,filter_end ):
            filter = filters[:,:,m]
            # (row, col) of this filter's tile in the grid.
            y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
            if not combine_chans:
                for c in xrange(num_colors):
                    filter_pic = filter[c,:].reshape((filter_size,filter_size))
                    bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
                           1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
            else:
                filter_pic = filter.reshape((3, filter_size,filter_size))
                bigpic[:,
                       1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
                       1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
        pl.xticks([])
        pl.yticks([])
        if not combine_chans:
            pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
        else:
            # imshow wants (H, W, 3); bigpic is (3, H, W).
            bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
            pl.imshow(bigpic, interpolation='nearest')
    def plot_filters(self):
        """Reshape the weights of the layer named by --show-filters and hand
        them to make_filter_fig for display."""
        FILTERS_PER_ROW = 16
        filter_start = 0 # First filter to show
        if self.show_filters not in self.layers:
            raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
        layer = self.layers[self.show_filters]
        filters = layer['weights'][self.input_idx]
#        filters = filters - filters.min()
#        filters = filters / filters.max()
        # Reshape to (channels, pixels, num_filters) depending on layer type.
        if layer['type'] == 'fc': # Fully-connected layer
            num_filters = layer['outputs']
            channels = self.channels
            filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
        elif layer['type'] in ('conv', 'local'): # Conv layer
            num_filters = layer['filters']
            channels = layer['filterChannels'][self.input_idx]
            if layer['type'] == 'local':
                # Locally-connected: show one filter plane per module instead.
                filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
                filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
                filters = filters.swapaxes(0,2).swapaxes(0,1)
                num_filters = layer['modules']
#                filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
#                num_filters *= layer['modules']
                FILTERS_PER_ROW = layer['modulesX']
            else:
                filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
        # Convert YUV filters to RGB
        if self.yuv_to_rgb and channels == 3:
            R = filters[0,:,:] + 1.28033 * filters[2,:,:]
            G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
            B = filters[0,:,:] + 2.12798 * filters[1,:,:]
            filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
        combine_chans = not self.no_rgb and channels == 3
        # Make sure you don't modify the backing array itself here -- so no -= or /=
        if self.norm_filters:
            #print filters.shape
            # Standardize each filter (zero mean, unit variance) across
            # channel*pixel entries before rescaling to [0, 1].
            filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
            filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
            #filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
            #filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
        #else:
        # Global rescale to [0, 1] for display.
        filters = filters - filters.min()
        filters = filters / filters.max()
        self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
    """Show (or, with --save-preds, write to a tar of PNGs) the predictions
    of the softmax layer named by --show-preds on one test batch.

    With --only-errors only mistaken samples are displayed. On-screen mode
    draws a grid: each image on top, a bar chart of its top classes below.
    """
    epoch, batch, data = self.get_next_batch(train=False) # get a test batch
    num_classes = self.test_data_provider.get_num_classes()
    NUM_ROWS = 2
    NUM_COLS = 4
    NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
    NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
    NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
    PRED_IDX = 1
    label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
    if self.only_errors:
        # need predictions for the whole batch to find the mistakes
        preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
    else:
        preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
        #rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
        # sample a random subset of the batch to display
        rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
        if NUM_IMGS < data[0].shape[1]:
            data = [n.require(d[:,rand_idx], requirements='C') for d in data]
#        data += [preds]
    # Run the model
    print [d.shape for d in data], preds.shape
    self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
    IGPUModel.finish_batch(self)
    print preds
    data[0] = self.test_data_provider.get_plottable_data(data[0])

    if self.save_preds:
        # dump every image as a PNG into a single tar archive under save_preds
        if not gfile.Exists(self.save_preds):
            gfile.MakeDirs(self.save_preds)
        preds_thresh = preds > 0.5 # Binarize predictions
        # convert [0,1] float images to clipped uint8 for PNG encoding
        data[0] = data[0] * 255.0
        data[0][data[0]<0] = 0
        data[0][data[0]>255] = 255
        data[0] = n.require(data[0], dtype=n.uint8)
        dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
        tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
        tfo = gfile.GFile(tar_name, "w")
        tf = TarFile(fileobj=tfo, mode='w')
        for img_idx in xrange(NUM_IMGS):
            img = data[0][img_idx,:,:,:]
            imsave = Image.fromarray(img)
            # filename encodes correctness, confidence, batch, index and label
            prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
            file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
#            gf = gfile.GFile(file_name, "w")
            # encode in memory, then add to the tar with an explicit size
            file_string = StringIO()
            imsave.save(file_string, "PNG")
            tarinf = TarInfo(os.path.join(dir_name, file_name))
            tarinf.size = file_string.tell()
            file_string.seek(0)
            tf.addfile(tarinf, file_string)
        tf.close()
        tfo.close()
#        gf.close()
        print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
    else:
        # on-screen grid: image panels on top rows, bar charts below
        fig = pl.figure(3, figsize=(12,9))
        fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
        if self.only_errors:
            # what the net got wrong
            if NUM_OUTPUTS > 1:
                err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
            else:
                err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
                print err_idx
            err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
            data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]

        import matplotlib.gridspec as gridspec
        import matplotlib.colors as colors
        cconv = colors.ColorConverter()
        # alternate tall (image) and short (bar chart) rows
        gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
                               width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
        #print data[1]
        for row in xrange(NUM_ROWS):
            for col in xrange(NUM_COLS):
                img_idx = row * NUM_COLS + col
                if data[0].shape[0] <= img_idx:
                    break
                pl.subplot(gs[(row * 2) * NUM_COLS + col])
                #pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
                pl.xticks([])
                pl.yticks([])
                img = data[0][img_idx,:,:,:]
                pl.imshow(img, interpolation='lanczos')
                # single-row labels => scalar class ids; otherwise multi-label indicator matrix
                show_title = data[1].shape[0] == 1
                true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
                #print true_label
                #print preds[img_idx,:].shape
                #print preds[img_idx,:].max()
                true_label_names = [label_names[i] for i in true_label]
                # top-NUM_TOP_CLASSES (probability, name) pairs, ascending
                img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
                #print img_labels
                axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
                height = 0.5
                ylocs = n.array(range(NUM_TOP_CLASSES))*height
                # red bar for a true label, blue otherwise
                pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
                        color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
                #pl.title(", ".join(true_labels))
                if show_title:
                    pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
                else:
                    print true_label_names
                    pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
                for line in enumerate(axes.get_yticklines()):
                    line[1].set_visible(False)
                #pl.xticks([width], [''])
                #pl.yticks([])
                pl.xticks([])
                pl.ylim(0, ylocs[-1] + height)
                pl.xlim(0, 1)
def start(self):
    """Run whichever visualizations were requested on the command line,
    display them, and terminate the process."""
    self.op.print_values()
    requested_plots = ((self.show_cost, self.plot_cost),
                       (self.show_filters, self.plot_filters),
                       (self.show_preds, self.plot_predictions))
    for wanted, plotter in requested_plots:
        if wanted:
            plotter()
    if pl:
        pl.show()
    sys.exit(0)
@classmethod
def get_options_parser(cls):
    """Build the shownet option parser: start from ConvNet's parser, drop
    every option not relevant to visualization, then register the
    show/save plotting switches."""
    op = ConvNet.get_options_parser()
    # keep only the options that matter when replaying a trained checkpoint
    for option in list(op.options):
        if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
            op.delete_option(option)
    op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
    op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
    op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
    op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
    op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
    op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
    op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
    op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
    op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
    op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
    op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
    op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
    op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
    # a checkpoint must always be given explicitly on the command line
    op.options['load_file'].default = None
    return op
if __name__ == "__main__":
    #nr.seed(6)
    try:
        # parse CLI options, load the checkpoint, then run the viewer
        op = ShowConvNet.get_options_parser()
        op, load_dic = IGPUModel.parse_options(op)
        model = ShowConvNet(op, load_dic)
        model.start()
    except (UnpickleError, ShowNetError, opt.GetoptError), e: # Python-2 'except ..., e' syntax
        print "----------------"
        print "Error:"
        print e
| apache-2.0 |
raghavrv/scikit-learn | examples/feature_selection/plot_f_test_vs_mi.py | 82 | 1671 | """
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1], the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is the third features is
completely irrelevant.
The code below plots the dependency of y against individual x_i and normalized
values of univariate F-tests statistics and mutual information.
As F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly mark x_3 as irrelevant.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression

np.random.seed(0)
# 3 uniform features; y depends linearly on x_1, nonlinearly on x_2, not on x_3
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)

# normalize both statistics to [0, 1] so they are comparable per feature
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)

mi = mutual_info_regression(X, y)
mi /= np.max(mi)

# one scatter panel per feature, titled with its F-test and MI scores
plt.figure(figsize=(15, 5))
for i in range(3):
    plt.subplot(1, 3, i + 1)
    plt.scatter(X[:, i], y, edgecolor='black', s=20)
    plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
    if i == 0:
        plt.ylabel("$y$", fontsize=14)
    plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
              fontsize=16)
plt.show()
| bsd-3-clause |
marcocaccin/scikit-learn | examples/classification/plot_classifier_comparison.py | 66 | 4895 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
h = .02  # step size in the mesh

# classifier display names; kept in lockstep with the `classifiers` list below
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
         "Random Forest", "AdaBoost", "Naive Bayes", "Linear Discriminant Analysis",
         "Quadratic Discriminant Analysis"]
classifiers = [
    KNeighborsClassifier(3),
    SVC(kernel="linear", C=0.025),
    SVC(gamma=2, C=1),
    DecisionTreeClassifier(max_depth=5),
    RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
    AdaBoostClassifier(),
    GaussianNB(),
    LinearDiscriminantAnalysis(),
    QuadraticDiscriminantAnalysis()]

# build a linearly separable dataset by jittering a simple 2-feature problem
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
                           random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)

datasets = [make_moons(noise=0.3, random_state=0),
            make_circles(noise=0.2, factor=0.5, random_state=1),
            linearly_separable
            ]

figure = plt.figure(figsize=(27, 9))
i = 1  # running subplot index: one row per dataset, one column per classifier (+1 for raw data)
# iterate over datasets
for ds in datasets:
    # preprocess dataset, split into training and test part
    X, y = ds
    X = StandardScaler().fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)

    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    # just plot the dataset first
    cm = plt.cm.RdBu
    cm_bright = ListedColormap(['#FF0000', '#0000FF'])
    ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
    # Plot the training points
    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
    # and testing points (semi-transparent)
    ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xticks(())
    ax.set_yticks(())
    i += 1

    # iterate over classifiers
    for name, clf in zip(names, classifiers):
        ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
        clf.fit(X_train, y_train)
        score = clf.score(X_test, y_test)

        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        if hasattr(clf, "decision_function"):
            Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
        else:
            Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]

        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)

        # Plot also the training points
        ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
        # and testing points
        ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
                   alpha=0.6)

        ax.set_xlim(xx.min(), xx.max())
        ax.set_ylim(yy.min(), yy.max())
        ax.set_xticks(())
        ax.set_yticks(())
        ax.set_title(name)
        # test accuracy in the lower-right corner, without the leading '0.'
        ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
                size=15, horizontalalignment='right')
        i += 1

figure.subplots_adjust(left=.02, right=.98)
plt.show()
| bsd-3-clause |
WojciechMigda/KAGGLE-prudential-life-insurance-assessment | src/OptimizedOffsetRegressor.py | 1 | 10844 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
################################################################################
#
# Copyright (c) 2016 Wojciech Migda
# All rights reserved
# Distributed under the terms of the MIT license
#
################################################################################
#
# Filename: OptimizedOffsetRegressor.py
#
# Description:
# Regressor implementing optimized ofsets in a scikit-learn fashion.
# Based on scripts on Kaggle
#
# Authors:
# Wojciech Migda
#
################################################################################
#
# History:
# --------
# Date Who Ticket Description
# ---------- --- --------- ------------------------------------------------
# 2016-01-23 wm Initial version
#
################################################################################
"""
from __future__ import print_function
__author__ = 'Wojciech Migda'
__date__ = '2016-01-23'
__version__ = '0.0.1'
__all__ = [
'OptimizedOffsetRegressor'
]
from sklearn.base import BaseEstimator, RegressorMixin
class OptimizedOffsetRegressor(BaseEstimator, RegressorMixin):
    """Regressor that learns one additive offset per integer bucket.

    Raw predictions (row 0 of the internal data matrix) are bucketed by their
    integer value; for each bucket an offset is tuned with Powell's method so
    that ``scoring(offset_prediction, y)`` is minimized. Buckets are
    optimized independently, in parallel worker processes.

    Parameters
    ----------
    n_jobs : int
        Number of worker processes; -1 means "let multiprocessing decide".
    offset_scale : float
        Stored for API compatibility with the sibling regressors (not used
        by this class's predict path).
    n_buckets : int
        Number of integer buckets / offsets to fit.
    initial_offsets : sequence of float or None
        Starting offsets (length must equal n_buckets); -0.5 each if None.
    scoring : str
        Scorer name resolved via sklearn's ``get_scorer``.
    """

    def __init__(self, n_jobs=-1, offset_scale=1.0, n_buckets=2, initial_offsets=None, scoring='accuracy'):
        self.n_jobs = int(n_jobs)
        self.offset_scale = float(offset_scale)
        self.n_buckets = int(n_buckets)
        if initial_offsets is None:
            self.initial_offsets_ = [-0.5] * self.n_buckets
        else:
            self.initial_offsets_ = list(initial_offsets)
            assert len(self.initial_offsets_) == self.n_buckets
        from sklearn.metrics import get_scorer
        # NOTE(review): modern sklearn's get_scorer returns an
        # (estimator, X, y) scorer; the scoring(pred, y) calls below rely on
        # older metric-style behaviour -- verify against the pinned version.
        self.scoring = get_scorer(scoring)

    def __call__(self, args):
        # Makes the estimator itself usable (and picklable) as the Pool worker.
        return self.OffsetMinimizer_(args)

    def apply_offset(self, data, bin_offset, sv):
        """Add bin_offset to row 1 wherever row 0 truncates to bucket sv."""
        mask = data[0].astype(int) == sv
        data[1, mask] = data[0, mask] + bin_offset
        return data

    def OffsetMinimizer_(self, args):
        """Optimize the offset of a single bucket with Powell's method."""
        def apply_offset_and_score(data, bin_offset, sv):
            data = self.apply_offset(data, bin_offset, sv)
            return self.scoring(data[1], data[2])
        j, data, offset0 = args
        from scipy.optimize import fmin_powell
        return fmin_powell(lambda x: apply_offset_and_score(data, x, j), offset0, disp=True)

    def fit(self, X, y):
        """Fit one offset per bucket.

        Rows of ``self.data_`` are (raw prediction, offset prediction, truth).
        """
        from multiprocessing import Pool
        from numpy import array, vstack
        # BUG FIX: was `self.n_jobs is -1` -- identity comparison with an int
        # literal is implementation-defined; compare with `==`.
        pool = Pool(processes=None if self.n_jobs == -1 else self.n_jobs)
        try:
            self.data_ = vstack((X, X, y))
            for j in range(self.n_buckets):
                self.data_ = self.apply_offset(self.data_, self.initial_offsets_[j], j)
            self.offsets_ = array(pool.map(self,
                                           zip(range(self.n_buckets),
                                               [self.data_] * self.n_buckets,
                                               self.initial_offsets_)))
        finally:
            # the original leaked the worker pool; shut it down deterministically
            pool.close()
            pool.join()
        return self

    def predict(self, X):
        """Apply the learned per-bucket offsets to raw predictions X."""
        from numpy import vstack
        data = vstack((X, X))
        for j in range(self.n_buckets):
            data = self.apply_offset(data, self.offsets_[j], j)
        return data[1]
class DigitizedOptimizedOffsetRegressor(BaseEstimator, RegressorMixin):
    """Offset regressor that buckets raw predictions with fixed, evenly
    spaced split points over (0, 7] and jointly optimizes all offsets with
    scipy's minimize (optionally preceded by basinhopping).

    NOTE(review): `minimize` *minimizes* the scorer value, so `scoring` is
    expected to behave like a loss here -- confirm against the caller.
    """
    def __init__(self,
                 n_jobs=-1,
                 offset_scale=1.0,
                 n_buckets=2,
                 initial_params=None,
                 minimizer='BFGS',
                 basinhopping=False,
                 scoring='accuracy'):
        from numpy import array
        self.n_jobs = int(n_jobs)
        self.offset_scale = float(offset_scale)
        self.n_buckets = int(n_buckets)
        # NOTE(review): when initial_params is None, self.params is never set
        # and fit() will raise AttributeError -- callers appear to always
        # supply initial_params; verify.
        if initial_params is None:
            #self.initial_offsets_ = [-0.5] * self.n_buckets
            pass
        else:
            self.params = array(initial_params)
            #assert(len(self.initial_offsets_) == self.n_buckets)
            pass
        self.minimizer = minimizer
        self.basinhopping = basinhopping
        from sklearn.metrics import get_scorer
        self.scoring = get_scorer(scoring)
        pass

    def apply_params(self, params, data):
        """Add a per-bucket offset (first n_buckets entries of params,
        reversed) to row 1 of data, bucketing row 0 via digitize."""
        from numpy import digitize
        offsets = params[:self.n_buckets][::-1]

        # both give #40: 0.67261
        #splits = [1., 2., 3., 4., 5., 6., 7.]
        #response = digitize(data[0], splits)
        #splits = [2., 3., 4., 5., 6., 7., 8.]
        #response = digitize(data[0], splits) + 1
        from numpy import linspace
        # n_buckets-1 interior split points, evenly spaced and shifted by 1
        splits = linspace(0, 7, self.n_buckets + 1)[1:-1] + 1
        #print(splits)
        response = digitize(data[0], splits)
        #from numpy import bincount
        #print(bincount(response))
        for i, off in enumerate(offsets):
            mask = response == i
            data[1, mask] = data[0, mask] + offsets[i]
        return data

    def apply_params_and_score(self, params, data):
        # objective for the optimizer: scorer value after applying offsets
        data = self.apply_params(params, data)
        return self.scoring(data[1], data[2])
        #return -self.scoring(data[1], data[2]) ** 2

    def fit(self, X, y):
        """Jointly optimize all offsets; rows of data are (raw, offset, truth)."""
        from numpy import vstack
        data = vstack((X, X, y))

        from scipy.optimize import minimize,approx_fprime
        # gradient is approximated numerically with a coarse step of 0.05
        minimizer_kwargs = {
            'args': (data,),
            'method': self.minimizer,
            'jac': lambda x, args:
                approx_fprime(x, self.apply_params_and_score, 0.05, args),
            'tol': 1e-4,
            'options': {'disp': True}
        }
        if not self.basinhopping:
#            from sys import path as sys_path
#            sys_path.insert(0, './hyperopt')
#            from hyperopt import fmin, tpe, hp
#            space = {i: hp.uniform(str(i), -4, 4) for i in range(self.n_buckets)}
#            #from hyperopt import Trials
#            #trials = Trials()
#            best = fmin(fn=lambda space: self.apply_params_and_score([space[i] for i in range(self.n_buckets)], data),
#                space=space,
#                algo=tpe.suggest,
#                max_evals=1000,
#                #trials=trials
#                )
#            print(best, self.apply_params_and_score([best[str(i)] for i in range(self.n_buckets)], data))
            optres = minimize(
                self.apply_params_and_score,
                self.params,
                **minimizer_kwargs)
            pass
        else:
            # global search first, then polish the best point with BFGS
            from scipy.optimize import basinhopping
            optres = basinhopping(
                self.apply_params_and_score,
                self.params,
                niter=100,
                T=0.05,
                stepsize=0.10,
                minimizer_kwargs=minimizer_kwargs)
            minimizer_kwargs['method'] = 'BFGS'
            optres = minimize(
                self.apply_params_and_score,
                optres.x,
                **minimizer_kwargs)
            pass
        print(optres)
        self.params = optres.x
        return self

    def predict(self, X):
        """Apply the fitted (and offset_scale-scaled) offsets to X."""
        from numpy import vstack
        data = vstack((X, X))
        params = self.params.copy()
        params[:self.n_buckets] = self.offset_scale * params[:self.n_buckets]
        data = self.apply_params(params, data)
        return data[1]
    pass
class FullDigitizedOptimizedOffsetRegressor(BaseEstimator, RegressorMixin):
    """Offset regressor that optimizes both the per-bucket offsets *and* the
    bucket split points: params holds n_buckets offsets followed by
    n_buckets-1 split positions, all tuned jointly with scipy minimize
    (optionally preceded by basinhopping).
    """
    def __init__(self,
                 n_jobs=-1,
                 offset_scale=1.0,
                 n_buckets=2,
                 initial_params=None,
                 minimizer='BFGS',
                 basinhopping=False,
                 scoring='accuracy'):
        from numpy import array
        self.n_jobs = int(n_jobs)
        self.offset_scale = float(offset_scale)
        self.n_buckets = int(n_buckets)
        # NOTE(review): as in the Digitized variant, self.params stays unset
        # when initial_params is None and fit() will fail -- verify callers.
        if initial_params is None:
            #self.initial_offsets_ = [-0.5] * self.n_buckets
            pass
        else:
            self.params = array(initial_params)
            #assert(len(self.initial_offsets_) == self.n_buckets)
            pass
        self.minimizer = minimizer
        self.basinhopping = basinhopping
        from sklearn.metrics import get_scorer
        self.scoring = get_scorer(scoring)
        pass

    def apply_params(self, params, data):
        """Bucket row 0 using the learned split points and add the matching
        offset to row 1."""
        from numpy import digitize
        offsets = params[:self.n_buckets]
        # the split points are free parameters; sort them so digitize gets
        # monotonically increasing bins
        splits = sorted(list(params[self.n_buckets:2 * self.n_buckets - 1]))
        response = digitize(data[0], splits)
        for i, off in enumerate(offsets):
            mask = response == i
            data[1, mask] = data[0, mask] + offsets[i]
        return data

    def apply_params_and_score(self, params, data):
        # objective for the optimizer: scorer value after applying offsets
        data = self.apply_params(params, data)
        return self.scoring(data[1], data[2])

    def fit(self, X, y):
        """Jointly optimize offsets and splits; rows are (raw, offset, truth)."""
        from numpy import vstack
        data = vstack((X, X, y))

        from scipy.optimize import minimize,approx_fprime
        # looser tolerance for BFGS, numeric gradient with step 0.05
        minimizer_kwargs = {
            'args': (data,),
            'method': self.minimizer,
            'jac': lambda x, args:
                approx_fprime(x, self.apply_params_and_score, 0.05, args),
            'tol': 3e-2 if self.minimizer == 'BFGS' else 1e-4,
            'options': {'disp': True}
        }
        if not self.basinhopping:
            optres = minimize(
                self.apply_params_and_score,
                self.params,
                **minimizer_kwargs)
            pass
        else:
            # global search first, then a finer BFGS polish (tol 1e-2,
            # gradient step 0.01) from the best point found
            from scipy.optimize import basinhopping
            optres = basinhopping(
                self.apply_params_and_score,
                self.params,
                niter=250,
                T=0.05,
                stepsize=0.10,
                minimizer_kwargs=minimizer_kwargs)
            minimizer_kwargs['method'] = 'BFGS'
            minimizer_kwargs['tol'] = 1e-2
            minimizer_kwargs['jac'] = lambda x, args: \
                approx_fprime(x, self.apply_params_and_score, 0.01, args)
            optres = minimize(
                self.apply_params_and_score,
                optres.x,
                **minimizer_kwargs)
            pass
        print(optres)
        self.params = optres.x
        return self

    def predict(self, X):
        """Apply the fitted (and offset_scale-scaled) offsets to X."""
        from numpy import vstack
        data = vstack((X, X))
        params = self.params.copy()
        params[:self.n_buckets] = self.offset_scale * params[:self.n_buckets]
        data = self.apply_params(params, data)
        return data[1]
    pass
if __name__ == "__main__":
    # import-only module: no standalone CLI behaviour is defined
    pass
| mit |
def sort_files(files, split_on='_', elem_month=-2, elem_year=-1):
    '''
    Order SNAP-style filenames ('<prefix>_MM_YYYY.tif') chronologically.

    Plain lexicographic sorting places month 11 before month 2; here the
    month and year fields are parsed to integers and used as the sort key
    (year first, then month), producing a proper time-series ordering.

    ARGUMENTS:
    ----------
    files = [list] `str` pathnames to be sorted, usually from glob.glob.
    split_on = [str] character to split the filename on. default:'_', SNAP standard.
    elem_month = [int] index of the month field after splitting. default:-2.
    elem_year = [int] index of the year field after splitting. default:-1.

    RETURNS:
    --------
    new `list` sorted ascending by (year, month).
    '''
    def chronological_key(fn):
        fields = fn.split('.')[0].split(split_on)
        return (int(fields[elem_year]), int(fields[elem_month]))
    return sorted(files, key=chronological_key)
def only_years(files, begin=1901, end=2100, split_on='_', elem_year=-1):
    '''
    Keep only the filenames whose parsed year lies in [begin, end].

    ARGUMENTS:
    ----------
    files = [list] `str` pathnames, usually from glob.glob.
    begin = [int] four-digit first year to keep. default:1901
    end = [int] four-digit last year to keep. default:2100
    split_on = [str] character to split the filename on. default:'_', SNAP standard.
    elem_year = [int] index of the year field after splitting. default:-1.

    RETURNS:
    --------
    new `list` restricted to begin..end, original order preserved.
    '''
    def year_of(fn):
        return int(fn.split('.')[0].split(split_on)[elem_year])
    return [fn for fn in files if begin <= year_of(fn) <= end]
def get_month_seaon(fn):
    '''Return the meteorological season ('DJF'/'MAM'/'JJA'/'SON') for the
    month parsed from a SNAP-style '<prefix>_MM_YYYY.tif' filename.

    (Name keeps the historical "seaon" spelling for caller compatibility.)
    '''
    season_of = {m: s for s, trio in (('DJF', (12, 1, 2)),
                                      ('MAM', (3, 4, 5)),
                                      ('JJA', (6, 7, 8)),
                                      ('SON', (9, 10, 11)))
                 for m in trio}
    month, _year = os.path.basename(fn).replace('.tif', '').split('_')[-2:]
    return season_of[int(month)]
def get_year(fn):
    '''Return the year (as a string) parsed from a SNAP-style
    '<prefix>_MM_YYYY.tif' filename.'''
    fields = os.path.basename(fn).replace('.tif', '').split('_')
    _month, year = fields[-2:]
    return year
def read_raster(fn, band=1):
    '''
    Read one band from a GeoTiff and return it as an array, closing the
    dataset deterministically via the context manager.
    '''
    import rasterio
    with rasterio.open(fn) as dataset:
        band_arr = dataset.read(band)
    return band_arr
def calc_seasonal_mean( season_name, files, output_path, agg_metric='mean', *args, **kwargs ):
    '''
    aggregate one 3-month group of rasters into a single seasonal GeoTiff

    season_name = [str] season label (e.g. 'DJF') embedded in the output filename
    files = [list] paths of the monthly rasters making up the season
    output_path = [str] root output dir; <model>/<scenario>/<variable> is appended
    agg_metric = [str] one of 'mean', 'total', 'min', 'max'

    returns the output filename.
    '''
    years = [ int( get_year( fn ) ) for fn in files ]
    year = str( max( years ) ) # output is stamped with the latest year in the group
    fn = files[0]
    # NOTE(review): this dataset is never closed; consider a `with` block as
    # read_raster uses.
    rst = rasterio.open( fn )
    mask = rst.read_masks( 1 )
    meta = rst.meta
    if 'transform' in meta.keys():
        meta.pop( 'transform' ) # avoid passing a duplicate geotransform key back to rasterio
    meta.update( compress='lzw' )
    metric_switch = { 'mean':np.mean, 'total':np.sum, 'min':np.min, 'max':np.max }
    # input names follow '<variable>_<metric>_<units>_<project>_<model>_<scenario>_MM_YYYY.tif'
    variable, metric, units, project, model, scenario = os.path.basename( fn ).split( '.' )[0].split( '_' )[:-2]
    # stack all monthly bands and reduce along the time axis
    arr = metric_switch[ agg_metric ]( [ read_raster( i ) for i in files ], axis=0 )
    arr[ mask == 0 ] = meta[ 'nodata' ] # re-apply the nodata mask after aggregation
    output_filename = os.path.join( output_path, model, scenario, variable, '_'.join([ variable, agg_metric, units, project, model, scenario, season_name, year]) + '.tif' )
    dirname = os.path.dirname( output_filename )
    try:
        if not os.path.exists( dirname ):
            os.makedirs( dirname )
    except:
        # bare except tolerates the makedirs race between parallel workers;
        # ideally this would catch OSError only
        pass
    with rasterio.open( output_filename, 'w', **meta ) as out:
        out.write( arr, 1 )
    return output_filename
def wrap( x ):
    '''
    multiprocessing wrapper for clean
    argument handling without lambda
    '''
    # x is a (season_name, file_group, output_path) tuple built in make_seasonals
    return calc_seasonal_mean( *x )
def make_seasonals( base_path, output_path, model, scenario, variable, begin, end, ncpus ):
    '''
    function to calculate and output mean seasonal monthly data across decades
    ARGUMENTS:
    ----------
    base_path = [str] root dir of the downscaled GeoTiffs (model/scenario/variable/FILES)
    output_path = [str] directory where the seasonal outputs are written
    model = [str] model name (exact)
    scenario = [str] scenario name (exact)
    variable = [str] variable name (exact)
    begin = [int] first year to include
    end = [int] last year to include
    ncpus = [int] number of worker processes for the pool
    RETURNS
    -------
    output_directory of newly produced GeoTiffs if successful. else, error.
    '''
    # modeled data
    files = glob.glob( os.path.join( base_path, model, scenario, variable, '*.tif' ) )
    files = sort_files( only_years( files, begin=begin, end=end, split_on='_', elem_year=-1 ) )
    season_names = [ get_month_seaon( fn ) for fn in files ]
    years = [ int(get_year( fn )) for fn in files ]
    # min / max years
    start_year = str( min(years) )
    end_year = str( max(years) )
    # drop data for start_year JF and end_year D so every DJF season is complete
    files = [ fn for fn in files if not '_'.join([ '01',start_year ]) in fn if not '_'.join([ '02',start_year ]) in fn if not '_'.join([ '12',end_year ]) in fn ]
    files = pd.Series( files )
    # NOTE(review): '/' is integer division only under Python 2; under Python 3
    # this yields a float and np.split() raises -- confirm intended interpreter.
    split_n = len( files ) / 3
    # chronologically-sorted files fall into consecutive 3-month season groups
    grouped_seasons = np.split( np.array( files ), split_n )
    # season label for each group, taken from its first file
    season_names = [ get_month_seaon( i[0] ) for i in grouped_seasons ]
    seasons = zip( season_names, grouped_seasons )
    args = [ ( season_name, file_group, output_path ) for season_name, file_group in seasons ]
    # pathos pool: unlike stdlib multiprocessing it can ship the lambda below
    pool = mp.Pool( ncpus )
    out = pool.map( lambda x: wrap( x ), args )
    pool.close()
    pool.join()
    pool.terminate()
    pool = None
    return output_path
if __name__ == '__main__':
    import os, glob, itertools, rasterio
    import xarray as xr
    import pandas as pd
    import numpy as np
    from pathos import multiprocessing as mp
    import argparse

    # this tool assumes that the data are stored in a directory structure as follows:
    #   base_path / model / scenario / variable / FILES
    # parse the commandline arguments
    parser = argparse.ArgumentParser( description='downscale the AR5-CMIP5 data to the AKCAN extent required by SNAP' )
    parser.add_argument( "-b", "--base_path", action='store', dest='base_path', type=str, help="path to the directory where the downscaled modeled data are stored" )
    parser.add_argument( "-o", "--output_path", action='store', dest='output_path', type=str, help="path to the output directory" )
    parser.add_argument( "-m", "--model", action='store', dest='model', type=str, help="model name (exact)" )
    parser.add_argument( "-s", "--scenario", action='store', dest='scenario', type=str, help="scenario name (exact)" )
    parser.add_argument( "-p", "--project", action='store', dest='project', type=str, help="project name (exact)" )
    parser.add_argument( "-v", "--variable", action='store', dest='variable', type=str, help="cmip5 variable name (exact)" )
    parser.add_argument( "-am", "--agg_metric", action='store', dest='agg_metric', type=str, help="string name of the metric to compute the decadal summary - mean, max, min, total" )
    parser.add_argument( "-nc", "--ncpus", action='store', dest='ncpus', type=int, help="number of cpus to use in multiprocessing" )
    args = parser.parse_args()

    # unpack for cleaner var access:
    base_path = args.base_path
    output_path = args.output_path
    model = args.model
    scenario = args.scenario
    project = args.project
    variable = args.variable
    ncpus = args.ncpus
    # NOTE(review): agg_metric is parsed but never forwarded; make_seasonals /
    # calc_seasonal_mean fall back to their 'mean' default.
    agg_metric = args.agg_metric

    # date ranges per scenario. Hardwired to CMIP5 and CRU TS323 currently.
    cmip_switch = { 'historical':(1900,2005), 'rcp26':(2005,2100), 'rcp45':(2005,2100), 'rcp60':(2005,2100), 'rcp85':(2006,2100) }
    cru_switch = { 'historical':(1901,2014) }
    project_switch = { 'cmip5':cmip_switch, 'cru':cru_switch }
    begin, end = project_switch[ project ][ scenario ]

    print( 'running: {} {} {}'.format( model, scenario, variable ) )
    _ = make_seasonals( base_path, output_path, model, scenario, variable, begin, end, ncpus )

    # NOTE(review): the hardwired batch run below executes unconditionally
    # after the single CLI-driven run above -- likely leftover dev code.
    # some setup
    base_path = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/downscaled_cmip5_clipped'
    output_path = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/derived_outputs/seasonal_annuals'
    project = 'cmip5'
    models = [ 'IPSL-CM5A-LR', 'MRI-CGCM3', 'GISS-E2-R', 'GFDL-CM3', 'CCSM4', '5ModelAvg' ]
    scenarios = [ 'historical', 'rcp26', 'rcp45', 'rcp60', 'rcp85' ]
    variables = [ 'tasmin', 'tasmax', 'pr', 'tas' ]
    ncpus = 32

    # run all combinations
    for model, scenario, variable in itertools.product( models, scenarios, variables ):
        print( 'running: {} {} {}'.format( model, scenario, variable ) )
        begin, end = project_switch[ project ][ scenario ]
        # BUG FIX: the original called undefined `main(...)` here (NameError at
        # runtime); the worker function defined above is make_seasonals.
        _ = make_seasonals( base_path, output_path, model, scenario, variable, begin, end, ncpus )
| mit |
mikecroucher/GPy | GPy/models/sparse_gplvm.py | 6 | 1890 | # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import sys
from .sparse_gp_regression import SparseGPRegression
from ..core import Param
class SparseGPLVM(SparseGPRegression):
    """
    Sparse Gaussian Process Latent Variable Model

    :param Y: observed data
    :type Y: np.ndarray
    :param input_dim: latent dimensionality
    :type input_dim: int
    :param init: initialisation method for the latent space
    :type init: 'PCA'|'random'
    """
    def __init__(self, Y, input_dim, X=None, kernel=None, init='PCA', num_inducing=10):
        if X is None:
            # Initialise the latent coordinates from the data (PCA or random).
            from ..util.initialization import initialize_latent
            X, fracs = initialize_latent(init, input_dim, Y)  # `fracs` (explained variance) is unused here
        # Wrap X as a Param so the latent positions become optimisable.
        X = Param('latent space', X)
        SparseGPRegression.__init__(self, X, Y, kernel=kernel, num_inducing=num_inducing)
        # Register the latent positions as the first parameter of the model.
        self.link_parameter(self.X, 0)

    def parameters_changed(self):
        # Let the sparse GP recompute its gradients first, then accumulate the
        # gradient of the marginal likelihood w.r.t. the latent positions X
        # (diagonal Knn term plus the cross-covariance Knm term).
        super(SparseGPLVM, self).parameters_changed()
        self.X.gradient = self.kern.gradients_X_diag(self.grad_dict['dL_dKdiag'], self.X)
        self.X.gradient += self.kern.gradients_X(self.grad_dict['dL_dKnm'], self.X, self.Z)

    def plot_latent(self, labels=None, which_indices=None,
                    resolution=50, ax=None, marker='o', s=40,
                    fignum=None, plot_inducing=True, legend=True,
                    plot_limits=None,
                    aspect='auto', updates=False, predict_kwargs={}, imshow_kwargs={}):
        """Plot the learned latent space (delegates to the matplotlib helpers)."""
        # Plotting requires matplotlib to have been imported by the caller.
        assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
        from ..plotting.matplot_dep import dim_reduction_plots
        return dim_reduction_plots.plot_latent(self, labels, which_indices,
                                               resolution, ax, marker, s,
                                               fignum, plot_inducing, legend,
                                               plot_limits, aspect, updates, predict_kwargs, imshow_kwargs)
| bsd-3-clause |
Myasuka/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
# Hint shown when the import picked up an un-built, in-tree source checkout.
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""

# Generic hint appended for installer-based installations.
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
    """Re-raise *e* as an ImportError explaining that the compiled
    extensions are missing, and list the package directory contents
    to help debugging on the mailing list."""
    local_dir = os.path.split(__file__)[0]
    # An in-place (in-tree) checkout gets the more specific hint.
    if local_dir == "sklearn/__check_build":
        msg = INPLACE_MSG
    else:
        msg = STANDARD_MSG
    # Lay the directory listing out three entries per row.
    listing = []
    for index, entry in enumerate(os.listdir(local_dir)):
        row_end = (index + 1) % 3 == 0
        listing.append(entry + '\n' if row_end else entry.ljust(26))
    raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(listing).strip(), msg))
# Importing the compiled extension verifies the build; on failure the
# ImportError is converted into a detailed, actionable message.
try:
    from ._check_build import check_build
except ImportError as e:
    raise_build_error(e)
| bsd-3-clause |
pratapvardhan/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample: 2-feature, linearly separable problem (3 negatives, 3 positives)
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]

# also load the boston dataset (regression fixtures)
boston = datasets.load_boston()

# also load the iris dataset (multi-class fixtures)
iris = datasets.load_iris()
def test_partial_dependence_classifier():
    # Partial dependence of feature 0 for a fitted binary classifier.
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(X, y)

    pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)

    # X[:, 0] has only 4 distinct values, so the grid collapses to 4 points.
    assert pdp.shape == (1, 4)
    assert axes[0].shape[0] == 4

    # Supplying the grid explicitly must give the same curve (and no axes).
    grid = np.unique(np.asarray(X)[:, 0])
    pdp_2, axes = partial_dependence(clf, [0], grid=grid)
    assert axes is None
    assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
    # A multi-class model yields one partial-dependence curve per class.
    est = GradientBoostingClassifier(n_estimators=10, random_state=1)
    est.fit(iris.data, iris.target)

    resolution = 25
    pdp, axes = partial_dependence(est, [0], X=iris.data,
                                   grid_resolution=resolution)

    assert pdp.shape == (est.n_classes_, resolution)
    assert len(axes) == 1
    assert axes[0].shape[0] == resolution
def test_partial_dependence_regressor():
    # A regressor yields a single partial-dependence curve.
    est = GradientBoostingRegressor(n_estimators=10, random_state=1)
    est.fit(boston.data, boston.target)

    resolution = 25
    pdp, axes = partial_dependence(est, [0], X=boston.data,
                                   grid_resolution=resolution)

    assert pdp.shape == (1, resolution)
    assert axes[0].shape[0] == resolution
def test_partial_dependecy_input():
    # Input validation of partial_dependence.
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(X, y)

    # Exactly one of `grid` and `X` must be provided.
    assert_raises(ValueError, partial_dependence, clf, [0], grid=None, X=None)
    assert_raises(ValueError, partial_dependence, clf, [0], grid=[0, 1], X=X)

    # The estimator must derive from BaseGradientBoosting...
    assert_raises(ValueError, partial_dependence, {}, [0], X=X)
    # ...and must already be fitted.
    assert_raises(ValueError, partial_dependence,
                  GradientBoostingClassifier(), [0], X=X)

    # Feature indices must lie within [0, n_features).
    for bad_feature in (-1, 100):
        assert_raises(ValueError, partial_dependence, clf, [bad_feature], X=X)

    # A custom grid must be at most 2-dimensional.
    assert_raises(ValueError, partial_dependence, clf, [0],
                  grid=np.random.rand(10, 2, 1))
@if_matplotlib
def test_plot_partial_dependence():
    # Smoke-test plot_partial_dependence for a regressor with the three
    # supported ways of naming features.
    est = GradientBoostingRegressor(n_estimators=10, random_state=1)
    est.fit(boston.data, boston.target)
    resolution = 25

    def check(features, names):
        # Each call should produce three populated axes.
        fig, axs = plot_partial_dependence(est, boston.data, features,
                                           grid_resolution=resolution,
                                           feature_names=names)
        assert len(axs) == 3
        assert all(ax.has_data for ax in axs)

    # integer feature ids with an array of feature names
    check([0, 1, (0, 1)], boston.feature_names)
    # string feature ids with an array of feature names
    check(['CRIM', 'ZN', ('CRIM', 'ZN')], boston.feature_names)
    # string feature ids with a plain list of names
    check(['CRIM', 'ZN', ('CRIM', 'ZN')], boston.feature_names.tolist())
@if_matplotlib
def test_plot_partial_dependence_input():
    # Input validation of plot_partial_dependence.
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)

    # Unfitted estimators are rejected.
    assert_raises(ValueError, plot_partial_dependence, clf, X, [0])
    clf.fit(X, y)

    # Zero-width input array is rejected.
    assert_raises(ValueError, plot_partial_dependence,
                  clf, np.array(X)[:, :0], [0])

    # The estimator must derive from BaseGradientBoosting.
    assert_raises(ValueError, plot_partial_dependence, {}, X, [0])

    # Out-of-range, unknown, or unhashable feature specifiers are rejected.
    for bad_features in ([-1], [100], ['foobar'], [{'foo': 'bar'}]):
        assert_raises(ValueError, plot_partial_dependence,
                      clf, X, bad_features)
@if_matplotlib
def test_plot_partial_dependence_multiclass():
    # Multi-class targets require selecting a class via `label`.
    resolution = 25
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(iris.data, iris.target)

    fig, axs = plot_partial_dependence(clf, iris.data, [0, 1], label=0,
                                       grid_resolution=resolution)
    assert len(axs) == 2
    assert all(ax.has_data for ax in axs)

    # Re-fit with string class labels and select the class by name.
    clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
    clf.fit(iris.data, iris.target_names[iris.target])
    fig, axs = plot_partial_dependence(clf, iris.data, [0, 1], label='setosa',
                                       grid_resolution=resolution)
    assert len(axs) == 2
    assert all(ax.has_data for ax in axs)

    # A label absent from gbrt.classes_, and a missing label, are rejected.
    assert_raises(ValueError, plot_partial_dependence,
                  clf, iris.data, [0, 1], label='foobar',
                  grid_resolution=resolution)
    assert_raises(ValueError, plot_partial_dependence,
                  clf, iris.data, [0, 1],
                  grid_resolution=resolution)
| bsd-3-clause |
dvornikita/blitznet | training.py | 1 | 14424 | #!/usr/bin/env python3
from config import get_logging_config, args, train_dir
from config import config as net_config
import time
import os
import sys
import socket
import logging
import logging.config
import subprocess
import tensorflow as tf
import numpy as np
import matplotlib
matplotlib.use('Agg')
from vgg import VGG
from resnet import ResNet
from utils import print_variables
from utils_tf import yxyx_to_xywh, data_augmentation
from datasets import get_dataset
from boxer import PriorBoxGrid
slim = tf.contrib.slim
streaming_mean_iou = tf.contrib.metrics.streaming_mean_iou
logging.config.dictConfig(get_logging_config(args.run_name))
log = logging.getLogger()
def objective(location, confidence, refine_ph, classes_ph,
              pos_mask, seg_logits, seg_gt, dataset, config):
    """Build the total training loss (detection + segmentation + weight decay).

    Returns (the_loss, train_acc, mean_iou, update_mean_iou); accuracy/IoU
    default to constant 1 when the corresponding head is disabled.
    """
    def smooth_l1(x, y):
        # Standard smooth-L1 regression loss, summed over box coordinates.
        abs_diff = tf.abs(x-y)
        return tf.reduce_sum(tf.where(abs_diff < 1,
                                      0.5*abs_diff*abs_diff,
                                      abs_diff - 0.5),
                             1)

    def segmentation_loss(seg_logits, seg_gt, config):
        # Ignore pixels whose label exceeds the class range (e.g. void label).
        mask = seg_gt <= dataset.num_classes
        seg_logits = tf.boolean_mask(seg_logits, mask)
        seg_gt = tf.boolean_mask(seg_gt, mask)
        seg_predictions = tf.argmax(seg_logits, axis=1)
        seg_loss_local = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=seg_logits,
                                                                        labels=seg_gt)
        seg_loss = tf.reduce_mean(seg_loss_local)
        tf.summary.scalar('loss/segmentation', seg_loss)
        mean_iou, update_mean_iou = streaming_mean_iou(seg_predictions, seg_gt,
                                                       dataset.num_classes)
        tf.summary.scalar('accuracy/mean_iou', mean_iou)
        return seg_loss, mean_iou, update_mean_iou

    def detection_loss(location, confidence, refine_ph, classes_ph, pos_mask):
        # SSD-style loss with hard-negative mining at a 3:1 neg:pos ratio.
        neg_mask = tf.logical_not(pos_mask)
        number_of_positives = tf.reduce_sum(tf.to_int32(pos_mask))
        true_number_of_negatives = tf.minimum(3 * number_of_positives,
                                              tf.shape(pos_mask)[1] - number_of_positives)
        # max is to avoid the case where no positive boxes were sampled
        number_of_negatives = tf.maximum(1, true_number_of_negatives)
        num_pos_float = tf.to_float(tf.maximum(1, number_of_positives))
        normalizer = tf.to_float(tf.add(number_of_positives, number_of_negatives))
        tf.summary.scalar('batch/size', normalizer)

        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=confidence,
                                                                       labels=classes_ph)
        pos_class_loss = tf.reduce_sum(tf.boolean_mask(cross_entropy, pos_mask))
        tf.summary.scalar('loss/class_pos', pos_class_loss / num_pos_float)

        # Keep only the hardest negatives (highest cross-entropy).
        top_k_worst, top_k_inds = tf.nn.top_k(tf.boolean_mask(cross_entropy, neg_mask),
                                              number_of_negatives)
        # multiplication is to avoid the case where no positive boxes were sampled
        neg_class_loss = tf.reduce_sum(top_k_worst) * \
            tf.cast(tf.greater(true_number_of_negatives, 0), tf.float32)
        class_loss = (neg_class_loss + pos_class_loss) / num_pos_float
        tf.summary.scalar('loss/class_neg', neg_class_loss / tf.to_float(number_of_negatives))
        tf.summary.scalar('loss/class', class_loss)

        # cond is to avoid the case where no positive boxes were sampled
        bbox_loss = tf.cond(tf.equal(tf.reduce_sum(tf.cast(pos_mask, tf.int32)), 0),
                            lambda: 0.0,
                            lambda: tf.reduce_mean(smooth_l1(tf.boolean_mask(location, pos_mask),
                                                             tf.boolean_mask(refine_ph, pos_mask))))
        tf.summary.scalar('loss/bbox', bbox_loss)

        # Training accuracy over the positives plus the mined hard negatives.
        inferred_class = tf.cast(tf.argmax(confidence, 2), tf.int32)
        positive_matches = tf.equal(tf.boolean_mask(inferred_class, pos_mask),
                                    tf.boolean_mask(classes_ph, pos_mask))
        hard_matches = tf.equal(tf.boolean_mask(inferred_class, neg_mask),
                                tf.boolean_mask(classes_ph, neg_mask))
        hard_matches = tf.gather(hard_matches, top_k_inds)
        train_acc = ((tf.reduce_sum(tf.to_float(positive_matches)) +
                      tf.reduce_sum(tf.to_float(hard_matches))) / normalizer)
        tf.summary.scalar('accuracy/train', train_acc)

        # Precision/recall/F1 treating "class > 0" as a detection.
        recognized_class = tf.argmax(confidence, 2)
        tp = tf.reduce_sum(tf.to_float(tf.logical_and(recognized_class > 0, pos_mask)))
        fp = tf.reduce_sum(tf.to_float(tf.logical_and(recognized_class > 0, neg_mask)))
        fn = tf.reduce_sum(tf.to_float(tf.logical_and(tf.equal(recognized_class, 0), pos_mask)))
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        f1 = 2*(precision * recall)/(precision + recall)
        tf.summary.scalar('metrics/train/precision', precision)
        tf.summary.scalar('metrics/train/recall', recall)
        tf.summary.scalar('metrics/train/f1', f1)
        return class_loss, bbox_loss, train_acc, number_of_positives

    the_loss = 0
    train_acc = tf.constant(1)
    mean_iou = tf.constant(1)
    update_mean_iou = tf.constant(1)

    if args.segment:
        seg_loss, mean_iou, update_mean_iou = segmentation_loss(seg_logits, seg_gt, config)
        the_loss += seg_loss

    if args.detect:
        class_loss, bbox_loss, train_acc, number_of_positives =\
            detection_loss(location, confidence, refine_ph, classes_ph, pos_mask)
        det_loss = class_loss + bbox_loss
        the_loss += det_loss

    # L2 weight decay collected from the regularizers attached at layer creation.
    regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    wd_loss = tf.add_n(regularization_losses)
    tf.summary.scalar('loss/weight_decay', wd_loss)
    the_loss += wd_loss

    tf.summary.scalar('loss/full', the_loss)
    return the_loss, train_acc, mean_iou, update_mean_iou
def extract_batch(dataset, config):
    """Build the CPU-side input pipeline: read, augment, encode ground truth,
    and return a shuffled training batch of (image, inds, refine, cats, seg)."""
    with tf.device("/cpu:0"):
        bboxer = PriorBoxGrid(config)
        data_provider = slim.dataset_data_provider.DatasetDataProvider(
            dataset, num_readers=2,
            common_queue_capacity=512, common_queue_min=32)
        if args.segment:
            im, bbox, gt, seg = data_provider.get(['image', 'object/bbox', 'object/label',
                                                   'image/segmentation'])
        else:
            im, bbox, gt = data_provider.get(['image', 'object/bbox', 'object/label'])
            # Dummy all-zero mask so downstream ops have a uniform signature.
            seg = tf.expand_dims(tf.zeros(tf.shape(im)[:2]), 2)
        # Normalize pixels to [0, 1] and convert boxes to clipped xywh format.
        im = tf.to_float(im)/255
        bbox = yxyx_to_xywh(tf.clip_by_value(bbox, 0.0, 1.0))
        im, bbox, gt, seg = data_augmentation(im, bbox, gt, seg, config)
        # Match ground-truth boxes to prior boxes (SSD-style encoding).
        inds, cats, refine = bboxer.encode_gt_tf(bbox, gt)
        return tf.train.shuffle_batch([im, inds, refine, cats, seg],
                                      args.batch_size, 2048, 64, num_threads=4)
def train(dataset, net, config):
    """Build the training graph for `net` on `dataset` and run the optimization loop.

    Restores the latest checkpoint from `train_dir` when present, otherwise
    initializes the trunk from ImageNet weights (unless --random_trunk_init).
    """
    image_ph, inds_ph, refine_ph, classes_ph, seg_gt = extract_batch(dataset, config)

    net.create_trunk(image_ph)
    if args.detect:
        net.create_multibox_head(dataset.num_classes)
        confidence = net.outputs['confidence']
        location = net.outputs['location']
        tf.summary.histogram('location', location)
        tf.summary.histogram('confidence', confidence)
    else:
        location, confidence = None, None
    if args.segment:
        net.create_segmentation_head(dataset.num_classes)
        seg_logits = net.outputs['segmentation']
        tf.summary.histogram('segmentation', seg_logits)
    else:
        seg_logits = None

    loss, train_acc, mean_iou, update_mean_iou = objective(location, confidence, refine_ph,
                                                           classes_ph, inds_ph, seg_logits,
                                                           seg_gt, dataset, config)

    ### setting up the learning rate: warmup, then piecewise decay by 10x ###
    global_step = slim.get_or_create_global_step()
    learning_rate = args.learning_rate
    learning_rates = [args.warmup_lr, learning_rate]
    steps = [args.warmup_step]
    if len(args.lr_decay) > 0:
        for i, step in enumerate(args.lr_decay):
            steps.append(step)
            learning_rates.append(learning_rate*10**(-i-1))
    learning_rate = tf.train.piecewise_constant(tf.to_int32(global_step),
                                                steps, learning_rates)
    tf.summary.scalar('learning_rate', learning_rate)

    if args.optimizer == 'adam':
        opt = tf.train.AdamOptimizer(learning_rate)
    elif args.optimizer == 'nesterov':
        opt = tf.train.MomentumOptimizer(learning_rate, 0.9, use_nesterov=True)
    else:
        # BUGFIX: was a bare `raise ValueError` with no message.
        raise ValueError('Unknown optimizer: %s' % args.optimizer)

    train_vars = tf.trainable_variables()
    print_variables('train', train_vars)
    train_op = slim.learning.create_train_op(
        loss, opt,
        global_step=global_step,
        variables_to_train=train_vars,
        summarize_gradients=True)
    summary_op = tf.summary.merge_all()

    saver = tf.train.Saver(tf.global_variables(), max_to_keep=1000, keep_checkpoint_every_n_hours=1)
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)) as sess:
        # BUGFIX: the summary FileWriter used to be created twice (once here
        # and once after checkpoint restore), leaking the first writer and
        # producing duplicate event files; it is now created exactly once,
        # after the graph is finalized.
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        if args.random_trunk_init:
            print("Training from scratch")
        else:
            init_assign_op, init_feed_dict, init_vars = net.get_imagenet_init(opt)
            print_variables('init from ImageNet', init_vars)
            sess.run(init_assign_op, feed_dict=init_feed_dict)

        ckpt = tf.train.get_checkpoint_state(train_dir)
        if ckpt and ckpt.model_checkpoint_path:
            # Resume from the latest checkpoint, or a specific one via --ckpt.
            if args.ckpt == 0:
                ckpt_to_restore = ckpt.model_checkpoint_path
            else:
                ckpt_to_restore = train_dir+'/model.ckpt-%i' % args.ckpt
            log.info("Restoring model %s..." % ckpt_to_restore)
            saver.restore(sess, ckpt_to_restore)

        starting_step = sess.run(global_step)
        # Freeze the graph so any accidental op creation in the loop fails fast.
        tf.get_default_graph().finalize()
        summary_writer = tf.summary.FileWriter(train_dir, sess.graph)

        log.info("Launching prefetch threads")
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        log.info("Starting training...")
        for step in range(starting_step, args.max_iterations+1):
            start_time = time.time()
            try:
                train_loss, acc, iou, _, lr = sess.run([train_op, train_acc, mean_iou,
                                                        update_mean_iou, learning_rate])
            except (tf.errors.OutOfRangeError, tf.errors.CancelledError):
                break
            duration = time.time() - start_time

            num_examples_per_step = args.batch_size
            examples_per_sec = num_examples_per_step / duration
            sec_per_batch = float(duration)
            format_str = ('step %d, loss = %.2f, acc = %.2f, iou=%f, lr=%.3f (%.1f examples/sec; %.3f '
                          'sec/batch)')
            log.info(format_str % (step, train_loss, acc, iou, -np.log10(lr),
                                   examples_per_sec, sec_per_batch))

            if step % 100 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)
            if step % 1000 == 0 and step > 0:
                summary_writer.flush()
                log.debug("Saving checkpoint...")
                checkpoint_path = os.path.join(train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

        summary_writer.close()
        coord.request_stop()
        coord.join(threads)
def main(argv=None):  # pylint: disable=unused-argument
    """Entry point: pick the trunk architecture and dataset splits from the
    CLI args, then launch training."""
    assert args.detect or args.segment, "Either detect or segment should be True"

    # BUGFIX: this used to be a chain of independent `if`s with no fallback,
    # so an unknown --trunk or --dataset crashed later with a NameError.
    if args.trunk == 'resnet50':
        net, depth = ResNet, 50
    elif args.trunk == 'vgg16':
        net, depth = VGG, 16
    else:
        raise ValueError('Unknown trunk: %s' % args.trunk)
    net = net(config=net_config, depth=depth, training=True, weight_decay=args.weight_decay)

    # Map each dataset flag to the split names passed to get_dataset.
    dataset_splits = {
        'voc07': ('voc07_trainval',),
        'voc12-trainval': ('voc12-train-segmentation', 'voc12-val'),
        'voc12-train': ('voc12-train-segmentation',),
        'voc12-val': ('voc12-val-segmentation',),
        'voc07+12': ('voc07_trainval', 'voc12_train', 'voc12_val'),
        'voc07+12-segfull': ('voc07-trainval-segmentation', 'voc12-train-segmentation', 'voc12-val'),
        'voc07+12-segmentation': ('voc07-trainval-segmentation', 'voc12-train-segmentation'),
        # support by default for coco trainval35k split
        'coco': ('coco-train2014-*', 'coco-valminusminival2014-*'),
        'coco-seg': ('coco-seg-train2014-*', 'coco-seg-valminusminival2014-*'),
    }
    if args.dataset not in dataset_splits:
        raise ValueError('Unknown dataset: %s' % args.dataset)
    dataset = get_dataset(*dataset_splits[args.dataset])

    train(dataset, net, net_config)
if __name__ == '__main__':
    # Record the exact command line and git state for reproducibility.
    exec_string = ' '.join(sys.argv)
    log.debug("Executing a command: %s", exec_string)
    cur_commit = subprocess.check_output("git log -n 1 --pretty=format:\"%H\"".split())
    cur_branch = subprocess.check_output("git rev-parse --abbrev-ref HEAD".split())
    git_diff = subprocess.check_output('git diff --no-color'.split()).decode('ascii')
    log.debug("on branch %s with the following diff from HEAD (%s):" % (cur_branch, cur_commit))
    log.debug(git_diff)
    # On GPU cluster hosts, also record which GPU is assigned to this job.
    hostname = socket.gethostname()
    if 'gpuhost' in hostname:
        gpu_id = os.environ["CUDA_VISIBLE_DEVICES"]
        nvidiasmi = subprocess.check_output('nvidia-smi').decode('ascii')
        log.debug("Currently we are on %s and use gpu%s:" % (hostname, gpu_id))
        log.debug(nvidiasmi)
    # tf.app.run parses flags and invokes main().
    tf.app.run()
| mit |
sumspr/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set and, therefore,
so are the corresponding Mahalanobis distances. It is better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standards estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications are outliers detection,
observations ranking, clustering, ...
For visualization purpose, the cubic root of the Mahalanobis distances
are represented in the boxplot, as Wilson and Hilferty suggest [2]
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2

# generate Gaussian data with anisotropic covariance (variance 2 on feature 0)
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers drawn with a much larger spread on the later features
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)

# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)

# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)

###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)

# Show data set: inliers in black, contaminating points in red
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
                              color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
                               color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")

# Show contours of the two Mahalanobis distance functions on a dense grid
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
                     np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
                                  cmap=plt.cm.PuBu_r,
                                  linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
                                 cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
                inlier_plot, outlier_plot],
               ['MLE dist', 'robust dist', 'inliers', 'outliers'],
               loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())

# Plot the scores for each point
# NOTE(review): ** 0.33 approximates the cube-root transform of Wilson &
# Hilferty; the exact exponent would be 1/3.
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
             emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
             emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())

robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
                widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
             robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
             robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())

plt.show()
| bsd-3-clause |
viveksck/langchangetrack | langchangetrack/tsconstruction/distributional/scripts/learn_map.py | 1 | 16046 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Benchmark for the quality of the joint space"""
from argparse import ArgumentParser
import logging
import sys
from io import open
import os
from os import path
from time import time
from glob import glob
from collections import defaultdict
from copy import deepcopy
from random import shuffle
import json
import cPickle as pickle
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import NearestNeighbors
import numpy
from numpy import asarray
from langchangetrack.utils.LocalLinearRegression import LocalLinearRegression
__author__ = "Rami Al-Rfou"
__email__ = "rmyeid@gmail.com"
# Log line format: timestamp, level, file:line, message.
LOGFORMAT = "%(asctime).19s %(levelname)s %(filename)s: %(lineno)s %(message)s"

# Module-level regression model handle — presumably populated later in this
# file before use; TODO confirm.
reg_model = None
# Number of nearest neighbours used by the mapping routines.
K_NN = 1000
class Mapping(object):
    """A term/phrase mapping between a source and a target language.

    `map` starts as None and is filled in by subclasses or callers with a
    {source_term: target_term} dictionary.
    """

    def __init__(self, source=None, target=None):
        self.s_lang, self.t_lang = source, target
        self.map = None
class IdentityTranslations(Mapping):
    """Mapping whose entries translate every shared word onto itself."""

    def __init__(self, source, target, se, te):
        super(IdentityTranslations, self).__init__(source, target)
        # Words present in both embeddings' vocabularies map identically.
        shared = set(se.word_id.keys()) & set(te.word_id.keys())
        self.map = {word: word for word in shared}
class Embeddings(object):
""" A list of words and their vector representatoins.
We assume that the given words are sorted by their frequency.
"""
def __init__(self, lang, filename=None, vectors=None, words=None):
self.lang = lang
if filename:
self.filename = filename
self.read_file()
if vectors != None:
self.vectors = asarray(vectors)
if words:
if len(set(words)) == len(words):
self.word_id = {w: i for i, w in enumerate(words)}
else:
logging.debug("We have duplicate words.")
self.word_id = {u'{}_{}'.format(w, i): i for i, w in enumerate(words)}
self.id_word = {i: w for w, i in self.word_id.iteritems()}
self.words = [w for w, i in Embeddings.sorted_words(self.word_id)]
    def read_file(self):
        """Populate the embeddings from `self.filename`; format-specific
        subclasses must implement this."""
        raise NotImplementedError("Implement an embeddings reader.")
    def get_vectors(self, words=None):
        """Return the vectors for `words` (in the given order), or the full
        vector matrix when `words` is falsy."""
        if words:
            return asarray([self.vectors[self.word_id[w]] for w in words])
        return self.vectors
    def __most_frequent(self, n, start=0):
        # Word ids encode frequency rank, so sorting by id and slicing yields
        # the words ranked [start, n).  (Python 2 tuple-parameter lambda.)
        return [x for x, y in sorted(self.word_id.iteritems(), key=lambda(x, y): y)[start:n]]
    def most_frequent(self, n, start=0):
        """Return a new Embeddings restricted to frequency ranks [start, n)."""
        return Embeddings(lang=self.lang, words=self.words[start:n],
                          vectors=self.vectors[start:n])
    def least_frequent_n(self, n):
        """Return the `n` least frequent words (rarest first)."""
        return [x for x, y in sorted(self.word_id.iteritems(),
                                     key=lambda(x, y): y, reverse=True)[:n]]
    def words_translations(self, other, mapping, segment):
        """Build aligned source/target sub-embeddings for the frequency band
        `segment` = (start, end).

        NOTE(review): assumes every source word in the band has an entry in
        `mapping.map` and that each translation exists in `other.word_id` —
        a missing entry raises KeyError; confirm with callers.
        """
        start, end = segment
        s_words = self.__most_frequent(n=end, start=start)
        map_ = mapping.map
        t_words = [map_[w] for w in s_words]
        # Count case-insensitively identical pairs — for logging only.
        exact = [(w1, w2) for (w1, w2) in zip(s_words, t_words) if w1.lower() == w2.lower()]
        logging.info("{} exact words translations in between {}-{} for "
                     "{}-{} languages.".format(len(exact), start, end, mapping.s_lang, mapping.t_lang))
        s_new_vectors = self.vectors[start:end]
        t_new_vectors = asarray([other.vectors[other.word_id[w]] for w in t_words])
        source = Embeddings(vectors=s_new_vectors, words=s_words, lang=self.lang)
        target = Embeddings(vectors=t_new_vectors, words=t_words, lang=other.lang)
        return (source, target)
    @staticmethod
    def sorted_words(word_id):
        """Return (word, id) pairs sorted by id, i.e. by frequency rank."""
        return sorted(word_id.iteritems(), key=lambda(x, y): y)
    def get_common(self, other, mapping):
        """Limit the two embeddings to the terms that are covered by the mapping.

        Returns (new_self, new_other) with vocabularies filtered to words that
        have a translation entry whose target exists in `other`.
        """
        self_oov = defaultdict(lambda: 0)
        other_oov = defaultdict(lambda: 0)
        self_word_id = deepcopy(self.word_id)
        other_word_id = deepcopy(other.word_id)
        new_words = []  # NOTE(review): never used — dead local; confirm and remove
        map_ = mapping.map
        # Drop source words with no translation, or whose translation is
        # missing from the other vocabulary.
        for i, w in enumerate(self.word_id):
            if w not in map_:
                self_oov[w] += 1
                del self_word_id[w]
                continue
            if map_[w] not in other.word_id:
                other_oov[map_[w]] += 1
                del self_word_id[w]
        # Drop target-side words that the mapping never references.
        for i, w in enumerate(other.word_id):
            if w not in map_:
                del other_word_id[w]
        logging.info("We could not find {} {} words in our dictionary.".format(
            len(self_oov), self.lang))
        logging.info("We could not find {} {} words in our target words.".format(
            len(other_oov), other.lang))
        logging.info("Our {} vocabulary has {} valid words.".format(
            self.lang, len(self_word_id)))
        # Rebuild both embeddings, preserving the original frequency order.
        sorted_self_word_id = Embeddings.sorted_words(self_word_id)
        self_vectors = asarray([self.vectors[i] for w, i in sorted_self_word_id])
        self_words = [w for w, i in sorted_self_word_id]
        new_self = Embeddings(lang=self.lang, vectors=self_vectors, words=self_words)
        sorted_other_word_id = Embeddings.sorted_words(other_word_id)
        other_vectors = asarray([other.vectors[i] for w, i in sorted_other_word_id])
        other_words = [w for w, i in sorted_other_word_id]
        # NOTE(review): `lang=self.lang` here looks like it should be
        # `other.lang` — confirm before relying on the language tag.
        new_other = Embeddings(lang=self.lang, vectors=other_vectors, words=other_words)
        return (new_self, new_other)
def split(self, mapping, ignore_exact=True):
    """Generates two embeddings that cover the mapping terms.

    If we have a1: b1, a2: b2 mappings in an embeddings space where {a1, b1,
    a2, b2} exists, we would like to generate two embeddings spaces, one for
    {a1, a2} and another for {b1, b2}.

    Sometimes it is not desirable to include exact terms a3:a3 in the new
    embeddings; set ignore_exact to skip them.

    Returns (source Embeddings, target Embeddings, restricted Mapping).
    """
    source_oov = defaultdict(lambda: 0)
    target_oov = defaultdict(lambda: 0)
    w_exact = defaultdict(lambda: 0)  # case-insensitive identical pairs
    source_words = []
    target_words = []
    map_ = mapping.map
    # Iterate keys directly (portable; the Py2-only iteritems() bound an
    # id that was never used).
    for w in self.word_id:
        if w not in map_:
            source_oov[w] += 1
            continue
        if map_[w] not in self.word_id:
            target_oov[map_[w]] += 1
            continue
        if w.lower() == map_[w].lower():
            w_exact[w] += 1
            if ignore_exact:
                continue
        source_words.append(w)
        target_words.append(map_[w])
    logging.debug("We could not find {} source words in our dictionary.".format(
        len(source_oov)))
    logging.debug("We could not find {} target words in our target words.".format(
        len(target_oov)))
    logging.debug("{} words are exact between languages".format(len(w_exact)))
    logging.debug("We found {} pairs of words valid for testing.".format(
        len(source_words)))
    new_s_vectors = asarray([self.vectors[self.word_id[w]] for w in source_words])
    source = Embeddings(vectors=new_s_vectors, words=source_words,
                        lang=mapping.s_lang)
    new_t_vectors = asarray([self.vectors[self.word_id[w]] for w in target_words])
    target = Embeddings(vectors=new_t_vectors, words=target_words,
                        lang=mapping.t_lang)
    new_mapping = Mapping(source=mapping.s_lang, target=mapping.t_lang)
    new_mapping.map = dict(zip(source.words, target.words))
    return (source, target, new_mapping)
def common(self, other):
    """Find common terms between languages.

    The post condition is that both returned embeddings' vocabularies are
    in the same order.
    """
    common_words = [w for w in self.word_id if w in other.word_id]
    new_self_vectors = []
    new_other_vectors = []
    for word in common_words:
        new_self_vectors.append(self.vectors[self.word_id[word]])
        new_other_vectors.append(other.vectors[other.word_id[word]])
    new_self = Embeddings(vectors=asarray(new_self_vectors),
                          words=common_words, lang=self.lang)
    # BUG FIX: the second embeddings holds `other`'s vectors, so tag it
    # with other.lang (the original reused self.lang).
    new_other = Embeddings(vectors=asarray(new_other_vectors),
                           words=common_words, lang=other.lang)
    return (new_self, new_other)
class Word2VecEmbeddings(Embeddings):
    """Word2vec text-format embeddings reader."""

    def read_file(self, limit=-1):
        """Populate ``word_id`` and ``vectors`` from ``self.filename``.

        The file is expected in the word2vec text format: a header line
        "<word count> <vector size>" followed by one "<word> <floats...>"
        line per word.  Malformed lines are logged and skipped.  When
        ``limit`` is non-negative, reading stops at that line index.
        """
        words = []
        embeddings = []
        with open(self.filename, 'rb') as f:
            words_number, size = [int(x) for x in f.readline().strip().split()][:2]
            for i, line in enumerate(f):
                try:
                    ws = line.decode('utf-8').strip().split()
                    # Tokens may contain spaces: everything before the last
                    # `size` fields belongs to the word itself.
                    word = ' '.join(ws[:-size])
                    vector = [float(x) for x in ws[-size:]]
                except Exception as exc:
                    # Py3-compatible except clause (was `except Exception, e`
                    # with bare prints).  The whole line is parsed before
                    # appending so `words` and `embeddings` never desync.
                    logging.warning("Skipping malformed line %d (%r): %s",
                                    i, line, exc)
                    continue
                words.append(word)
                embeddings.append(vector)
                if i == limit:
                    break
        self.word_id = {w: i for i, w in enumerate(words)}
        self.vectors = asarray(embeddings)
        assert len(self.word_id) == self.vectors.shape[0]
class Evaluator(object):
    """Evaluator of the alignment between two languages."""

    def __init__(self, source_embeddings, target_embeddings, metric='l2', k=5):
        self.metric = metric  # 'cosine' selects dot-product kNN, else L2
        self.source_embeddings = source_embeddings
        self.target_embeddings = target_embeddings
        self.k = k  # neighbourhood size for precision@k
        self.row_normalize = True
        self.col_normalize = False

    @staticmethod
    def cosine_knn(vectors, point, k):
        """Top-k rows of `vectors` by dot product with `point`.

        Returns (scores, [indices]); the indices are wrapped in a list to
        mimic the (1, k)-shaped result of sklearn's kneighbors().
        """
        distances = numpy.dot(vectors, point)
        indices = list(reversed(distances.argsort()))[:k]
        return distances[indices], [indices]

    def norm(self, vectors):
        """Row- and/or column-normalize `vectors` according to the flags."""
        out = vectors
        if self.row_normalize:
            norms = (vectors ** 2).sum(axis=1) ** 0.5
            out = (vectors.T / norms).T
        if self.col_normalize:
            norms = (vectors ** 2).sum(axis=0) ** 0.5
            norms[norms == 0] = 1
            out = vectors / norms
        return out

    def precision_at_k(self, test_pairs):
        """Dispatch precision@k on the configured metric."""
        if self.metric == 'cosine':
            return self.precision_at_k_cosine(test_pairs)
        return self.precision_at_k_l2(test_pairs)

    def precision_at_k_l2(self, test_pairs):
        """Precision@k using Euclidean nearest neighbours in target space."""
        t_knn = NearestNeighbors(n_neighbors=self.k, algorithm='ball_tree', p=2)
        t_knn.fit(self.target_embeddings.vectors)
        right = 0
        for s, t in test_pairs:
            assert s == t
            point = self.source_embeddings.vectors[self.source_embeddings.word_id[s]]
            # Reshape to a (1, n_features) query: modern sklearn rejects
            # 1-D arrays; the result is identical on older versions.
            distances, indices = t_knn.kneighbors(point.reshape(1, -1))
            t_words = [self.target_embeddings.id_word[i] for i in indices[0]]
            # Strip the trailing "_tag" suffix before comparing surface forms.
            t = t.rsplit('_', 1)[0]
            t_words = [x.rsplit('_', 1)[0] for x in t_words]
            line = u"{: <20}{:<20}{:<50}".format(s, t, u' '.join(t_words))
            logging.debug(line.encode('utf-8'))
            if t in t_words:
                right += 1
        return right / float(len(test_pairs))

    def precision_at_k_cosine(self, test_pairs):
        """Precision@k using dot products against normalized target rows."""
        s_vectors = self.norm(self.source_embeddings.vectors)
        t_vectors = self.norm(self.target_embeddings.vectors)
        right = 0
        for s, t in test_pairs:
            # NOTE(review): the query is taken from the *unnormalized*
            # source vectors even though s_vectors was just computed --
            # looks unintentional; kept for behavioural compatibility.
            point = self.source_embeddings.vectors[self.source_embeddings.word_id[s]]
            distances, indices = Evaluator.cosine_knn(t_vectors, point, self.k)
            t_words = [self.target_embeddings.id_word[i] for i in indices[0]]
            t = t.rsplit('_', 1)[0]
            t_words = [x.rsplit('_', 1)[0] for x in t_words]
            line = u"{: <20}{:<20}{:<50}".format(s, t, u' '.join(t_words))
            logging.debug(line.encode('utf-8'))
            if t in t_words:
                right += 1
        return right / float(len(test_pairs))

    def evaluate(self, mapping, operation, training_segment, test_segment):
        """Train `operation` on one dictionary segment, apply it to another.

        NOTE(review): the transformed embeddings (s_new, t_new) are
        currently discarded and the method always returns None --
        presumably a precision computation on them is still TODO.
        """
        (s_train, t_train) = self.source_embeddings.words_translations(
            self.target_embeddings, mapping, training_segment)
        (s_test, t_test) = self.source_embeddings.words_translations(
            self.target_embeddings, mapping, test_segment)
        s_train.vectors = self.norm(s_train.vectors)
        t_train.vectors = self.norm(t_train.vectors)
        s_test.vectors = self.norm(s_test.vectors)
        t_test.vectors = self.norm(t_test.vectors)
        if set(s_train.words).intersection(set(s_test.words)):
            print (u"Train and test words are overlapping")
        s_new, t_new = operation((s_train, t_train), (s_test, t_test))
        return None
def linear_regression(train_embeddings, test_embeddings):
    """Fit a linear map from source to target vectors; apply it to the test set.

    Persists the fitted model to the module-level `reg_model` path and
    returns (transformed source test Embeddings, untouched target test
    Embeddings).
    """
    global reg_model
    s_embeddings, t_embeddings = train_embeddings
    s_test, t_test = test_embeddings
    reg = LinearRegression()
    reg.fit(s_embeddings.vectors, t_embeddings.vectors)
    # Context manager so the pickle file handle is always closed (the
    # original leaked the handle returned by open()).
    with open(reg_model, 'wb') as model_file:
        pickle.dump(reg, model_file)
    s = Embeddings(vectors=reg.predict(s_test.vectors),
                   words=s_test.words, lang=s_embeddings.lang)
    return s, t_test
def local_linear_regression(train_embeddings, test_embeddings):
    """Fit a local (k-NN based) linear regression and persist it.

    Only trains and pickles the model (to the module-level `reg_model`
    path); returns (None, None) like the original.
    """
    global reg_model
    # Py2/3-portable formatting instead of the Py2-only print statement.
    print("Using local linear regression with k = {}".format(K_NN))
    s_embeddings, t_embeddings = train_embeddings
    s_test, t_test = test_embeddings
    reg = LocalLinearRegression(k_nn=K_NN)
    reg.fit(s_embeddings.vectors, t_embeddings.vectors)
    # Context manager fixes the leaked file handle of the original.
    with open(reg_model, 'wb') as model_file:
        pickle.dump(reg, model_file)
    return None, None
def identity(train_vectors, all_vectors):
    """No-op transformation: ignore the training set, return *all_vectors*."""
    return all_vectors
def evaluate_word2vec(sl, tl, source_file, target_file, method):
    """Load two word2vec models, align their vocabularies and evaluate.

    sl/tl: language tags for the source/target embeddings.
    method: 'linear' or 'locallinear' alignment regression.
    """
    # print() calls (single string argument) are Py2/3-portable, unlike
    # the original Py2-only print statements.
    print("Proceeding to load embeddings")
    s_ = Word2VecEmbeddings(lang=sl, filename=source_file)
    t_ = Word2VecEmbeddings(lang=tl, filename=target_file)
    print("Loaded word embeddings")
    mapping = IdentityTranslations(source=sl, target=tl, se=s_, te=t_)
    print("Mapping done")
    s, t = s_.get_common(t_, mapping)
    print("Common vocab done")
    evaluator = Evaluator(source_embeddings=s, target_embeddings=t, metric='l2')
    print("Evaluator constructed")
    assert s.vectors.shape == t.vectors.shape
    print("Evaluating")
    # Train and test on the full common segment (as in the original).
    segment = (0, s.vectors.shape[0])
    if method == 'linear':
        evaluator.evaluate(mapping, linear_regression, segment, segment)
    elif method == 'locallinear':
        evaluator.evaluate(mapping, local_linear_regression, segment, segment)
def main(args):
    """Dispatch to the requested alignment method using parsed CLI args.

    Sets the module-level `reg_model` output path (and `K_NN` for the
    local method) before running the evaluation.
    """
    global reg_model
    global K_NN
    reg_model = args.filename
    if args.method == 'linear':
        evaluate_word2vec('old', 'new', args.old_model, args.new_model,
                          'linear')
    elif args.method == 'locallinear':
        K_NN = int(args.knn_val)
        evaluate_word2vec('old', 'new', args.old_model, args.new_model,
                          'locallinear')
if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("-f", "--file", dest="filename", help="Input file")
    parser.add_argument("-o", "--old_model", dest="old_model", help="old model")
    parser.add_argument("-n", "--new_model", dest="new_model", help="new model")
    parser.add_argument("-k", "--knn", dest="knn_val", default=1000, type=int,
                        help="K in KNN for local linear regression")
    parser.add_argument("-m", "--method", dest="method", help="method")
    parser.add_argument("-l", "--log", dest="log", help="log verbosity level",
                        default="INFO")
    args = parser.parse_args()
    if args.log == 'DEBUG':
        sys.excepthook = debug
    numeric_level = getattr(logging, args.log.upper(), None)
    if not isinstance(numeric_level, int):
        # Fail fast on a typo such as "-l DEBG" instead of silently
        # passing level=None to basicConfig.
        raise ValueError("Invalid log level: {}".format(args.log))
    logging.basicConfig(level=numeric_level, format=LOGFORMAT)
    main(args)
| bsd-3-clause |
wildux/moras | MORAS.py | 1 | 6894 | import cv2
import numpy as np
from matplotlib import pyplot as plt
# Feature *detector* ids (the `alg` argument of point_selection).
_SIFT = 0
_SURF = 1
_ORB = 2
_BRISK = 3
_HARRIS = 5
_SHI_TOMASI = 6
_FAST = 7
_STAR = 8
_MSER = 9
# Feature *descriptor* ids (the `alg`/`fe` argument of feature_extraction
# and matching).  NOTE(review): these overlap the detector ids above
# (_BRIEF == _HARRIS == 5, _LATCH == _SHI_TOMASI == 6, ...); this is only
# safe because the two groups are passed to different parameters -- confirm
# before mixing them.
_BRIEF = 5
_LATCH = 6
_FREAK = 7
_DAISY = 8
# Minimum number of ratio-test survivors before a homography is attempted.
MIN_MATCH_COUNT = 10
#TODO: Make this local params
refPt = [] # List of reference points (crop rectangle corners)
cropping = False # Cropping being performed (mouse drag in progress)
sel_rect_endpoint = []
img = None
def click_and_crop(event, x, y, flags, param):
    """Mouse callback that records a rectangular region of interest.

    Updates the module-level crop state: a left-button press anchors the
    rectangle, releasing the button stores the opposite corner and draws
    the selection, and mouse movement while dragging tracks the current
    corner so selectROI can render a live preview.
    """
    global refPt, cropping, sel_rect_endpoint, img  # shared crop state
    if event == cv2.EVENT_LBUTTONDOWN:
        # Start of the drag: remember the anchor corner.
        cropping = True
        refPt = [(x, y)]
    elif event == cv2.EVENT_LBUTTONUP:
        # End of the drag: store the opposite corner and show the result.
        cropping = False
        refPt.append((x, y))
        clone = img.copy()
        cv2.rectangle(clone, refPt[0], refPt[1], (0, 255, 255), 2)
        cv2.imshow("image", clone)
    elif event == cv2.EVENT_MOUSEMOVE and cropping:
        sel_rect_endpoint = [(x, y)]
def selectROI(image):
    """Interactively crop *image* with the mouse; press 'c' to confirm.

    Returns the cropped image, or the full image when no rectangle was
    drawn.  Uses the module-level state shared with click_and_crop.
    """
    global img, refPt, sel_rect_endpoint
    img = image
    cv2.namedWindow("image")
    cv2.setMouseCallback("image", click_and_crop)
    cv2.imshow('image', img)
    while True:
        if not cropping:
            sel_rect_endpoint = []
        elif cropping and sel_rect_endpoint:
            # Live preview of the rectangle while the user drags.
            clone = img.copy()
            cv2.rectangle(clone, refPt[0], sel_rect_endpoint[0], (0, 255, 0), 1)
            cv2.imshow('image', clone)
        if (cv2.waitKey(1) & 0xFF) == ord("c"):
            break
    cv2.destroyAllWindows()
    if len(refPt) == 2:
        # Numpy slicing is [row, col] == [y, x].
        img = img[refPt[0][1]:refPt[1][1], refPt[0][0]:refPt[1][0]]
    return img
def imgPrep(image):
    """Downscale *image* by half and return (colour, grayscale) versions."""
    img = cv2.resize(image, (0, 0), fx=0.5, fy=0.5)
    #img = cv2.GaussianBlur(img,(3,3),0)
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return img, imgGray
def point_selection(gray, alg=_HARRIS):
    """Detect keypoints in a grayscale image with the selected algorithm.

    gray: single-channel image.
    alg:  one of the detector ids (_SIFT, _SURF, _HARRIS, _FAST, _STAR,
          _SHI_TOMASI, _ORB, _BRISK, _MSER).
    Returns a list of cv2.KeyPoint (empty for unknown ids).
    """
    kp = []
    # The original mixed a stand-alone `if` with an elif chain; the ids in
    # this group are unique, so a single elif chain is equivalent.
    if alg == _SIFT:
        sift = cv2.xfeatures2d.SIFT_create(sigma=1.4)
        kp = sift.detect(gray, None)
    elif alg == _SURF:
        surf = cv2.xfeatures2d.SURF_create()
        kp = surf.detect(gray, None)
    elif alg == _HARRIS:
        # Multi-scale Harris: detect on a 5-level pyramid and map corners
        # back to full-resolution coordinates.
        G = gray.copy()
        for i in range(5):
            if i != 0:
                G = cv2.pyrDown(G)
            scale = 2 ** i
            corners = cv2.goodFeaturesToTrack(image=G, maxCorners=1000,
                                              qualityLevel=0.01,
                                              minDistance=scale,
                                              useHarrisDetector=1, k=0.04)
            corners = np.int0(corners)
            for corner in corners:
                x, y = corner.ravel()
                kp.append(cv2.KeyPoint(x * scale, y * scale, scale))
    elif alg == _FAST:
        fast = cv2.FastFeatureDetector_create()
        kp = fast.detect(gray, None)
    elif alg == _STAR:
        star = cv2.xfeatures2d.StarDetector_create()
        kp = star.detect(gray, None)
    elif alg == _SHI_TOMASI:
        corners = cv2.goodFeaturesToTrack(gray, 100000, 0.008, 1)
        corners = np.int0(corners)
        for corner in corners:
            x, y = corner.ravel()
            kp.append(cv2.KeyPoint(x, y, 10))  # fixed keypoint size
    elif alg == _ORB:
        orb = cv2.ORB_create(nfeatures=2500, nlevels=8, edgeThreshold=8,
                             patchSize=8, fastThreshold=5)
        kp = orb.detect(gray, None)
    elif alg == _BRISK:
        brisk = cv2.BRISK_create(thresh=8, octaves=4)
        kp = brisk.detect(gray, None)
    elif alg == _MSER:
        mser = cv2.MSER_create()
        kp = mser.detect(gray, None)
    return kp
def feature_extraction(image, kp, alg=_SIFT):
    """Compute descriptors for the given keypoints.

    image: grayscale image; kp: keypoints from point_selection;
    alg:   descriptor id (_SIFT, _SURF, _ORB, _BRIEF, _LATCH, _BRISK,
           _FREAK, _DAISY).
    Returns (keypoints, descriptors); extractors may drop keypoints.
    """
    des = []
    if alg == _SIFT:
        extractor = cv2.xfeatures2d.SIFT_create(sigma=1.4)
        kp, des = extractor.compute(image, kp)
    elif alg == _SURF:
        extractor = cv2.xfeatures2d.SURF_create()  # default hessian threshold
        kp, des = extractor.compute(image, kp)
    elif alg == _ORB:
        extractor = cv2.ORB_create(nfeatures=2500, nlevels=8, edgeThreshold=8,
                                   patchSize=8, fastThreshold=5)
        kp, des = extractor.compute(image, kp)
    elif alg == _BRIEF:
        extractor = cv2.BriefDescriptorExtractor_create()
        kp, des = extractor.compute(image, kp)
    elif alg == _LATCH:
        extractor = cv2.xfeatures2d.LATCH_create()
        kp, des = extractor.compute(image, kp)
    elif alg == _BRISK:
        extractor = cv2.BRISK_create(thresh=8, octaves=4)
        kp, des = extractor.compute(image, kp)
    elif alg == _FREAK:
        extractor = cv2.xfeatures2d.FREAK_create()
        kp, des = extractor.compute(image, kp)
    elif alg == _DAISY:
        extractor = cv2.xfeatures2d.DAISY_create()
        kp, des = extractor.compute(image, kp)
    return kp, des
def matching(img1, img2, des1, des2, kp1, kp2, fe=_SIFT, test=False):
    """Match descriptors between two images and locate img1 inside img2.

    Returns (x, y, visualization), where (x, y) is the centre of the
    detected quadrilateral in img2 coordinates, or (-1, -1) when no
    reliable match was found.  With test=True, the number of good matches
    is appended to the returned tuple.
    """
    if fe == _LATCH or fe == _ORB or fe == _BRISK:
        bf = cv2.BFMatcher(cv2.NORM_HAMMING)  # binary descriptors
    else:
        bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    x = -1
    y = -1
    good = []  # Good matches; Lowe's ratio test
    for pair in matches:
        # knnMatch may return fewer than 2 neighbours for some
        # descriptors; guard before unpacking (the original crashed here).
        if len(pair) < 2:
            continue
        m, n = pair
        if m.distance < 0.75 * n.distance:
            good.append(m)
    img2C = img2.copy()
    matchesMask = None
    if len(good) >= MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        if M is None:
            # RANSAC can fail even with enough matches; report "no match"
            # instead of crashing in perspectiveTransform.
            print("Homography estimation failed with", len(good), "matches")
        else:
            matchesMask = mask.ravel().tolist()
            h, w, _ = img1.shape
            pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1],
                              [w - 1, 0]]).reshape(-1, 1, 2)
            dst = cv2.perspectiveTransform(pts, M)
            img2C = cv2.polylines(img2C, [np.int32(dst)], True, 255, 3,
                                  cv2.LINE_AA)
            # Centre of the projected quadrilateral.
            corners = [np.int32(dst)[i].ravel() for i in range(4)]
            x = (corners[0][0] + corners[1][0] + corners[2][0] + corners[3][0]) / 4
            y = (corners[0][1] + corners[1][1] + corners[2][1] + corners[3][1]) / 4
    else:
        print("Not enough matches found", len(good), "of", MIN_MATCH_COUNT)
    draw_params = dict(matchColor=(0, 255, 0), singlePointColor=None,
                       matchesMask=matchesMask, flags=2)
    img3 = cv2.drawMatches(img1, kp1, img2C, kp2, good, None, **draw_params)
    if test:
        return x, y, img3, len(good)
    return x, y, img3
def matching2(img1, img2, des1, des2, kp1, kp2, fe):
    """Return the Lowe-ratio-filtered matches between two descriptor sets."""
    if fe == _LATCH or fe == _ORB or fe == _BRISK:
        bf = cv2.BFMatcher(cv2.NORM_HAMMING)  # binary descriptors
    else:
        bf = cv2.BFMatcher()
    matches = bf.knnMatch(des1, des2, k=2)
    good = []  # Good matches; Lowe's ratio test
    for pair in matches:
        # Guard: knnMatch can yield groups with fewer than 2 entries.
        if len(pair) < 2:
            continue
        m, n = pair
        if m.distance < 0.75 * n.distance:
            good.append(m)
    return good
def getResult(img1, img2, alg1=_HARRIS, alg2=_SIFT):
    """Full pipeline: prep, detect (alg1), describe (alg2), match.

    Returns (visualization image, x, y) with (x, y) the detected centre
    of img1 inside img2, or (-1, -1) when matching failed.
    """
    img1, img1Gray = imgPrep(img1)
    img2, img2Gray = imgPrep(img2)
    kp1 = point_selection(img1Gray, alg1)
    kp2 = point_selection(img2Gray, alg1)
    kp1, des1 = feature_extraction(img1Gray, kp1, alg2)
    kp2, des2 = feature_extraction(img2Gray, kp2, alg2)
    x, y, img3 = matching(img1, img2, des1, des2, kp1, kp2, alg2)
    return img3, x, y
def getAngle(aV, w, x):
    """Map a horizontal pixel position to a signed angle from image centre.

    aV: horizontal field of view (caller's angle units).
    w:  image width in pixels.
    x:  detected x position, or -1 when there was no detection.

    Returns the offset angle in the same units as aV (0 for no detection).
    """
    if x == -1:
        return 0
    return (aV * x / w) - (aV / 2)
#gray2 = np.float32(gray)
#dst = cv2.cornerHarris(gray2,2,3,0.04)
#dst = cv2.dilate(dst,None)
#roi[dst>0.01*dst.max()]=[0,0,255]
| gpl-3.0 |
jpzk/evopy | evopy/examples/experiments/fitness_cmaessvc/setup.py | 1 | 4837 | '''
This file is part of evopy.
Copyright 2012 - 2013, Jendrik Poloczek
evopy is free software: you can redistribute it
and/or modify it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
evopy is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along with
evopy. If not, see <http://www.gnu.org/licenses/>.
'''
from sys import path
path.append("../../../..")
from copy import deepcopy
from numpy import matrix, log10
from evopy.strategies.cmaes import CMAES
from evopy.strategies.cmaes_svc import CMAESSVC
from evopy.simulators.simulator import Simulator
from evopy.problems.sphere_problem_origin_r1 import SphereProblemOriginR1
from evopy.problems.sphere_problem_origin_r2 import SphereProblemOriginR2
from evopy.problems.schwefels_problem_26 import SchwefelsProblem26
from evopy.problems.tr_problem import TRProblem
from evopy.metamodel.cma_svc_linear_meta_model import CMASVCLinearMetaModel
from sklearn.cross_validation import KFold
from evopy.operators.scaling.scaling_standardscore import ScalingStandardscore
from evopy.operators.scaling.scaling_dummy import ScalingDummy
from evopy.metamodel.cv.svc_cv_sklearn_grid_linear import SVCCVSkGridLinear
from evopy.operators.termination.or_combinator import ORCombinator
from evopy.operators.termination.accuracy import Accuracy
from evopy.operators.termination.generations import Generations
from evopy.operators.termination.convergence import Convergence
def get_method_SphereProblemR1_svc():
    """Build a CMA-ES+SVC optimizer configured for SphereProblemOriginR1."""
    sklearn_cv = SVCCVSkGridLinear(
        C_range=[2 ** i for i in range(-3, 14, 2)],
        cv_method=KFold(20, 5))
    meta_model = CMASVCLinearMetaModel(
        window_size=10,
        scaling=ScalingStandardscore(),
        crossvalidation=sklearn_cv,
        repair_mode='none')
    return CMAESSVC(
        mu=15,
        lambd=100,
        xmean=matrix([[10.0, 10.0]]),
        sigma=4.5,
        beta=0.80,
        meta_model=meta_model)
def get_method_SphereProblemR2_svc():
    """Build a CMA-ES+SVC optimizer configured for SphereProblemOriginR2."""
    sklearn_cv = SVCCVSkGridLinear(
        C_range=[2 ** i for i in range(-3, 14, 2)],
        cv_method=KFold(20, 5))
    meta_model = CMASVCLinearMetaModel(
        window_size=10,
        scaling=ScalingStandardscore(),
        crossvalidation=sklearn_cv,
        repair_mode='none')
    return CMAESSVC(
        mu=15,
        lambd=100,
        xmean=matrix([[10.0, 10.0]]),
        sigma=4.5,
        beta=0.80,
        meta_model=meta_model)
def get_method_TR_svc():
    """Build a CMA-ES+SVC optimizer configured for the TR problem."""
    sklearn_cv = SVCCVSkGridLinear(
        C_range=[2 ** i for i in range(-3, 14, 2)],
        cv_method=KFold(20, 5))
    meta_model = CMASVCLinearMetaModel(
        window_size=10,
        scaling=ScalingStandardscore(),
        crossvalidation=sklearn_cv,
        repair_mode='none')
    return CMAESSVC(
        mu=15,
        lambd=100,
        xmean=matrix([[10.0, 10.0]]),
        sigma=4.5,
        beta=0.80,
        meta_model=meta_model)
def get_method_Schwefel26_svc():
    """Build a CMA-ES+SVC optimizer configured for Schwefel's problem 2.6.

    Uses a wider start point and step size than the sphere/TR variants
    because of the problem's larger search domain.
    """
    sklearn_cv = SVCCVSkGridLinear(
        C_range=[2 ** i for i in range(-3, 14, 2)],
        cv_method=KFold(20, 5))
    meta_model = CMASVCLinearMetaModel(
        window_size=10,
        scaling=ScalingStandardscore(),
        crossvalidation=sklearn_cv,
        repair_mode='none')
    return CMAESSVC(
        mu=15,
        lambd=100,
        xmean=matrix([[100.0, 100.0]]),
        sigma=36.0,
        beta=0.80,
        meta_model=meta_model)
def create_problem_optimizer_map(typeofelements):
    """Return a {problem: {optimizer factory: copy of typeofelements}} map.

    Each entry receives its own deepcopy so the per-problem containers do
    not share mutable state.
    """
    t = typeofelements
    return {
        TRProblem: {get_method_TR_svc: deepcopy(t)},
        SphereProblemOriginR1: {get_method_SphereProblemR1_svc: deepcopy(t)},
        SphereProblemOriginR2: {get_method_SphereProblemR2_svc: deepcopy(t)},
        SchwefelsProblem26: {get_method_Schwefel26_svc: deepcopy(t)}}
# Number of independent runs per (problem, optimizer) pair.
samples = 100
# Every run stops after a fixed budget of 200 generations.
termination = Generations(200)
# Benchmark problems under study.
problems = [TRProblem, SphereProblemOriginR1,\
SphereProblemOriginR2, SchwefelsProblem26]
# One optimizer factory per problem (see the get_method_* builders above).
optimizers = {\
TRProblem: [get_method_TR_svc],
SphereProblemOriginR1: [get_method_SphereProblemR1_svc],
SphereProblemOriginR2: [get_method_SphereProblemR2_svc],
SchwefelsProblem26: [get_method_Schwefel26_svc]
}
# Per-problem simulator instances, filled in by the experiment driver.
simulators = {\
TRProblem: {},
SphereProblemOriginR1: {},
SphereProblemOriginR2: {},
SchwefelsProblem26: {}
}
# Per-(problem, optimizer) fitness traces; starts as empty lists.
fitnesses = create_problem_optimizer_map([])
| gpl-3.0 |
sgiavasis/nipype | nipype/algorithms/rapidart.py | 10 | 31087 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The rapidart module provides routines for artifact detection and region of
interest analysis.
These functions include:
* ArtifactDetect: performs artifact detection on functional images
* StimulusCorrelation: determines correlation between stimuli
schedule and movement/intensity parameters
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import division
from builtins import range
import os
from copy import deepcopy
from nibabel import load, funcs, Nifti1Image
import numpy as np
from scipy import signal
import scipy.io as sio
from ..external.six import string_types
from ..interfaces.base import (BaseInterface, traits, InputMultiPath,
OutputMultiPath, TraitedSpec, File,
BaseInterfaceInputSpec, isdefined)
from ..utils.filemanip import filename_to_list, save_json, split_filename
from ..utils.misc import find_indices
from .. import logging, config
iflogger = logging.getLogger('interface')
def _get_affine_matrix(params, source):
"""Return affine matrix given a set of translation and rotation parameters
params : np.array (upto 12 long) in native package format
source : the package that generated the parameters
supports SPM, AFNI, FSFAST, FSL, NIPY
"""
if source == 'FSL':
params = params[[3, 4, 5, 0, 1, 2]]
elif source in ('AFNI', 'FSFAST'):
params = params[np.asarray([4, 5, 3, 1, 2, 0]) + (len(params) > 6)]
params[3:] = params[3:] * np.pi / 180.
if source == 'NIPY':
# nipy does not store typical euler angles, use nipy to convert
from nipy.algorithms.registration import to_matrix44
return to_matrix44(params)
# process for FSL, SPM, AFNI and FSFAST
rotfunc = lambda x: np.array([[np.cos(x), np.sin(x)],
[-np.sin(x), np.cos(x)]])
q = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0])
if len(params) < 12:
params = np.hstack((params, q[len(params):]))
params.shape = (len(params),)
# Translation
T = np.eye(4)
T[0:3, -1] = params[0:3]
# Rotation
Rx = np.eye(4)
Rx[1:3, 1:3] = rotfunc(params[3])
Ry = np.eye(4)
Ry[(0, 0, 2, 2), (0, 2, 0, 2)] = rotfunc(params[4]).ravel()
Rz = np.eye(4)
Rz[0:2, 0:2] = rotfunc(params[5])
# Scaling
S = np.eye(4)
S[0:3, 0:3] = np.diag(params[6:9])
# Shear
Sh = np.eye(4)
Sh[(0, 0, 1), (1, 2, 2)] = params[9:12]
if source in ('AFNI', 'FSFAST'):
return np.dot(T, np.dot(Ry, np.dot(Rx, np.dot(Rz, np.dot(S, Sh)))))
return np.dot(T, np.dot(Rx, np.dot(Ry, np.dot(Rz, np.dot(S, Sh)))))
def _calc_norm(mc, use_differences, source, brain_pts=None):
    """Calculates the maximum overall displacement of the midpoints
    of the faces of a cube due to translation and rotation.

    Parameters
    ----------
    mc : motion parameter estimates
        [3 translation, 3 rotation (radians)]
    use_differences : boolean
        whether to use scan-to-scan differences rather than detrended values
    source : package that generated the motion parameters
        (see _get_affine_matrix)
    brain_pts : [4 x n_points] of coordinates

    Returns
    -------
    norm : at each time point
    displacement : euclidean distance (mm) of displacement at each coordinate
    """
    if brain_pts is None:
        # Default: midpoints of the faces of a bounding box around the head.
        respos = np.diag([70, 70, 75])
        resneg = np.diag([-70, -110, -45])
        all_pts = np.vstack((np.hstack((respos, resneg)), np.ones((1, 6))))
        displacement = None
    else:
        all_pts = brain_pts
    n_pts = all_pts.size - all_pts.shape[1]
    newpos = np.zeros((mc.shape[0], n_pts))
    if brain_pts is not None:
        displacement = np.zeros((mc.shape[0], int(n_pts / 3)))
    for i in range(mc.shape[0]):
        affine = _get_affine_matrix(mc[i, :], source)
        newpos[i, :] = np.dot(affine, all_pts)[0:3, :].ravel()
        if brain_pts is not None:
            displacement[i, :] = np.sqrt(np.sum(
                np.power(np.reshape(newpos[i, :], (3, all_pts.shape[1])) -
                         all_pts[0:3, :], 2),
                axis=0))
    # np.savez('displacement.npz', newpos=newpos, pts=all_pts)
    normdata = np.zeros(mc.shape[0])
    if use_differences:
        # Prepend a zero row so the difference series keeps its length.
        newpos = np.concatenate((np.zeros((1, n_pts)),
                                 np.diff(newpos, n=1, axis=0)), axis=0)
        for i in range(newpos.shape[0]):
            normdata[i] = np.max(np.sqrt(np.sum(
                np.reshape(np.power(np.abs(newpos[i, :]), 2),
                           (3, all_pts.shape[1])), axis=0)))
    else:
        newpos = np.abs(signal.detrend(newpos, axis=0, type='constant'))
        normdata = np.sqrt(np.mean(np.power(newpos, 2), axis=1))
    return normdata, displacement
def _nanmean(a, axis=None):
"""Return the mean excluding items that are nan
>>> a = [1, 2, np.nan]
>>> _nanmean(a)
1.5
"""
if axis:
return np.nansum(a, axis) / np.sum(1 - np.isnan(a), axis)
else:
return np.nansum(a) / np.sum(1 - np.isnan(a))
class ArtifactDetectInputSpec(BaseInterfaceInputSpec):
    # Input traits controlling which runs are screened and how motion /
    # intensity outliers are detected, masked and plotted.
    realigned_files = InputMultiPath(
        File(exists=True),
        desc="Names of realigned functional data files",
        mandatory=True)
    realignment_parameters = InputMultiPath(
        File(exists=True), mandatory=True,
        desc=("Names of realignment parameters"
              "corresponding to the functional data files"))
    parameter_source = traits.Enum(
        "SPM", "FSL", "AFNI", "NiPy", "FSFAST",
        desc="Source of movement parameters",
        mandatory=True)
    use_differences = traits.ListBool(
        [True, False], minlen=2, maxlen=2, usedefault=True,
        desc=("Use differences between successive motion (first element)"
              "and intensity paramter (second element) estimates in order"
              "to determine outliers. (default is [True, False])"))
    use_norm = traits.Bool(
        True, requires=['norm_threshold'],
        desc=("Uses a composite of the motion parameters in "
              "order to determine outliers."),
        usedefault=True)
    norm_threshold = traits.Float(
        desc=("Threshold to use to detect motion-rela"
              "ted outliers when composite motion is "
              "being used"),
        mandatory=True,
        xor=['rotation_threshold', 'translation_threshold'])
    rotation_threshold = traits.Float(
        mandatory=True, xor=['norm_threshold'],
        desc=("Threshold (in radians) to use to detect rotation-related "
              "outliers"))
    translation_threshold = traits.Float(
        mandatory=True, xor=['norm_threshold'],
        desc=("Threshold (in mm) to use to detect translation-related "
              "outliers"))
    zintensity_threshold = traits.Float(
        mandatory=True,
        desc=("Intensity Z-threshold use to detection images that deviate "
              "from the mean"))
    mask_type = traits.Enum(
        'spm_global', 'file', 'thresh',
        desc=("Type of mask that should be used to mask the functional "
              "data. *spm_global* uses an spm_global like calculation to "
              "determine the brain mask. *file* specifies a brain mask "
              "file (should be an image file consisting of 0s and 1s). "
              "*thresh* specifies a threshold to use. By default all voxels"
              "are used, unless one of these mask types are defined."),
        mandatory=True)
    mask_file = File(
        exists=True,
        desc="Mask file to be used if mask_type is 'file'.")
    mask_threshold = traits.Float(
        desc=("Mask threshold to be used if mask_type"
              " is 'thresh'."))
    intersect_mask = traits.Bool(
        True,
        desc=("Intersect the masks when computed from "
              "spm_global."))
    save_plot = traits.Bool(
        True, desc="save plots containing outliers",
        usedefault=True)
    plot_type = traits.Enum(
        'png', 'svg', 'eps', 'pdf',
        desc="file type of the outlier plot",
        usedefault=True)
    bound_by_brainmask = traits.Bool(
        False,
        desc=("use the brain mask to "
              "determine bounding box"
              "for composite norm (works"
              "for SPM and Nipy - currently"
              "inaccurate for FSL, AFNI"),
        usedefault=True)
    global_threshold = traits.Float(
        8.0,
        desc=("use this threshold when mask "
              "type equal's spm_global"),
        usedefault=True)
class ArtifactDetectOutputSpec(TraitedSpec):
    # One output file of each kind per functional run.
    outlier_files = OutputMultiPath(
        File(exists=True),
        desc=("One file for each functional run containing a list of "
              "0-based indices corresponding to outlier volumes"))
    intensity_files = OutputMultiPath(
        File(exists=True),
        desc=("One file for each functional run containing the global "
              "intensity values determined from the brainmask"))
    norm_files = OutputMultiPath(
        File,
        desc=("One file for each functional run containing the composite "
              "norm"))
    statistic_files = OutputMultiPath(
        File(exists=True),
        desc=("One file for each functional run containing information "
              "about the different types of artifacts and if design info is"
              " provided then details of stimulus correlated motion and a "
              "listing or artifacts by event type."))
    plot_files = OutputMultiPath(
        File,
        desc=("One image file for each functional run containing the "
              "detected outliers"))
    mask_files = OutputMultiPath(
        File,
        desc=("One image file for each functional run containing the mask"
              "used for global signal calculation"))
    displacement_files = OutputMultiPath(
        File,
        desc=("One image file for each functional run containing the voxel"
              "displacement timeseries"))
class ArtifactDetect(BaseInterface):
"""Detects outliers in a functional imaging series
Uses intensity and motion parameters to infer outliers. If `use_norm` is
True, it computes the movement of the center of each face a cuboid centered
around the head and returns the maximal movement across the centers.
Examples
--------
>>> ad = ArtifactDetect()
>>> ad.inputs.realigned_files = 'functional.nii'
>>> ad.inputs.realignment_parameters = 'functional.par'
>>> ad.inputs.parameter_source = 'FSL'
>>> ad.inputs.norm_threshold = 1
>>> ad.inputs.use_differences = [True, False]
>>> ad.inputs.zintensity_threshold = 3
>>> ad.run() # doctest: +SKIP
"""
input_spec = ArtifactDetectInputSpec
output_spec = ArtifactDetectOutputSpec
def __init__(self, **inputs):
super(ArtifactDetect, self).__init__(**inputs)
def _get_output_filenames(self, motionfile, output_dir):
"""Generate output files based on motion filenames
Parameters
----------
motionfile: file/string
Filename for motion parameter file
output_dir: string
output directory in which the files will be generated
"""
if isinstance(motionfile, string_types):
infile = motionfile
elif isinstance(motionfile, list):
infile = motionfile[0]
else:
raise Exception("Unknown type of file")
_, filename, ext = split_filename(infile)
artifactfile = os.path.join(output_dir, ''.join(('art.', filename,
'_outliers.txt')))
intensityfile = os.path.join(output_dir, ''.join(('global_intensity.',
filename, '.txt')))
statsfile = os.path.join(output_dir, ''.join(('stats.', filename,
'.txt')))
normfile = os.path.join(output_dir, ''.join(('norm.', filename,
'.txt')))
plotfile = os.path.join(output_dir, ''.join(('plot.', filename, '.',
self.inputs.plot_type)))
displacementfile = os.path.join(output_dir, ''.join(('disp.',
filename, ext)))
maskfile = os.path.join(output_dir, ''.join(('mask.', filename, ext)))
return (artifactfile, intensityfile, statsfile, normfile, plotfile,
displacementfile, maskfile)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['outlier_files'] = []
outputs['intensity_files'] = []
outputs['statistic_files'] = []
outputs['mask_files'] = []
if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
outputs['norm_files'] = []
if self.inputs.bound_by_brainmask:
outputs['displacement_files'] = []
if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
outputs['plot_files'] = []
for i, f in enumerate(filename_to_list(self.inputs.realigned_files)):
(outlierfile, intensityfile, statsfile, normfile, plotfile,
displacementfile, maskfile) = \
self._get_output_filenames(f, os.getcwd())
outputs['outlier_files'].insert(i, outlierfile)
outputs['intensity_files'].insert(i, intensityfile)
outputs['statistic_files'].insert(i, statsfile)
outputs['mask_files'].insert(i, maskfile)
if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
outputs['norm_files'].insert(i, normfile)
if self.inputs.bound_by_brainmask:
outputs['displacement_files'].insert(i, displacementfile)
if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
outputs['plot_files'].insert(i, plotfile)
return outputs
def _plot_outliers_with_wave(self, wave, outliers, name):
import matplotlib.pyplot as plt
plt.plot(wave)
plt.ylim([wave.min(), wave.max()])
plt.xlim([0, len(wave) - 1])
if len(outliers):
plt.plot(np.tile(outliers[:, None], (1, 2)).T,
np.tile([wave.min(), wave.max()], (len(outliers), 1)).T,
'r')
plt.xlabel('Scans - 0-based')
plt.ylabel(name)
def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None):
    """Detect outlier timepoints for a single run.

    Combines a global-intensity z-score criterion with a motion
    criterion (either a composite motion norm or per-axis
    translation/rotation thresholds) and writes the outlier indices,
    intensity trace, voxel mask, summary statistics and an optional
    plot into ``cwd``.

    Parameters
    ----------
    imgfile : str or list
        Realigned functional image(s) for this run.
    motionfile : str
        Text file of realignment parameters for this run.
    runidx : int
        Index of the run (not used in the computation itself).
    cwd : str, optional
        Output directory; defaults to the current working directory.
    """
    if not cwd:
        cwd = os.getcwd()

    # read in functional image
    if isinstance(imgfile, string_types):
        nim = load(imgfile)
    elif isinstance(imgfile, list):
        if len(imgfile) == 1:
            nim = load(imgfile[0])
        else:
            images = [load(f) for f in imgfile]
            nim = funcs.concat_images(images)

    # compute global intensity signal, one value per timepoint
    (x, y, z, timepoints) = nim.shape
    data = nim.get_data()
    affine = nim.affine
    g = np.zeros((timepoints, 1))
    masktype = self.inputs.mask_type
    if masktype == 'spm_global':  # spm_global like calculation
        iflogger.debug('art: using spm global')
        intersect_mask = self.inputs.intersect_mask
        if intersect_mask:
            # Single 3-D mask: voxels above the SPM-style threshold at
            # every timepoint.
            mask = np.ones((x, y, z), dtype=bool)
            for t0 in range(timepoints):
                vol = data[:, :, :, t0]
                # Use an SPM like approach
                mask_tmp = vol > \
                    (_nanmean(vol) / self.inputs.global_threshold)
                mask = mask * mask_tmp
            for t0 in range(timepoints):
                vol = data[:, :, :, t0]
                g[t0] = _nanmean(vol[mask])
            # If the intersection covers less than a tenth of the
            # volume it is implausible; fall back to per-timepoint
            # masking below.
            if len(find_indices(mask)) < (np.prod((x, y, z)) / 10):
                intersect_mask = False
                g = np.zeros((timepoints, 1))
        if not intersect_mask:
            iflogger.info('not intersect_mask is True')
            # 4-D mask: a separate threshold mask per timepoint.
            mask = np.zeros((x, y, z, timepoints))
            for t0 in range(timepoints):
                vol = data[:, :, :, t0]
                mask_tmp = vol > \
                    (_nanmean(vol) / self.inputs.global_threshold)
                mask[:, :, :, t0] = mask_tmp
                g[t0] = np.nansum(vol * mask_tmp) / np.nansum(mask_tmp)
    elif masktype == 'file':  # uses a mask image to determine intensity
        maskimg = load(self.inputs.mask_file)
        mask = maskimg.get_data()
        affine = maskimg.affine
        mask = mask > 0.5
        for t0 in range(timepoints):
            vol = data[:, :, :, t0]
            g[t0] = _nanmean(vol[mask])
    elif masktype == 'thresh':  # uses a fixed signal threshold
        for t0 in range(timepoints):
            vol = data[:, :, :, t0]
            mask = vol > self.inputs.mask_threshold
            g[t0] = _nanmean(vol[mask])
    else:
        # No masking: whole-volume mean per timepoint.
        mask = np.ones((x, y, z))
        g = _nanmean(data[mask > 0, :], 1)

    # compute normalized intensity values
    gz = signal.detrend(g, axis=0)  # detrend the signal
    if self.inputs.use_differences[1]:
        # work on scan-to-scan differences of the intensity trace
        gz = np.concatenate((np.zeros((1, 1)), np.diff(gz, n=1, axis=0)),
                            axis=0)
    gz = (gz - np.mean(gz)) / np.std(gz)  # normalize the detrended signal
    iidx = find_indices(abs(gz) > self.inputs.zintensity_threshold)

    # read in motion parameters
    mc_in = np.loadtxt(motionfile)
    mc = deepcopy(mc_in)

    (artifactfile, intensityfile, statsfile, normfile, plotfile,
     displacementfile, maskfile) = self._get_output_filenames(imgfile, cwd)
    mask_img = Nifti1Image(mask.astype(np.uint8), affine)
    mask_img.to_filename(maskfile)

    if self.inputs.use_norm:
        brain_pts = None
        if self.inputs.bound_by_brainmask:
            # World-space coordinates of in-mask voxels, homogeneous form.
            voxel_coords = np.nonzero(mask)
            coords = np.vstack((voxel_coords[0],
                                np.vstack((voxel_coords[1],
                                           voxel_coords[2])))).T
            brain_pts = np.dot(affine,
                               np.hstack((coords,
                                          np.ones((coords.shape[0], 1)))).T)
        # calculate the norm of the motion parameters
        normval, displacement = _calc_norm(mc,
                                           self.inputs.use_differences[0],
                                           self.inputs.parameter_source,
                                           brain_pts=brain_pts)
        tidx = find_indices(normval > self.inputs.norm_threshold)
        ridx = find_indices(normval < 0)
        if displacement is not None:
            # BUG FIX: ``np.float`` was a deprecated alias for the builtin
            # ``float`` and was removed in NumPy 1.20+; use ``float``.
            dmap = np.zeros((x, y, z, timepoints), dtype=float)
            for i in range(timepoints):
                dmap[voxel_coords[0],
                     voxel_coords[1],
                     voxel_coords[2], i] = displacement[i, :]
            dimg = Nifti1Image(dmap, affine)
            dimg.to_filename(displacementfile)
    else:
        if self.inputs.use_differences[0]:
            # work on scan-to-scan differences of the motion parameters
            mc = np.concatenate((np.zeros((1, 6)),
                                 np.diff(mc_in, n=1, axis=0)),
                                axis=0)
        traval = mc[:, 0:3]  # translation parameters (mm)
        rotval = mc[:, 3:6]  # rotation parameters (rad)
        tidx = find_indices(np.sum(abs(traval) >
                                   self.inputs.translation_threshold, 1) >
                            0)
        ridx = find_indices(np.sum(abs(rotval) >
                                   self.inputs.rotation_threshold, 1) > 0)

    outliers = np.unique(np.union1d(iidx, np.union1d(tidx, ridx)))

    # write output to outputfile
    np.savetxt(artifactfile, outliers, fmt='%d', delimiter=' ')
    np.savetxt(intensityfile, g, fmt='%.2f', delimiter=' ')
    if self.inputs.use_norm:
        np.savetxt(normfile, normval, fmt='%.4f', delimiter=' ')

    if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
        import matplotlib
        matplotlib.use(config.get("execution", "matplotlib_backend"))
        import matplotlib.pyplot as plt
        fig = plt.figure()
        # Two panels with a norm trace, three with raw tra/rot traces.
        if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
            plt.subplot(211)
        else:
            plt.subplot(311)
        self._plot_outliers_with_wave(gz, iidx, 'Intensity')
        if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
            plt.subplot(212)
            self._plot_outliers_with_wave(normval, np.union1d(tidx, ridx),
                                          'Norm (mm)')
        else:
            diff = ''
            if self.inputs.use_differences[0]:
                diff = 'diff'
            plt.subplot(312)
            self._plot_outliers_with_wave(traval, tidx,
                                          'Translation (mm)' + diff)
            plt.subplot(313)
            self._plot_outliers_with_wave(rotval, ridx,
                                          'Rotation (rad)' + diff)
        plt.savefig(plotfile)
        plt.close(fig)

    motion_outliers = np.union1d(tidx, ridx)
    stats = [{'motion_file': motionfile,
              'functional_file': imgfile},
             {'common_outliers': len(np.intersect1d(iidx, motion_outliers)),
              'intensity_outliers': len(np.setdiff1d(iidx,
                                                     motion_outliers)),
              'motion_outliers': len(np.setdiff1d(motion_outliers, iidx)),
              },
             {'motion': [{'using differences': self.inputs.use_differences[0]},
                         {'mean': np.mean(mc_in, axis=0).tolist(),
                          'min': np.min(mc_in, axis=0).tolist(),
                          'max': np.max(mc_in, axis=0).tolist(),
                          'std': np.std(mc_in, axis=0).tolist()},
                         ]},
             {'intensity': [{'using differences': self.inputs.use_differences[1]},
                            {'mean': np.mean(gz, axis=0).tolist(),
                             'min': np.min(gz, axis=0).tolist(),
                             'max': np.max(gz, axis=0).tolist(),
                             'std': np.std(gz, axis=0).tolist()},
                            ]},
             ]
    if self.inputs.use_norm:
        stats.insert(3, {'motion_norm':
                         {'mean': np.mean(normval, axis=0).tolist(),
                          'min': np.min(normval, axis=0).tolist(),
                          'max': np.max(normval, axis=0).tolist(),
                          'std': np.std(normval, axis=0).tolist(),
                          }})
    save_json(statsfile, stats)
def _run_interface(self, runtime):
    """Execute this module: run outlier detection once per input run."""
    func_list = filename_to_list(self.inputs.realigned_files)
    motion_list = filename_to_list(self.inputs.realignment_parameters)
    for run_idx, func_file in enumerate(func_list):
        # Motion files are matched to functional runs by position.
        self._detect_outliers_core(func_file, motion_list[run_idx],
                                   run_idx, cwd=os.getcwd())
    return runtime
class StimCorrInputSpec(BaseInterfaceInputSpec):
    # Input contract for StimulusCorrelation: realignment parameters and
    # intensity traces (one file per run) plus a pre-estimated SPM.mat.
    realignment_parameters = InputMultiPath(File(exists=True), mandatory=True,
        desc=('Names of realignment parameters corresponding to the functional '
              'data files'))
    intensity_values = InputMultiPath(File(exists=True), mandatory=True,
        desc='Name of file containing intensity values')
    spm_mat_file = File(exists=True, mandatory=True,
        desc='SPM mat file (use pre-estimate SPM.mat file)')
    # When True, sessions are concatenated into one design and per-run rows
    # are selected by cumulative offset (see StimulusCorrelation._run_interface).
    concatenated_design = traits.Bool(mandatory=True,
        desc='state if the design matrix contains concatenated sessions')
class StimCorrOutputSpec(TraitedSpec):
    # One qa.*_stimcorr.txt report per input run.
    stimcorr_files = OutputMultiPath(File(exists=True),
        desc='List of files containing correlation values')
class StimulusCorrelation(BaseInterface):
    """Determines if stimuli are correlated with motion or intensity
    parameters.

    Currently this class supports an SPM generated design matrix and requires
    intensity parameters. This implies that one must run
    :ref:`ArtifactDetect <nipype.algorithms.rapidart.ArtifactDetect>`
    and :ref:`Level1Design <nipype.interfaces.spm.model.Level1Design>` prior to running this or
    provide an SPM.mat file and intensity parameters through some other means.

    Examples
    --------

    >>> sc = StimulusCorrelation()
    >>> sc.inputs.realignment_parameters = 'functional.par'
    >>> sc.inputs.intensity_values = 'functional.rms'
    >>> sc.inputs.spm_mat_file = 'SPM.mat'
    >>> sc.inputs.concatenated_design = False
    >>> sc.run() # doctest: +SKIP

    """

    input_spec = StimCorrInputSpec
    output_spec = StimCorrOutputSpec

    def _get_output_filenames(self, motionfile, output_dir):
        """Generate output files based on motion filenames

        Parameters
        ----------
        motionfile: file/string
            Filename for motion parameter file
        output_dir: string
            output directory in which the files will be generated

        Returns
        -------
        corrfile : string
            path of the ``qa.<name>_stimcorr.txt`` report for this run
        """
        (_, filename) = os.path.split(motionfile)
        (filename, _) = os.path.splitext(filename)
        corrfile = os.path.join(output_dir, ''.join(('qa.', filename,
                                                     '_stimcorr.txt')))
        return corrfile

    def _stimcorr_core(self, motionfile, intensityfile, designmatrix, cwd=None):
        """
        Core routine for determining stimulus correlation

        Correlates every design-matrix column with each motion parameter
        and with the global intensity trace, and writes the coefficients
        to a text report in ``cwd``.
        """
        if not cwd:
            cwd = os.getcwd()
        # read in motion parameters
        mc_in = np.loadtxt(motionfile)
        g_in = np.loadtxt(intensityfile)
        g_in.shape = g_in.shape[0], 1
        dcol = designmatrix.shape[1]
        mccol = mc_in.shape[1]
        # columns: [design | motion | intensity]; correlate column-wise
        concat_matrix = np.hstack((np.hstack((designmatrix, mc_in)), g_in))
        cm = np.corrcoef(concat_matrix, rowvar=0)
        corrfile = self._get_output_filenames(motionfile, cwd)
        # write output to outputfile
        # BUG FIX: the original shadowed the builtin ``file`` and used an
        # explicit close(), leaking the handle if any write raised; a
        # context manager guarantees the file is closed.
        with open(corrfile, 'w') as fp:
            fp.write("Stats for:\n")
            fp.write("Stimulus correlated motion:\n%s\n" % motionfile)
            for i in range(dcol):
                fp.write("SCM.%d:" % i)
                # correlations of design column i with each motion column
                for v in cm[i, dcol + np.arange(mccol)]:
                    fp.write(" %.2f" % v)
                fp.write('\n')
            fp.write("Stimulus correlated intensity:\n%s\n" % intensityfile)
            for i in range(dcol):
                # last column of cm is the intensity trace
                fp.write("SCI.%d: %.2f\n" % (i, cm[i, -1]))

    def _get_spm_submatrix(self, spmmat, sessidx, rows=None):
        """Extract one session's task regressors from a loaded SPM.mat.

        Parameters
        ----------
        spmmat: scipy matlab object
            full SPM.mat file loaded into a scipy object
        sessidx: int
            index to session that needs to be extracted.
        rows : array-like, optional
            row indices to select; defaults to the session's own rows
            as recorded in the SPM structure
        """
        designmatrix = spmmat['SPM'][0][0].xX[0][0].X
        U = spmmat['SPM'][0][0].Sess[0][sessidx].U[0]
        if rows is None:
            # SPM stores 1-based indices; shift to 0-based
            rows = spmmat['SPM'][0][0].Sess[0][sessidx].row[0] - 1
        cols = spmmat['SPM'][0][0].Sess[0][sessidx].col[0][list(range(len(U)))] - 1
        outmatrix = designmatrix.take(rows.tolist(), axis=0).take(cols.tolist(),
                                                                  axis=1)
        return outmatrix

    def _run_interface(self, runtime):
        """Execute this module.
        """
        motparamlist = self.inputs.realignment_parameters
        intensityfiles = self.inputs.intensity_values
        spmmat = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False)
        nrows = []
        for i in range(len(motparamlist)):
            sessidx = i
            rows = None
            if self.inputs.concatenated_design:
                # single concatenated session: select this run's rows by
                # cumulative row offset
                sessidx = 0
                mc_in = np.loadtxt(motparamlist[i])
                rows = np.sum(nrows) + np.arange(mc_in.shape[0])
                nrows.append(mc_in.shape[0])
            matrix = self._get_spm_submatrix(spmmat, sessidx, rows)
            self._stimcorr_core(motparamlist[i], intensityfiles[i],
                                matrix, os.getcwd())
        return runtime

    def _list_outputs(self):
        """Return the stimcorr report filenames, one per run."""
        outputs = self._outputs().get()
        files = []
        for i, f in enumerate(self.inputs.realignment_parameters):
            files.insert(i, self._get_output_filenames(f, os.getcwd()))
        if files:
            outputs['stimcorr_files'] = files
        return outputs
| bsd-3-clause |
graphistry/pygraphistry | graphistry/tests/test_arrow_uploader.py | 1 | 5451 | # -*- coding: utf-8 -*-
import graphistry, mock, pandas as pd, pytest, unittest
from graphistry import ArrowUploader
#TODO mock requests for testing actual effectful code
class TestArrowUploader_Core(unittest.TestCase):
    # Construction and encoding-generation tests for ArrowUploader; no
    # network access is exercised here.

    def test_au_init_plain(self):
        # A default-constructed uploader has no credentials yet, so
        # token/dataset_id raise; data and encodings start empty.
        au = ArrowUploader()
        with pytest.raises(Exception):
            au.token
        with pytest.raises(Exception):
            au.dataset_id
        assert au.edges is None
        assert au.nodes is None
        assert au.node_encodings == {'bindings': {}}
        assert au.edge_encodings == {'bindings': {}}
        assert len(au.name) > 0
        assert not (au.metadata is None)

    def test_au_init_args(self):
        # Every constructor argument should round-trip to its attribute.
        n = pd.DataFrame({'n': []})
        e = pd.DataFrame({'e': []})
        sbp = "s"
        vbp = "v"
        name = "n"
        des = "d"
        t = "t"
        d = "d"
        ne = {"point_color": "c"}
        ee = {"edge_color": "c"}
        m = {"n": "n"}
        ce = False
        au = ArrowUploader(server_base_path=sbp, view_base_path=vbp,
                           name=name,
                           description=des,
                           edges=e, nodes=n,
                           node_encodings=ne, edge_encodings=ee,
                           token=t, dataset_id=d,
                           metadata=m,
                           certificate_validation=ce)
        assert au.server_base_path == sbp
        assert au.view_base_path == vbp
        assert au.name == name
        assert au.description == des
        # DataFrames are held by reference, not copied
        assert au.edges is e
        assert au.nodes is n
        assert au.edge_encodings == ee
        assert au.node_encodings == ne
        assert au.token == t
        assert au.dataset_id == d
        assert au.certificate_validation == ce

    def test_au_n_enc_mt(self):
        # An unbound plottable yields empty node bindings.
        g = graphistry.bind()
        au = ArrowUploader()
        assert au.g_to_node_encodings(g) == {'bindings': {}}

    def test_au_n_enc_full(self):
        # All simple point bindings map to node_* binding keys, and a
        # complex encoding lands under 'complex'/'default'.
        g = graphistry.bind(node='n',
                            point_color='c', point_size='s', point_title='t', point_label='l',
                            point_weight='w', point_opacity='o', point_icon='i', point_x='x', point_y='y')
        g = g.encode_point_color('c', ["green"], as_categorical=True)
        au = ArrowUploader()
        assert au.g_to_node_encodings(g) == {
            'bindings': {
                'node': 'n',
                'node_color': 'c',
                'node_size': 's',
                'node_title': 't',
                'node_label': 'l',
                'node_weight': 'w',
                'node_opacity': 'o',
                'node_icon': 'i',
                'node_x': 'x',
                'node_y': 'y',
            },
            'complex': {
                'default': {
                    'pointColorEncoding': {
                        'graphType': 'point',
                        'encodingType': 'color',
                        'attribute': 'c',
                        'variation': 'categorical',
                        'colors': ['green']
                    }
                }
            }
        }

    def test_au_e_enc_mt(self):
        # An unbound plottable yields empty edge bindings.
        g = graphistry.bind()
        au = ArrowUploader()
        assert au.g_to_edge_encodings(g) == {'bindings': {}}

    def test_au_e_enc_full(self):
        # All simple edge bindings map through, and a complex edge color
        # encoding lands under 'complex'/'default'.
        g = graphistry.bind(source='s', destination='d',
                            edge_color='c', edge_title='t', edge_label='l', edge_weight='w',
                            edge_opacity='o', edge_icon='i', edge_size='s', edge_source_color='sc', edge_destination_color='dc')
        g = g.encode_edge_color('c', ["green"], as_categorical=True)
        au = ArrowUploader()
        assert au.g_to_edge_encodings(g) == {
            'bindings': {
                'source': 's',
                'destination': 'd',
                'edge_color': 'c',
                'edge_title': 't',
                'edge_label': 'l',
                'edge_weight': 'w',
                'edge_opacity': 'o',
                'edge_icon': 'i',
                'edge_size': 's',
                'edge_source_color': 'sc',
                'edge_destination_color': 'dc'
            },
            'complex': {
                'default': {
                    'edgeColorEncoding': {
                        'graphType': 'edge',
                        'encodingType': 'color',
                        'attribute': 'c',
                        'variation': 'categorical',
                        'colors': ['green']
                    }
                }
            }
        }
class TestArrowUploader_Comms(unittest.TestCase):
    # Tests that exercise ArrowUploader's HTTP calls with mocked requests.

    def _mock_response(
            self,
            status=200,
            content="CONTENT",
            json_data=None,
            raise_for_status=None):
        # Build a requests-like response test double.
        mock_resp = mock.Mock()
        # mock raise_for_status call w/optional error
        mock_resp.raise_for_status = mock.Mock()
        if raise_for_status:
            mock_resp.raise_for_status.side_effect = raise_for_status
        # set status code and content
        mock_resp.status_code = status
        mock_resp.content = content
        # add json data if provided
        if json_data:
            mock_resp.json = mock.Mock(return_value=json_data)
        return mock_resp

    @mock.patch('requests.post')
    def test_login(self, mock_post):
        # login() should extract and store the token from the auth response.
        mock_resp = self._mock_response(json_data={'token': '123'})
        mock_post.return_value = mock_resp
        au = ArrowUploader()
        tok = au.login(username="u", password="p").token
        assert tok == "123"
| bsd-3-clause |
mfjb/scikit-learn | sklearn/neighbors/classification.py | 132 | 14388 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import check_array
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
                           SupervisedIntegerMixin, ClassifierMixin):
    """Classifier implementing the k-nearest neighbors vote.

    Read more in the :ref:`User Guide <classification>`.

    Parameters
    ----------
    n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`k_neighbors` queries.

    weights : str or callable
        weight function used in prediction.  Possible values:

        - 'uniform' : uniform weights.  All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.

        Uniform weights are used by default.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.

    metric : string or DistanceMetric object (default = 'minkowski')
        the distance metric to use for the tree.  The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
        metric. See the documentation of the DistanceMetric class for a
        list of available metrics.

    p : integer, optional (default = 2)
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    metric_params : dict, optional (default = None)
        Additional keyword arguments for the metric function.

    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run for neighbors search.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
        Doesn't affect :meth:`fit` method.

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import KNeighborsClassifier
    >>> neigh = KNeighborsClassifier(n_neighbors=3)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    KNeighborsClassifier(...)
    >>> print(neigh.predict([[1.1]]))
    [0]
    >>> print(neigh.predict_proba([[0.9]]))
    [[ 0.66666667 0.33333333]]

    See also
    --------
    RadiusNeighborsClassifier
    KNeighborsRegressor
    RadiusNeighborsRegressor
    NearestNeighbors

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    .. warning::

       Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances
       but different labels, the results will depend on the ordering of the
       training data.

    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, n_neighbors=5,
                 weights='uniform', algorithm='auto', leaf_size=30,
                 p=2, metric='minkowski', metric_params=None, n_jobs=1,
                 **kwargs):
        # Estimator hyperparameters are stored by the shared base-class
        # initializer; only the weight scheme is validated here.
        self._init_params(n_neighbors=n_neighbors,
                          algorithm=algorithm,
                          leaf_size=leaf_size, metric=metric, p=p,
                          metric_params=metric_params, n_jobs=n_jobs, **kwargs)
        self.weights = _check_weights(weights)

    def predict(self, X):
        """Predict the class labels for the provided data

        Parameters
        ----------
        X : array-like, shape (n_query, n_features), \
                or (n_query, n_indexed) if metric == 'precomputed'
            Test samples.

        Returns
        -------
        y : array of shape [n_samples] or [n_samples, n_outputs]
            Class labels for each data sample.
        """
        X = check_array(X, accept_sparse='csr')

        neigh_dist, neigh_ind = self.kneighbors(X)

        classes_ = self.classes_
        _y = self._y
        if not self.outputs_2d_:
            # Promote single-output targets to the multi-output layout so
            # the loop below handles both cases uniformly.
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]

        n_outputs = len(classes_)
        n_samples = X.shape[0]
        weights = _get_weights(neigh_dist, self.weights)

        y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
        for k, classes_k in enumerate(classes_):
            # Majority (or weighted-majority) vote among the k neighbors.
            if weights is None:
                mode, _ = stats.mode(_y[neigh_ind, k], axis=1)
            else:
                mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1)

            # mode holds encoded label indices; map back to class values.
            mode = np.asarray(mode.ravel(), dtype=np.intp)
            y_pred[:, k] = classes_k.take(mode)

        if not self.outputs_2d_:
            y_pred = y_pred.ravel()

        return y_pred

    def predict_proba(self, X):
        """Return probability estimates for the test data X.

        Parameters
        ----------
        X : array-like, shape (n_query, n_features), \
                or (n_query, n_indexed) if metric == 'precomputed'
            Test samples.

        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            of such arrays if n_outputs > 1.
            The class probabilities of the input samples. Classes are ordered
            by lexicographic order.
        """
        X = check_array(X, accept_sparse='csr')

        neigh_dist, neigh_ind = self.kneighbors(X)

        classes_ = self.classes_
        _y = self._y
        if not self.outputs_2d_:
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]

        n_samples = X.shape[0]

        weights = _get_weights(neigh_dist, self.weights)
        if weights is None:
            # Uniform weighting: every neighbor contributes one vote.
            weights = np.ones_like(neigh_ind)

        all_rows = np.arange(X.shape[0])
        probabilities = []
        for k, classes_k in enumerate(classes_):
            pred_labels = _y[:, k][neigh_ind]
            proba_k = np.zeros((n_samples, classes_k.size))

            # a simple ':' index doesn't work right
            for i, idx in enumerate(pred_labels.T):  # loop is O(n_neighbors)
                proba_k[all_rows, idx] += weights[:, i]

            # normalize 'votes' into real [0,1] probabilities
            normalizer = proba_k.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            proba_k /= normalizer

            probabilities.append(proba_k)

        if not self.outputs_2d_:
            probabilities = probabilities[0]

        return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
                                SupervisedIntegerMixin, ClassifierMixin):
    """Classifier implementing a vote among neighbors within a given radius

    Read more in the :ref:`User Guide <classification>`.

    Parameters
    ----------
    radius : float, optional (default = 1.0)
        Range of parameter space to use by default for
        :meth:`radius_neighbors` queries.

    weights : str or callable
        weight function used in prediction.  Possible values:

        - 'uniform' : uniform weights.  All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.

        Uniform weights are used by default.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.

    metric : string or DistanceMetric object (default='minkowski')
        the distance metric to use for the tree.  The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
        metric. See the documentation of the DistanceMetric class for a
        list of available metrics.

    p : integer, optional (default = 2)
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    outlier_label : int, optional (default = None)
        Label, which is given for outlier samples (samples with no
        neighbors on given radius).
        If set to None, ValueError is raised, when outlier is detected.

    metric_params : dict, optional (default = None)
        Additional keyword arguments for the metric function.

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import RadiusNeighborsClassifier
    >>> neigh = RadiusNeighborsClassifier(radius=1.0)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    RadiusNeighborsClassifier(...)
    >>> print(neigh.predict([[1.5]]))
    [0]

    See also
    --------
    KNeighborsClassifier
    RadiusNeighborsRegressor
    KNeighborsRegressor
    NearestNeighbors

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, radius=1.0, weights='uniform',
                 algorithm='auto', leaf_size=30, p=2, metric='minkowski',
                 outlier_label=None, metric_params=None, **kwargs):
        # Hyperparameters are stored by the shared base-class initializer;
        # the weight scheme is validated here.
        self._init_params(radius=radius,
                          algorithm=algorithm,
                          leaf_size=leaf_size,
                          metric=metric, p=p, metric_params=metric_params,
                          **kwargs)
        self.weights = _check_weights(weights)
        self.outlier_label = outlier_label

    def predict(self, X):
        """Predict the class labels for the provided data

        Parameters
        ----------
        X : array-like, shape (n_query, n_features), \
                or (n_query, n_indexed) if metric == 'precomputed'
            Test samples.

        Returns
        -------
        y : array of shape [n_samples] or [n_samples, n_outputs]
            Class labels for each data sample.

        Raises
        ------
        ValueError
            If a query point has no neighbors within ``radius`` and
            ``outlier_label`` is None.
        """
        X = check_array(X, accept_sparse='csr')
        n_samples = X.shape[0]

        neigh_dist, neigh_ind = self.radius_neighbors(X)
        # Queries with at least one neighbor vs. those with none.
        inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0]
        outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0]

        classes_ = self.classes_
        _y = self._y
        if not self.outputs_2d_:
            # Promote single-output targets to the multi-output layout so
            # the loop below handles both cases uniformly.
            _y = self._y.reshape((-1, 1))
            classes_ = [self.classes_]
        n_outputs = len(classes_)

        if self.outlier_label is not None:
            # Placeholder distance so weighting does not divide by zero;
            # outlier rows are overwritten with outlier_label below.
            neigh_dist[outliers] = 1e-6
        elif outliers:
            raise ValueError('No neighbors found for test samples %r, '
                             'you can try using larger radius, '
                             'give a label for outliers, '
                             'or consider removing them from your dataset.'
                             % outliers)

        weights = _get_weights(neigh_dist, self.weights)

        y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype)
        for k, classes_k in enumerate(classes_):
            pred_labels = np.array([_y[ind, k] for ind in neigh_ind],
                                   dtype=object)
            # BUG FIX: both branches used the removed NumPy alias ``np.int``
            # (gone since NumPy 1.20/1.24); ``np.intp`` is the platform
            # index type and matches what KNeighborsClassifier.predict uses
            # for ``take``.
            if weights is None:
                mode = np.array([stats.mode(pl)[0]
                                 for pl in pred_labels[inliers]],
                                dtype=np.intp)
            else:
                # BUG FIX: the weights must be restricted to the inlier
                # rows as well; zipping the filtered labels against the
                # unfiltered weights misaligned the pairs whenever an
                # outlier preceded an inlier.
                mode = np.array([weighted_mode(pl, w)[0]
                                 for (pl, w)
                                 in zip(pred_labels[inliers],
                                        weights[inliers])],
                                dtype=np.intp)

            mode = mode.ravel()

            y_pred[inliers, k] = classes_k.take(mode)

        if outliers:
            y_pred[outliers, :] = self.outlier_label

        if not self.outputs_2d_:
            y_pred = y_pred.ravel()

        return y_pred
| bsd-3-clause |
GuessWhoSamFoo/pandas | pandas/core/indexes/timedeltas.py | 1 | 27500 | """ implement the TimedeltaIndex """
from datetime import datetime
import warnings
import numpy as np
from pandas._libs import (
NaT, Timedelta, index as libindex, join as libjoin, lib)
import pandas.compat as compat
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.common import (
_TD_DTYPE, ensure_int64, is_float, is_integer, is_list_like, is_scalar,
is_timedelta64_dtype, is_timedelta64_ns_dtype, pandas_dtype)
import pandas.core.dtypes.concat as _concat
from pandas.core.dtypes.missing import isna
from pandas.core.accessor import delegate_names
from pandas.core.arrays import datetimelike as dtl
from pandas.core.arrays.timedeltas import TimedeltaArray, _is_convertible_to_td
from pandas.core.base import _shared_docs
import pandas.core.common as com
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core.indexes.datetimelike import (
DatetimeIndexOpsMixin, DatetimelikeDelegateMixin, maybe_unwrap_index,
wrap_arithmetic_op)
from pandas.core.indexes.numeric import Int64Index
from pandas.core.ops import get_op_result_name
from pandas.tseries.frequencies import to_offset
def _make_wrapped_arith_op(opname):
    """Build a TimedeltaIndex arithmetic method delegating to TimedeltaArray.

    The returned function unwraps Index operands, applies the array op,
    and re-wraps the result for Index semantics.
    """
    array_op = getattr(TimedeltaArray, opname)

    def wrapper(self, other):
        raw = array_op(self._data, maybe_unwrap_index(other))
        return wrap_arithmetic_op(self, other, raw)

    # Expose the delegated op's name for introspection/reprs.
    wrapper.__name__ = opname
    return wrapper
class TimedeltaDelegateMixin(DatetimelikeDelegateMixin):
    # Most attrs are dispatched via datetimelike_{ops,methods}.
    # Some are "raw" methods, meaning the result is not re-boxed in an Index.
    # We also have a few "extra" attrs, which may or may not be raw,
    # and which we don't want to expose in the .dt accessor.
    _delegate_class = TimedeltaArray
    _delegated_properties = (TimedeltaArray._datetimelike_ops + [
        'components',
    ])
    _delegated_methods = TimedeltaArray._datetimelike_methods + [
        '_box_values',
    ]
    # Results of these are returned as-is rather than wrapped in an Index.
    _raw_properties = {
        'components',
    }
    _raw_methods = {
        'to_pytimedelta',
    }
@delegate_names(TimedeltaArray,
TimedeltaDelegateMixin._delegated_properties,
typ="property")
@delegate_names(TimedeltaArray,
TimedeltaDelegateMixin._delegated_methods,
typ="method", overwrite=False)
class TimedeltaIndex(DatetimeIndexOpsMixin, dtl.TimelikeOps, Int64Index,
TimedeltaDelegateMixin):
"""
Immutable ndarray of timedelta64 data, represented internally as int64, and
which can be boxed to timedelta objects
Parameters
----------
data : array-like (1-dimensional), optional
Optional timedelta-like data to construct index with
unit : unit of the arg (D,h,m,s,ms,us,ns) denote the unit, optional
which is an integer/float number
freq : string or pandas offset object, optional
One of pandas date offset strings or corresponding objects. The string
'infer' can be passed in order to set the frequency of the index as the
inferred frequency upon creation
copy : bool
Make a copy of input ndarray
start : starting value, timedelta-like, optional
If data is None, start is used as the start point in generating regular
timedelta data.
.. deprecated:: 0.24.0
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
.. deprecated:: 0.24.0
end : end time, timedelta-like, optional
If periods is none, generated index will extend to first conforming
time on or just past end argument
.. deprecated:: 0.24. 0
closed : string or None, default None
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None)
.. deprecated:: 0.24. 0
name : object
Name to be stored in the index
Attributes
----------
days
seconds
microseconds
nanoseconds
components
inferred_freq
Methods
-------
to_pytimedelta
to_series
round
floor
ceil
to_frame
See Also
---------
Index : The base pandas Index type.
Timedelta : Represents a duration between two dates or times.
DatetimeIndex : Index of datetime64 data.
PeriodIndex : Index of Period data.
timedelta_range : Create a fixed-frequency TimedeltaIndex.
Notes
-----
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.
Creating a TimedeltaIndex based on `start`, `periods`, and `end` has
been deprecated in favor of :func:`timedelta_range`.
"""
_typ = 'timedeltaindex'
_join_precedence = 10
def _join_i8_wrapper(joinf, **kwargs):
    # Wrap a libjoin routine so joins view the values as timedelta64[ns].
    return DatetimeIndexOpsMixin._join_i8_wrapper(
        joinf, dtype='m8[ns]', **kwargs)

# int64-based join engines shared by the Index join machinery
_inner_indexer = _join_i8_wrapper(libjoin.inner_join_indexer_int64)
_outer_indexer = _join_i8_wrapper(libjoin.outer_join_indexer_int64)
_left_indexer = _join_i8_wrapper(libjoin.left_join_indexer_int64)
_left_indexer_unique = _join_i8_wrapper(
    libjoin.left_join_indexer_unique_int64, with_indexers=False)
_engine_type = libindex.TimedeltaEngine
_comparables = ['name', 'freq']
_attributes = ['name', 'freq']
_is_numeric_dtype = True
_infer_as_myclass = True
_freq = None
_box_func = TimedeltaArray._box_func
_bool_ops = TimedeltaArray._bool_ops
_object_ops = TimedeltaArray._object_ops
_field_ops = TimedeltaArray._field_ops
_datetimelike_ops = TimedeltaArray._datetimelike_ops
_datetimelike_methods = TimedeltaArray._datetimelike_methods
_other_ops = TimedeltaArray._other_ops
# -------------------------------------------------------------------
# Constructors
def __new__(cls, data=None, unit=None, freq=None, start=None, end=None,
            periods=None, closed=None, dtype=_TD_DTYPE, copy=False,
            name=None, verify_integrity=None):
    # Deprecated keyword: kept only to emit the warning.
    if verify_integrity is not None:
        warnings.warn("The 'verify_integrity' argument is deprecated, "
                      "will be removed in a future version.",
                      FutureWarning, stacklevel=2)
    else:
        verify_integrity = True

    if data is None:
        # Range-style construction (start/end/periods) is deprecated in
        # favor of pandas.timedelta_range.
        freq, freq_infer = dtl.maybe_infer_freq(freq)
        warnings.warn("Creating a TimedeltaIndex by passing range "
                      "endpoints is deprecated. Use "
                      "`pandas.timedelta_range` instead.",
                      FutureWarning, stacklevel=2)
        result = TimedeltaArray._generate_range(start, end, periods, freq,
                                                closed=closed)
        return cls._simple_new(result._data, freq=freq, name=name)

    if is_scalar(data):
        raise TypeError('{cls}() must be called with a '
                        'collection of some kind, {data} was passed'
                        .format(cls=cls.__name__, data=repr(data)))

    if isinstance(data, TimedeltaArray):
        if copy:
            data = data.copy()
        return cls._simple_new(data, name=name, freq=freq)

    if (isinstance(data, TimedeltaIndex) and
            freq is None and name is None):
        # Fast path: nothing to override, so share (or copy) as-is.
        if copy:
            return data.copy()
        else:
            return data._shallow_copy()

    # - Cases checked above all return/raise before reaching here - #

    tdarr = TimedeltaArray._from_sequence(data, freq=freq, unit=unit,
                                          dtype=dtype, copy=copy)
    return cls._simple_new(tdarr._data, freq=tdarr.freq, name=name)
@classmethod
def _simple_new(cls, values, name=None, freq=None, dtype=_TD_DTYPE):
    # `dtype` is passed by _shallow_copy in corner cases, should always
    # be timedelta64[ns] if present.
    if not isinstance(values, TimedeltaArray):
        values = TimedeltaArray._simple_new(values, dtype=dtype,
                                            freq=freq)
    else:
        # An explicit freq wins; otherwise inherit from the array.
        if freq is None:
            freq = values.freq
    assert isinstance(values, TimedeltaArray), type(values)
    assert dtype == _TD_DTYPE, dtype
    assert values.dtype == 'm8[ns]', values.dtype

    tdarr = TimedeltaArray._simple_new(values._data, freq=freq)
    result = object.__new__(cls)
    result._data = tdarr
    result.name = name
    # For groupby perf. See note in indexes/base about _index_data
    result._index_data = tdarr._data

    result._reset_identity()
    return result
# -------------------------------------------------------------------
def __setstate__(self, state):
    """Necessary for making this object picklable"""
    if isinstance(state, dict):
        super(TimedeltaIndex, self).__setstate__(state)
    else:
        # Legacy / malformed pickle payloads are rejected outright.
        raise Exception("invalid pickle state")

# Alias kept for backwards-compatible unpickling of old pandas pickles.
_unpickle_compat = __setstate__
def _maybe_update_attributes(self, attrs):
    """ Update Index attributes (e.g. freq) depending on op """
    freq = attrs.get('freq', None)
    if freq is not None:
        # The op may have invalidated the freq; mark it for re-inference.
        # (When freq is already None there is nothing to infer.)
        attrs['freq'] = 'infer'
    return attrs
# -------------------------------------------------------------------
# Rendering Methods
@property
def _formatter_func(self):
    # Element formatter used when rendering this index.
    from pandas.io.formats.format import _get_format_timedelta64
    return _get_format_timedelta64(self, box=True)

def _format_native_types(self, na_rep='NaT', date_format=None, **kwargs):
    # String-render all values; missing entries are rendered as `na_rep`.
    from pandas.io.formats.format import Timedelta64Formatter
    return Timedelta64Formatter(values=self,
                                nat_rep=na_rep,
                                justify='all').get_result()
# -------------------------------------------------------------------
# Wrapping TimedeltaArray
# Arithmetic dunders are delegated to the backing TimedeltaArray via
# _make_wrapped_arith_op.
__mul__ = _make_wrapped_arith_op("__mul__")
__rmul__ = _make_wrapped_arith_op("__rmul__")
__floordiv__ = _make_wrapped_arith_op("__floordiv__")
__rfloordiv__ = _make_wrapped_arith_op("__rfloordiv__")
__mod__ = _make_wrapped_arith_op("__mod__")
__rmod__ = _make_wrapped_arith_op("__rmod__")
__divmod__ = _make_wrapped_arith_op("__divmod__")
__rdivmod__ = _make_wrapped_arith_op("__rdivmod__")
__truediv__ = _make_wrapped_arith_op("__truediv__")
__rtruediv__ = _make_wrapped_arith_op("__rtruediv__")
if compat.PY2:
    # Python 2 routes the / operator through __div__/__rdiv__.
    __div__ = __truediv__
    __rdiv__ = __rtruediv__

# Compat for frequency inference, see GH#23789
_is_monotonic_increasing = Index.is_monotonic_increasing
_is_monotonic_decreasing = Index.is_monotonic_decreasing
_is_unique = Index.is_unique

@property
def _box_func(self):
    # Wrap a raw nanosecond value in a Timedelta scalar.
    return lambda x: Timedelta(x, unit='ns')
def __getitem__(self, key):
    # Delegate to the backing array; scalar results (Timedelta/NaT) are
    # returned as-is, array results are re-wrapped as an index.
    result = self._data.__getitem__(key)
    if is_scalar(result):
        return result
    return type(self)(result, name=self.name)
# -------------------------------------------------------------------
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
    dtype = pandas_dtype(dtype)
    if is_timedelta64_dtype(dtype) and not is_timedelta64_ns_dtype(dtype):
        # Have to repeat the check for 'timedelta64' (not ns) dtype
        # so that we can return a numeric index, since pandas will return
        # a TimedeltaIndex when dtype='timedelta'
        result = self._data.astype(dtype, copy=copy)
        if self.hasnans:
            # NaT cannot live in an integer index; fall back to object-ish.
            return Index(result, name=self.name)
        return Index(result.astype('i8'), name=self.name)
    return DatetimeIndexOpsMixin.astype(self, dtype, copy=copy)
def union(self, other):
    """
    Specialized union for TimedeltaIndex objects. If combining
    overlapping ranges with the same DateOffset, will be much
    faster than Index.union

    Parameters
    ----------
    other : TimedeltaIndex or array-like

    Returns
    -------
    y : Index or TimedeltaIndex
    """
    self._assert_can_do_setop(other)

    # Trivial cases short-circuit to the generic implementation.
    if len(other) == 0 or self.equals(other) or len(self) == 0:
        return super(TimedeltaIndex, self).union(other)

    if not isinstance(other, TimedeltaIndex):
        try:
            other = TimedeltaIndex(other)
        except (TypeError, ValueError):
            pass
    this, other = self, other

    if this._can_fast_union(other):
        # Adjacent/overlapping monotonic ranges with equal freq: O(n).
        return this._fast_union(other)
    else:
        result = Index.union(this, other)
        if isinstance(result, TimedeltaIndex):
            if result.freq is None:
                # Try to recover a regular frequency from the merged values.
                result.freq = to_offset(result.inferred_freq)
        return result
def join(self, other, how='left', level=None, return_indexers=False,
         sort=False):
    """
    See Index.join
    """
    # Best effort: coerce `other` to a TimedeltaIndex so the fast i8
    # join indexers can be used; fall back silently on failure.
    if _is_convertible_to_index(other):
        try:
            other = TimedeltaIndex(other)
        except (TypeError, ValueError):
            pass

    return Index.join(self, other, how=how, level=level,
                      return_indexers=return_indexers,
                      sort=sort)
def _wrap_joined_index(self, joined, other):
    # Wrap the raw joined values back into an index, propagating the
    # result name and (when the fast-union preconditions hold) the freq.
    name = get_op_result_name(self, other)
    if (isinstance(other, TimedeltaIndex) and self.freq == other.freq and
            self._can_fast_union(other)):
        joined = self._shallow_copy(joined, name=name)
        return joined
    else:
        return self._simple_new(joined, name)
def _can_fast_union(self, other):
    # Fast union is valid only for two monotonic TimedeltaIndexes that
    # share the same regular freq and overlap or are exactly adjacent.
    if not isinstance(other, TimedeltaIndex):
        return False

    freq = self.freq

    if freq is None or freq != other.freq:
        return False

    if not self.is_monotonic or not other.is_monotonic:
        return False

    if len(self) == 0 or len(other) == 0:
        return True

    # to make our life easier, "sort" the two ranges
    if self[0] <= other[0]:
        left, right = self, other
    else:
        left, right = other, self

    right_start = right[0]
    left_end = left[-1]

    # Only need to "adjoin", not overlap
    return (right_start == left_end + freq) or right_start in left
def _fast_union(self, other):
    # Precondition (checked by _can_fast_union): both sides monotonic
    # with the same freq, overlapping or adjacent.
    if len(other) == 0:
        return self.view(type(self))

    if len(self) == 0:
        return other.view(type(self))

    # to make our life easier, "sort" the two ranges
    if self[0] <= other[0]:
        left, right = self, other
    else:
        left, right = other, self

    left_end = left[-1]
    right_end = right[-1]

    # concatenate: append only the part of `right` beyond left's end.
    if left_end < right_end:
        loc = right.searchsorted(left_end, side='right')
        right_chunk = right.values[loc:]
        dates = _concat._concat_compat((left.values, right_chunk))
        return self._shallow_copy(dates)
    else:
        # `right` is fully contained in `left`.
        return left
def intersection(self, other):
    """
    Specialized intersection for TimedeltaIndex objects. May be much faster
    than Index.intersection

    Parameters
    ----------
    other : TimedeltaIndex or array-like

    Returns
    -------
    y : Index or TimedeltaIndex
    """
    self._assert_can_do_setop(other)

    if self.equals(other):
        # Same contents; only the name may need reconciling.
        return self._get_reconciled_name_object(other)

    if not isinstance(other, TimedeltaIndex):
        try:
            other = TimedeltaIndex(other)
        except (TypeError, ValueError):
            pass
        # Generic path for anything that is not (convertible to) a
        # TimedeltaIndex.
        result = Index.intersection(self, other)
        return result

    if len(self) == 0:
        return self
    if len(other) == 0:
        return other
    # to make our life easier, "sort" the two ranges
    if self[0] <= other[0]:
        left, right = self, other
    else:
        left, right = other, self

    # Both monotonic-by-construction here: the overlap is a single slice.
    end = min(left[-1], right[-1])
    start = right[0]

    if end < start:
        # Disjoint ranges: empty result.
        return type(self)(data=[])
    else:
        lslice = slice(*left.slice_locs(start, end))
        left_chunk = left.values[lslice]
        return self._shallow_copy(left_chunk)
def _maybe_promote(self, other):
    # Promote a timedelta-inferred `other` to a TimedeltaIndex so both
    # operands share a type for the subsequent operation.
    if other.inferred_type == 'timedelta':
        other = TimedeltaIndex(other)
    return self, other
def get_value(self, series, key):
    """
    Fast lookup of value from 1-dimensional ndarray. Only use this if you
    know what you're doing
    """
    if _is_convertible_to_td(key):
        # Timedelta-like scalars take the fast engine path.
        key = Timedelta(key)
        return self.get_value_maybe_box(series, key)

    try:
        return com.maybe_box(self, Index.get_value(self, series, key),
                             series, key)
    except KeyError:
        # Fall back 1: try interpreting the key as a string slice.
        try:
            loc = self._get_string_slice(key)
            return series[loc]
        except (TypeError, ValueError, KeyError):
            pass

        # Fall back 2: last-chance boxed lookup; anything else is a miss.
        try:
            return self.get_value_maybe_box(series, key)
        except (TypeError, ValueError, KeyError):
            raise KeyError(key)
def get_value_maybe_box(self, series, key):
    # Engine lookup by Timedelta key; box the raw result appropriately.
    if not isinstance(key, Timedelta):
        key = Timedelta(key)
    values = self._engine.get_value(com.values_from_object(series), key)
    return com.maybe_box(self, values, series, key)
def get_loc(self, key, method=None, tolerance=None):
    """
    Get integer location for requested label

    Returns
    -------
    loc : int
    """
    if is_list_like(key) or (isinstance(key, datetime) and key is not NaT):
        # GH#20464 datetime check here is to ensure we don't allow
        # datetime objects to be incorrectly treated as timedelta
        # objects; NaT is a special case because it plays a double role
        # as Not-A-Timedelta
        raise TypeError

    if isna(key):
        key = NaT

    if tolerance is not None:
        # try converting tolerance now, so errors don't get swallowed by
        # the try/except clauses below
        tolerance = self._convert_tolerance(tolerance, np.asarray(key))

    if _is_convertible_to_td(key):
        key = Timedelta(key)
        return Index.get_loc(self, key, method, tolerance)

    try:
        return Index.get_loc(self, key, method, tolerance)
    except (KeyError, ValueError, TypeError):
        # Fall back 1: string-slice interpretation.
        try:
            return self._get_string_slice(key)
        except (TypeError, KeyError, ValueError):
            pass

        # Fall back 2: parse as a Timedelta and retry.
        try:
            stamp = Timedelta(key)
            return Index.get_loc(self, stamp, method, tolerance)
        except (KeyError, ValueError):
            raise KeyError(key)
def _maybe_cast_slice_bound(self, label, side, kind):
    """
    If label is a string, cast it to timedelta according to resolution.

    Parameters
    ----------
    label : object
    side : {'left', 'right'}
    kind : {'ix', 'loc', 'getitem'}

    Returns
    -------
    label : object
    """
    assert kind in ['ix', 'loc', 'getitem', None]

    if isinstance(label, compat.string_types):
        parsed = Timedelta(label)
        # Round to the parsed resolution; a 'right' bound is widened to
        # the last nanosecond within that resolution bucket so the slice
        # is inclusive of the whole bucket.
        lbound = parsed.round(parsed.resolution)
        if side == 'left':
            return lbound
        else:
            return (lbound + to_offset(parsed.resolution) -
                    Timedelta(1, 'ns'))
    elif ((is_integer(label) or is_float(label)) and
          not is_timedelta64_dtype(label)):
        # Plain numbers are ambiguous as timedelta slice bounds.
        self._invalid_indexer('slice', label)

    return label
def _get_string_slice(self, key):
    # Numbers and NaT cannot define a string slice.
    if is_integer(key) or is_float(key) or key is NaT:
        self._invalid_indexer('slice', key)
    loc = self._partial_td_slice(key)
    return loc

def _partial_td_slice(self, key):
    # given a key, try to figure out a location for a partial slice
    if not isinstance(key, compat.string_types):
        return key

    # Partial string slicing of timedeltas is not supported.
    raise NotImplementedError
@Substitution(klass='TimedeltaIndex')
@Appender(_shared_docs['searchsorted'])
def searchsorted(self, value, side='left', sorter=None):
    # Coerce the query to timedelta64[ns] so numpy compares like with like.
    if isinstance(value, (np.ndarray, Index)):
        value = np.array(value, dtype=_TD_DTYPE, copy=False)
    else:
        value = Timedelta(value).asm8.view(_TD_DTYPE)

    return self.values.searchsorted(value, side=side, sorter=sorter)
def is_type_compatible(self, typ):
    # Compatible with our own inferred type or the generic 'timedelta'.
    return typ == self.inferred_type or typ == 'timedelta'

@property
def inferred_type(self):
    return 'timedelta64'

@property
def is_all_dates(self):
    # Always True: every element is datetime-like (a timedelta).
    return True
def insert(self, loc, item):
    """
    Make new Index inserting new item at location

    Parameters
    ----------
    loc : int
    item : object
        if not either a Python datetime or a numpy integer-like, returned
        Index dtype will be object rather than datetime.

    Returns
    -------
    new_index : Index
    """
    # try to convert if possible
    if _is_convertible_to_td(item):
        try:
            item = Timedelta(item)
        except Exception:
            pass
    elif is_scalar(item) and isna(item):
        # GH 18295
        item = self._na_value

    freq = None
    if isinstance(item, Timedelta) or (is_scalar(item) and isna(item)):
        # check freq can be preserved on edge cases
        if self.freq is not None:
            # Inserting one freq-step before the first or after the last
            # element keeps the index regular.
            if ((loc == 0 or loc == -len(self)) and
                    item + self.freq == self[0]):
                freq = self.freq
            elif (loc == len(self)) and item - self.freq == self[-1]:
                freq = self.freq
        item = Timedelta(item).asm8.view(_TD_DTYPE)

    try:
        new_tds = np.concatenate((self[:loc].asi8, [item.view(np.int64)],
                                  self[loc:].asi8))
        return self._shallow_copy(new_tds, freq=freq)
    except (AttributeError, TypeError):
        # fall back to object index
        if isinstance(item, compat.string_types):
            return self.astype(object).insert(loc, item)
        raise TypeError(
            "cannot insert TimedeltaIndex with incompatible label")
def delete(self, loc):
    """
    Make a new TimedeltaIndex with passed location(s) deleted.

    Parameters
    ----------
    loc: int, slice or array of ints
        Indicate which sub-arrays to remove.

    Returns
    -------
    new_index : TimedeltaIndex
    """
    new_tds = np.delete(self.asi8, loc)

    freq = 'infer'
    if is_integer(loc):
        # Removing the first or last element preserves the freq exactly.
        if loc in (0, -len(self), -1, len(self) - 1):
            freq = self.freq
    else:
        if is_list_like(loc):
            loc = lib.maybe_indices_to_slice(
                ensure_int64(np.array(loc)), len(self))
        # A contiguous slice anchored at either end also preserves freq.
        if isinstance(loc, slice) and loc.step in (1, None):
            if (loc.start in (0, None) or loc.stop in (len(self), None)):
                freq = self.freq

    return TimedeltaIndex(new_tds, name=self.name, freq=freq)
# Attach the shared datetime-like operator/method implementations to the
# class (comparisons, unary numeric methods, datetimelike methods; logical
# methods are explicitly disabled for timedeltas).
TimedeltaIndex._add_comparison_ops()
TimedeltaIndex._add_numeric_methods_unary()
TimedeltaIndex._add_logical_methods_disabled()
TimedeltaIndex._add_datetimelike_methods()
def _is_convertible_to_index(other):
    """
    Return a boolean for whether conversion to a TimedeltaIndex can be
    attempted for `other`.
    """
    if isinstance(other, TimedeltaIndex):
        return True
    # Non-empty inputs qualify unless their inferred type is numeric/mixed.
    incompatible_types = ('floating', 'mixed-integer', 'integer',
                          'mixed-integer-float', 'mixed')
    return len(other) > 0 and other.inferred_type not in incompatible_types
def timedelta_range(start=None, end=None, periods=None, freq=None,
                    name=None, closed=None):
    """
    Return a fixed frequency TimedeltaIndex, with day as the default
    frequency

    Parameters
    ----------
    start : string or timedelta-like, default None
        Left bound for generating timedeltas
    end : string or timedelta-like, default None
        Right bound for generating timedeltas
    periods : integer, default None
        Number of periods to generate
    freq : string or DateOffset, default 'D'
        Frequency strings can have multiples, e.g. '5H'
    name : string, default None
        Name of the resulting TimedeltaIndex
    closed : string, default None
        Make the interval closed with respect to the given frequency to
        the 'left', 'right', or both sides (None)

    Returns
    -------
    rng : TimedeltaIndex

    Notes
    -----
    Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
    exactly three must be specified. If ``freq`` is omitted, the resulting
    ``TimedeltaIndex`` will have ``periods`` linearly spaced elements between
    ``start`` and ``end`` (closed on both sides).

    To learn more about the frequency strings, please see `this link
    <http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`__.

    Examples
    --------
    >>> pd.timedelta_range(start='1 day', periods=4)
    TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'],
                   dtype='timedelta64[ns]', freq='D')

    The ``closed`` parameter specifies which endpoint is included. The default
    behavior is to include both endpoints.

    >>> pd.timedelta_range(start='1 day', periods=4, closed='right')
    TimedeltaIndex(['2 days', '3 days', '4 days'],
                   dtype='timedelta64[ns]', freq='D')

    The ``freq`` parameter specifies the frequency of the TimedeltaIndex.
    Only fixed frequencies can be passed, non-fixed frequencies such as
    'M' (month end) will raise.

    >>> pd.timedelta_range(start='1 day', end='2 days', freq='6H')
    TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',
                    '1 days 18:00:00', '2 days 00:00:00'],
                   dtype='timedelta64[ns]', freq='6H')

    Specify ``start``, ``end``, and ``periods``; the frequency is generated
    automatically (linearly spaced).

    >>> pd.timedelta_range(start='1 day', end='5 days', periods=4)
    TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',
                    '5 days 00:00:00'],
                   dtype='timedelta64[ns]', freq=None)
    """
    # Default to daily frequency only when it would not over-specify the
    # range (i.e. at least one of periods/start/end is missing).
    if freq is None and com._any_none(periods, start, end):
        freq = 'D'

    freq, freq_infer = dtl.maybe_infer_freq(freq)
    tdarr = TimedeltaArray._generate_range(start, end, periods, freq,
                                           closed=closed)
    return TimedeltaIndex._simple_new(tdarr._data, freq=tdarr.freq, name=name)
| bsd-3-clause |
google-research/google-research | graph_embedding/monet/shilling_experiment.py | 1 | 30570 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs shilling attack experiment in Section 4 of associated manuscript."""
# Imports
from __future__ import print_function
import collections
import copy
import json
import os
import random
import time
from call_glove import GloVe
from gensim.models import Word2Vec
from gensim.models.callbacks import CallbackAny2Vec
from glove_util import count_cooccurrences
import numpy
from scipy.stats import pearsonr
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import scale
import tensorflow as tf
# User-defined hyperparameters for the experiment.
# Experiment hyperparameters. Meanings below are from how each constant is
# used later in this script; names ending in *_FRACTION/NUMBER_* size the
# simulated shilling attack and its evaluation.
DATA_FILE = 'movielens/ml-100k/u.data'  # MovieLens-100K ratings file.
SAVE_DIR = 'experiment_data/shilling'  # Output directory (created below).
NUMBER_OF_EXPERIMENTS = 10
FRACTION_ATTACKERS = 0.05
FRACTION_ATTACKERS_KNOWN = 0.5
NUMBER_TO_SHILL = 10
NUMBER_TARGETS = 1
NUMBER_TO_ATTACK = 100
SEED = 0
NEIGHBORS_TO_EVAL = 20
NUMBER_GLOVE_ITERATIONS = 20
WALK_LENGTH = 5  # Steps per random walk (walks have WALK_LENGTH + 1 nodes).
WINDOW_SIZE = 5  # Skip-gram / GloVe co-occurrence window.
PERCENTILE_THRESHOLD = 90.0
OVERWRITE_EMBEDDINGS = True
DO_EVAL = True
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
# pylint: disable=dangerous-default-value
# pylint: disable=invalid-name
if not os.path.isdir(SAVE_DIR):
  os.makedirs(SAVE_DIR)
# MovieLens Utils
def filter_by_weight(graph, min_weight):
  """Return a copy of `graph` keeping only edges with weight >= min_weight."""
  kept = collections.defaultdict(dict)
  for src, edges in graph.items():
    for dst, weight in edges.items():
      if weight >= min_weight:
        kept[src][dst] = weight
  return kept
def make_graph_from_array(array):
  """Build a user->item rating graph from an (n, 3) [user, item, rating] array.

  Node ids are prefixed 'u_' for users and 'i_' for items.

  Returns:
    (graph, user_set, item_set) where graph[user][item] = rating.
  """
  graph = collections.defaultdict(dict)
  users = set()
  items = set()
  for row in array:
    user_id = 'u_' + str(row[0])
    item_id = 'i_' + str(row[1])
    graph[user_id][item_id] = row[2]
    users.add(user_id)
    items.add(item_id)
  return graph, users, items
def load_movielens_graph(filepath):
  """Load a whitespace-separated MovieLens ratings file into a graph.

  Returns (graph, user_set, item_set) as built by make_graph_from_array.
  """
  array = numpy.loadtxt(open(filepath), dtype=numpy.int64)
  return make_graph_from_array(array)
def make_movielens_undirected(graph):
  """Return a symmetric copy of `graph`: each edge u->v also appears as v->u."""
  symmetric = collections.defaultdict(dict)
  for src, edges in graph.items():
    for dst, weight in edges.items():
      symmetric[src][dst] = weight
      symmetric[dst][src] = weight
  return symmetric
def count_item_popularity(graph):
  """Count, for each target node, how many source nodes point to it."""
  popularity = collections.Counter()
  for edges in graph.values():
    # Each incident edge contributes one unit of popularity.
    popularity.update(edges.keys())
  return popularity
def count_item_avg_score(graph):
  """Average rating per item across all users that rated it."""
  totals = collections.Counter()
  counts = collections.Counter()
  for edges in graph.values():
    for item, rating in edges.items():
      totals[item] += rating
      counts[item] += 1
  return {item: totals[item] / float(counts[item]) for item in counts}
# DeepWalk Utils
def load_edgelist(filepath, src_first=True, undirected=True):
  """Read a whitespace-separated edge list into an adjacency dict.

  Args:
    filepath: path to a text file with one edge per line.
    src_first: if True the first column is the source, else the second.
    undirected: if True, also insert the reverse edge.

  Returns:
    dict-of-dicts adjacency; every edge weight is 1.
  """
  graph = collections.defaultdict(dict)
  with open(filepath) as f:
    for line in f:
      line = line.strip().split()
      if src_first:
        src = line[0]
        dst = line[1]
      else:
        src = line[1]
        dst = line[0]
      graph[src][dst] = 1
      if undirected:
        graph[dst][src] = 1
  return graph
def sample_next_node(graph, node):
  """Uniformly sample a neighbor of `node`; return `node` itself if isolated."""
  neighbors = sorted(graph[node])
  if not neighbors:
    return node
  # Sorting above makes the draw deterministic given the numpy RNG state.
  chosen = numpy.random.choice(len(neighbors))
  return neighbors[chosen]
def generate_random_walks(graph,
                          walks_per_node,
                          walk_length,
                          random_seed=12345):
  """Yield `walks_per_node` seeded random walks from every node in `graph`.

  Each walk is a list of `walk_length` + 1 node ids starting at the node.
  """
  random.seed(random_seed)
  for start in sorted(graph):
    for _ in range(walks_per_node):
      walk = [start]
      while len(walk) <= walk_length:
        walk.append(sample_next_node(graph, walk[-1]))
      yield walk
def remove_users_from_walks(walks):
  """Drop user tokens (any token containing 'u') from every walk."""
  return [[token for token in walk if 'u' not in token] for walk in walks]
class EpochLogger(CallbackAny2Vec):
  """Callback to log information about training."""

  def __init__(self):
    # Number of epochs completed so far.
    self.epoch = 0

  def on_epoch_begin(self, model):
    print('Epoch #{} start'.format(self.epoch))

  def on_epoch_end(self, model):
    print('Epoch #{} end'.format(self.epoch))
    self.epoch += 1
def RunDeepWalk(graph,
                num_walks_node,
                walk_length,
                embedding_dim,
                iterations,
                window=5,
                remove_users=False,
                random_seed=12345):
  """Train a gensim Word2Vec (DeepWalk) model on random walks over `graph`.

  Returns:
    (model, sentences): the trained model and the walk corpus it saw.
  """
  # Sorting makes the corpus deterministic before the seeded shuffle below.
  sentences = sorted(
      generate_random_walks(
          graph,
          walks_per_node=num_walks_node,
          walk_length=walk_length,
          random_seed=random_seed))
  if remove_users:
    sentences = remove_users_from_walks(sentences)
  random.seed(random_seed)
  random.shuffle(sentences)
  model = None
  # sg=1, hs=1, negative=0: skip-gram with hierarchical softmax only.
  model = Word2Vec(
      sentences=sentences,
      min_count=0,
      sg=1,
      hs=1,
      negative=0,
      size=embedding_dim,
      seed=random_seed,
      sample=0,
      workers=12,
      window=window,
      iter=iterations)
  # NOTE(review): passing `sentences` to Word2Vec() already trains once;
  # this train() call runs additional epochs -- confirm that is intended.
  model.train(
      sentences,
      total_examples=model.corpus_count,
      epochs=model.epochs,
      callbacks=[EpochLogger()])
  return model, sentences
def DeepWalkMovielens(G,
                      num_walks_node=100,
                      walk_length=5,
                      embedding_dim=128,
                      iterations=10,
                      window=5,
                      remove_users=False,
                      make_undirected_within=False,
                      random_seed=12345):
  """DeepWalk wrapper for the MovieLens graph.

  Returns:
    (embedding_map, walks): dict node-id -> embedding vector, and the walk
    corpus used for training.
  """
  # make movielens undirected
  if make_undirected_within:
    G_undirected = make_movielens_undirected(G)
  else:
    G_undirected = G
  # create embeddings
  embedding, walks = RunDeepWalk(
      G_undirected,
      num_walks_node=num_walks_node,
      walk_length=walk_length,
      embedding_dim=embedding_dim,
      iterations=iterations,
      window=window,
      remove_users=remove_users,
      random_seed=random_seed)
  # extract symbol: vector embedding table
  embedding_map = {}
  for i, v in enumerate(embedding.wv.index2word):
    embedding_map[v] = embedding.wv.vectors[i]
  return embedding_map, walks
# Attack Utils
def top_k_add(graph,
              target_items=None,
              attackers=None,
              target_probability=1.0,
              attack_weight=5,
              attack_table=None):
  """Add attacker ratings for target items, in place; graph is [users x items].

  Args:
    graph: dict-of-dicts user -> item -> rating, mutated in place.
    target_items: items to rate.
    attackers: user ids doing the rating.
    target_probability: independent probability each (attacker, item) rating
      is actually written.
    attack_weight: rating to write; if None, per-item ratings are taken from
      `attack_table` instead.
    attack_table: item -> rating lookup used only when attack_weight is None.
      (Default changed from a shared mutable `{}` to None to avoid the
      mutable-default-argument pitfall; behavior is unchanged.)
  """
  if attack_table is None:
    attack_table = {}
  for attacker in attackers:
    for target in target_items:
      if random.uniform(0, 1.) < target_probability:
        if attack_weight is None:
          graph[attacker][target] = attack_table[target]
        else:
          graph[attacker][target] = attack_weight
def top_k_high_degree_attack(graph,
                             target_items=None,
                             attackers=None,
                             to_attack=None,
                             popular_video_count=100,
                             to_attack_probability=1.0,
                             target_probability=1.0,
                             random_seed=12345):
  """Rate popular (trending) items, then add ours [users x items].

  Mutates and returns `graph`. Attackers first rate the most popular items
  (to blend in), then rate the `target_items` they want to promote.
  """
  numpy.random.seed(random_seed)
  new_graph = graph
  # who are the attackers? Default: a random 1% sample of users.
  if not attackers:
    # Fix: numpy.random.choice cannot sample from a Python-3 dict_keys
    # view (it is not 1-D array-like); materialize the keys first.
    attackers = list(
        numpy.random.choice(
            list(graph.keys()), size=int(len(graph) * 0.01), replace=False))
  if not to_attack:
    popular_items = count_item_popularity(graph)
    # grab the most common nodes to attack
    to_attack = list(
        list(zip(*popular_items.most_common(popular_video_count)))[0])
  # attack popular stuff
  top_k_add(
      new_graph,
      target_items=to_attack,
      attackers=attackers,
      target_probability=to_attack_probability)
  # attack the target
  top_k_add(
      new_graph,
      target_items=target_items,
      attackers=attackers,
      target_probability=target_probability)
  return new_graph
# Monet Utils
def make_attack_covariate(user_item_graph, known_attackers, normalize=False):
  """Per-item covariate: number of known attackers who rated the item.

  Returns a dict item -> [count]; with normalize=True the vector of counts
  is scaled to unit L2 norm across items.
  """
  bad_counts = collections.defaultdict(int)
  all_items = set()
  for user, items in user_item_graph.items():
    for item in items:
      all_items.add(item)
      if user in known_attackers:
        bad_counts[item] += 1
  covariate = {item: [float(bad_counts.get(item, 0))] for item in all_items}
  if normalize:
    norm = numpy.sqrt(sum(value[0] ** 2 for value in covariate.values()))
    for item in covariate:
      covariate[item] = [covariate[item][0] / norm]
  return covariate
# Extract weights from a keyed vector object
def extract_weights(keyed_vectors, tokens):
  """Stack the vectors for `tokens`, in order, into a single numpy array."""
  rows = [keyed_vectors[token] for token in tokens]
  return numpy.array(rows)
# Extract all weights in easily usable dict
def extract_all_weights(model_obj, tokens):
  """Extracts numpy-style weights from gensim-style MONET returns.

  Args:
    model_obj: output from the GloVe call (MONET or otherwise)
    tokens: a list of tokens, which are node labels for the original graph

  Returns:
    return_dict: a keyed dict of numpy matrices ordered by tokens:
      W: the sum of the input and output topology embeddings
      (If the GloVe model did not have metadata terms, these are None):
      Z: the sum of the input and output metadata embeddings
      H1: the input metadata transformation
      H2: the output metadata transormation
      E: if the GloVe model included covariates, this is [W, Z]. Otherwise, [W].
      W0: If the MONET unit was used, these are the un-SVD'd topology embeddings
  """
  return_dict = {}
  # Topology embeddings
  return_dict['W'] = (
      extract_weights(model_obj['topo_input'], tokens) +
      extract_weights(model_obj['topo_outpt'], tokens))
  # Metadata embeddings
  if model_obj['meta_input'] is not None:
    return_dict['Z'] = (
        extract_weights(model_obj['meta_input'], tokens) +
        extract_weights(model_obj['meta_outpt'], tokens))
    return_dict['H1'] = model_obj['meta_trans_input']
    return_dict['H2'] = model_obj['meta_trans_outpt']
    return_dict['E'] = numpy.concatenate([return_dict['W'], return_dict['Z']],
                                         axis=1)
  else:
    return_dict['Z'] = None
    return_dict['H1'] = None
    return_dict['H2'] = None
    return_dict['E'] = numpy.concatenate([return_dict['W']], axis=1)
  # Base topology embeddings.
  # Bug fix: this previously tested `'topo_input_raw' in return_dict`, which
  # is never true (the key is only ever read from model_obj), so W0 promised
  # in the docstring was never produced.
  if model_obj.get('topo_input_raw') is not None:
    return_dict['W0'] = (
        extract_weights(model_obj['topo_input_raw'], tokens) +
        extract_weights(model_obj['topo_outpt_raw'], tokens))
  return return_dict
def monet_item_embed(user_item_graph,
                     item_metadata,
                     walks=None,
                     num_walks_node=100,
                     walk_length=WALK_LENGTH,
                     VECTOR_SIZE=128,
                     COVARIATE_SIZE=1,
                     WINDOW_SIZE=WINDOW_SIZE,
                     NUM_ITERATIONS=10,
                     BATCH_SIZE=100,
                     METHOD='MONET',
                     use_w2v=False,
                     DB_LEVEL=1.0,
                     random_seed=12345):
  """Train item embeddings with GloVe / MONET0 / MONET on random walks.

  Args:
    METHOD: 'MONET' (debiasing unit), 'MONET0' (metadata, no debias), or
      anything else for plain (metadata-free) GloVe.

  Returns:
    (monet_embeddings, monet_covariates, monet_weights, tokens); covariates
    are None unless METHOD == 'MONET'.
  """
  if walks is None:
    # generate random walks (item-only: user tokens are filtered out)
    G_undirected = make_movielens_undirected(user_item_graph)
    walks = list(
        generate_random_walks(
            G_undirected,
            walks_per_node=num_walks_node,
            walk_length=walk_length))
    walks = remove_users_from_walks(walks)
    random.shuffle(walks)
  # counter = collections.Counter([w for walk in walks for w in walk])
  flat_walks = []
  for walk in walks:
    for w in walk:
      flat_walks.append(w)
  counter = collections.Counter(flat_walks)
  tokens = sorted(counter.keys())
  # call monet
  with tf.Graph().as_default(), tf.Session() as session:
    with tf.device('/cpu:0'):
      weight_dict_monet = None
      if METHOD == 'MONET0':
        # Metadata-aware GloVe without the MONET debiasing unit.
        weight_dict_monet = GloVe(
            walks,
            session,
            metadata=item_metadata,
            vector_size=VECTOR_SIZE,
            covariate_size=COVARIATE_SIZE,
            use_monet=False,
            window_size=WINDOW_SIZE,
            iters=NUM_ITERATIONS,
            batch_size=BATCH_SIZE,
            random_seed=random_seed)
      elif METHOD == 'MONET':
        # Full MONET unit with debiasing strength DB_LEVEL.
        weight_dict_monet = GloVe(
            walks,
            session,
            metadata=item_metadata,
            vector_size=VECTOR_SIZE,
            covariate_size=COVARIATE_SIZE,
            use_monet=True,
            window_size=WINDOW_SIZE,
            iters=NUM_ITERATIONS,
            db_level=DB_LEVEL,
            use_w2v=use_w2v,
            batch_size=BATCH_SIZE,
            random_seed=random_seed)
      else:
        # Plain GloVe baseline (no metadata).
        weight_dict_monet = GloVe(
            walks,
            session,
            vector_size=VECTOR_SIZE,
            use_monet=False,
            window_size=WINDOW_SIZE,
            iters=NUM_ITERATIONS,
            batch_size=BATCH_SIZE,
            random_seed=random_seed)
      monet_weights = extract_all_weights(weight_dict_monet, tokens)
  monet_embeddings = {
      x[0]: x[1] for x in zip(tokens, monet_weights['W'].tolist())
  }
  monet_covariates = None
  if METHOD == 'MONET':
    monet_covariates = {
        x[0]: x[1] for x in zip(tokens, monet_weights['Z'].tolist())}
  return monet_embeddings, monet_covariates, monet_weights, tokens
def nlp_baseline(embeddings, metadata, tokens):
  """Debias embeddings by projecting out an 'attack direction' (NLP-style).

  The attack direction is the metadata-weighted mean of attacked-item
  embeddings minus the plain mean of non-attacked ('safe') embeddings; that
  direction is then regressed out of every embedding row.
  """
  # prepare attack direction vector
  # (assumes item 'i_1' exists; it is used only to size the zero vectors)
  attack_vector = numpy.zeros(shape=embeddings['i_1'].shape)
  safe_vector = numpy.zeros(shape=embeddings['i_1'].shape)
  safe_count = 0
  for token in tokens:
    if metadata[token][0] > 0.0:
      attack_vector = attack_vector + embeddings[token] * metadata[token][0]
    else:
      safe_vector = safe_vector + embeddings[token]
      safe_count += 1
  attack_vector = (
      attack_vector / float(numpy.sum(list(metadata.values()))) -
      safe_vector / float(safe_count))
  # regress out of embedding matrix
  embed_mat = extract_weights(embeddings, tokens)
  projection_diff = numpy.matmul(
      (numpy.matmul(embed_mat, numpy.transpose(attack_vector)) /
       numpy.dot(attack_vector, attack_vector))[:, numpy.newaxis],
      attack_vector[numpy.newaxis, :])
  return embed_mat - projection_diff
# Scoring Utils
def score_embedding(embedding, target_ids, attacker_ids):
  """Computes the distances in unit ball space of a set of one embeddings from another."""
  def _unit(vector):
    return vector / numpy.linalg.norm(vector)

  unit_targets = [_unit(embedding[t]) for t in target_ids]
  unit_attackers = [_unit(embedding[a]) for a in attacker_ids]
  # Inner products of unit vectors == cosine similarities.
  return numpy.inner(unit_targets, unit_attackers)
# score_ranking_attack
def score_ranking_attack(embedding,
                         to_shill_ids,
                         target_ids,
                         number_neighbors=20):
  """Count how often shilled items appear among targets' nearest neighbors.

  Returns:
    (distances, indices, total_found_in_topk) from a cosine kNN query of
    the `target_ids` embeddings against all item embeddings.
  """
  normalized_embeddings = {}
  for x in embedding:
    norm_scale = numpy.linalg.norm(embedding[x])
    # Guard against (near-)zero-norm embeddings.
    normalized_embeddings[x] = embedding[x] / (
        norm_scale if norm_scale > 1e-10 else 1.0)
  ordered_ids = list(sorted(embedding.keys()))
  ordered_embeddings = []
  ordered_id_targets = {}
  for idx, x in enumerate(ordered_ids):
    # remove non-item embeddings
    if 'i' in x:
      if x in to_shill_ids:
        ordered_id_targets[idx] = x
      ordered_embeddings.append(normalized_embeddings[x])
  # NOTE(review): `ordered_id_targets` is keyed by positions in
  # `ordered_ids`, while the kNN indices below refer to rows of X (items
  # only). These coincide only if `embedding` contains item keys
  # exclusively -- confirm callers always pass item-only embeddings.
  X = numpy.vstack(ordered_embeddings)
  # +1 neighbor because the query point matches itself at distance 0.
  nbrs = NearestNeighbors(
      n_neighbors=number_neighbors + 1, algorithm='brute',
      metric='cosine').fit(X)
  X_find = numpy.vstack([normalized_embeddings[x] for x in target_ids])
  distances, indices = nbrs.kneighbors(X_find)
  total_found_in_topk = 0
  for row in indices:
    # Skip the first neighbor (the query itself).
    for item in row[1:]:
      if item in ordered_id_targets:
        total_found_in_topk += 1
  return distances, indices, total_found_in_topk
def row_normalize(mat):
  """Scale each row of `mat` to unit L2 norm."""
  row_norms = numpy.linalg.norm(mat, axis=1)
  return mat / row_norms[:, None]
def embedding_similarity(embeddings, scale_embeddings=False):
  """Gram matrix of the embeddings; row-normalize first for cosine similarity."""
  mat = row_normalize(embeddings) if scale_embeddings else embeddings
  return numpy.matmul(mat, numpy.transpose(mat))
def compute_distance_correlation(embedding_dict1,
                                 embedding_dict2,
                                 tokens,
                                 unattacked_indx,
                                 scale_embeddings=True):
  """Pearson correlation between two embeddings' similarity matrices.

  Only rows at `unattacked_indx` are compared, so the statistic measures
  agreement on the unattacked portion of the graph.
  """
  distances1 = embedding_similarity(
      extract_weights(embedding_dict1, tokens)[unattacked_indx, :],
      scale_embeddings=scale_embeddings)
  distances2 = embedding_similarity(
      extract_weights(embedding_dict2, tokens)[unattacked_indx, :],
      scale_embeddings=scale_embeddings)
  return pearsonr(distances1.flatten(), distances2.flatten())[0]
# Eval Utils
def save_embeddings(weights, save_dir, tokens, name):
  """Write the embeddings for `tokens` (in order) to <save_dir>/<name>_embeddings.txt."""
  with open(os.path.join(save_dir, '%s_embeddings.txt' % name), 'w') as f:
    numpy.savetxt(f, extract_weights(weights, tokens))
def load_embeddings(save_dir, name, tokens):
  """Load embeddings saved by save_embeddings; rows map to `tokens` in order.

  NOTE(review): assumes the file's row order matches `tokens` exactly and
  that there are at least two rows (numpy.loadtxt returns 1-D for a single
  row) -- confirm against save_embeddings callers.
  """
  with open(os.path.join(save_dir, '%s_embeddings.txt' % name)) as f:
    weights = numpy.loadtxt(f)
  return {t: weights for (t, weights) in zip(tokens, weights)}
def nearest_neighbors_by_score(neighbors, scores, n=10):
  """Return up to `n` neighbors with the highest scores, best first."""
  top_ascending = numpy.argsort(scores)[-n:]
  ranked = []
  for position in reversed(top_ascending):
    ranked.append(neighbors[position])
  return ranked
def get_sorted_distances(embedding_dict,
                         tokens,
                         normalize_rows=False,
                         normalize_cols=False):
  """Per-token list of (other_token, similarity) pairs, most similar first.

  Bug fix: previously, when normalize_rows=True and normalize_cols=False,
  the `else` branch of the column check overwrote the row-normalized matrix
  with the raw embeddings, silently ignoring normalize_rows. Column scaling
  still takes precedence when both flags are set, as before.
  """
  embeddings = extract_weights(embedding_dict, tokens)
  if normalize_cols:
    E = scale(embeddings, with_mean=False)
  elif normalize_rows:
    E = row_normalize(embeddings)
  else:
    E = embeddings
  dists = numpy.matmul(E, numpy.transpose(E))
  sorted_distances = {}
  for i, t in enumerate(tokens):
    dist_v = dists[i]
    # Descending similarity order for token t.
    sorted_distances[t] = [
        (tokens[j], dist_v[j]) for j in numpy.flip(numpy.argsort(dist_v))
    ]
  return sorted_distances
def avg_mrrs(embeddings,
             cooccurrences,
             tokens,
             ignore_answers=(),
             nns=(1, 5, 10, 20),
             normalize_rows=False,
             normalize_cols=False):
    """Per-token mean reciprocal rank of the top co-occurring neighbors.

    For every token, the ground truth is the ``max(nns)`` highest
    co-occurrence partners (the token itself and *ignore_answers* excluded).
    Tokens are then ranked by embedding similarity and, for each cutoff in
    *nns*, the reciprocal ranks at which ground-truth neighbors appear are
    averaged.

    Returns {nn: {token: mean reciprocal rank}}.

    Fix vs. the original: the mutable default arguments (``ignore_answers=[]``
    and ``nns=[...]``) are replaced with immutable tuples; behavior is
    otherwise unchanged.
    """
    max_nn = numpy.max(nns)
    mrrs = {nn: {} for nn in nns}
    sorted_distances = get_sorted_distances(embeddings, tokens, normalize_rows,
                                            normalize_cols)
    ignore_answers_set = set(ignore_answers)
    for item_label in tokens:
        neighbors = []
        scores = []
        item_cdict = cooccurrences[item_label]
        for n, s in sorted(item_cdict.items()):
            if n != item_label and n not in ignore_answers_set:
                neighbors.append(n)
                scores.append(s)
        nearest_neighbors = nearest_neighbors_by_score(neighbors, scores, max_nn)
        nn_sets = {nn: set(nearest_neighbors[:nn]) for nn in nns}
        mrr_scores = {nn: [] for nn in nns}
        i = 0
        # Walk down the similarity ranking until every ground-truth neighbor
        # of the largest cutoff has been encountered
        while nn_sets[max_nn]:  # this used to explicitly check length
            ns_pair = sorted_distances[item_label][i]
            for nn in nns:
                if ns_pair[0] in nn_sets[nn]:
                    mrr_scores[nn].append(1.0 / (i + 1))
                    nn_sets[nn].remove(ns_pair[0])
            i += 1
        for nn in nns:
            mrrs[nn][item_label] = numpy.mean(mrr_scores[nn])
    return mrrs
def compute_mrr_curve(embeddings,
                      cooccurrences,
                      tokens,
                      ignore_answers=(),
                      nns=tuple(range(1, 21)),
                      target_set=None,
                      normalize_rows=False,
                      normalize_cols=False):
    """Average MRR across tokens for each neighborhood size in *nns*.

    Returns a list parallel to *nns*. When *target_set* is given the average
    is restricted to tokens belonging to that set.

    Bug fixes vs. the original:
    - the *target_set* branch computed exactly the same unfiltered mean as
      the default branch (the membership test was missing); the filter is
      now actually applied;
    - mutable default arguments replaced with immutable equivalents.
    """
    mrrs = avg_mrrs(embeddings, cooccurrences, tokens, ignore_answers, nns,
                    normalize_rows, normalize_cols)
    if target_set is None:
        return [
            numpy.mean(list(mrr_dict.values())) for _, mrr_dict in mrrs.items()
        ]
    targets = set(target_set)
    return [
        numpy.mean([v for k, v in mrr_dict.items() if k in targets])
        for _, mrr_dict in mrrs.items()
    ]
# Experiment Loop
# load movielens bipartite graph: user nodes prefixed 'u', item nodes 'i'
G_prime, user_set, item_set = load_movielens_graph(DATA_FILE)
# Methods vec: the embedding variants evaluated in every experiment
methods = ['deepwalk', 'glove', 'monet0', 'monet', 'random', 'nlp']
# Helper function to get method name from debias (DB) level
monet_alpha_encoder = lambda x: 'monet%0.2f' % x
# Set up debias levels: 0.75..0.95 in steps of 0.05, plus 0.50 and 0.25
DB_LEVELS = [v / 100.0 for v in list(range(75, 100, 5)) + [50, 25]]
methods.extend([monet_alpha_encoder(db_level) for db_level in DB_LEVELS])
G_prime = make_movielens_undirected(G_prime)
# keep only edges with rating/weight >= 4
G_prime = filter_by_weight(G_prime, min_weight=4)
# recompute node sets after filtering (nodes may have been dropped)
user_set = set([u for u in G_prime if u[0] == 'u'])
item_set = set([i for i in G_prime if i[0] == 'i'])
# accumulates one result dict per experiment
results = []
# Main experiment loop: for each trial, optionally (re)generate the attacked
# graph and all embedding variants, then optionally evaluate attack success,
# distance correlations and MRR curves, writing everything under exp_dir.
for exp_no in range(NUMBER_OF_EXPERIMENTS):
    print('Performing experiment: ' + str(exp_no))
    time_dict = {}
    exp_dir = os.path.join(SAVE_DIR, 'experiment%d' % exp_no)
    # per-experiment seed keeps trials reproducible yet distinct
    EXP_SEED = SEED + exp_no
    if not os.path.isdir(exp_dir):
        os.mkdir(exp_dir)
    if OVERWRITE_EMBEDDINGS:
        # select attackers
        numpy.random.seed(EXP_SEED)
        attackers = list(
            numpy.random.choice(
                sorted(user_set),
                size=int(len(G_prime) * FRACTION_ATTACKERS),
                replace=False))
        to_shills = list(
            numpy.random.choice(
                sorted(item_set), size=NUMBER_TO_SHILL, replace=False))
        known_attackers = list(
            numpy.random.choice(
                attackers,
                size=int(len(attackers) * FRACTION_ATTACKERS_KNOWN),
                replace=False))
        # uniform random attack
        targets = list(
            numpy.random.choice(
                sorted(item_set), size=NUMBER_TARGETS, replace=False))
        # inject shilling edges into a copy of the clean graph
        G_attacked = top_k_high_degree_attack(
            copy.deepcopy(G_prime),
            target_items=to_shills,
            attackers=attackers,
            to_attack=targets,
            random_seed=EXP_SEED)
        G_attacked = make_movielens_undirected(G_attacked)
        item_metadata = make_attack_covariate(G_attacked, set(known_attackers))
        item_metadata_normalized = make_attack_covariate(G_attacked,
                                                         set(known_attackers), True)
        # Save the metadata
        with open(os.path.join(exp_dir, 'item_metadata.txt'), 'w') as f:
            f.write(json.dumps(item_metadata))
        embeddings_orig, walks_orig = DeepWalkMovielens(
            G_prime,
            remove_users=True,
            walk_length=WALK_LENGTH,
            random_seed=EXP_SEED)
        stime = time.time()
        embeddings_prime, walks_prime = DeepWalkMovielens(
            G_attacked,
            remove_users=True,
            walk_length=WALK_LENGTH,
            random_seed=EXP_SEED)
        time_dict['deepwalk_time'] = time.time() - stime
        # Save the walks
        with open(os.path.join(exp_dir, 'walks_orig.txt'), 'w') as f:
            f.write(json.dumps(walks_orig))
        with open(os.path.join(exp_dir, 'walks_prime.txt'), 'w') as f:
            f.write(json.dumps(walks_prime))
        # Glove
        stime = time.time()
        glove_weights = monet_item_embed(
            G_attacked,
            item_metadata,
            NUM_ITERATIONS=NUMBER_GLOVE_ITERATIONS,
            METHOD='GloVe',
            random_seed=EXP_SEED)
        time_dict['glove_time'] = time.time() - stime
        glove_topology, glove_covariates, all_glove_weights, tokens = glove_weights
        print('done with glove')
        # Save the tokens
        with open(os.path.join(exp_dir, 'tokens.txt'), 'w') as f:
            f.write(json.dumps(tokens))
        # After getting tokens, able to save both deepwalk and glove weights
        save_embeddings(embeddings_prime, exp_dir, tokens, 'deepwalk')
        save_embeddings(glove_topology, exp_dir, tokens, 'glove')
        # MONET0
        stime = time.time()
        monet0_weights = monet_item_embed(
            G_attacked,
            item_metadata_normalized,
            NUM_ITERATIONS=NUMBER_GLOVE_ITERATIONS,
            METHOD='MONET0',
            random_seed=EXP_SEED)
        time_dict['monet0_time'] = time.time() - stime
        monet0_topology, monet0_covariates, all_monet0_weights, tokens = monet0_weights
        save_embeddings(monet0_topology, exp_dir, tokens, 'monet0')
        # MONET
        stime = time.time()
        monet_weights = monet_item_embed(
            G_attacked,
            item_metadata_normalized,
            NUM_ITERATIONS=NUMBER_GLOVE_ITERATIONS,
            METHOD='MONET',
            random_seed=EXP_SEED)
        time_dict['monet_time'] = time.time() - stime
        monet_topology, monet_covariates, all_monet_weights, tokens = monet_weights
        save_embeddings(monet_topology, exp_dir, tokens, 'monet')
        # MONET with different regs
        for db_level in DB_LEVELS:
            monet_weights = monet_item_embed(
                G_attacked,
                item_metadata_normalized,
                NUM_ITERATIONS=NUMBER_GLOVE_ITERATIONS,
                METHOD='MONET',
                DB_LEVEL=db_level,
                random_seed=EXP_SEED)
            monet_topology, monet_covariates, all_monet_weights, tokens = monet_weights
            save_embeddings(monet_topology, exp_dir, tokens,
                            monet_alpha_encoder(db_level))
        # Save the configuration of the experiment
        exp_config = {
            'targets': targets,
            'items_to_shill': to_shills,
            'attackers': attackers,
            'known_attackers': known_attackers,
            'NEIGHBORS_TO_EVAL': NEIGHBORS_TO_EVAL,
            'FRACTION_ATTACKERS': FRACTION_ATTACKERS,
            'FRACTION_ATTACKERS_KNOWN': FRACTION_ATTACKERS_KNOWN,
            'NUMBER_TO_SHILL': NUMBER_TO_SHILL,
            'NUMBER_TARGETS': NUMBER_TARGETS,
            'NUMBER_TO_ATTACK': NUMBER_TO_ATTACK,
            'SEED': SEED,
            'NUMBER_GLOVE_ITERATIONS': NUMBER_GLOVE_ITERATIONS
        }
        with open(os.path.join(exp_dir, 'exp_config.txt'), 'w') as f:
            f.write(json.dumps(exp_config))
        with open(os.path.join(exp_dir, 'timing_results.txt'), 'w') as f:
            f.write(json.dumps(time_dict))
    if DO_EVAL:
        # Evaluation phase: everything is reloaded from disk so it can run
        # on a previous invocation's embeddings (OVERWRITE_EMBEDDINGS False)
        # Load the tokens and exp_config
        with open(os.path.join(exp_dir, 'tokens.txt')) as f:
            tokens = json.loads(f.read())
        with open(os.path.join(exp_dir, 'exp_config.txt')) as f:
            exp_config = json.loads(f.read())
        # Load the walks
        with open(os.path.join(exp_dir, 'walks_orig.txt')) as f:
            walks_orig = json.loads(f.read())
        with open(os.path.join(exp_dir, 'walks_prime.txt')) as f:
            walks_prime = json.loads(f.read())
        attacked_vids = exp_config['targets'] + exp_config['items_to_shill']
        # Load the embeddings
        topology_weights = {
            method: load_embeddings(exp_dir, method, tokens)
            for method in methods
            if method not in ['decor_max', 'decor_sum', 'random', 'nlp']
        }
        # Load timing results
        with open(os.path.join(exp_dir, 'timing_results.txt')) as f:
            time_dict = json.loads(f.read())
        # Load the item metadata
        with open(os.path.join(exp_dir, 'item_metadata.txt')) as f:
            item_metadata = json.loads(f.read())
        # Compute standard fairness decorrelated matrices
        stime = time.time()
        topology_weights['nlp'] = dict(
            zip(tokens,
                nlp_baseline(topology_weights['glove'], item_metadata, tokens)))
        time_dict['nlp_time'] = time.time() - stime
        # Compute metrics
        bad_items_count = {}
        for method in topology_weights:
            bad_items_count.update({
                method: score_ranking_attack(
                    topology_weights[method],
                    exp_config['items_to_shill'],
                    exp_config['targets'],
                    number_neighbors=exp_config['NEIGHBORS_TO_EVAL'])[2]})
        exp_result = {'experiment': exp_no, 'SEED': SEED}
        exp_result.update(time_dict)
        exp_result.update(bad_items_count)
        # Compute random embeddings
        numpy.random.seed(EXP_SEED)
        topology_weights['random'] = {
            t: numpy.random.normal(size=(128,)) for t in topology_weights['glove']
        }
        # Compute distance set correlations
        # (only rows for items untouched by the attack are compared)
        attacked_set = set(attacked_vids)
        unattacked_indx = [i for i, t in enumerate(tokens) if t not in attacked_set]
        for method in topology_weights:
            exp_result.update({
                ('%s_vs_deepwalk_distcorr' % method):
                    compute_distance_correlation(topology_weights[method],
                                                 topology_weights['deepwalk'], tokens,
                                                 unattacked_indx)
            })
            exp_result.update({
                ('%s_vs_glove_distcorr' % method):
                    compute_distance_correlation(topology_weights[method],
                                                 topology_weights['glove'], tokens,
                                                 unattacked_indx)
            })
        # NOTE: exp_result is appended here but mutated further below; the
        # MRR entries still end up in `results` because it is the same object
        results.append(exp_result)
        # Compute cooccurrences
        (cooccurrence_list, index_vocab_list, vocab_index_lookup,
         tokenized_cooccurrences) = count_cooccurrences(walks_prime, 5)
        # Compute mrr curves
        for method in topology_weights:
            normalize_rows = False
            normalize_cols = False
            exp_result.update({
                ('%s_mrr_curve_full' % method): [
                    compute_mrr_curve(
                        topology_weights[method],
                        tokenized_cooccurrences,
                        tokens,
                        normalize_rows=normalize_rows,
                        normalize_cols=normalize_cols)
                ]
            })
            exp_result.update({
                ('%s_mrr_curve_noattacked' % method): [
                    compute_mrr_curve(
                        topology_weights[method],
                        tokenized_cooccurrences,
                        tokens,
                        ignore_answers=attacked_vids,
                        normalize_rows=normalize_rows,
                        normalize_cols=normalize_cols)
                ]
            })
            exp_result.update({
                ('%s_mrr_curve_noattacked_justtargets' % method): [
                    compute_mrr_curve(
                        topology_weights[method],
                        tokenized_cooccurrences,
                        tokens,
                        ignore_answers=attacked_vids,
                        target_set=exp_config['targets'],
                        normalize_rows=normalize_rows,
                        normalize_cols=normalize_cols)
                ]
            })
        with open(os.path.join(exp_dir, str(exp_no) + '.txt'), 'w') as f:
            f.write(json.dumps(exp_result))
| apache-2.0 |
JsNoNo/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)

# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn import ensemble
from sklearn import datasets


X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)

# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)

# small train set / large test set, as in Hastie et al. 2009
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]

original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
                   'min_samples_split': 5}

plt.figure()

# each setting overrides the shared parameters with one regularization choice
for label, color, setting in [('No shrinkage', 'orange',
                               {'learning_rate': 1.0, 'subsample': 1.0}),
                              ('learning_rate=0.1', 'turquoise',
                               {'learning_rate': 0.1, 'subsample': 1.0}),
                              ('subsample=0.5', 'blue',
                               {'learning_rate': 1.0, 'subsample': 0.5}),
                              ('learning_rate=0.1, subsample=0.5', 'gray',
                               {'learning_rate': 0.1, 'subsample': 0.5}),
                              ('learning_rate=0.1, max_features=2', 'magenta',
                               {'learning_rate': 0.1, 'max_features': 2})]:
    params = dict(original_params)
    params.update(setting)

    clf = ensemble.GradientBoostingClassifier(**params)
    clf.fit(X_train, y_train)

    # compute test set deviance
    test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)

    for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
        # clf.loss_ assumes that y_test[i] in {0, 1}
        test_deviance[i] = clf.loss_(y_test, y_pred)

    # plot every 5th staged value to keep the figure readable
    plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
             '-', color=color, label=label)

plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')

plt.show()
| bsd-3-clause |
combogenomics/DuctApe | ductape/kegg/kegg.py | 1 | 85620 | #!/usr/bin/env python
"""
Kegg
Kegg Library
Kegg data fetching
"""
import sys
if sys.version_info[0] < 3:
import Queue as queue
from urllib2 import quote
from urllib2 import urlopen
else:
from urllib.request import urlopen
from urllib.parse import quote
import queue
from ductape.common.commonthread import CommonThread
from ductape.common.utils import get_span
from ductape.common.utils import isOnline
from ductape.kegg.web import kheader
from matplotlib import colors
import logging
import os
import shutil
import threading
import time
import random
__author__ = "Marco Galardini"
################################################################################
# Log setup
logger = logging.getLogger('ductape.kegg')
################################################################################
# Constants
# KEGG "global"/"overview" pathway maps to skip, in every ID spelling the
# API may return (with/without the 'path:' prefix; rn/ko/map namespaces)
avoidedPaths = set(['%s%s%s' % (prefix1, prefix2, value)
                    for prefix1 in ['', 'path:']
                    for prefix2 in ['rn', 'ko', 'map']
                    for value in ['01100', '01110', '01120',
                                  '01200', '01210', '01212',
                                  '01230', '01220']
                    ])
################################################################################
# Classes
class MapParser(object):
    '''
    Takes an HTML of a Kegg map and returns a list of ready-to-use html lines

    Only the <map> element, its <area> children and the closing </map> tag
    are kept; everything after </map> is ignored.
    '''
    def __init__(self, content):
        self.html = content
        self.map = []
        self.parse()

    def parse(self):
        '''Scan self.html line by line and collect the <map> element.'''
        inside_map = False
        for raw_line in self.html.split('\n'):
            line = raw_line.strip()
            if '<map' in line[:4]:
                inside_map = True
                self.map.append(line)
            elif inside_map and '<area' in line[:6]:
                self.map.append(line)
            elif inside_map and '</map' in line[:5]:
                self.map.append(line)
                break
        return self.map
class KeggAPI(object):
    '''
    Class KeggAPI
    Connects to KEGG API and performs various tasks
    Fail-safe: if a request fails it tries again and again
    All the results are stored in the attribute result, as well as the inputs
    Docs:
    http://www.kegg.jp/kegg/docs/keggapi.html
    http://www.kegg.jp/kegg/rest/weblink.html

    Every network method follows the same retry pattern: loop forever,
    sleeping with a growing random backoff after each failure; when
    keeptrying is False, give up after *retries* attempts, set self.failed
    and return with self.result untouched.
    '''
    def __init__(self, keeptrying=False):
        self.baseurl = 'http://www.kegg.jp/'
        self._apiurl = 'http://rest.kegg.jp/'
        self._maplink = 'http://www.kegg.jp/kegg-bin/show_pathway?'
        self.failed = False
        # if True, retry forever instead of honoring the retries limit
        self.keeptrying = keeptrying
        self.clean()

    def clean(self):
        # Reset request state between calls
        self.input = None
        self.result = None
        self.failed = False

    def getEntryTag(self, entry, tag):
        '''
        Get the tag content inside a kegg entry (flat file)

        Continuation lines (starting with a space) are appended to the
        value; parsing stops at the first non-continuation line after the
        tag section.
        '''
        b = False
        res = ''
        for line in entry.split('\n'):
            if line.startswith(tag):
                res += line.rstrip().lstrip(tag).lstrip()
                b = True
            elif b and line[0] == ' ':
                res += ' ' + line.rstrip().lstrip()
            elif b and line[0] != ' ':
                b = False
                return res
        if res == '':
            return None
        return res.lstrip()

    def getLinkTag(self, entry, tag):
        '''
        Get the tag content inside a kegg entry (flat file)
        This variant function extract links from an entry

        Generator: yields the last whitespace-separated token of the tag
        line and of each continuation line.
        '''
        b = False
        for line in entry.split('\n'):
            if line.startswith(tag):
                yield line.rstrip().lstrip(tag).lstrip().split()[-1]
                b = True
            elif b and line[0] == ' ':
                yield line.rstrip().lstrip().split()[-1]
            elif b and line[0] != ' ':
                b = False

    def parseLinks(self, links):
        '''
        Parse the results of a REST "link" query (tab-separated pairs)
        into a dict source ID -> list of linked IDs; None if empty
        '''
        d = {}
        for line in links.split('\n'):
            if line == '' or '\t' not in line:continue
            k, v = line.split('\t')
            if k not in d:
                d[k] = []
            d[k].append(v)
        if len(d) == 0:
            return None
        return d

    def getRelease(self, dbver):
        '''
        Takes a string like "Release 64.0+/11-13, Nov 12"
        and returns the release number, as a float (useful for comparison)
        if it fails returns None
        '''
        try:
            s = dbver.split(' ')
            release = s[1]
            # Trim trailing characters until the token parses as a float
            # NOTE(review): if the token trims down to '' this inner loop
            # never terminates (float('') raises, ''[:-1] stays '') --
            # consider breaking on an empty string
            while True:
                try:
                    release = float(release)
                    return release
                except:
                    release = release[:-1]
        except:
            logger.debug('Could not parse KEGG database version (%s)'%dbver)
            return None

    def getDBVersion(self, retries=8):
        '''
        Get the KEGG DB version
        Returns a tuple: full version string , release number (None if unparsable)
        '''
        attempts = 0
        while True:
            try:
                self.input = None
                logger.debug('Looking for KEGG db version')
                url = self._apiurl + quote('info/kegg')
                data = urlopen(url, timeout=20).read().decode('utf-8').split('\n')
                # second line of the info output carries the release string
                line = data[1].split(' ')[1]
                self.result = (line, self.getRelease(line))
                return
            except Exception as e:
                attempts += 1
                logger.debug('info failed! Attempt %d'
                             %attempts)
                logger.debug('%s'%str(e))
                time.sleep((2 + random.random())*attempts)
                try:
                    logger.debug(url)
                except:pass
                if self.keeptrying:continue
                if attempts >= retries:
                    self.failed = True
                    logger.warning('info failed!')
                    return

    def getTitle(self, entries, otherTags=[], retries=8):
        '''
        Get the title of a specific KEGG object
        Default behaviour is to return NAME and DEFINITION tags
        Additional tags can be provided in otherTags

        self.result maps each input ID to [NAME, DEFINITION, *otherTags];
        inputs with no match get ['',''].
        '''
        attempts = 0
        while True:
            try:
                self.input = entries
                logger.debug('Looking for title for %d KEGG entries'%len(entries))
                url = ''
                for entry in entries:
                    url += '%s+'%entry
                # Dummy entry to avoid a rare bug when all the provided entries
                # NOTE(review): this condition is effectively always True
                # (an entry cannot match both spellings); 'and' may have
                # been intended -- confirm against the upstream bug
                if 'cpd:C00099' not in entries or 'C00099' not in entries:
                    url += 'cpd:C00099'
                #
                url = url.rstrip('+')
                url = self._apiurl + 'get/' + quote(url)
                data = urlopen(url, timeout=20).read().decode('utf-8')
                self.result = {}
                # entries in the flat file are separated by '///'
                for lines in data.split('///'):
                    if len(lines) == 1:continue
                    try:
                        shortID = self.getEntryTag(lines,'ENTRY').split(' ')[0]
                        for longID in self.input:
                            if shortID in longID:
                                self.result[longID] = [
                                    self.getEntryTag(lines, 'NAME'),
                                    self.getEntryTag(lines, 'DEFINITION')
                                ]
                                # TODO: a more general approach for this
                                for tag in otherTags:
                                    value = self.getEntryTag(lines, tag)
                                    self.result[longID].append(value)
                    except:
                        continue
                # Check that every input has a result
                for entry in entries:
                    if entry not in self.result:
                        self.result[entry] = ['','']
                return
            except Exception as e:
                attempts += 1
                logger.debug('get failed! Attempt %d'
                             %attempts)
                logger.debug('%s'%str(e))
                time.sleep((2 + random.random())*attempts)
                try:
                    logger.debug(url)
                except:pass
                if self.keeptrying:continue
                if attempts >= retries:
                    self.failed = True
                    logger.warning('get failed!')
                    return

    def getRPair(self, entries, retries=8):
        '''
        Similar to getTitle, but targeting specific features of RPair
        Specifically the two co_id involved and the kind of interaction

        self.result maps each input ID to [compound1, compound2, type];
        inputs with no match get ['','',''].
        '''
        attempts = 0
        while True:
            try:
                self.input = entries
                logger.debug('Looking for details on %d RPair entries'%len(entries))
                url = ''
                for entry in entries:
                    url += '%s+'%entry
                # Dummy entry to avoid a rare bug when all the provided entries
                # NOTE(review): same always-True 'or' condition as getTitle
                if 'rp:RP00001' not in entries or 'RP00001' not in entries:
                    url += 'rp:RP00001'
                #
                url = url.rstrip('+')
                url = self._apiurl + 'get/' + quote(url)
                data = urlopen(url, timeout=20).read().decode('utf-8')
                self.result = {}
                for lines in data.split('///'):
                    if len(lines) == 1:continue
                    try:
                        shortID = self.getEntryTag(lines,'ENTRY').split(' ')[0]
                        for longID in self.input:
                            if shortID in longID:
                                # NAME is of the form C00001_C00002
                                co1, co2 = self.getEntryTag(lines, 'NAME').split('_')
                                #
                                if not co1.startswith('cpd:'):
                                    co1 = 'cpd:' + co1
                                if not co2.startswith('cpd:'):
                                    co2 = 'cpd:' + co2
                                #
                                kind = self.getEntryTag(lines, 'TYPE')
                                self.result[longID] = [co1,co2,kind]
                    except:
                        continue
                # Check that every input has a result
                for entry in entries:
                    if entry not in self.result:
                        self.result[entry] = ['','','']
                return
            except Exception as e:
                attempts += 1
                logger.debug('get (rpair) failed! Attempt %d'
                             %attempts)
                logger.debug('%s'%str(e))
                time.sleep((2 + random.random())*attempts)
                try:
                    logger.debug(url)
                except:pass
                if self.keeptrying:continue
                if attempts >= retries:
                    self.failed = True
                    logger.warning('get (rpair) failed!')
                    return

    def getIDListFromDB(self, db='pathway', retries=8):
        '''
        Get all the IDs from a specific database
        Default: pathway
        '''
        attempts = 0
        while True:
            try:
                self.input = db
                logger.debug('Looking for KEGG IDs from db %s'%db)
                url = self._apiurl + 'list/%s/' % quote(db)
                data = urlopen(url, timeout=20).read().decode('utf-8')
                # first tab-separated column of each line is the ID
                self.result = set([x.split('\t')[0] for x in data.split('\n')])
                try:
                    self.result.remove('')
                except:pass
                self.result = list(self.result)
                return
            except Exception as e:
                attempts += 1
                logger.debug('list (%s) failed! Attempt %d'
                             %(db,attempts))
                logger.debug('%s'%str(e))
                time.sleep((2 + random.random())*attempts)
                try:
                    logger.debug(url)
                except:pass
                if self.keeptrying:continue
                if attempts >= retries:
                    self.failed = True
                    logger.warning('list (%s) failed!'%db)
                    return

    def getReactions(self, ko_ids, retries=8):
        '''
        Get the reaction IDs for a given KO list
        '''
        attempts = 0
        while True:
            try:
                self.input = ko_ids
                logger.debug('Looking for KEGG reactions from %d KO IDs'%len(ko_ids))
                url = ''
                for ko_id in ko_ids:
                    url += '%s+'%ko_id
                url = url.rstrip('+')
                url = self._apiurl + 'link/reaction/' + quote(url)
                data = urlopen(url, timeout=20).read().decode('utf-8')
                self.result = self.parseLinks(data)
                return
            except Exception as e:
                attempts += 1
                logger.debug('link (reaction) failed! Attempt %d'
                             %attempts)
                logger.debug('%s'%str(e))
                time.sleep((2 + random.random())*attempts)
                try:
                    logger.debug(url)
                except:pass
                if self.keeptrying:continue
                if attempts >= retries:
                    self.failed = True
                    logger.warning('link (reaction) failed!')
                    return

    def getPathways(self, re_ids, retries=8):
        '''
        Get the pathway IDs for a given reaction list
        '''
        attempts = 0
        while True:
            try:
                self.input = re_ids
                logger.debug('Looking for KEGG pathways from %d RE IDs'%len(re_ids))
                url = ''
                for re_id in re_ids:
                    url += '%s+'%re_id
                url = url.rstrip('+')
                url = self._apiurl + 'link/pathway/' + quote(url)
                data = urlopen(url, timeout=20).read().decode('utf-8')
                self.result = self.parseLinks(data)
                return
            except Exception as e:
                attempts += 1
                logger.debug('link (pathway) failed! Attempt %d'
                             %attempts)
                logger.debug('%s'%str(e))
                time.sleep((2 + random.random())*attempts)
                try:
                    logger.debug(url)
                except:pass
                if self.keeptrying:continue
                if attempts >= retries:
                    self.failed = True
                    logger.warning('link (pathway) failed!')
                    return

    def getReactionsByComp(self, co_ids, retries=8):
        '''
        Get the reactions IDs for a given compound list
        '''
        attempts = 0
        while True:
            try:
                self.input = co_ids
                logger.debug('Looking for KEGG reactions from %d CO IDs'%len(co_ids))
                url = ''
                for co_id in co_ids:
                    url += '%s+'%co_id
                url = url.rstrip('+')
                url = self._apiurl + 'link/reaction/' + quote(url)
                data = urlopen(url, timeout=20).read().decode('utf-8')
                self.result = self.parseLinks(data)
                return
            except Exception as e:
                attempts += 1
                logger.debug('link (reaction) failed! Attempt %d'
                             %attempts)
                logger.debug('%s'%str(e))
                time.sleep((2 + random.random())*attempts)
                try:
                    logger.debug(url)
                except:pass
                if self.keeptrying:continue
                if attempts >= retries:
                    self.failed = True
                    logger.warning('link (reaction) failed!')
                    return

    def getReactionsFromPath(self, path_ids, retries=8):
        '''
        Get the reaction IDs for a given pathway list
        '''
        attempts = 0
        while True:
            try:
                self.input = path_ids
                logger.debug('Looking for KEGG reactions from %d PATH IDs'%len(path_ids))
                url = ''
                for path_id in path_ids:
                    url += '%s+'%path_id
                url = url.rstrip('+')
                url = self._apiurl + 'link/reaction/' + quote(url)
                data = urlopen(url, timeout=20).read().decode('utf-8')
                self.result = self.parseLinks(data)
                return
            except Exception as e:
                attempts += 1
                logger.debug('link (reaction) failed! Attempt %d'
                             %attempts)
                logger.debug('%s'%str(e))
                time.sleep((2 + random.random())*attempts)
                try:
                    logger.debug(url)
                except:pass
                if self.keeptrying:continue
                if attempts >= retries:
                    self.failed = True
                    logger.warning('link (reaction) failed!')
                    return

    def getRPairsFromReaction(self, entries, retries=8):
        '''
        Get the rpair IDs for a given reaction list

        Parses the RCLASS section of each reaction entry;
        self.result maps input ID -> set of RCLASS IDs.
        '''
        attempts = 0
        while True:
            try:
                self.input = entries
                logger.debug('Looking for RClass for %d KEGG entries'%len(entries))
                url = ''
                for entry in entries:
                    url += '%s+'%entry
                # Dummy entry to avoid a rare bug when all the provided entries
                if 'cpd:C00099' not in entries or 'C00099' not in entries:
                    url += 'cpd:C00099'
                #
                url = url.rstrip('+')
                url = self._apiurl + 'get/' + quote(url)
                data = urlopen(url, timeout=20).read().decode('utf-8')
                self.result = {}
                for lines in data.split('///'):
                    if len(lines) == 1:continue
                    try:
                        shortID = self.getEntryTag(lines,'ENTRY').split(' ')[0]
                        for longID in self.input:
                            if shortID in longID:
                                for rclass in self.getLinkTag(lines, 'RCLASS'):
                                    self.result[longID] = self.result.get(longID,
                                                                          set())
                                    self.result[longID].add(rclass)
                    except:
                        continue
                return
            except Exception as e:
                attempts += 1
                logger.debug('link (rpair) failed! Attempt %d'
                             %attempts)
                logger.debug('%s'%str(e))
                time.sleep((2 + random.random())*attempts)
                try:
                    logger.debug(url)
                except:pass
                if self.keeptrying:continue
                if attempts >= retries:
                    self.failed = True
                    logger.warning('link (rpair) failed!')
                    return

    def getCompoundsFromReaction(self, re_ids, retries=8):
        '''
        Get the compound IDs for a given reaction list
        '''
        attempts = 0
        while True:
            try:
                self.input = re_ids
                logger.debug('Looking for KEGG compounds from %d RE IDs'%len(re_ids))
                url = ''
                for re_id in re_ids:
                    url += '%s+'%re_id
                url = url.rstrip('+')
                url = self._apiurl + 'link/compound/' + quote(url)
                data = urlopen(url, timeout=20).read().decode('utf-8')
                self.result = self.parseLinks(data)
                return
            except Exception as e:
                attempts += 1
                logger.debug('link (compound) failed! Attempt %d'
                             %attempts)
                logger.debug('%s'%str(e))
                time.sleep((2 + random.random())*attempts)
                try:
                    logger.debug(url)
                except:pass
                if self.keeptrying:continue
                if attempts >= retries:
                    self.failed = True
                    logger.warning('link (compound) failed!')
                    return

    def getCompoundsFromPath(self, path_ids, retries=8):
        '''
        Get the compound IDs for a given pathway list
        '''
        attempts = 0
        while True:
            try:
                self.input = path_ids
                logger.debug('Looking for KEGG compounds from %d PATH IDs'%len(path_ids))
                url = ''
                for path_id in path_ids:
                    url += '%s+'%path_id
                url = url.rstrip('+')
                url = self._apiurl + 'link/compound/' + quote(url)
                data = urlopen(url, timeout=20).read().decode('utf-8')
                self.result = self.parseLinks(data)
                return
            except Exception as e:
                attempts += 1
                logger.debug('link (compound) failed! Attempt %d'
                             %attempts)
                logger.debug('%s'%str(e))
                time.sleep((2 + random.random())*attempts)
                try:
                    logger.debug(url)
                except:pass
                if self.keeptrying:continue
                if attempts >= retries:
                    self.failed = True
                    logger.warning('link (compound) failed!')
                    return

    def getHTMLColoredPathway(self, path_id, obj_list, color_list,
                              border_list=None, retries=8):
        '''
        Get the URL of the colored pathway and return its content
        If it fails, an exception is thrown

        The URL encodes one "object[%09fill,border]/" segment per entry of
        obj_list; the page's default object color is blue. URLs longer than
        2000 characters are skipped (self.result set to '').
        NOTE(review): color_list is mutated in place ('#' -> '%23').
        '''
        attempts = 0
        while True:
            try:
                self.input = path_id
                # Fix color codes
                hblue = '#0000FF'.replace('#', '%23')
                for i in range(len(color_list)):
                    if '#' in color_list[i]:
                        color_list[i] = color_list[i].replace('#', '%23')
                #
                logger.debug('Looking for KEGG colored map from %s'%path_id)
                url = path_id.lstrip('path:') + '/default%3dblue/'
                for i in range(len(obj_list)):
                    url += obj_list[i]
                    # do not waste precious characters
                    if color_list[i] == hblue:
                        if border_list is not None and border_list[i] is not None:
                            url += '%09,' + border_list[i] + '/'
                        else:
                            url += '/'
                    #
                    else:
                        if border_list is not None and border_list[i] is not None:
                            url += '%09' + color_list[i] + ',' + border_list[i] + '/'
                        else:
                            url += '%09' + color_list[i] + '/'
                # Cannot quote this url, no colored pathway can be obtained then
                #url = self._maplink + quote(url)
                url = self._maplink + url
                logger.debug(url)
                # check that URL length is below 2000
                if len(url) > 2000:
                    logger.warning('URL too long for pathway %s, will skip'%path_id)
                    self.result = ''
                    return
                sock=urlopen(url, timeout=60)
                self.result = sock.read().decode('utf-8')
                sock.close()
                return
            except Exception as e:
                attempts += 1
                logger.debug('show_pathway failed! Attempt %d'
                             %attempts)
                logger.debug('%s'%str(e))
                time.sleep((2 + random.random())*attempts)
                if self.keeptrying:continue
                if attempts >= retries:
                    self.failed = True
                    logger.warning('show_pathway failed!')
                    return
class KeggColor(object):
    '''
    Class KeggColor
    Holds the color information to be passed to MapsFetcher
    One object for each pathway

    Bug fix: the original constructor used mutable default arguments
    (``reactions={}`` etc.), so every default-constructed instance shared
    the same dictionaries; each instance now gets fresh ones.
    '''
    def __init__(self, path, htmlmap='', reactions=None, compounds=None,
                 borders=None):
        self.path = path
        self.htmlmap = htmlmap
        self.reactions = {} if reactions is None else reactions
        self.compounds = {} if compounds is None else compounds
        # Objects that need to have a coloured border
        self.borders = {} if borders is None else borders

    def setMap(self, htmlmap):
        self.htmlmap = htmlmap

    def setReactions(self, reactions):
        self.reactions = reactions

    def setCompounds(self, compounds):
        self.compounds = compounds

    def setBorders(self, borders):
        self.borders = borders

    def getAll(self):
        '''
        Returns a tuple --> objects, color
        Reactions first, then compounds, each paired with its fill color.
        '''
        objs = list(self.reactions) + list(self.compounds)
        fills = list(self.reactions.values()) + list(self.compounds.values())
        return objs, fills

    def getBorders(self):
        '''
        Returns a tuple --> objects, color
        Border color is None for objects without an explicit border.
        '''
        objs = list(self.reactions) + list(self.compounds)
        return objs, [self.borders.get(obj) for obj in objs]
class KeggDetails(object):
    '''
    Class KoDetails
    All the informations returned by Mappers are contained here

    Holds per-entry details (ko/react/comp/path/rpair), the cross-links
    between entry types, and the pathway maps.
    '''
    def __init__(self):
        # Details
        self.ko = None
        self.react = None
        self.comp = None
        self.path = None
        self.rpair = None
        # Links
        self.koreact = None
        self.pathreact = None
        self.pathcomp = None
        self.compreact = None
        self.reactcomp = None
        self.reactrpair = None
        self.rpairreact = None
        # Maps
        self.pathmaps = None

    def _purgeDetails(self, det):
        '''Remove falsy-valued entries from a details dict (in place).'''
        erase = []
        if not det:
            return det
        for key, value in list(det.items()):
            if not value:
                erase.append(key)
        for key in erase:
            del det[key]
        return det

    def _copyLinks(self, links):
        '''Copy a link dict, stringifying every linked ID; {} when empty.'''
        out = {}
        if links:
            for k, v in links.items():
                out[k] = [str(i) for i in v]
        return out

    def setDetails(self, ko=None, react=None, comp=None, path=None, rpair=None):
        self.ko = self._purgeDetails(ko)
        self.react = self._purgeDetails(react)
        self.comp = self._purgeDetails(comp)
        self.path = self._purgeDetails(path)
        self.rpair = self._purgeDetails(rpair)

    def setLinks(self, koreact=None, pathreact=None, pathcomp=None,
                 compreact=None, reactcomp=None, reactrpair=None,
                 rpairreact=None):
        '''
        Store the cross-links between KEGG entry types.

        Bug fix: the original re-assigned ``self.compreact = {}`` right
        after filling it, silently discarding the compound->reaction links.
        The repeated copy/stringify loop is factored into _copyLinks.
        '''
        self.koreact = self._copyLinks(koreact)
        self.pathreact = self._copyLinks(pathreact)
        self.pathcomp = self._copyLinks(pathcomp)
        self.compreact = self._copyLinks(compreact)
        self.reactcomp = self._copyLinks(reactcomp)
        self.reactrpair = self._copyLinks(reactrpair)
        self.rpairreact = self._copyLinks(rpairreact)

    def setMaps(self, maps):
        self.pathmaps = maps

    def getKO(self):
        return self.ko

    def getReact(self):
        return self.react

    def getComp(self):
        return self.comp

    def getPath(self):
        return self.path

    def getKOLinks(self):
        return self.koreact

    def getCompLinks(self):
        # Bug fix: the original returned self.comppath, an attribute that
        # is never assigned anywhere (guaranteed AttributeError); the
        # compound->reaction links are what setLinks actually stores
        return self.compreact

    def getPathLinks(self):
        return self.pathreact, self.pathcomp

    def getMaps(self):
        return self.pathmaps
class BaseKegg(CommonThread):
    '''
    Base thread holding a pool of KeggAPI handlers that are handed out
    round-robin to worker threads.
    '''
    def __init__(self, threads=10, keeptrying=False, queue=queue.Queue()):
        # NOTE(review): the default queue.Queue() is created once at class
        # definition time and therefore shared by every instance constructed
        # without an explicit queue -- confirm this is intended
        CommonThread.__init__(self,queue)
        # Kegg connection
        self.handlers = []
        self._hindex = 0
        self.numThreads = threads
        for i in range(self.numThreads):
            obj = KeggAPI(keeptrying)
            self.handlers.append(obj)
        self.cleanHandlers()

    def cleanHandlers(self):
        # Reset every handler and restart the round-robin cursor
        for handler in self.handlers:
            handler.clean()
        self._hindex = 0

    def getHandler(self):
        # Round-robin: wrap the cursor when it runs past the pool
        if self._hindex >= len(self.handlers):
            self._hindex = 0
        handler = self.handlers[self._hindex]
        self._hindex += 1
        return handler

    def checkConnection(self):
        '''
        Check if there are connection problems
        First check the two IP addresses, then the URL
        '''
        check = [KeggAPI().baseurl, KeggAPI()._apiurl]
        online = False
        for addr in check:
            try:
                isOnline(addr)
                online = True
            except:
                logger.debug('address %s not working'%addr)
        if not online:
            raise Exception('KEGG seems to be offline')
class BaseMapper(BaseKegg):
    '''
    Base class for the KEGG mappers

    Provides threaded fetching of KEGG entries (reactions, compounds,
    pathways, reaction pairs) and of the links between them; the results
    are accumulated in the instance dictionaries.
    '''
    def __init__(self, threads=10, avoid=None, keeptrying=False,
                 queue=None):
        '''
        avoid: KEGG IDs that must not be fetched
        queue: message queue for CommonThread; fresh mutable defaults are
               created per call (list/Queue defaults would be shared by
               every instance built with the default values)
        '''
        if avoid is None:
            avoid = []
        if queue is None:
            import queue as _queue
            queue = _queue.Queue()
        BaseKegg.__init__(self, threads=threads, keeptrying=keeptrying,
                          queue=queue)
        # Skip these IDs
        self.avoid = avoid
        # Results
        self.reactdet = {}
        self.rpairdet = {}
        self.pathdet = {}
        self.pathreact = {}
        self.pathcomp = {}
        self.pathmap = {}
        self.compdet = {}
        self.reactpath = {}
        self.reactcomp = {}
        self.compreact = {}
        self.rpairreact = {}
        self.reactrpair = {}
        # Output
        self.result = None

    def _purgeAvoided(self, ids):
        '''Remove (in place) the IDs that must be skipped; return ids'''
        for i in [i for i in ids if i in self.avoid]:
            ids.remove(i)
        return ids

    def _joinThreads(self, threads):
        '''Busy-wait until every worker thread has finished'''
        while len(threads) > 0:
            for thread in threads:
                # is_alive(): Thread.isAlive() was removed in Python 3.9
                if not thread.is_alive():
                    threads.remove(thread)

    def _iterHandlers(self, keys, span, method, extra=()):
        '''
        Run `method` (a KeggAPI method name) over `keys`, `span` IDs per
        call and up to numThreads parallel calls per round; yield each
        handler holding a non-empty result.

        Raises IOError as soon as a handler reports a failure; returns
        early on a kill signal.  Shared by all the fetchers below, which
        only differ in how they merge the handler results.
        '''
        pieces = [p for p in get_span(keys, span)]
        for piece in get_span(pieces, self.numThreads):
            if self.killed:
                logger.debug('Exiting for a kill signal')
                return
            self.cleanHandlers()
            # Progress accounting, capped at the declared maximum
            self._substatus += len([i for p in piece for i in p])
            if self._substatus > self._maxsubstatus:
                self._substatus = self._maxsubstatus
            self.updateStatus(sub=True)
            threads = []
            for ids in piece:
                self._purgeAvoided(ids)
                if len(ids) == 0:
                    continue
                obj = threading.Thread(
                        target = getattr(self.getHandler(), method),
                        args = (ids,) + tuple(extra))
                obj.start()
                threads.append(obj)
                time.sleep(0.01)
            if len(threads) == 0:
                continue
            self._joinThreads(threads)
            for handler in self.handlers:
                if handler.failed:
                    logger.error('KEGG API error, aborting')
                    raise IOError('KEGG API error')
                if not handler.result:
                    logger.debug('Found an empty handler')
                    continue
                yield handler

    def getReactDetails(self):
        '''Fetch the title of each known reaction'''
        for handler in self._iterHandlers(list(self.reactdet.keys()), 9,
                                          'getTitle', (['ENZYME'],)):
            for kid, title in list(handler.result.items()):
                self.reactdet[kid] = title

    def getRPairDetails(self):
        '''
        Derive the details of the known reaction pairs

        No remote call: the RPAIR database has been discontinued, the
        details come from the RCLASS-style ID itself (CPD1_CPD2).
        '''
        pieces = [p for p in get_span(list(self.rpairdet.keys()), 9)]
        for piece in get_span(pieces, self.numThreads):
            if self.killed:
                logger.debug('Exiting for a kill signal')
                return
            self.cleanHandlers()
            self._substatus += len([i for p in piece for i in p])
            if self._substatus > self._maxsubstatus:
                self._substatus = self._maxsubstatus
            self.updateStatus(sub=True)
            for ids in piece:
                for rid in self._purgeAvoided(ids):
                    self.rpairdet[rid] = [rid.split('_')[0],
                                          rid.split('_')[1],
                                          'main']

    def getPathDetails(self):
        '''Fetch the title of each known pathway'''
        for handler in self._iterHandlers(list(self.pathdet.keys()), 9,
                                          'getTitle'):
            for kid, title in list(handler.result.items()):
                self.pathdet[kid] = title

    def getMapsDetails(self):
        '''Fetch the (uncolored) HTML map of each known pathway'''
        for piece in get_span(list(self.pathdet.keys()), self.numThreads):
            if self.killed:
                logger.debug('Exiting for a kill signal')
                return
            self.cleanHandlers()
            self._substatus += self.numThreads
            if self._substatus > self._maxsubstatus:
                self._substatus = self._maxsubstatus
            self.updateStatus(sub=True)
            threads = []
            for path in piece:
                if path in self.avoid:
                    continue
                # One dedicated handler per pathway in this round
                obj = threading.Thread(
                        target = self.handlers[piece.index(path)].getHTMLColoredPathway,
                        args = (path,[],[],))
                obj.start()
                threads.append(obj)
                time.sleep(0.01)
            if len(threads) == 0:
                continue
            self._joinThreads(threads)
            for handler in self.handlers:
                if handler.failed:
                    logger.error('KEGG API error, aborting')
                    raise IOError('KEGG API error')
                if not handler.result:
                    logger.debug('Found an empty handler')
                    continue
                parser = MapParser(handler.result)
                self.pathmap[handler.input] = parser.map

    def getPathReactions(self):
        '''Fetch the reactions of each known pathway; seed new reactions'''
        for handler in self._iterHandlers(list(self.pathdet.keys()), 80,
                                          'getReactionsFromPath'):
            for path, reacts in list(handler.result.items()):
                if path not in self.pathreact:
                    self.pathreact[path] = reacts
            reacts = set([v for vs in list(handler.result.values()) for v in vs])
            for react in reacts:
                if react not in self.reactdet:
                    self.reactdet[react] = None

    def getPathCompounds(self):
        '''Fetch the compounds of each known pathway; seed new compounds'''
        for handler in self._iterHandlers(list(self.pathdet.keys()), 80,
                                          'getCompoundsFromPath'):
            for path, comps in list(handler.result.items()):
                if path not in self.pathcomp:
                    self.pathcomp[path] = comps
            comps = set([v for vs in list(handler.result.values()) for v in vs])
            for comp in comps:
                if comp not in self.compdet:
                    self.compdet[comp] = None

    def getCompDetails(self):
        '''Fetch the title of each known compound'''
        for handler in self._iterHandlers(list(self.compdet.keys()), 9,
                                          'getTitle'):
            for kid, title in list(handler.result.items()):
                self.compdet[kid] = title

    def getPathways(self):
        '''Fetch the pathways of each known reaction; seed new pathways'''
        for handler in self._iterHandlers(list(self.reactdet.keys()), 80,
                                          'getPathways'):
            for react, paths in list(handler.result.items()):
                if react not in self.reactpath:
                    self.reactpath[react] = []
                for path in paths:
                    # skip the reference (non-organism) maps
                    if path.startswith('path:map'):continue
                    self.reactpath[react].append(path)
            paths = set([v for vs in list(handler.result.values()) for v in vs])
            for path in paths:
                if path not in self.pathdet and not path.startswith('path:map'):
                    self.pathdet[path] = None

    def getReactCompounds(self):
        '''Fetch the compounds of each known reaction; seed new compounds'''
        for handler in self._iterHandlers(list(self.reactdet.keys()), 80,
                                          'getCompoundsFromReaction'):
            for react, comps in list(handler.result.items()):
                if react not in self.reactcomp:
                    self.reactcomp[react] = comps
            comps = set([v for vs in list(handler.result.values()) for v in vs])
            for comp in comps:
                if comp not in self.compdet:
                    self.compdet[comp] = None

    def getCompoundReacts(self):
        '''Fetch the reactions of each known compound; seed new reactions'''
        for handler in self._iterHandlers(list(self.compdet.keys()), 80,
                                          'getReactionsByComp'):
            for comp, reacts in list(handler.result.items()):
                if comp not in self.compreact:
                    self.compreact[comp] = reacts
            reacts = set([v for vs in list(handler.result.values()) for v in vs])
            for react in reacts:
                if react not in self.reactdet:
                    self.reactdet[react] = None

    def getReactRPairs(self):
        '''Fetch the reaction pairs of each known reaction; seed new rpairs'''
        for handler in self._iterHandlers(list(self.reactdet.keys()), 80,
                                          'getRPairsFromReaction'):
            for react, rpairs in list(handler.result.items()):
                if react not in self.reactrpair:
                    self.reactrpair[react] = rpairs
            rpairs = set([v for vs in list(handler.result.values()) for v in vs])
            for rpair in rpairs:
                if rpair not in self.rpairdet:
                    self.rpairdet[rpair] = None
class KoMapper(BaseMapper):
    '''
    From a list of KO IDs returns various details in an object
    KO --> title and details
    KO --> reactions (and titles)
    reactions --> pathways (and titles)
    pathways --> reactions, compounds (with titles)
    maps --> for each pathway, the html maps (!!!)
    '''
    _statusDesc = {0:'Not started',
               1:'Checking connectivity',
               2:'Fetching reactions',
               3:'Fetching rpairs',
               4:'Fetching pathways',
               5:'Fetching pathways content',
               6:'Fetching reactions - compounds links',
               7:'Fetching details on KEGG entries',
               8:'Crafting results'}
    _substatuses = [2,3,4,5,6,7]

    def __init__(self, ko_list, threads=40, avoid=None, keeptrying=False,
                 queue=None):
        '''
        ko_list: KO IDs whose details and cross-links will be fetched
        avoid: KEGG IDs that must not be fetched
        queue: fresh mutable defaults are created per call (list/Queue
               defaults would be shared by every instance built with them)
        '''
        if avoid is None:
            avoid = []
        if queue is None:
            import queue as _queue
            queue = _queue.Queue()
        BaseMapper.__init__(self, threads=threads, avoid=avoid,
                            keeptrying=keeptrying, queue=queue)
        # Kegg
        self.ko = ko_list
        # Results
        self.kodet = {}
        self.koreact = {}

    def getKOdet(self):
        '''Fetch the title of each input KO entry'''
        pieces = [p for p in get_span(self.ko, 9)]
        for piece in get_span(pieces, self.numThreads):
            if self.killed:
                logger.debug('Exiting for a kill signal')
                return
            self.cleanHandlers()
            self._substatus += len([i for p in piece for i in p])
            if self._substatus > self._maxsubstatus:
                self._substatus = self._maxsubstatus
            self.updateStatus(sub=True)
            threads = []
            for ids in piece:
                remove = set()
                for i in ids:
                    if i in self.avoid:
                        remove.add(i)
                for i in remove:
                    ids.remove(i)
                if len(ids) == 0:
                    continue
                obj = threading.Thread(
                        target = self.getHandler().getTitle,
                        args = (ids,))
                obj.start()
                threads.append(obj)
                time.sleep(0.01)
            if len(threads) == 0:
                continue
            while len(threads) > 0:
                for thread in threads:
                    # is_alive(): Thread.isAlive() was removed in Python 3.9
                    if not thread.is_alive():
                        threads.remove(thread)
            for handler in self.handlers:
                if handler.failed:
                    logger.error('KEGG API error, aborting')
                    raise IOError('KEGG API error')
                if not handler.result:
                    logger.debug('Found an empty handler')
                    continue
                for kid, title in list(handler.result.items()):
                    self.kodet[kid] = title

    def getReactions(self):
        '''Fetch the reactions linked to each input KO; seed new reactions'''
        pieces = [p for p in get_span(self.ko, 80)]
        for piece in get_span(pieces, self.numThreads):
            if self.killed:
                logger.debug('Exiting for a kill signal')
                return
            self.cleanHandlers()
            self._substatus += len([i for p in piece for i in p])
            if self._substatus > self._maxsubstatus:
                self._substatus = self._maxsubstatus
            self.updateStatus(sub=True)
            threads = []
            for ids in piece:
                remove = set()
                for i in ids:
                    if i in self.avoid:
                        remove.add(i)
                for i in remove:
                    ids.remove(i)
                if len(ids) == 0:
                    continue
                obj = threading.Thread(
                        target = self.getHandler().getReactions,
                        args = (ids,))
                obj.start()
                threads.append(obj)
                time.sleep(0.01)
            if len(threads) == 0:
                continue
            while len(threads) > 0:
                for thread in threads:
                    # is_alive(): Thread.isAlive() was removed in Python 3.9
                    if not thread.is_alive():
                        threads.remove(thread)
            for handler in self.handlers:
                if handler.failed:
                    logger.error('KEGG API error, aborting')
                    raise IOError('KEGG API error')
                if not handler.result:
                    logger.debug('Found an empty handler')
                    continue
                for ko, reacts in list(handler.result.items()):
                    if ko not in self.koreact:
                        self.koreact[ko] = reacts
                reacts = set([v for vs in list(handler.result.values()) for v in vs])
                for react in reacts:
                    if react not in self.reactdet:
                        self.reactdet[react] = None

    def run(self):
        '''Fetch every detail and link, step by step, then craft the result'''
        self.updateStatus()
        try:
            self.checkConnection()
        except Exception as e:
            self.sendFailure(str(e))
            return
        # Reactions
        self._maxsubstatus = len(self.ko)
        self.updateStatus()
        try:
            self.getReactions()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Related rpairs
        self._maxsubstatus = len(self.reactdet)
        self.updateStatus()
        logger.warning('Using RCLASS attribute of KEGG reactions, as the RPAIR database has now been discontinued')
        try:
            self.getReactRPairs()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Related pathways...
        self._maxsubstatus = len(self.reactdet)
        self.updateStatus()
        try:
            self.getPathways()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Pathways contents...
        # 1. Reactions
        self._maxsubstatus = len(self.pathdet)
        self.updateStatus()
        try:
            self.getPathReactions()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # 2. Compounds
        self._maxsubstatus = len(self.pathdet)
        try:
            self.getPathCompounds()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Compounds for each reaction
        self._maxsubstatus = len(self.reactdet)
        self.updateStatus()
        try:
            self.getReactCompounds()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Reactions for each compound
        self._maxsubstatus = len(self.compdet)
        try:
            self.getCompoundReacts()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # KO details
        self._maxsubstatus = len(self.ko)
        self.updateStatus()
        try:
            self.getKOdet()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Pathway details
        self._maxsubstatus = len(self.pathdet)
        try:
            self.getPathDetails()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Pathway HTML maps (!!!)
        self._maxsubstatus = len(self.pathdet)
        try:
            self.getMapsDetails()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Reaction details
        self._maxsubstatus = len(self.reactdet)
        try:
            self.getReactDetails()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Compound details
        self._maxsubstatus = len(self.compdet)
        try:
            self.getCompDetails()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # RPair details
        self._maxsubstatus = len(self.rpairdet)
        logger.warning('Using RCLASS attribute of KEGG reactions, as the RPAIR database has now been discontinued')
        try:
            self.getRPairDetails()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Prepare the output object
        self.updateStatus()
        self.result = KeggDetails()
        self.result.setDetails(self.kodet, self.reactdet,
                               self.compdet, self.pathdet, self.rpairdet)
        self.result.setLinks(koreact=self.koreact, pathreact=self.pathreact,
                             pathcomp=self.pathcomp, reactcomp=self.reactcomp,
                             compreact=self.compreact,
                             rpairreact=self.rpairreact,
                             reactrpair=self.reactrpair)
        self.result.setMaps(self.pathmap)
class CompMapper(BaseMapper):
    '''
    From a list of CO IDs returns various details in an object
    CO --> title and details
    CO --> pathways (and titles)
    pathways --> reactions, compounds (with titles)
    maps --> for each pathway, the html maps (!!!)
    '''
    _statusDesc = {0:'Not started',
               1:'Checking connectivity',
               2:'Fetching reactions',
               3:'Fetching rpairs',
               4:'Fetching pathways',
               5:'Fetching pathways content',
               6:'Fetching reactions - compounds links',
               7:'Fetching details on KEGG entries',
               8:'Crafting results'}
    _substatuses = [2,3,4,5,6,7]

    def __init__(self, co_list, threads=40, avoid=None, keeptrying=False,
                 queue=None):
        '''
        co_list: compound IDs whose details and cross-links will be fetched
        avoid: KEGG IDs that must not be fetched
        queue: fresh mutable defaults are created per call (list/Queue
               defaults would be shared by every instance built with them)
        '''
        if avoid is None:
            avoid = []
        if queue is None:
            import queue as _queue
            queue = _queue.Queue()
        BaseMapper.__init__(self, threads=threads, avoid=avoid,
                            keeptrying=keeptrying, queue=queue)
        # Kegg
        self.co = co_list
        # Results
        self.comppath = {}

    def run(self):
        '''Fetch every detail and link, step by step, then craft the result'''
        self.updateStatus()
        try:
            self.checkConnection()
        except Exception as e:
            self.sendFailure(str(e))
            return
        # Reactions
        for co_id in self.co:
            self.compdet[co_id] = None
        self._maxsubstatus = len(self.compdet)
        self.updateStatus()
        try:
            self.getCompoundReacts()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Related rpairs
        self._maxsubstatus = len(self.reactdet)
        self.updateStatus()
        logger.warning('Using RCLASS attribute of KEGG reactions, as the RPAIR database has now been discontinued')
        try:
            self.getReactRPairs()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Related pathways...
        self._maxsubstatus = len(self.reactdet)
        self.updateStatus()
        try:
            self.getPathways()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Pathways contents...
        # 1. Reactions
        self._maxsubstatus = len(self.pathdet)
        self.updateStatus()
        try:
            self.getPathReactions()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # 2. Compounds
        self._maxsubstatus = len(self.pathdet)
        try:
            self.getPathCompounds()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Compounds for each reaction
        self._maxsubstatus = len(self.reactdet)
        self.updateStatus()
        try:
            self.getReactCompounds()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Reactions for each compound
        self._maxsubstatus = len(self.compdet)
        try:
            self.getCompoundReacts()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Pathway details
        self._maxsubstatus = len(self.pathdet)
        self.updateStatus()
        try:
            self.getPathDetails()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Pathway HTML maps (!!!)
        self._maxsubstatus = len(self.pathdet)
        try:
            self.getMapsDetails()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Reaction details
        self._maxsubstatus = len(self.reactdet)
        try:
            self.getReactDetails()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Compound details
        self._maxsubstatus = len(self.compdet)
        try:
            self.getCompDetails()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # RPair details
        self._maxsubstatus = len(self.rpairdet)
        logger.warning('Using RCLASS attribute of KEGG reactions, as the RPAIR database has now been discontinued')
        try:
            self.getRPairDetails()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Prepare the output object
        self.updateStatus()
        self.result = KeggDetails()
        self.result.setDetails(react=self.reactdet,
                               comp=self.compdet, path=self.pathdet,
                               rpair=self.rpairdet)
        self.result.setLinks(pathreact=self.pathreact,
                             pathcomp=self.pathcomp, compreact=self.compreact,
                             reactcomp=self.reactcomp,
                             reactrpair=self.reactrpair,
                             rpairreact=self.rpairreact)
        self.result.setMaps(self.pathmap)
class MapsFetcher(BaseKegg):
    '''
    Class MapsFetcher
    Download colored Kegg maps (png or URLs)
    Input: color_objs (KeggColor list), picture, htmls, urls, prefix, legend (file)
    Output: tuple(list of png filenames, list of HTML files, list of URLs)
    '''
    _statusDesc = {0:'Not started',
               1:'Checking connectivity',
               2:'Making room',
               3:'Fetching maps (pictures)',
               4:'Generating interactive web pages'}
    _substatuses = [3]

    def __init__(self, color_objs, pictures=True, html=True, prefix='',
                 legend=None, threads=40, keeptrying=False,
                 queue=None):
        # A fresh Queue per call: a Queue default argument would be
        # shared by every instance built with the default value
        if queue is None:
            import queue as _queue
            queue = _queue.Queue()
        BaseKegg.__init__(self, threads=threads, keeptrying=keeptrying,
                          queue=queue)
        self.colors = color_objs
        self.pictures = bool(pictures)
        self.web = bool(html)
        self.legend = legend
        self._keggroom = None
        self._prefix = prefix
        # Outputs
        self.pics = []
        self.webpages = []
        self.pages = []
        self.result = (self.pics, self.webpages, self.pages)

    def makeRoom(self,location=''):
        '''
        Creates a tmp directory in the desired location
        '''
        # KEGG database path
        path = location
        try:
            path = os.path.abspath(location)
            path = os.path.join(path, 'tmp')
            try:
                os.mkdir(path)
            except OSError:
                # already present
                pass
            path = os.path.join(path, 'keggmaps')
            try:
                os.mkdir(path)
            except OSError:
                pass
            path = os.path.join(path, self._prefix)
            self._keggroom = path
            os.mkdir(path)
        except Exception:
            logger.debug('Temporary directory creation failed! %s'
                          %path)

    def copyLegend(self):
        '''Copy the legend in the target directory'''
        if self.legend and os.path.exists(self.legend):
            legend = os.path.join(self._keggroom, 'legend.png')
            shutil.copyfile(self.legend, legend)
            return legend
        return None

    def getMaps(self):
        '''Fetch the colored picture of each pathway in self.colors'''
        self.copyLegend()
        for piece in get_span(self.colors, self.numThreads):
            if self.killed:
                logger.debug('Exiting for a kill signal')
                return
            self.cleanHandlers()
            self._substatus += self.numThreads
            if self._substatus > self._maxsubstatus:
                self._substatus = self._maxsubstatus
            self.updateStatus(sub=True)
            threads = []
            for kmap in piece:
                path = kmap.path
                # Skip the general maps
                if path in avoidedPaths:
                    logger.debug('Skipping general pathway %s'%path)
                    continue
                objs,colors = kmap.getAll()
                dummy,borders = kmap.getBorders()
                obj = threading.Thread(
                        target = self.handlers[piece.index(kmap)].getHTMLColoredPathway,
                        args = (path,objs,colors,borders,))
                obj.start()
                threads.append(obj)
                time.sleep(0.01)
            if len(threads) == 0:
                continue
            while len(threads) > 0:
                for thread in threads:
                    # is_alive(): Thread.isAlive() was removed in Python 3.9
                    if not thread.is_alive():
                        threads.remove(thread)
            for handler in self.handlers:
                if handler.failed:
                    logger.error('KEGG API error, aborting')
                    raise IOError('KEGG API error')
                if not handler.result:
                    logger.debug('Found an empty handler')
                    continue
                fname = os.path.join(self._keggroom,handler.input)
                fname = fname+'.png'
                # Fetch the map picture
                # Hoping it won't change much in the future
                if isinstance(handler.result, bytes):
                    handler.result = handler.result.decode('utf-8')
                for line in handler.result.split('\n'):
                    if ('<img' in line
                        and 'pathwayimage' in line
                        and 'usemap="#mapdata"' in line):
                        urlimage = 'http://www.kegg.jp/' + line.split('src="')[1].split('"')[0]
                        sock=urlopen(urlimage, timeout=30)
                        pic = sock.read()
                        sock.close()
                        fOut = open(fname,'wb')
                        fOut.write(pic)
                        fOut.close()
                        self.pics.append(fname)

    def getWebPages(self):
        '''Write one interactive HTML page per colored pathway map'''
        # TODO: nicer web pages
        legend = self.copyLegend()
        # copyLegend() returns None when no legend was provided: only
        # then skip the legend page (the original crashed on split(None))
        if legend:
            legend = os.path.split(legend)[-1]
            fname = os.path.join(self._keggroom,'legend.html')
            fOut = open(fname,'w')
            fOut.write('<html>\n<head></head>\n<body>\n')
            fOut.write('''<div align="center">
            <img src="./%s" />
            </div>\n'''%
            (legend))
            fOut.write('</body>\n</html>')
            fOut.close()
        for myindex, path in enumerate(self.colors):
            logger.debug('Writing interactive web page for %s'%path.path)
            fname = os.path.join(self._keggroom,path.path)
            fname = fname+'.html'
            fOut = open(fname,'w')
            fOut.write('<html>\n%s\n<body>\n'%kheader)
            # Navigation
            fOut.write('''<h2 align="center">\n''')
            fOut.write('''<a href="./%s.html">«</a>%s'''%(
                        self.colors[myindex-1].path, path.path))
            try:
                nxt = self.colors[myindex+1]
            except IndexError:
                # wrap around to the first map
                nxt = self.colors[0]
            fOut.write('''<a href="./%s.html">»</a>\n</h2>\n'''%
                        (nxt.path))
            if legend:
                fOut.write('''<h3 align="center">
                <a href="./legend.html">Color scheme</a>
                </h3>\n''')
            fOut.write('''<div align="center">
            <img src="./%s" usemap="#mapdata" border="0" />
            </div>\n'''%
            (path.path+'.png'))
            html = path.htmlmap.split('\n')
            newhtml = []
            for line in html:
                line = line.replace('href="/dbget-bin/www_bget?',
                    'target="_blank" href="http://www.genome.jp/dbget-bin/www_bget?')
                if '/kegg-bin/show_pathway?' in line:
                    s = line.split('/kegg-bin/show_pathway?')
                    s1 = s[1].split('"')
                    s1[0] += '.html'
                    line1 = '"'.join(s1)
                    line = './'.join([s[0]] + [line1])
                newhtml.append(line)
            fOut.write('%s\n'%'\n'.join(newhtml))
            fOut.write('<div id="poplay" class="poplay" />\n</body>\n</html>')
            fOut.close()
            self.webpages.append(fname)

    def run(self):
        '''Check connectivity, make room, then fetch pictures/web pages'''
        self.updateStatus()
        try:
            self.checkConnection()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.updateStatus()
        self.makeRoom()
        if self.killed:
            return
        # ':' bugfix
        # the ':' char causes various problems in windows folders
        for path in self.colors:
            if ':' in path.path:
                path.path = path.path.split(':')[1]
        if self.pictures:
            self._maxsubstatus = len(self.colors)
            self.updateStatus()
            try:
                self.getMaps()
            except Exception as e:
                self.sendFailure(str(e))
                return
            self.cleanHandlers()
            self.resetSubStatus()
        else:
            self.updateStatus(send=False)
        if self.killed:
            return
        if self.web:
            self.updateStatus()
            try:
                self.getWebPages()
            except Exception as e:
                self.sendFailure(str(e))
                return
        else:
            self.updateStatus(send=False)
class KeggNet(BaseMapper):
    '''
    Fetch as much details as possible from the KEGG database,
    starting from the pathways list
    '''
    _statusDesc = {0:'Not started',
               1:'Checking connectivity',
               2:'Fetching pathways',
               3:'Fetching compounds',
               4:'Fetching compounds - reactions links',
               5:'Fetching reactions',
               6:'Fetching rpairs',
               7:'Fetching reactions - compounds links',
               8:'Fetching details on KEGG entries',
               9:'Crafting results'}
    _substatuses = [3,4,5,6,7,8]

    def __init__(self, threads=40, avoid=None, keeptrying=False,
                 queue=None):
        '''
        avoid: KEGG IDs that must not be fetched
        queue: fresh mutable defaults are created per call (list/Queue
               defaults would be shared by every instance built with them)
        '''
        if avoid is None:
            avoid = []
        if queue is None:
            import queue as _queue
            queue = _queue.Queue()
        BaseMapper.__init__(self, threads=threads, avoid=avoid,
                            keeptrying=keeptrying, queue=queue)

    def getAllPathways(self):
        '''
        Get all the available pathway IDs
        '''
        kegg = KeggAPI()
        kegg.getIDListFromDB('pathway')
        for p in kegg.result:
            self.pathdet[p] = None

    def run(self):
        '''Walk the whole KEGG network, step by step, then craft the result'''
        self.updateStatus()
        try:
            self.checkConnection()
        except Exception as e:
            self.sendFailure(str(e))
            return
        # Get pathways
        self.updateStatus()
        try:
            self.getAllPathways()
        except Exception as e:
            self.sendFailure(str(e))
            return
        if self.killed:
            return
        # Compounds
        self._maxsubstatus = len(self.pathdet)
        self.updateStatus()
        try:
            self.getPathCompounds()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Reactions for each compound
        self._maxsubstatus = len(self.compdet)
        self.updateStatus()
        try:
            self.getCompoundReacts()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Reactions
        self._maxsubstatus = len(self.pathdet)
        self.updateStatus()
        try:
            self.getPathReactions()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Related rpairs
        self._maxsubstatus = len(self.reactdet)
        self.updateStatus()
        logger.warning('Using RCLASS attribute of KEGG reactions, as the RPAIR database has now been discontinued')
        try:
            self.getReactRPairs()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Compounds for each reaction
        self._maxsubstatus = len(self.reactdet)
        self.updateStatus()
        try:
            self.getReactCompounds()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Details
        # Pathway details
        self._maxsubstatus = len(self.pathdet)
        self.updateStatus()
        try:
            self.getPathDetails()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Pathway HTML maps (!!!)
        self._maxsubstatus = len(self.pathdet)
        try:
            self.getMapsDetails()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Reaction details
        self._maxsubstatus = len(self.reactdet)
        try:
            self.getReactDetails()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Compound details
        self._maxsubstatus = len(self.compdet)
        try:
            self.getCompDetails()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # RPair details
        self._maxsubstatus = len(self.rpairdet)
        logger.warning('Using RCLASS attribute of KEGG reactions, as the RPAIR database has now been discontinued')
        try:
            self.getRPairDetails()
        except Exception as e:
            self.sendFailure(str(e))
            return
        self.cleanHandlers()
        self.resetSubStatus()
        if self.killed:
            return
        # Prepare the output object
        self.updateStatus()
        self.result = KeggDetails()
        self.result.setDetails(None, self.reactdet,
                               self.compdet, self.pathdet, self.rpairdet)
        self.result.setLinks(pathreact=self.pathreact,
                             pathcomp=self.pathcomp, reactcomp=self.reactcomp,
                             compreact=self.compreact,
                             rpairreact=self.rpairreact,
                             reactrpair=self.reactrpair)
        self.result.setMaps(self.pathmap)
| bsd-2-clause |
fsbr/se3-path-planner | modularPlanner/vscm.py | 1 | 6595 |
# coding: utf-8
# In this file, I try yet again to make a very simple cusp model. After that, I will try to expand it. This cusp model will literally be the simplest model possible that also includes dipole tilt.
#
# $\phi_{cusp} = \phi_{0} + \psi$.
#
# Then, I'll build up the model a little bit more and a little bit more etc.
#
# I'll use the Niehoff model for the dipole tilt. I'll use $\phi_{0} = 0.24$ rad $\sim 78^{\circ}$
#
# Things to consider.
#
# 1. Datatypes. Just make everything a np.array()
# 2. The truth is the cusp model is a polar function so the MIDPOINT of the cusp may not be exactly what it's predicting. But the other truth is, even after a few months of trying to understand this, I still feel like I don't fully understand it.
#
#
#
# In[1]:
import tsyganenko as tsyg
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from spacepy import coordinates as coord
import spacepy.time as spt
from spacepy.time import Ticktock
import datetime as dt
from mpl_toolkits.mplot3d import Axes3D
import sys
Re = 6371
earth_radius_ax = 1.5*Re #km
#adding the year data here so I don't have to crush my github repo
pathname = '../../data-se3-path-planner/yearData/batch2018/'
# pathname = '../../batch2015/'
sys.path.append(pathname)
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec']
inclinations = [i for i in range(0,90,2)]
# In[2]:
def getColorMapSimple(filename):
    """Count modelled cusp crossings for one GMAT orbit file.

    Reads spacecraft GSE coordinates from *filename*, interpolates them onto
    a 10x finer time grid, converts to SM/GSM coordinates, evaluates a simple
    dipole-tilt cusp model, and counts entries of the spacecraft into a
    +/- 2 degree latitude/longitude box around the modelled cusp position.

    Parameters
    ----------
    filename : str
        Path to a GMAT results CSV containing the columns
        'DefaultSC.A1ModJulian' and 'DefaultSC.gse.X/Y/Z'.

    Returns
    -------
    int
        Number of 0 -> 1 transitions into the cusp region.
    """
    df = pd.read_csv(filename)
    # GMAT's A1ModJulian epoch differs from the standard MJD epoch by this offset.
    GMAT_MJD_OFFSET = 29999.5
    t = df['DefaultSC.A1ModJulian'] + GMAT_MJD_OFFSET
    x = df['DefaultSC.gse.X']
    y = df['DefaultSC.gse.Y']
    z = df['DefaultSC.gse.Z']
    # adding interpolation: resample position onto a 10x finer time grid so
    # brief cusp encounters are not missed between samples
    tStart = t[0]
    tEnd = t[len(t)-1]
    tInterval = (t[1]-t[0])/10
    t_0 = t
    t = np.arange(tStart, tEnd, tInterval)
    x = np.interp(t, t_0, x)
    y = np.interp(t, t_0, y)
    z = np.interp(t, t_0, z)
    # keep plain ndarray copies for the radial-distance computation below
    xa = np.array(x)
    ya = np.array(y)
    za = np.array(z)
    print(za)
    ta = np.array(t)
    # build a spacepy coordinate object in GSE cartesian and convert to SM
    spacecraft = coord.Coords([[i, j, k] for i, j, k in zip(x, y, z)], 'GSE', 'car')
    spacecraft.ticks = Ticktock(t, 'MJD')
    spacecraft = spacecraft.convert('SM', 'car')
    points = 10000
    # this figure validates what I already expected
    # fig = plt.figure()
    # ax = fig.add_subplot(111,projection='3d')
    # ax.plot(spacecraft.x[:points],spacecraft.y[:points],spacecraft.z[:points])
    # plt.title('SM Orbit')
    # ax.set_xlabel('x')
    # ax.set_ylabel('y')
    # ax.set_zlabel('z')
    # plt.show()
    # In[3]:
    # this is the part where I actually do it
    # phi_0 ~ 0.24 rad ~ 78 deg colatitude of the quiet-time cusp
    phi_0 = 0.24
    psi = tsyg.getTilt(t)
    # dude if i made a parthenthetical error like this ill be really sad
    # plt.plot(spacecraft.ticks.MJD, 90 - (phi_0 + psi))
    # plt.show()
    # In[4]:
    # radial distance in Earth radii
    r = np.sqrt(xa**2 + ya**2 + za**2)/Re
    print("r equals to", r)
    psi = np.deg2rad(psi)
    psi = np.array(psi)
    # cusp model parameters (tilt-dependent colatitude correction)
    phi_0 = 0.24
    alpha1 = 0.1287
    alpha2 = 0.0314
    phi_1 = phi_0 - (alpha1*psi + alpha2*psi**2)
    # NOTE(review): phi_c mixes np.rad2deg output (degrees) with psi, which
    # was just converted to radians above -- confirm the intended units.
    phi_c = np.rad2deg(np.arcsin((np.sqrt(r))/(np.sqrt(r + (1/np.sin(phi_1))**2 - 1))) ) + psi
    #phi = 90-(phi+psi)
    lat = 90 - phi_c
    # cusp longitude assumed fixed at 0 in this simple model
    lon = np.array(np.zeros(len(spacecraft.ticks.MJD)))
    print(lon)
    # plt.plot(t,lat)
    # plt.title('Cusp Latitude vs. MJD day')
    # plt.xlabel('MJD Day')
    # plt.ylabel('Cusp Lat, Deg')
    # plt.show()
    # In[5]:
    # LATITUDE
    # working config SM
    spacecraft_sm = spacecraft.convert('GSM', 'sph')
    # plt.plot(spacecraft_sm.ticks.MJD, spacecraft_sm.lati)
    # plt.plot(spacecraft_sm.ticks.MJD, lat)
    # plt.title('Spacecraft Lat and Cusp Latitude vs MJD time')
    # plt.show()
    # In[6]:
    # LONGITUDE
    # try to avoid using the [:points] way except for spot checking
    # kind of interested in a macro effect
    # plt.plot(spacecraft_sm.ticks.MJD, spacecraft_sm.long)
    # plt.plot(spacecraft_sm.ticks.MJD, lon)
    # plt.title('Spacecraft and Cusp Longitude vs. Time')
    # plt.show()
    # In[7]:
    # NOTE(review): `count` is never populated; only `region`/`c` are used.
    count = []
    region = []
    c = 0
    # mark each sample as inside (1) / outside (0) the +/- 2 deg cusp box
    for satlat, cusplat, satlon, cusplon in zip(spacecraft_sm.lati, lat, spacecraft_sm.long, lon):
        # 0<=cusplon<180
        if abs(satlat - cusplat) <= 2 and abs(satlon-cusplon) <= 2:
            # right now i'm using +/- 2 deg for the latitude,
            # and +/- 2 deg for the longitude
            # c+=1
            region.append(1)
        else:
            region.append(0)
    # count rising edges (outside -> inside transitions) as crossings
    for x, x1 in zip(region, region[1:]):
        if x == 0 and x1 == 1:
            c += 1
        else:
            pass
    # plt.plot(spacecraft_sm.ticks.MJD, count)
    # plt.xlabel('MJD tick')
    # plt.ylabel('cusp crossings')
    # plt.title('Cusp Crossings vs. MJD ticks')
    #plt.xlim([58700, 58800])
    # plt.show()
    print("cusp crossings", c)
    return c
# Build the crossings matrix: one row per inclination, one column per start
# month, by running the cusp-crossing analysis on every results file.
cma2 = [[getColorMapSimple(pathname+month+str(inclination)+'_results.csv') for month in months] for inclination in inclinations]
# NOTE(review): this file handle is never closed; a `with` block would be safer.
f = open('cma2.dat', 'w')
print(cma2, file=f)

if __name__ == "__main__":
    # earlier colormap attempt, kept for reference
    # cdict = {'red': ((0.0, 0.0, 0.0),
    #                  (0.5, 1.0, 0.7),
    #                  (1.0, 1.0, 1.0)),
    #          'green': ((0.0, 0.0, 0.0),
    #                    (0.5, 1.0, 0.0),
    #                    (1.0, 1.0, 1.0)),
    #          'blue': ((0.0, 0.0, 0.0),
    #                   (0.5, 1.0, 0.0),
    #                   (1.0, 0.5, 1.0))}
    # piecewise-linear colormap segments: (position, value_below, value_above)
    cdict = {'red': ((0.0, 0.0, 0.0),
                     (0.1, 0.5, 0.5),
                     (0.2, 0.0, 0.0),
                     (0.4, 0.2, 0.2),
                     (0.6, 0.0, 0.0),
                     (0.8, 1.0, 1.0),
                     (1.0, 1.0, 1.0)),
             'green': ((0.0, 0.0, 0.0),
                       (0.1, 0.0, 0.0),
                       (0.2, 0.0, 0.0),
                       (0.4, 1.0, 1.0),
                       (0.6, 1.0, 1.0),
                       (0.8, 1.0, 1.0),
                       (1.0, 0.0, 0.0)),
             'blue': ((0.0, 0.0, 0.0),
                      (0.1, 0.5, 0.5),
                      (0.2, 1.0, 1.0),
                      (0.4, 1.0, 1.0),
                      (0.6, 0.0, 0.0),
                      (0.8, 0.0, 0.0),
                      (1.0, 0.0, 0.0))}
    my_cmap = colors.LinearSegmentedColormap('my_colormap', cdict, 256)
    # heat map of crossings: x = start month, y = inclination
    plt.pcolor(cma2, cmap=my_cmap)
    plt.colorbar()
    plt.xlabel('Start Month')
    # y_labels = [str(i) for i in range(0,90,5)] #8
    # plt.yticks(inclinations,str(inclinations))
    plt.ylabel('Inclinations')
    plt.title('Cusp Crossings Analysis 2015')
    plt.show()
| mit |
jllanfranchi/pygeneric | pandasUtils.py | 1 | 3347 | # -*- coding: iso-8859-15 -*-
import numpy as np
import pandas as pd
import multiprocessing
import pathos.multiprocessing as multi
def pdSafe(s):
    """Transform name into Pandas-safe name (i.e., dot-notation-accessible).

    Deletes shell/markup-special characters and replaces '.' with '_' so the
    result can be used as a DataFrame attribute name.
    """
    # BUG FIX: ``s.translate(None, deletechars)`` is Python-2-only and raises
    # TypeError on Python 3; build an explicit deletion table instead.
    s = s.translate(str.maketrans('', '', '\\/ ?!@#$%^&*()-+=\`~|][{}<>,'))
    s = s.replace('.', '_')
    return s
#def applyParallel(groupedDF, func, cpucount=None, chunksize=None):
# '''User Pietro Battiston's solution from
# http://stackoverflow.com/questions/26187759/parallelize-apply-after-pandas-groupby
# '''
# if cpucount is None:
# cpucount = multiprocessing.cpu_count()
# # Python 3 only?: with multiprocessing.Pool(cpucount) as pool:
# pool = multiprocessing.Pool(cpucount)
# #try:
# ret_list = pool.map(func, [group for name, group in groupedDF], chunksize=chunksize)
# #except:
# # pool.terminate()
# # raise
# return pd.concat(ret_list)
def applyParallel(groupedDF, func, cpucount=None, chunksize=None, nice=False):
    '''Apply ``func`` to each group of a grouped DataFrame in parallel.

    Combination of user Pietro Battiston's solution from
    http://stackoverflow.com/a/29281494
    and Mike McKerns answer at http://stackoverflow.com/a/21345423

    This requires that the func return a DataFrame (or indexed Series, if
    that's even possible?)

    Parameters
    ----------
    groupedDF : pandas GroupBy object
        The groups to process; one task is submitted per group.
    func : callable
        Applied to each group's DataFrame; must return a DataFrame.
    cpucount : int or None
        Worker count; derived from the machine's CPU count when None.
    chunksize : int or None
        Passed through to the pool's ``imap``.
    nice : bool
        When True (and cpucount is None) leave ~15% of CPUs free.

    Returns
    -------
    pandas.DataFrame
        Concatenation of the per-group results, in group order.
    '''
    if cpucount is None:
        if nice:
            # Be nice, taking over most but not all of available physical CPU's
            cpucount = int(np.floor(multiprocessing.cpu_count()*0.85))
        else:
            # Be greedy, trying to take over *all* available CPU's
            cpucount = int(np.ceil(multiprocessing.cpu_count()))
    with multi.ProcessingPool(cpucount) as pool:
        # BUG FIX: a second ProcessingPool used to be created here, shadowing
        # the context-managed pool and leaking its workers.
        ret_list = pool.imap(func,
                             [group for name, group in groupedDF],
                             chunksize=chunksize)
        # Concatenate inside the `with` so the lazy imap iterator is fully
        # consumed while the pool is still alive.
        return pd.concat(ret_list)
def applymapParallel(groupedDF, func, cpucount=None, chunksize=None):
    '''Combination of user Pietro Battiston's solution from
    http://stackoverflow.com/a/29281494
    and Mike McKerns answer at http://stackoverflow.com/a/21345423

    This differs from applyParallel in that the func should only return a
    scalar or vector (unindexed) result.

    TODO: make this work (only roughed out thus far!)
    '''
    # Deliberately disabled: everything below this raise is an unfinished
    # sketch and is unreachable until the raise is removed.
    raise NotImplementedError('This function has yet to be fully fleshed out.')

    def metafunc(func):
        # Wrap func so each result is keyed by its group label.
        def dropinfunc(idx_grp):
            return {idx_grp[0]: func(idx_grp[1])}
    if cpucount is None:
        #cpucount = int(np.ceil(multiprocessing.cpu_count()/2.0*0.75))
        cpucount = int(np.ceil(multiprocessing.cpu_count()))
    with multi.ProcessingPool(cpucount) as pool:
        # NOTE(review): this re-creates a second pool, discarding the
        # context-managed one -- fix before enabling this function.
        pool = multi.ProcessingPool(cpucount)
        # The following MUST be one of the ordered pool methods (either `map`
        # or `imap`), # or else reattaching an index will be arbitrary
        ret_list = pool.map(dropinfunc, [idx_grp for idx_grp in groupedDF], chunksize=chunksize)
    if isinstance(ret_list[0], pd.Series):
        outDF = pd.concat(ret_list, axis=1).T
        # TODO: give it an index!
    elif isinstance(ret_list[0], pd.DataFrame):
        return pd.concat(ret_list)
| mit |
apdavison/python-neo | doc/source/images/generate_diagram.py | 3 | 7863 | """
This generate diagram in .png and .svg from neo.core
Author: sgarcia
"""
from datetime import datetime
import numpy as np
import quantities as pq
from matplotlib import pyplot
from matplotlib.patches import Rectangle, ArrowStyle, FancyArrowPatch
from matplotlib.font_manager import FontProperties
from neo.test.generate_datasets import fake_neo
line_heigth = .22
fontsize = 10.5
left_text_shift = .1
dpi = 100
def get_rect_height(name, obj):
    '''
    Compute the drawing height of the class rectangle for *obj*.

    The height is one line per attribute and per single/multi child
    relationship, plus 1.5 lines for the title band, all scaled by the
    module-level ``line_heigth`` constant. *name* is accepted for interface
    symmetry but not used.
    '''
    n_lines = 1.5
    for meta_attr in ('_all_attrs', '_single_child_objects',
                      '_multi_child_objects'):
        n_lines += len(getattr(obj, meta_attr, []))
    return n_lines * line_heigth
def annotate(ax, coord1, coord2, connectionstyle, color, alpha):
    """Draw a fancy connection arrow on *ax* from *coord2* to *coord1*.

    The arrow uses the given matplotlib *connectionstyle* (an arc spec) and
    is pushed behind the class boxes so it never covers their text.
    """
    arrow_props = dict(arrowstyle='fancy',
                       shrinkA=.3, shrinkB=.3,
                       fc=color, ec=color,
                       connectionstyle=connectionstyle,
                       alpha=alpha)
    box_props = dict(boxstyle="square", fc="w")
    arrow = ax.annotate('', coord1, coord2,
                        ha="right", va="center",
                        size=fontsize,
                        arrowprops=arrow_props,
                        bbox=box_props)
    # render behind the rectangles and their labels
    arrow.set_zorder(-4)
def calc_coordinates(pos, height):
    """Return the arrow anchor point for a rectangle.

    The anchor sits on the rectangle's left edge, vertically centered on the
    title band (half a line below the top).
    """
    # IDIOM FIX: removed the unused local `x` (the value was recomputed in
    # the return statement anyway).
    y = pos[1] + height - line_heigth * .5
    return pos[0], y
def generate_diagram(rect_pos, rect_width, figsize):
    """Draw the neo class diagram and return the matplotlib figure.

    Parameters
    ----------
    rect_pos : dict
        Maps a neo class name to the (x, y) position of its rectangle's
        lower-left corner, in axis data units.
    rect_width : float
        Width of every class rectangle.
    figsize : tuple
        (width, height) of the figure; also used as the axis limits.

    Returns
    -------
    matplotlib Figure with one annotated rectangle per class and colored
    arrows for the relationships between them.
    """
    rw = rect_width
    fig = pyplot.figure(figsize=figsize)
    ax = fig.add_axes([0, 0, 1, 1])
    all_h = {}
    objs = {}
    # Instantiate a fake (non-cascading) object per class so its
    # attribute/relationship metadata can be read, and precompute heights.
    for name in rect_pos:
        objs[name] = fake_neo(name, cascade=False)
        all_h[name] = get_rect_height(name, objs[name])
    # draw connections
    # one color/alpha per relationship kind:
    # single-child (cyan), multi-child (magenta), child-property (faded yellow)
    color = ['c', 'm', 'y']
    alpha = [1., 1., 0.3]
    for name, pos in rect_pos.items():
        obj = objs[name]
        relationships = [getattr(obj, '_single_child_objects', []),
                         getattr(obj, '_multi_child_objects', []),
                         getattr(obj, '_child_properties', [])]
        for r in range(3):
            for ch_name in relationships[r]:
                if ch_name not in rect_pos:
                    # child class not shown in this diagram
                    continue
                x1, y1 = calc_coordinates(rect_pos[ch_name], all_h[ch_name])
                x2, y2 = calc_coordinates(pos, all_h[name])
                if r in [0, 2]:
                    # attach to the parent box's right edge, gentle arc
                    x2 += rect_width
                    connectionstyle = "arc3,rad=-0.2"
                elif y2 >= y1:
                    connectionstyle = "arc3,rad=0.7"
                else:
                    connectionstyle = "arc3,rad=-0.7"
                annotate(ax=ax, coord1=(x1, y1), coord2=(x2, y2),
                         connectionstyle=connectionstyle,
                         color=color[r], alpha=alpha[r])
    # draw boxes
    for name, pos in rect_pos.items():
        htotal = all_h[name]
        obj = objs[name]
        allrelationship = list(getattr(obj, '_child_containers', []))
        # outer class rectangle
        rect = Rectangle(pos, rect_width, htotal,
                         facecolor='w', edgecolor='k', linewidth=2.)
        ax.add_patch(rect)
        # title band (green)
        pos2 = pos[0], pos[1] + htotal - line_heigth * 1.5
        rect = Rectangle(pos2, rect_width, line_heigth * 1.5,
                         facecolor='g', edgecolor='k', alpha=.5, linewidth=2.)
        ax.add_patch(rect)
        # single relationship band (cyan)
        relationship = getattr(obj, '_single_child_objects', [])
        pos2 = pos[1] + htotal - line_heigth * (1.5 + len(relationship))
        rect_height = len(relationship) * line_heigth
        rect = Rectangle((pos[0], pos2), rect_width, rect_height,
                         facecolor='c', edgecolor='k', alpha=.5)
        ax.add_patch(rect)
        # multi relationship band (magenta), stacked below the single band
        relationship = list(getattr(obj, '_multi_child_objects', []))
        pos2 = (pos[1] + htotal - line_heigth * (1.5 + len(relationship))
                - rect_height)
        rect_height = len(relationship) * line_heigth
        rect = Rectangle((pos[0], pos2), rect_width, rect_height,
                         facecolor='m', edgecolor='k', alpha=.5)
        ax.add_patch(rect)
        # necessary attributes band (red)
        pos2 = (pos[1] + htotal
                - line_heigth * (1.5 + len(allrelationship) + len(obj._necessary_attrs)))
        rect = Rectangle((pos[0], pos2), rect_width,
                         line_heigth * len(obj._necessary_attrs),
                         facecolor='r', edgecolor='k', alpha=.5)
        ax.add_patch(rect)
        # class name in the title band; '* ' marks quantity-backed classes
        if hasattr(obj, '_quantity_attr'):
            post = '* '
        else:
            post = ''
        ax.text(pos[0] + rect_width / 2., pos[1] + htotal - line_heigth * 1.5 / 2.,
                name + post,
                horizontalalignment='center', verticalalignment='center',
                fontsize=fontsize + 2,
                fontproperties=FontProperties(weight='bold'),
                )
        # one text line per child container
        for i, relat in enumerate(allrelationship):
            ax.text(pos[0] + left_text_shift, pos[1] + htotal - line_heigth * (i + 2),
                    relat + ': list',
                    horizontalalignment='left', verticalalignment='center',
                    fontsize=fontsize,
                    )
        # one text line per attribute, with a short type description
        for i, attr in enumerate(obj._all_attrs):
            attrname, attrtype = attr[0], attr[1]
            t1 = attrname
            if (hasattr(obj, '_quantity_attr')
                    and obj._quantity_attr == attrname):
                t1 = attrname + '(object itself)'
            else:
                t1 = attrname
            if attrtype == pq.Quantity:
                if attr[2] == 0:
                    t2 = 'Quantity scalar'
                else:
                    t2 = 'Quantity %dD' % attr[2]
            elif attrtype == np.ndarray:
                t2 = "np.ndarray %dD dt='%s'" % (attr[2], attr[3].kind)
            elif attrtype == datetime:
                t2 = 'datetime'
            else:
                t2 = attrtype.__name__
            t = t1 + ' : ' + t2
            ax.text(pos[0] + left_text_shift,
                    pos[1] + htotal - line_heigth * (i + len(allrelationship) + 2),
                    t,
                    horizontalalignment='left', verticalalignment='center',
                    fontsize=fontsize,
                    )
    xlim, ylim = figsize
    ax.set_xlim(0, xlim)
    ax.set_ylim(0, ylim)
    ax.set_xticks([])
    ax.set_yticks([])
    return fig
def generate_diagram_simple():
    """Lay out the simplified neo-core diagram and save it as png and svg."""
    figsize = (18, 12)
    rect_width = 3.
    # horizontal distance between column centers (width * 1.2 blank factor)
    col_step = rect_width * 1.2

    def col(n):
        """x position of column *n*."""
        return .5 + col_step * n

    rect_pos = {
        # column 0
        'Block': (col(0), 4),
        # column 1
        'Segment': (col(1), .5),
        'Group': (col(1), 6.5),
        # column 2: not done for now, too complicated with our object generator
        # 'ChannelView': (col(2), 5),
        # column 2.5
        'ImageSequence': (col(2.5), 3.0),
        'SpikeTrain': (col(2.5), 0.5),
        # column 3
        'IrregularlySampledSignal': (col(3), 9),
        'AnalogSignal': (col(3), 7.),
        # column 4
        'Event': (col(4), 3.0),
        'Epoch': (col(4), 1.0),
    }
    # todo: add ImageSequence, RegionOfInterest
    fig = generate_diagram(rect_pos, rect_width, figsize)
    fig.savefig('simple_generated_diagram.png', dpi=dpi)
    fig.savefig('simple_generated_diagram.svg', dpi=dpi)
if __name__ == '__main__':
    # Regenerate the diagram images and display them when run as a script.
    generate_diagram_simple()
    pyplot.show()
| bsd-3-clause |
aestrivex/ielu | ielu/plotting_utils.py | 1 | 6230 |
import os
import numpy as np
import nibabel as nib
from traits.api import HasTraits, Float, Int, Tuple
from traitsui.api import View, Item, CSVListEditor
from .geometry import get_vox2rasxfm, apply_affine, get_std_orientation
from .utils import get_subjects_dir
def force_render(figure=None):
    """Force an immediate, synchronous redraw of a mayavi *figure*.

    Renders the scene, flushes mlab's draw queue, then pumps the GUI event
    loop twice (briefly flipping the busy flag) so the update actually
    appears on screen even while other work is in progress.
    """
    from mayavi import mlab
    figure.scene.render()
    mlab.draw(figure=figure)
    from pyface.api import GUI
    _gui = GUI()
    # remember the busy state so it can be restored after event processing
    orig_val = _gui.busy
    _gui.set_busy(busy=True)
    _gui.process_events()
    _gui.set_busy(busy=orig_val)
    _gui.process_events()
def coronal_slice(elecs, start=None, end=None, outfile=None,
                  subjects_dir=None,
                  subject=None, reorient2std=True, dpi=150, size=(200, 200),
                  title=None):
    '''
    create an image of a coronal slice which serves as a guesstimate of a
    depth lead inserted laterally and nonvaryingly in the Y axis

    plot the electrodes from the lead overlaid on the slice in the X and Z
    directions

    Parameters
    ----------
    elecs : List( Electrode )
        list of electrode objects forming this depth lead
    start : Electrode
        Electrode object at one end of the depth lead
    end : Electrode
        Electrode object at the other end of the depth lead
    outfile : Str
        Filename to save the image to
    subjects_dir : Str | None
        The freesurfer subjects_dir. If this is None, it is assumed to be the
        $SUBJECTS_DIR environment variable. If this folder is not writable,
        the program will crash.
    subject : Str | None
        The freesurfer subject. If this is None, it is assumed to be the
        $SUBJECT environment variable.
    reorient2std : Bool
        Apply a matrix to rotate orig.mgz to the standard MNI orientation
        emulating fslreorient2std. Pretty much always true here.
    dpi : Int
        Dots per inch of output image
    size : Tuple
        Specify a 2-tuple to control the image size, default is (200,200)
    title : Str
        Specify a matplotlib title

    Returns
    -------
    matplotlib figure showing the (possibly oblique) coronal slice with the
    electrode positions scattered on top.
    '''
    print('creating coronal slice with start electrodes %s' % str(start))
    subjdir_subj = get_subjects_dir(subjects_dir=subjects_dir,
                                    subject=subject)
    orig = os.path.join(subjdir_subj, 'mri', 'orig.mgz')
    x_size, y_size, z_size = nib.load(orig).shape
    # vox2ras and ras2vox shouldnt have different procedures for
    # getting the different dimensions. the matrix showing those
    # dimensions has the correct dimensions by inversion beforehand
    # in the complex 3-way case
    vox2ras = get_vox2rasxfm(orig, stem='vox2ras')
    ras2vox = np.linalg.inv(vox2ras)
    # re-center the inverse transform on the volume midpoint
    ras2vox[0:3, 3] = (x_size/2, y_size/2, z_size/2)
    # rd/ad/sd: axis indices of the R, A, S anatomical directions
    rd, ad, sd = get_std_orientation(ras2vox)
    r_size = [x_size, y_size, z_size][rd]
    a_size = [x_size, y_size, z_size][ad]
    s_size = [x_size, y_size, z_size][sd]
    # electrode RAS coordinates -> voxel indices
    electrodes = np.squeeze([apply_affine([e.asras()], ras2vox)
                             for e in elecs])
    vol = np.transpose(nib.load(orig).get_data(), (rd, ad, sd))
    if start is not None and end is not None:
        start_coord = np.squeeze(apply_affine([start.asras()], ras2vox))
        end_coord = np.squeeze(apply_affine([end.asras()], ras2vox))
        if start_coord[rd] == end_coord[rd]:
            raise ValueError('This lead has no variation in the X axis. It shouldnt be displayed coronally')
        # oblique slice following the lead: A = m*R + b
        # (renamed from `slice`, which shadowed the builtin)
        slice_img = np.zeros((s_size, r_size))
        m = (start_coord[ad]-end_coord[ad])/(start_coord[rd]-end_coord[rd])
        b = start_coord[ad]-m*start_coord[rd]
        rnew = np.arange(r_size)
        anew = m*rnew+b
        # BUG FIX: np.floor returns floats; cast to int so the indexing
        # below is legal on modern numpy (float indices raise IndexError).
        alower = np.floor(anew).astype(int)
        afrac = np.mod(anew, 1)
        try:
            # linearly interpolate between the two neighboring A-planes
            for rvox in rnew:
                slice_img[:, rvox] = (vol[rvox, alower[rvox], :] *
                                      (1-afrac[rvox]) +
                                      vol[rvox, alower[rvox]+1, :] *
                                      afrac[rvox])
        except IndexError:
            raise ValueError('This lead has minimal variation in the X axis. It shouldnt be displayed coronally')
    else:
        # BUG FIX: the mean is a float; round to the nearest voxel index
        # before slicing (float indices raise IndexError on modern numpy).
        slice_nr = int(np.round(np.mean(electrodes[:, ad])))
        slice_img = vol[:, slice_nr, :].T
    # project electrode RAS positions into 2D pixel coordinates (R, S plane)
    vox2pix = np.zeros((2, 4))
    vox2pix[0, rd] = 1
    vox2pix[1, sd] = 1
    ras2pix = np.dot(vox2pix, ras2vox)
    pix = np.dot(ras2pix,
                 np.transpose([np.append(e.asras(), 1) for e in elecs]))
    # add data to coronal plane
    import pylab as pl
    fig = pl.figure()
    pl.imshow(slice_img, cmap='gray')
    pl.scatter(pix[0, :], pix[1, :], s=10, c='red', edgecolor='yellow',
               linewidths=0.4)
    if title is not None:
        pl.title(title)
    pl.axis('off')
    if outfile is not None:
        pl.savefig(outfile, dpi=dpi)
    return fig
def sequence_3d_images(figure):
    """Yield callables that rotate *figure* through five canonical views.

    Each yielded callable sets one (azimuth, elevation) camera angle on the
    given mayavi figure when invoked.
    """
    from mayavi import mlab
    # (azimuth, elevation) pairs: front, back, top, and both sides
    camera_angles = [(0, 90), (180, 90), (0, 0), (90, 90), (270, 90)]
    for az, el in camera_angles:
        # bind the angles as defaults so each callable keeps its own pair
        yield lambda az=az, el=el: mlab.view(azimuth=az, elevation=el,
                                             figure=figure)
def save_opaque_clinical_sequence(savefile, mayavi_figure):
    """Render the five canonical views of *mayavi_figure* into a PDF.

    Each view becomes one page of the multi-page PDF written to *savefile*.
    """
    import pylab as pl
    from matplotlib.backends.backend_pdf import PdfPages
    from mayavi import mlab
    with PdfPages(savefile) as pdf:
        for set_view in sequence_3d_images(mayavi_figure):
            # rotate the camera, force a redraw, then grab the pixels
            set_view()
            force_render(figure=mayavi_figure)
            screenshot = mlab.screenshot(figure=mayavi_figure)
            page = pl.figure()
            pl.imshow(screenshot, figure=page)
            pdf.savefig(page)
| gpl-3.0 |
ankurankan/pgmpy | pgmpy/inference/bn_inference.py | 2 | 8890 | from pgmpy.inference import Inference
from pgmpy.models import BayesianNetwork
import pandas as pd
import numpy as np
import networkx as nx
import itertools
class BayesianModelInference(Inference):
    """
    Inference class specific to Bayesian Models
    """

    def __init__(self, model):
        """
        Class to calculate probability (pmf) values specific to Bayesian Models

        Parameters
        ----------
        model: Bayesian Model
            model on which inference queries will be computed

        Raises
        ------
        TypeError
            If ``model`` is not a BayesianNetwork.
        """
        if not isinstance(model, BayesianNetwork):
            raise TypeError(
                "Model expected type: BayesianNetwork, got type: ", type(model)
            )
        super(BayesianModelInference, self).__init__(model)
        self._initialize_structures()

        # Topological order of the nodes; used as the default variable
        # ordering when evaluating probabilities.
        self.topological_order = list(nx.topological_sort(model))

    def pre_compute_reduce(self, variable):
        """
        Get probability arrays for a node as function of conditional dependencies

        Internal function used for Bayesian networks, eg. in BayesianModelSampling
        and BayesianModelProbability.

        Parameters
        ----------
        variable: Bayesian Model Node
            node of the Bayesian network

        Returns
        -------
        dict: dictionary with probability array for node
            as function of conditional dependency values
        """
        variable_cpd = self.model.get_cpds(variable)
        # conditioning variables of the CPD, reversed so the product below
        # enumerates evidence-state combinations in the CPD's column order
        variable_evid = variable_cpd.variables[:0:-1]
        cached_values = {}

        # cache the reduced CPD values for every evidence-state combination
        for state_combination in itertools.product(
            *[range(self.cardinality[var]) for var in variable_evid]
        ):
            states = list(zip(variable_evid, state_combination))
            cached_values[state_combination] = variable_cpd.reduce(
                states, inplace=False, show_warnings=False
            ).values

        return cached_values

    def pre_compute_reduce_maps(self, variable):
        """
        Get probability array-maps for a node as function of conditional dependencies

        Internal function used for Bayesian networks, eg. in BayesianModelSampling
        and BayesianModelProbability.

        Parameters
        ----------
        variable: Bayesian Model Node
            node of the Bayesian network

        Returns
        -------
        dict: dictionary with probability array-index for node as function of conditional dependency values,
            dictionary with mapping of probability array-index to probability array.
        """
        variable_cpd = self.model.get_cpds(variable)
        variable_evid = variable_cpd.variables[:0:-1]

        # every combination of evidence states, in CPD column order
        state_combinations = [
            tuple(sc)
            for sc in itertools.product(
                *[range(self.cardinality[var]) for var in variable_evid]
            )
        ]
        # reduced probability column for each evidence-state combination
        weights_list = np.array(
            [
                variable_cpd.reduce(
                    list(zip(variable_evid, sc)), inplace=False, show_warnings=False
                ).values
                for sc in state_combinations
            ]
        )

        # deduplicate identical probability columns; `weights_indices[i]` is
        # the row of `unique_weights` for the i-th state combination
        unique_weights, weights_indices = np.unique(
            weights_list, axis=0, return_inverse=True
        )

        # convert weights to index; make mapping of state to index
        state_to_index = dict(zip(state_combinations, weights_indices))

        # make mapping of index to weights
        index_to_weight = dict(enumerate(unique_weights))

        # return mappings of state to index, and index to weight
        return state_to_index, index_to_weight
class BayesianModelProbability(BayesianModelInference):
    """
    Class to calculate probability (pmf) values specific to Bayesian Models
    """

    def __init__(self, model):
        """
        Class to calculate probability (pmf) values specific to Bayesian Models

        Parameters
        ----------
        model: Bayesian Model
            model on which inference queries will be computed
        """
        super(BayesianModelProbability, self).__init__(model)

    def _log_probability_node(self, data, ordering, node):
        """
        Evaluate the log probability of each datapoint for a specific node.

        Internal function used by log_probability().

        Parameters
        ----------
        data: array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        ordering: list
            ordering of columns in data, used by the Bayesian model.
            default is topological ordering used by model.

        node: Bayesian Model Node
            node from the Bayesian network.

        Returns
        -------
        ndarray: having shape (n_samples,)
            The array of log(density) evaluations. These are normalized to be
            probability densities, so values will be low for high-dimensional
            data.
        """

        def vec_translate(a, my_dict):
            # elementwise dict lookup: map state names to state numbers
            return np.vectorize(my_dict.__getitem__)(a)

        cpd = self.model.get_cpds(node)

        # variable to probe: data[n], where n is the node number
        current = cpd.variables[0]
        current_idx = ordering.index(current)
        current_val = data[:, current_idx]
        current_no = vec_translate(current_val, cpd.name_to_no[current])

        # conditional dependencies E of the probed variable
        evidence = cpd.variables[:0:-1]
        evidence_idx = [ordering.index(ev) for ev in evidence]
        evidence_val = data[:, evidence_idx]
        evidence_no = np.empty_like(evidence_val)
        # translate each evidence column's state names to state numbers
        for i, ev in enumerate(evidence):
            evidence_no[:, i] = vec_translate(evidence_val[:, i], cpd.name_to_no[ev])

        if evidence:
            # there are conditional dependencies E for data[n] for this node
            # Here we retrieve the array: p(x[n]|E). We do this for each x in data.
            # We pick the specific node value from the arrays below.
            state_to_index, index_to_weight = self.pre_compute_reduce_maps(
                variable=node
            )
            # look up each sample's probability column via the precomputed
            # maps; np.unique avoids repeating the lookup for duplicate rows
            unique, inverse = np.unique(evidence_no, axis=0, return_inverse=True)
            weights = np.array(
                [index_to_weight[state_to_index[tuple(u)]] for u in unique]
            )[inverse]
        else:
            # there are NO conditional dependencies for this node
            # retrieve array: p(x[n]). We do this for each x in data.
            # We pick the specific node value from the arrays below.
            weights = np.array([cpd.values] * len(data))

        # pick the specific node value x[n] from the array p(x[n]|E) or p(x[n])
        # We do this for each x in data.
        probability_node = np.array([weights[i][cn] for i, cn in enumerate(current_no)])

        return np.log(probability_node)

    def log_probability(self, data, ordering=None):
        """
        Evaluate the logarithmic probability of each point in a data set.

        Parameters
        ----------
        data: pandas dataframe OR array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        ordering: list
            ordering of columns in data, used by the Bayesian model.
            default is topological ordering used by model.

        Returns
        -------
        ndarray: having shape (n_samples,)
            The array of log(density) evaluations. These are normalized to be
            probability densities, so values will be low for high-dimensional
            data.
        """
        if isinstance(data, pd.DataFrame):
            # use numpy array from now on.
            ordering = data.columns.to_list()
            data = data.values
        if ordering is None:
            ordering = self.topological_order

        # joint log-probability is the sum of per-node conditional
        # log-probabilities over the whole network
        logp = np.array(
            [
                self._log_probability_node(data, ordering, node)
                for node in self.topological_order
            ]
        )
        return np.sum(logp, axis=0)

    def score(self, data, ordering=None):
        """
        Compute the total log probability density under the model.

        Parameters
        ----------
        data: pandas dataframe OR array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        ordering: list
            ordering of columns in data, used by the Bayesian model.
            default is topological ordering used by model.

        Returns
        -------
        float: total log-likelihood of the data in data.
            This is normalized to be a probability density, so the value
            will be low for high-dimensional data.
        """
        return np.sum(self.log_probability(data, ordering))
| mit |
walterreade/scikit-learn | sklearn/linear_model/ridge.py | 7 | 49612 | """
Ridge regression
"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Reuben Fletcher-Costin <reuben.fletchercostin@gmail.com>
# Fabian Pedregosa <fabian@fseoane.net>
# Michael Eickenberg <michael.eickenberg@nsup.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import row_norms
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..model_selection import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
    """Solve ridge regression per target with scipy's conjugate gradient.

    For each target column of ``y`` a LinearOperator is built for either the
    primal normal equations (X^T X + alpha*I, when n_samples >= n_features)
    or the dual/kernel form (X X^T + alpha*I, when n_features > n_samples),
    and solved with CG. ``alpha`` holds one penalty per target.

    Returns
    -------
    coefs : ndarray of shape (n_targets, n_features)
    """
    n_samples, n_features = X.shape
    X1 = sp_linalg.aslinearoperator(X)
    coefs = np.empty((y.shape[1], n_features))

    # factory closures: bind the current penalty into the matrix-vector
    # product evaluated by the CG solver
    if n_features > n_samples:
        def create_mv(curr_alpha):
            def _mv(x):
                # dual form: (X X^T + alpha*I) x
                return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
            return _mv
    else:
        def create_mv(curr_alpha):
            def _mv(x):
                # primal form: (X^T X + alpha*I) x
                return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
            return _mv

    for i in range(y.shape[1]):
        y_column = y[:, i]
        mv = create_mv(alpha[i])
        if n_features > n_samples:
            # kernel ridge
            # w = X.T * inv(X X^t + alpha*Id) y
            C = sp_linalg.LinearOperator(
                (n_samples, n_samples), matvec=mv, dtype=X.dtype)
            coef, info = sp_linalg.cg(C, y_column, tol=tol)
            # map the dual solution back to feature space
            coefs[i] = X1.rmatvec(coef)
        else:
            # linear ridge
            # w = inv(X^t X + alpha*Id) * X.T y
            y_column = X1.rmatvec(y_column)
            C = sp_linalg.LinearOperator(
                (n_features, n_features), matvec=mv, dtype=X.dtype)
            coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
                                          tol=tol)
        if info < 0:
            raise ValueError("Failed with error code %d" % info)

        if max_iter is None and info > 0 and verbose:
            warnings.warn("sparse_cg did not converge after %d iterations." %
                          info)

    return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
    """Closed-form ridge solution via the normal equations:
    w = inv(X^t X + alpha*Id) * X.T y

    ``alpha`` holds one penalty per target; when all penalties are equal the
    multi-target problem is solved with a single factorization.

    Returns an array of shape (n_targets, n_features).
    """
    n_samples, n_features = X.shape
    n_targets = y.shape[1]

    A = safe_sparse_dot(X.T, X, dense_output=True)
    Xy = safe_sparse_dot(X.T, y, dense_output=True)

    one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])

    if one_alpha:
        # add alpha to the diagonal in place, then solve all targets at once
        A.flat[::n_features + 1] += alpha[0]
        return linalg.solve(A, Xy, sym_pos=True,
                            overwrite_a=True).T
    else:
        coefs = np.empty([n_targets, n_features])
        for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
            # temporarily add this target's penalty to the diagonal...
            A.flat[::n_features + 1] += current_alpha
            coef[:] = linalg.solve(A, target, sym_pos=True,
                                   overwrite_a=False).ravel()
            # ...and undo it so A can be reused for the next target
            A.flat[::n_features + 1] -= current_alpha
        return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
    """Solve kernel ridge in the dual: dual_coef = inv(K + alpha*Id) y.

    ``K`` may be a precomputed kernel matrix, so sample weights are folded
    in here directly (K is scaled by sqrt-weights on both sides). ``alpha``
    holds one penalty per target. Set ``copy=True`` to leave a user-given K
    untouched. Returns dual coefficients of shape (n_samples, n_targets).
    """
    # dual_coef = inv(X X^t + alpha*Id) y
    n_samples = K.shape[0]
    n_targets = y.shape[1]

    if copy:
        K = K.copy()

    alpha = np.atleast_1d(alpha)
    one_alpha = (alpha == alpha[0]).all()
    has_sw = isinstance(sample_weight, np.ndarray) \
        or sample_weight not in [1.0, None]

    if has_sw:
        # Unlike other solvers, we need to support sample_weight directly
        # because K might be a pre-computed kernel.
        sw = np.sqrt(np.atleast_1d(sample_weight))
        y = y * sw[:, np.newaxis]
        K *= np.outer(sw, sw)

    if one_alpha:
        # Only one penalty, we can solve multi-target problems in one time.
        K.flat[::n_samples + 1] += alpha[0]

        try:
            # Note: we must use overwrite_a=False in order to be able to
            # use the fall-back solution below in case a LinAlgError
            # is raised
            dual_coef = linalg.solve(K, y, sym_pos=True,
                                     overwrite_a=False)
        except np.linalg.LinAlgError:
            warnings.warn("Singular matrix in solving dual problem. Using "
                          "least-squares solution instead.")
            dual_coef = linalg.lstsq(K, y)[0]

        # K is expensive to compute and store in memory so change it back in
        # case it was user-given.
        K.flat[::n_samples + 1] -= alpha[0]

        if has_sw:
            # undo the sqrt-weight scaling on the returned coefficients
            dual_coef *= sw[:, np.newaxis]

        return dual_coef
    else:
        # One penalty per target. We need to solve each target separately.
        dual_coefs = np.empty([n_targets, n_samples])

        for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
            # add this target's penalty to the diagonal, solve, then undo
            K.flat[::n_samples + 1] += current_alpha

            dual_coef[:] = linalg.solve(K, target, sym_pos=True,
                                        overwrite_a=False).ravel()

            K.flat[::n_samples + 1] -= current_alpha

        if has_sw:
            dual_coefs *= sw[np.newaxis, :]

        return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
                     max_iter=None, tol=1e-3, verbose=0, random_state=None,
                     return_n_iter=False, return_intercept=False):
    """Solve the ridge equation by the method of normal equations.
    Read more in the :ref:`User Guide <ridge_regression>`.
    Parameters
    ----------
    X : {array-like, sparse matrix, LinearOperator},
        shape = [n_samples, n_features]
        Training data
    y : array-like, shape = [n_samples] or [n_samples, n_targets]
        Target values
    alpha : {float, array-like},
        shape = [n_targets] if array-like
        The l_2 penalty to be used. If an array is passed, penalties are
        assumed to be specific to targets
    max_iter : int, optional
        Maximum number of iterations for conjugate gradient solver.
        For 'sparse_cg' and 'lsqr' solvers, the default value is determined
        by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
    sample_weight : float or numpy array of shape [n_samples]
        Individual weights for each sample. If sample_weight is not None and
        solver='auto', the solver will be set to 'cholesky'.
        .. versionadded:: 0.17
    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
        Solver to use in the computational routines:
        - 'auto' chooses the solver automatically based on the type of data.
        - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
          coefficients. More stable for singular matrices than
          'cholesky'.
        - 'cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution via a Cholesky decomposition of
          dot(X.T, X)
        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).
        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
          in old scipy versions. It also uses an iterative procedure.
        - 'sag' uses a Stochastic Average Gradient descent. It also uses an
          iterative procedure, and is often faster than other solvers when
          both n_samples and n_features are large. Note that 'sag' fast
          convergence is only guaranteed on features with approximately the
          same scale. You can preprocess the data with a scaler from
          sklearn.preprocessing.
        All last four solvers support both dense and sparse data. However,
        only 'sag' supports sparse input when `fit_intercept` is True.
        .. versionadded:: 0.17
           Stochastic Average Gradient descent solver.
    tol : float
        Precision of the solution.
    verbose : int
        Verbosity level. Setting verbose > 0 will display additional
        information depending on the solver used.
    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data. Used only in 'sag' solver.
    return_n_iter : boolean, default False
        If True, the method also returns `n_iter`, the actual number of
        iteration performed by the solver.
        .. versionadded:: 0.17
    return_intercept : boolean, default False
        If True and if X is sparse, the method also returns the intercept,
        and the solver is automatically changed to 'sag'. This is only a
        temporary fix for fitting the intercept with sparse data. For dense
        data, use sklearn.linear_model._preprocess_data before your regression.
        .. versionadded:: 0.17
    Returns
    -------
    coef : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).
    n_iter : int, optional
        The actual number of iteration performed by the solver.
        Only returned if `return_n_iter` is True.
    intercept : float or array, shape = [n_targets]
        The intercept of the model. Only returned if `return_intercept`
        is True and if X is a scipy sparse array.
    Notes
    -----
    This function won't compute the intercept.
    """
    # Fitting an intercept with sparse X is only implemented for 'sag'.
    if return_intercept and sparse.issparse(X) and solver != 'sag':
        if solver != 'auto':
            warnings.warn("In Ridge, only 'sag' solver can currently fit the "
                          "intercept when X is sparse. Solver has been "
                          "automatically changed into 'sag'.")
        solver = 'sag'
    # SAG needs X and y columns to be C-contiguous and np.float64
    if solver == 'sag':
        X = check_array(X, accept_sparse=['csr'],
                        dtype=np.float64, order='C')
        y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
    else:
        X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
                        dtype=np.float64)
        y = check_array(y, dtype='numeric', ensure_2d=False)
    check_consistent_length(X, y)
    n_samples, n_features = X.shape
    if y.ndim > 2:
        raise ValueError("Target y has the wrong shape %s" % str(y.shape))
    # Work internally with 2d y; `ravel` records that the caller passed a
    # 1d y so the coefficients can be flattened again before returning.
    ravel = False
    if y.ndim == 1:
        y = y.reshape(-1, 1)
        ravel = True
    n_samples_, n_targets = y.shape
    if n_samples != n_samples_:
        raise ValueError("Number of samples in X and y does not correspond:"
                         " %d != %d" % (n_samples, n_samples_))
    has_sw = sample_weight is not None
    if solver == 'auto':
        # cholesky if it's a dense array and cg in any other case
        if not sparse.issparse(X) or has_sw:
            solver = 'cholesky'
        else:
            solver = 'sparse_cg'
    elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
        warnings.warn("""lsqr not available on this machine, falling back
                      to sparse_cg.""")
        solver = 'sparse_cg'
    if has_sw:
        if np.atleast_1d(sample_weight).ndim > 1:
            raise ValueError("Sample weights must be 1D array or scalar")
        if solver != 'sag':
            # SAG supports sample_weight directly. For other solvers,
            # we implement sample_weight via a simple rescaling.
            X, y = _rescale_data(X, y, sample_weight)
    # There should be either 1 or n_targets penalties
    alpha = np.asarray(alpha).ravel()
    if alpha.size not in [1, n_targets]:
        raise ValueError("Number of targets and number of penalties "
                         "do not correspond: %d != %d"
                         % (alpha.size, n_targets))
    if alpha.size == 1 and n_targets > 1:
        alpha = np.repeat(alpha, n_targets)
    if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'):
        raise ValueError('Solver %s not understood' % solver)
    n_iter = None
    if solver == 'sparse_cg':
        coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
    elif solver == 'lsqr':
        coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
    elif solver == 'cholesky':
        if n_features > n_samples:
            # Wide problem: solve in the dual using the Gram matrix X.X^T.
            K = safe_sparse_dot(X, X.T, dense_output=True)
            try:
                dual_coef = _solve_cholesky_kernel(K, y, alpha)
                coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
            except linalg.LinAlgError:
                # use SVD solver if matrix is singular
                solver = 'svd'
        else:
            try:
                coef = _solve_cholesky(X, y, alpha)
            except linalg.LinAlgError:
                # use SVD solver if matrix is singular
                solver = 'svd'
    elif solver == 'sag':
        # precompute max_squared_sum for all targets
        max_squared_sum = row_norms(X, squared=True).max()
        coef = np.empty((y.shape[1], n_features))
        n_iter = np.empty(y.shape[1], dtype=np.int32)
        intercept = np.zeros((y.shape[1], ))
        for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
            # When fitting an intercept, SAG appends it as the last entry
            # of the coefficient vector.
            init = {'coef': np.zeros((n_features + int(return_intercept), 1))}
            coef_, n_iter_, _ = sag_solver(
                X, target.ravel(), sample_weight, 'squared', alpha_i,
                max_iter, tol, verbose, random_state, False, max_squared_sum,
                init)
            if return_intercept:
                coef[i] = coef_[:-1]
                intercept[i] = coef_[-1]
            else:
                coef[i] = coef_
            n_iter[i] = n_iter_
        if intercept.shape[0] == 1:
            intercept = intercept[0]
        coef = np.asarray(coef)
    if solver == 'svd':
        # 'svd' is also the fall-back path when a Cholesky solve failed above.
        if sparse.issparse(X):
            raise TypeError('SVD solver does not support sparse'
                            ' inputs currently')
        coef = _solve_svd(X, y, alpha)
    if ravel:
        # When y was passed as a 1d-array, we flatten the coefficients.
        coef = coef.ravel()
    if return_n_iter and return_intercept:
        return coef, n_iter, intercept
    elif return_intercept:
        return coef, intercept
    elif return_n_iter:
        return coef, n_iter
    else:
        return coef
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
    """Base class for Ridge and RidgeClassifier.

    Stores the shared hyper-parameters and implements the common fit logic,
    delegating the actual solve to :func:`ridge_regression`.
    """
    @abstractmethod
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, solver="auto",
                 random_state=None):
        self.alpha = alpha
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.copy_X = copy_X
        self.max_iter = max_iter
        self.tol = tol
        self.solver = solver
        self.random_state = random_state
    def fit(self, X, y, sample_weight=None):
        """Fit the ridge model to (X, y), optionally with sample weights."""
        X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
                         multi_output=True, y_numeric=True)
        if ((sample_weight is not None) and
                np.atleast_1d(sample_weight).ndim > 1):
            raise ValueError("Sample weights must be 1D array or scalar")
        # Center/scale X and y as requested; the offsets are needed below to
        # reconstruct the intercept for the dense path.
        X, y, X_offset, y_offset, X_scale = self._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X,
            sample_weight=sample_weight)
        # temporary fix for fitting the intercept with sparse data using 'sag'
        if sparse.issparse(X) and self.fit_intercept:
            # Sparse X cannot be centered, so ridge_regression fits the
            # intercept directly (forcing the 'sag' solver internally).
            self.coef_, self.n_iter_, self.intercept_ = ridge_regression(
                X, y, alpha=self.alpha, sample_weight=sample_weight,
                max_iter=self.max_iter, tol=self.tol, solver=self.solver,
                random_state=self.random_state, return_n_iter=True,
                return_intercept=True)
            self.intercept_ += y_offset
        else:
            self.coef_, self.n_iter_ = ridge_regression(
                X, y, alpha=self.alpha, sample_weight=sample_weight,
                max_iter=self.max_iter, tol=self.tol, solver=self.solver,
                random_state=self.random_state, return_n_iter=True,
                return_intercept=False)
            self._set_intercept(X_offset, y_offset, X_scale)
        return self
class Ridge(_BaseRidge, RegressorMixin):
    """Linear least squares with l2 regularization.
    This model solves a regression model where the loss function is
    the linear least squares function and regularization is given by
    the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
    This estimator has built-in support for multi-variate regression
    (i.e., when y is a 2d-array of shape [n_samples, n_targets]).
    Read more in the :ref:`User Guide <ridge_regression>`.
    Parameters
    ----------
    alpha : {float, array-like}, shape (n_targets)
        Small positive values of alpha improve the conditioning of the problem
        and reduce the variance of the estimates. Alpha corresponds to
        ``C^-1`` in other linear models such as LogisticRegression or
        LinearSVC. If an array is passed, penalties are assumed to be specific
        to the targets. Hence they must correspond in number.
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    max_iter : int, optional
        Maximum number of iterations for conjugate gradient solver.
        For 'sparse_cg' and 'lsqr' solvers, the default value is determined
        by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        When the regressors are normalized, note that this makes the
        hyperparameters learnt more robust and almost independent of the number
        of samples. The same property is not valid for standardized data.
        However, if you wish to standardize, please use
        `preprocessing.StandardScaler` before calling `fit` on an estimator
        with `normalize=False`.
    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
        Solver to use in the computational routines:
        - 'auto' chooses the solver automatically based on the type of data.
        - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
          coefficients. More stable for singular matrices than
          'cholesky'.
        - 'cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution.
        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).
        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
          in old scipy versions. It also uses an iterative procedure.
        - 'sag' uses a Stochastic Average Gradient descent. It also uses an
          iterative procedure, and is often faster than other solvers when
          both n_samples and n_features are large. Note that 'sag' fast
          convergence is only guaranteed on features with approximately the
          same scale. You can preprocess the data with a scaler from
          sklearn.preprocessing.
        All last four solvers support both dense and sparse data. However,
        only 'sag' supports sparse input when `fit_intercept` is True.
        .. versionadded:: 0.17
           Stochastic Average Gradient descent solver.
    tol : float
        Precision of the solution.
    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data. Used only in 'sag' solver.
        .. versionadded:: 0.17
           *random_state* to support Stochastic Average Gradient.
    Attributes
    ----------
    coef_ : array, shape (n_features,) or (n_targets, n_features)
        Weight vector(s).
    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.
    n_iter_ : array or None, shape (n_targets,)
        Actual number of iterations for each target. Available only for
        sag and lsqr solvers. Other solvers will return None.
        .. versionadded:: 0.17
    See also
    --------
    RidgeClassifier, RidgeCV, :func:`sklearn.kernel_ridge.KernelRidge`
    Examples
    --------
    >>> from sklearn.linear_model import Ridge
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = Ridge(alpha=1.0)
    >>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
    Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
          normalize=False, random_state=None, solver='auto', tol=0.001)
    """
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, solver="auto",
                 random_state=None):
        # Ridge adds no parameters of its own; everything is stored by the
        # shared _BaseRidge constructor.
        super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
                                    normalize=normalize, copy_X=copy_X,
                                    max_iter=max_iter, tol=tol, solver=solver,
                                    random_state=random_state)
    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training data
        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values
        sample_weight : float or numpy array of shape [n_samples]
            Individual weights for each sample
        Returns
        -------
        self : returns an instance of self.
        """
        # The whole fitting logic lives in _BaseRidge.fit; this override
        # exists only to carry the public docstring.
        return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
    """Classifier using Ridge regression.
    Read more in the :ref:`User Guide <ridge_regression>`.
    Parameters
    ----------
    alpha : float
        Small positive values of alpha improve the conditioning of the problem
        and reduce the variance of the estimates. Alpha corresponds to
        ``C^-1`` in other linear models such as LogisticRegression or
        LinearSVC.
    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
    copy_X : boolean, optional, default True
        If True, X will be copied; else, it may be overwritten.
    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set to false, no
        intercept will be used in calculations (e.g. data is expected to be
        already centered).
    max_iter : int, optional
        Maximum number of iterations for conjugate gradient solver.
        The default value is determined by scipy.sparse.linalg.
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        When the regressors are normalized, note that this makes the
        hyperparameters learnt more robust and almost independent of the number
        of samples. The same property is not valid for standardized data.
        However, if you wish to standardize, please use
        `preprocessing.StandardScaler` before calling `fit` on an estimator
        with `normalize=False`.
    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
        Solver to use in the computational routines:
        - 'auto' chooses the solver automatically based on the type of data.
        - 'svd' uses a Singular Value Decomposition of X to compute the Ridge
          coefficients. More stable for singular matrices than
          'cholesky'.
        - 'cholesky' uses the standard scipy.linalg.solve function to
          obtain a closed-form solution.
        - 'sparse_cg' uses the conjugate gradient solver as found in
          scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
          more appropriate than 'cholesky' for large-scale data
          (possibility to set `tol` and `max_iter`).
        - 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
          in old scipy versions. It also uses an iterative procedure.
        - 'sag' uses a Stochastic Average Gradient descent. It also uses an
          iterative procedure, and is faster than other solvers when both
          n_samples and n_features are large.
        .. versionadded:: 0.17
           Stochastic Average Gradient descent solver.
    tol : float
        Precision of the solution.
    random_state : int seed, RandomState instance, or None (default)
        The seed of the pseudo random number generator to use when
        shuffling the data. Used in 'sag' solver.
    Attributes
    ----------
    coef_ : array, shape (n_features,) or (n_classes, n_features)
        Weight vector(s).
    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.
    n_iter_ : array or None, shape (n_targets,)
        Actual number of iterations for each target. Available only for
        sag and lsqr solvers. Other solvers will return None.
    See also
    --------
    Ridge, RidgeClassifierCV
    Notes
    -----
    For multi-class classification, n_class classifiers are trained in
    a one-versus-all approach. Concretely, this is implemented by taking
    advantage of the multi-variate response support in Ridge.
    """
    def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
                 copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
                 solver="auto", random_state=None):
        super(RidgeClassifier, self).__init__(
            alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
            copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
            random_state=random_state)
        self.class_weight = class_weight
    def fit(self, X, y, sample_weight=None):
        """Fit Ridge classifier model.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples,n_features]
            Training data
        y : array-like, shape = [n_samples]
            Target values
        sample_weight : float or numpy array of shape (n_samples,)
            Sample weight.
            .. versionadded:: 0.17
               *sample_weight* support to Classifier.
        Returns
        -------
        self : returns an instance of self.
        """
        # Encode labels as a {-1, +1} indicator matrix so the multi-output
        # ridge regressor implements one-vs-all classification.
        self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
        Y = self._label_binarizer.fit_transform(y)
        if not self._label_binarizer.y_type_.startswith('multilabel'):
            y = column_or_1d(y, warn=True)
        else:
            # we don't (yet) support multi-label classification in Ridge
            raise ValueError(
                "%s doesn't support multi-label classification" % (
                    self.__class__.__name__))
        if self.class_weight:
            if sample_weight is None:
                sample_weight = 1.
            # modify the sample weights with the corresponding class weight
            sample_weight = (sample_weight *
                             compute_sample_weight(self.class_weight, y))
        super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
        return self
    @property
    def classes_(self):
        """The classes labels, as learned by the LabelBinarizer during fit."""
        return self._label_binarizer.classes_
class _RidgeGCV(LinearModel):
    """Ridge regression with built-in Generalized Cross-Validation
    It allows efficient Leave-One-Out cross-validation.
    This class is not intended to be used directly. Use RidgeCV instead.
    Notes
    -----
    We want to solve (K + alpha*Id)c = y,
    where K = X X^T is the kernel matrix.
    Let G = (K + alpha*Id)^-1.
    Dual solution: c = Gy
    Primal solution: w = X^T c
    Compute eigendecomposition K = Q V Q^T.
    Then G = Q (V + alpha*Id)^-1 Q^T,
    where (V + alpha*Id) is diagonal.
    It is thus inexpensive to inverse for many alphas.
    Let loov be the vector of prediction values for each example
    when the model was fitted with all examples but this example.
    loov = (KGY - diag(KG)Y) / diag(I-KG)
    Let looe be the vector of prediction errors for each example
    when the model was fitted with all examples but this example.
    looe = y - loov = c / diag(G)
    References
    ----------
    http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
    http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
    """
    def __init__(self, alphas=(0.1, 1.0, 10.0),
                 fit_intercept=True, normalize=False,
                 scoring=None, copy_X=True,
                 gcv_mode=None, store_cv_values=False):
        self.alphas = np.asarray(alphas)
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.scoring = scoring
        self.copy_X = copy_X
        self.gcv_mode = gcv_mode
        self.store_cv_values = store_cv_values
    def _pre_compute(self, X, y):
        """Eigendecompose the Gram matrix K = X.X^T ('eigen' gcv_mode)."""
        # even if X is very sparse, K is usually very dense
        K = safe_sparse_dot(X, X.T, dense_output=True)
        v, Q = linalg.eigh(K)
        QT_y = np.dot(Q.T, y)
        return v, Q, QT_y
    def _decomp_diag(self, v_prime, Q):
        # compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
        return (v_prime * Q ** 2).sum(axis=-1)
    def _diag_dot(self, D, B):
        # compute dot(diag(D), B)
        if len(B.shape) > 1:
            # handle case where B is > 1-d
            D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
        return D * B
    def _errors_and_values_helper(self, alpha, y, v, Q, QT_y):
        """Helper function to avoid code duplication between self._errors and
        self._values.
        Notes
        -----
        We don't construct matrix G, instead compute action on y & diagonal.
        """
        w = 1.0 / (v + alpha)
        c = np.dot(Q, self._diag_dot(w, QT_y))
        G_diag = self._decomp_diag(w, Q)
        # handle case where y is 2-d
        if len(y.shape) != 1:
            G_diag = G_diag[:, np.newaxis]
        return G_diag, c
    def _errors(self, alpha, y, v, Q, QT_y):
        """Squared leave-one-out errors (and dual coefs) for one alpha."""
        G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
        return (c / G_diag) ** 2, c
    def _values(self, alpha, y, v, Q, QT_y):
        """Leave-one-out predictions (and dual coefs) for one alpha."""
        G_diag, c = self._errors_and_values_helper(alpha, y, v, Q, QT_y)
        return y - (c / G_diag), c
    def _pre_compute_svd(self, X, y):
        """Thin SVD of X ('svd' gcv_mode); dense X only."""
        if sparse.issparse(X):
            raise TypeError("SVD not supported for sparse matrices")
        U, s, _ = linalg.svd(X, full_matrices=0)
        v = s ** 2
        UT_y = np.dot(U.T, y)
        return v, U, UT_y
    def _errors_and_values_svd_helper(self, alpha, y, v, U, UT_y):
        """Helper function to avoid code duplication between self._errors_svd
        and self._values_svd.
        """
        w = ((v + alpha) ** -1) - (alpha ** -1)
        c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
        G_diag = self._decomp_diag(w, U) + (alpha ** -1)
        if len(y.shape) != 1:
            # handle case where y is 2-d
            G_diag = G_diag[:, np.newaxis]
        return G_diag, c
    def _errors_svd(self, alpha, y, v, U, UT_y):
        """Squared leave-one-out errors (and dual coefs), SVD variant."""
        G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
        return (c / G_diag) ** 2, c
    def _values_svd(self, alpha, y, v, U, UT_y):
        """Leave-one-out predictions (and dual coefs), SVD variant."""
        G_diag, c = self._errors_and_values_svd_helper(alpha, y, v, U, UT_y)
        return y - (c / G_diag), c
    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training data
        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values
        sample_weight : float or array-like of shape [n_samples]
            Sample weight
        Returns
        -------
        self : Returns self.
        """
        X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
                         multi_output=True, y_numeric=True)
        n_samples, n_features = X.shape
        X, y, X_offset, y_offset, X_scale = LinearModel._preprocess_data(
            X, y, self.fit_intercept, self.normalize, self.copy_X,
            sample_weight=sample_weight)
        gcv_mode = self.gcv_mode
        # with_sw is truthy when an array-like sample_weight was given
        # (np.shape of a scalar/None is ()).
        with_sw = len(np.shape(sample_weight))
        if gcv_mode is None or gcv_mode == 'auto':
            if sparse.issparse(X) or n_features > n_samples or with_sw:
                gcv_mode = 'eigen'
            else:
                gcv_mode = 'svd'
        elif gcv_mode == "svd" and with_sw:
            # FIXME non-uniform sample weights not yet supported
            warnings.warn("non-uniform sample weights unsupported for svd, "
                          "forcing usage of eigen")
            gcv_mode = 'eigen'
        if gcv_mode == 'eigen':
            _pre_compute = self._pre_compute
            _errors = self._errors
            _values = self._values
        elif gcv_mode == 'svd':
            # assert n_samples >= n_features
            _pre_compute = self._pre_compute_svd
            _errors = self._errors_svd
            _values = self._values_svd
        else:
            raise ValueError('bad gcv_mode "%s"' % gcv_mode)
        v, Q, QT_y = _pre_compute(X, y)
        n_y = 1 if len(y.shape) == 1 else y.shape[1]
        cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
        C = []
        scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
        # Without a scorer we select alpha by leave-one-out squared error;
        # with one, by scoring the leave-one-out predictions.
        error = scorer is None
        for i, alpha in enumerate(self.alphas):
            weighted_alpha = (sample_weight * alpha
                              if sample_weight is not None
                              else alpha)
            if error:
                out, c = _errors(weighted_alpha, y, v, Q, QT_y)
            else:
                out, c = _values(weighted_alpha, y, v, Q, QT_y)
            cv_values[:, i] = out.ravel()
            C.append(c)
        if error:
            best = cv_values.mean(axis=0).argmin()
        else:
            # The scorer want an object that will make the predictions but
            # they are already computed efficiently by _RidgeGCV. This
            # identity_estimator will just return them
            def identity_estimator():
                pass
            identity_estimator.decision_function = lambda y_predict: y_predict
            identity_estimator.predict = lambda y_predict: y_predict
            out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
                   for i in range(len(self.alphas))]
            best = np.argmax(out)
        self.alpha_ = self.alphas[best]
        self.dual_coef_ = C[best]
        # Recover the primal coefficients from the dual solution: w = X^T c.
        self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
        self._set_intercept(X_offset, y_offset, X_scale)
        if self.store_cv_values:
            if len(y.shape) == 1:
                cv_values_shape = n_samples, len(self.alphas)
            else:
                cv_values_shape = n_samples, n_y, len(self.alphas)
            self.cv_values_ = cv_values.reshape(cv_values_shape)
        return self
class _BaseRidgeCV(LinearModel):
    """Base class for RidgeCV and RidgeClassifierCV.

    Selects the best alpha either via efficient Generalized (Leave-One-Out)
    Cross-Validation (`cv=None`, using _RidgeGCV) or via a grid search over
    a user-supplied CV splitter.
    """
    def __init__(self, alphas=(0.1, 1.0, 10.0),
                 fit_intercept=True, normalize=False, scoring=None,
                 cv=None, gcv_mode=None,
                 store_cv_values=False):
        self.alphas = alphas
        self.fit_intercept = fit_intercept
        self.normalize = normalize
        self.scoring = scoring
        self.cv = cv
        self.gcv_mode = gcv_mode
        self.store_cv_values = store_cv_values
    def fit(self, X, y, sample_weight=None):
        """Fit Ridge regression model
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data
        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values
        sample_weight : float or array-like of shape [n_samples]
            Sample weight
        Returns
        -------
        self : Returns self.
        """
        if self.cv is None:
            # Efficient Leave-One-Out GCV path.
            estimator = _RidgeGCV(self.alphas,
                                  fit_intercept=self.fit_intercept,
                                  normalize=self.normalize,
                                  scoring=self.scoring,
                                  gcv_mode=self.gcv_mode,
                                  store_cv_values=self.store_cv_values)
            estimator.fit(X, y, sample_weight=sample_weight)
            self.alpha_ = estimator.alpha_
            if self.store_cv_values:
                self.cv_values_ = estimator.cv_values_
        else:
            # Explicit CV path: per-fold cv_values_ are not computed here.
            if self.store_cv_values:
                raise ValueError("cv!=None and store_cv_values=True "
                                 " are incompatible")
            parameters = {'alpha': self.alphas}
            fit_params = {'sample_weight': sample_weight}
            gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
                              parameters, fit_params=fit_params, cv=self.cv)
            gs.fit(X, y)
            estimator = gs.best_estimator_
            self.alpha_ = gs.best_estimator_.alpha
        # Expose the winning model's fitted parameters on self.
        self.coef_ = estimator.coef_
        self.intercept_ = estimator.intercept_
        return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
    """Ridge regression with built-in cross-validation.
    By default, it performs Generalized Cross-Validation, which is a form of
    efficient Leave-One-Out cross-validation.
    Read more in the :ref:`User Guide <ridge_regression>`.
    Parameters
    ----------
    alphas : numpy array of shape [n_alphas]
        Array of alpha values to try.
        Small positive values of alpha improve the conditioning of the
        problem and reduce the variance of the estimates.
        Alpha corresponds to ``C^-1`` in other linear models such as
        LogisticRegression or LinearSVC.
    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when `fit_intercept` is set to False.
        When the regressors are normalized, note that this makes the
        hyperparameters learnt more robust and almost independent of the number
        of samples. The same property is not valid for standardized data.
        However, if you wish to standardize, please use
        `preprocessing.StandardScaler` before calling `fit` on an estimator
        with `normalize=False`.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
        - None, to use the efficient Leave-One-Out cross-validation
        - integer, to specify the number of folds.
        - An object to be used as a cross-validation generator.
        - An iterable yielding train/test splits.
        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` used, else, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    gcv_mode : {None, 'auto', 'svd', eigen'}, optional
        Flag indicating which strategy to use when performing
        Generalized Cross-Validation. Options are::
            'auto' : use 'eigen' if X is sparse, n_features > n_samples or
                     sample weights are given, otherwise use 'svd'
            'svd' : force computation via singular value decomposition of X
                    (does not work for sparse matrices)
            'eigen' : force computation via eigendecomposition of the Gram
                      matrix X.X^T
        The 'auto' mode is the default and is intended to pick the cheaper
        option of the two depending upon the shape and format of the training
        data.
    store_cv_values : boolean, default=False
        Flag indicating if the cross-validation values corresponding to
        each alpha should be stored in the `cv_values_` attribute (see
        below). This flag is only compatible with `cv=None` (i.e. using
        Generalized Cross-Validation).
    Attributes
    ----------
    cv_values_ : array, shape = [n_samples, n_alphas] or \
        shape = [n_samples, n_targets, n_alphas], optional
        Cross-validation values for each alpha (if `store_cv_values=True` and \
        `cv=None`). After `fit()` has been called, this attribute will \
        contain the mean squared errors (by default) or the values of the \
        `{loss,score}_func` function (if provided in the constructor).
    coef_ : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).
    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.
    alpha_ : float
        Estimated regularization parameter.
    See also
    --------
    Ridge: Ridge regression
    RidgeClassifier: Ridge classifier
    RidgeClassifierCV: Ridge classifier with built-in cross validation
    """
    # All behavior is inherited from _BaseRidgeCV; this subclass only adds
    # the RegressorMixin scoring and the public documentation.
    pass
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
    """Ridge classifier with built-in cross-validation.

    By default this performs Generalized Cross-Validation, an efficient
    form of Leave-One-Out cross-validation. Currently, only the
    n_features > n_samples case is handled efficiently.

    Read more in the :ref:`User Guide <ridge_regression>`.

    Parameters
    ----------
    alphas : numpy array of shape [n_alphas]
        Array of alpha values to try.
        Small positive values of alpha improve the conditioning of the
        problem and reduce the variance of the estimates.
        Alpha corresponds to ``C^-1`` in other linear models such as
        LogisticRegression or LinearSVC.
    fit_intercept : boolean
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).
    normalize : boolean, optional, default False
        If True, the regressors X will be normalized before regression.
        This parameter is ignored when ``fit_intercept`` is set to False.
        If you wish to standardize instead, use
        ``preprocessing.StandardScaler`` before calling ``fit`` on an
        estimator with ``normalize=False``.
    scoring : string, callable or None, optional, default: None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:

        - None, to use the efficient Leave-One-Out cross-validation
        - integer, to specify the number of folds
        - an object to be used as a cross-validation generator
        - an iterable yielding train/test splits

        Refer to the :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    class_weight : dict or 'balanced', optional
        Weights associated with classes in the form
        ``{class_label: weight}``. If not given, all classes are supposed
        to have weight one.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input
        data as ``n_samples / (n_classes * np.bincount(y))``.

    Attributes
    ----------
    cv_values_ : array, shape = [n_samples, n_alphas] or \
        shape = [n_samples, n_responses, n_alphas], optional
        Cross-validation values for each alpha (if ``store_cv_values=True``
        and ``cv=None``). After ``fit()`` has been called, this attribute
        will contain the mean squared errors (by default) or the values
        of the ``{loss,score}_func`` function (if provided in the
        constructor).
    coef_ : array, shape = [n_features] or [n_targets, n_features]
        Weight vector(s).
    intercept_ : float | array, shape = (n_targets,)
        Independent term in decision function. Set to 0.0 if
        ``fit_intercept = False``.
    alpha_ : float
        Estimated regularization parameter.

    See also
    --------
    Ridge: Ridge regression
    RidgeClassifier: Ridge classifier
    RidgeCV: Ridge regression with built-in cross validation

    Notes
    -----
    For multi-class classification, n_class classifiers are trained in
    a one-versus-all approach. Concretely, this is implemented by taking
    advantage of the multi-variate response support in Ridge.
    """
    def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
                 normalize=False, scoring=None, cv=None, class_weight=None):
        super(RidgeClassifierCV, self).__init__(
            alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
            scoring=scoring, cv=cv)
        self.class_weight = class_weight

    def fit(self, X, y, sample_weight=None):
        """Fit the ridge classifier.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target values.
        sample_weight : float or numpy array of shape (n_samples,)
            Sample weight.

        Returns
        -------
        self : object
            Returns self.
        """
        # Encode the targets as a {-1, 1} indicator matrix; the ridge
        # regression below is run against this (possibly multi-column)
        # indicator representation.
        self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
        Y = self._label_binarizer.fit_transform(y)
        if not self._label_binarizer.y_type_.startswith('multilabel'):
            y = column_or_1d(y, warn=True)

        if self.class_weight:
            # Fold the per-class weights into the per-sample weights.
            base_weight = 1. if sample_weight is None else sample_weight
            sample_weight = base_weight * compute_sample_weight(
                self.class_weight, y)

        _BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
        return self

    @property
    def classes_(self):
        # Class labels discovered by the internal label binarizer.
        return self._label_binarizer.classes_
| bsd-3-clause |
lobnek/pyutil | test/test_portfolio/test_builder.py | 1 | 2535 | import pandas as pd
import pytest
from pyutil.portfolio.portfolio import Portfolio, merge
from test.config import read_pd
import pandas.testing as pt
@pytest.fixture(scope="module")
def portfolio():
    # Module-scoped portfolio assembled from the CSV test fixtures.
    prices = read_pd("price.csv", parse_dates=True, index_col=0)
    weights = read_pd("weight.csv", parse_dates=True, index_col=0)
    return Portfolio(prices=prices, weights=weights)
def test_builder():
    # Two assets, constant price of 100 on both timestamps.
    frame = pd.DataFrame(columns=["B", "A"], index=[1, 2], data=100)
    p = Portfolio(prices=frame)

    # Assign weights stamp by stamp via item assignment.
    p[1] = pd.Series({"A": 0.5, "B": 0.5})
    p[2] = pd.Series({"A": 0.3, "B": 0.7})

    pt.assert_series_equal(p[2], pd.Series({"A": 0.3, "B": 0.7}),
                           check_names=False)
    assert p.prices["A"][2] == 100
    assert p.asset_returns["A"][2] == 0.0
    assert p.weights["A"][1] == 0.5
    assert p.cash[2] == 0.0
    assert str(p) == "Portfolio with assets: ['B', 'A']"
def test_rename(portfolio):
    # Restrict to two assets, then remap their names.
    renamed = portfolio.subportfolio(assets=["B", "A"]).rename(
        names={"A": "AA", "B": "BB"})
    assert str(renamed) == "Portfolio with assets: ['BB', 'AA']"
def test_forward():
    data = [[100, 120], [100, 108], [130, 120]]
    p = Portfolio(prices=pd.DataFrame(columns=["A", "B"],
                                      index=[1, 2, 3], data=data))
    p[1] = pd.Series({"A": 0.5, "B": 0.4})

    # Roll the portfolio forward through every timestamp.
    for _, state in p.loop():
        p = state

    assert p.weights["A"][3] == pytest.approx(0.56521739130434789, 1e-5)
def test_empty():
    # A portfolio built from no price data reports itself as empty.
    assert Portfolio(prices=pd.DataFrame({})).empty
def test_merge():
    def build(columns, price, w1, w2):
        # Build a two-asset portfolio with the given weights per stamp.
        p = Portfolio(prices=pd.DataFrame(columns=columns, index=[1, 2],
                                          data=price))
        p[1] = pd.Series(w1)
        p[2] = pd.Series(w2)
        return p

    p1 = build(["B", "A"], 100, {"A": 0.5, "B": 0.5}, {"A": 0.3, "B": 0.7})
    p2 = build(["C", "D"], 200, {"C": 0.5, "D": 0.5}, {"C": 0.3, "D": 0.7})

    combined = merge(portfolios=[0.5 * p1, 0.5 * p2], axis=1)
    assert combined.assets == ["A", "B", "C", "D"]

    p3 = build(["A", "B"], 200, {"A": 0.5, "B": 0.5}, {"A": 0.3, "B": 0.7})
    with pytest.raises(ValueError):
        # overlapping columns!
        merge(portfolios=[p1, p3], axis=1)
| mit |
pnedunuri/scikit-learn | examples/ensemble/plot_ensemble_oob.py | 259 | 3265 | """
=============================
OOB Errors for Random Forests
=============================
The ``RandomForestClassifier`` is trained using *bootstrap aggregation*, where
each new tree is fit from a bootstrap sample of the training observations
:math:`z_i = (x_i, y_i)`. The *out-of-bag* (OOB) error is the average error for
each :math:`z_i` calculated using predictions from the trees that do not
contain :math:`z_i` in their respective bootstrap sample. This allows the
``RandomForestClassifier`` to be fit and validated whilst being trained [1].
The example below demonstrates how the OOB error can be measured at the
addition of each new tree during training. The resulting plot allows a
practitioner to approximate a suitable value of ``n_estimators`` at which the
error stabilizes.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", p592-593, Springer, 2009.
"""
import matplotlib.pyplot as plt

from collections import OrderedDict
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier

# Author: Kian Ho <hui.kian.ho@gmail.com>
#         Gilles Louppe <g.louppe@gmail.com>
#         Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 Clause

print(__doc__)

RANDOM_STATE = 123

# Generate a binary classification dataset.
X, y = make_classification(n_samples=500, n_features=25,
                           n_clusters_per_class=1, n_informative=15,
                           random_state=RANDOM_STATE)


# NOTE: `warm_start=True` disables support for parallelised ensembles but
# is necessary for growing the forest incrementally and tracking the OOB
# error trajectory during training.
def _make_forest(max_features):
    # One warm-started forest per max_features setting under test.
    return RandomForestClassifier(warm_start=True, oob_score=True,
                                  max_features=max_features,
                                  random_state=RANDOM_STATE)


ensemble_clfs = [
    ("RandomForestClassifier, max_features='sqrt'", _make_forest("sqrt")),
    ("RandomForestClassifier, max_features='log2'", _make_forest("log2")),
    ("RandomForestClassifier, max_features=None", _make_forest(None)),
]

# Map a classifier name to a list of (<n_estimators>, <error rate>) pairs.
error_rate = OrderedDict((label, []) for label, _ in ensemble_clfs)

# Range of `n_estimators` values to explore.
min_estimators = 15
max_estimators = 175

for label, clf in ensemble_clfs:
    for n_trees in range(min_estimators, max_estimators + 1):
        clf.set_params(n_estimators=n_trees)
        clf.fit(X, y)

        # The OOB error is the complement of the OOB score.
        error_rate[label].append((n_trees, 1 - clf.oob_score_))

# Generate the "OOB error rate" vs. "n_estimators" plot.
for label, history in error_rate.items():
    xs, ys = zip(*history)
    plt.plot(xs, ys, label=label)

plt.xlim(min_estimators, max_estimators)
plt.xlabel("n_estimators")
plt.ylabel("OOB error rate")
plt.legend(loc="upper right")
plt.show()
| bsd-3-clause |
pythonvietnam/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
    """
    Dummy classifier used to exercise recursive feature elimination.
    """

    def __init__(self, foo_param=0):
        self.foo_param = foo_param

    def fit(self, X, Y):
        # A trivial "fit": uniform coefficients over all features.
        assert_true(len(X) == len(Y))
        self.coef_ = np.ones(X.shape[1], dtype=np.float64)
        return self

    def predict(self, T):
        return T.shape[0]

    # Every prediction-style method behaves like predict.
    predict_proba = predict
    decision_function = predict
    transform = predict

    def score(self, X=None, Y=None):
        # Any foo_param above 1 counts as a "perfect" score.
        return 1. if self.foo_param > 1 else 0.

    def get_params(self, deep=True):
        return {'foo_param': self.foo_param}

    def set_params(self, **params):
        # Deliberately a no-op; the mock keeps its construction state.
        return self
def test_rfe_set_params():
    rng = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, rng.normal(size=(len(iris.data), 6))]
    y = iris.target

    # Reference predictions with the kernel set at construction time.
    ref_pred = RFE(estimator=SVC(kernel="linear"), n_features_to_select=4,
                   step=0.1).fit(X, y).predict(X)

    with warnings.catch_warnings(record=True):
        # estimator_params is deprecated
        rfe = RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
                  estimator_params={'kernel': 'linear'})
        assert_array_equal(ref_pred, rfe.fit(X, y).predict(X))
def test_rfe_features_importance():
    rng = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, rng.normal(size=(len(iris.data), 6))]
    y = iris.target

    forest = RandomForestClassifier(n_estimators=20,
                                    random_state=rng, max_depth=2)
    rfe = RFE(estimator=forest, n_features_to_select=4, step=0.1)
    rfe.fit(X, y)
    assert_equal(len(rfe.ranking_), X.shape[1])

    # An SVC-driven elimination should select the same features.
    rfe_svc = RFE(estimator=SVC(kernel="linear"), n_features_to_select=4,
                  step=0.1)
    rfe_svc.fit(X, y)
    assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
    deprecation_message = ("The parameter 'estimator_params' is deprecated as "
                           "of version 0.16 and will be removed in 0.18. The "
                           "parameter is no longer necessary because the "
                           "value is set via the estimator initialisation or "
                           "set_params method.")
    rng = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, rng.normal(size=(len(iris.data), 6))]
    y = iris.target

    # Both RFE and RFECV must warn about estimator_params on fit.
    selectors = [
        RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
            estimator_params={'kernel': 'linear'}),
        RFECV(estimator=SVC(), step=1, cv=5,
              estimator_params={'kernel': 'linear'}),
    ]
    for selector in selectors:
        assert_warns_message(DeprecationWarning, deprecation_message,
                             selector.fit, X=X, y=y)
def test_rfe():
    rng = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, rng.normal(size=(len(iris.data), 6))]
    X_sparse = sparse.csr_matrix(X)
    y = iris.target

    # Dense path.
    clf = SVC(kernel="linear")
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
    rfe.fit(X, y)
    X_r = rfe.transform(X)
    clf.fit(X_r, y)
    assert_equal(len(rfe.ranking_), X.shape[1])

    # Sparse path.
    rfe_sparse = RFE(estimator=SVC(kernel="linear"),
                     n_features_to_select=4, step=0.1)
    rfe_sparse.fit(X_sparse, y)
    X_r_sparse = rfe_sparse.transform(X_sparse)

    # Noise columns are stripped; predictions/scores match a direct fit
    # on the original iris features, and sparse agrees with dense.
    assert_equal(X_r.shape, iris.data.shape)
    assert_array_almost_equal(X_r[:10], iris.data[:10])
    assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
    assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
    assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
    rng = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, rng.normal(size=(len(iris.data), 6))]
    y = iris.target

    # The mock estimator ranks every feature identically; RFE should
    # still run end to end and keep the transformed shape intact.
    clf = MockClassifier()
    rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
    rfe.fit(X, y)
    X_r = rfe.transform(X)
    clf.fit(X_r, y)
    assert_equal(len(rfe.ranking_), X.shape[1])
    assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
    """RFECV should filter out the appended noise features under the
    default scorer, a custom loss, a named scorer, and sparse input,
    for both step=1 and step=2."""
    generator = check_random_state(0)
    iris = load_iris()
    # Append 6 pure-noise columns to the 4 informative iris features.
    X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
    y = list(iris.target) # regression test: list should be supported
    # Test using the score function
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
    rfecv.fit(X, y)
    # non-regression test for missing worst feature:
    assert_equal(len(rfecv.grid_scores_), X.shape[1])
    assert_equal(len(rfecv.ranking_), X.shape[1])
    X_r = rfecv.transform(X)
    # All the noisy variables were filtered out
    assert_array_equal(X_r, iris.data)
    # same in sparse
    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
    X_sparse = sparse.csr_matrix(X)
    rfecv_sparse.fit(X_sparse, y)
    X_r_sparse = rfecv_sparse.transform(X_sparse)
    assert_array_equal(X_r_sparse.toarray(), iris.data)
    # Test using a customized loss function
    scoring = make_scorer(zero_one_loss, greater_is_better=False)
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  scoring=scoring)
    ignore_warnings(rfecv.fit)(X, y)
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)
    # Test using a scorer
    scorer = get_scorer('accuracy')
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  scoring=scorer)
    rfecv.fit(X, y)
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)
    # Test fix on grid_scores
    def test_scorer(estimator, X, y):
        # Constant scorer: every grid entry must come out as exactly 1.0.
        return 1.0
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
                  scoring=test_scorer)
    rfecv.fit(X, y)
    assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
    # Same as the first two tests, but with step=2
    rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
    rfecv.fit(X, y)
    assert_equal(len(rfecv.grid_scores_), 6)
    assert_equal(len(rfecv.ranking_), X.shape[1])
    X_r = rfecv.transform(X)
    assert_array_equal(X_r, iris.data)
    rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
    X_sparse = sparse.csr_matrix(X)
    rfecv_sparse.fit(X_sparse, y)
    X_r_sparse = rfecv_sparse.transform(X_sparse)
    assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
    rng = check_random_state(0)
    iris = load_iris()
    X = np.c_[iris.data, rng.normal(size=(len(iris.data), 6))]
    y = list(iris.target)  # regression test: list should be supported

    rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
    rfecv.fit(X, y)

    # non-regression test for missing worst feature:
    assert_equal(len(rfecv.grid_scores_), X.shape[1])
    assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
    # RFE forwards the estimator type of its wrapped estimator.
    rfe = RFE(SVC(kernel='linear'))
    assert_equal(rfe._estimator_type, "classifier")

    # make sure that cross-validation is stratified
    iris = load_iris()
    scores = cross_val_score(rfe, iris.data, iris.target)
    assert_greater(scores.min(), .7)
def test_rfe_min_step():
    n_features = 10
    X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
    n_samples, n_features = X.shape
    estimator = SVR(kernel="linear")

    # Half of the features must survive regardless of how step is given:
    # a fraction with floor(step * n_features) <= 0, a fraction with
    # floor(step * n_features) > 0, and a plain integer.
    for step in (0.01, 0.20, 5):
        sel = RFE(estimator, step=step).fit(X, y)
        assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
    """Check that the closed-form iteration count matches what RFE and
    RFECV actually produce, for divisible and non-divisible steps."""
    # In RFE, 'number_of_subsets_of_features'
    # = the number of iterations in '_fit'
    # = max(ranking_)
    # = 1 + (n_features + step - n_features_to_select - 1) // step
    # After optimization #4534, this number
    # = 1 + np.ceil((n_features - n_features_to_select) / float(step))
    # This test case is to test their equivalence, refer to #4534 and #3824
    def formula1(n_features, n_features_to_select, step):
        # Pre-#4534 integer-arithmetic formulation.
        return 1 + ((n_features + step - n_features_to_select - 1) // step)
    def formula2(n_features, n_features_to_select, step):
        # Post-#4534 ceiling formulation; must agree with formula1.
        return 1 + np.ceil((n_features - n_features_to_select) / float(step))
    # RFE
    # Case 1, n_features - n_features_to_select is divisible by step
    # Case 2, n_features - n_features_to_select is not divisible by step
    n_features_list = [11, 11]
    n_features_to_select_list = [3, 3]
    step_list = [2, 3]
    for n_features, n_features_to_select, step in zip(
            n_features_list, n_features_to_select_list, step_list):
        generator = check_random_state(43)
        X = generator.normal(size=(100, n_features))
        y = generator.rand(100).round()
        rfe = RFE(estimator=SVC(kernel="linear"),
                  n_features_to_select=n_features_to_select, step=step)
        rfe.fit(X, y)
        # this number also equals to the maximum of ranking_
        assert_equal(np.max(rfe.ranking_),
                     formula1(n_features, n_features_to_select, step))
        assert_equal(np.max(rfe.ranking_),
                     formula2(n_features, n_features_to_select, step))
    # In RFECV, 'fit' calls 'RFE._fit'
    # 'number_of_subsets_of_features' of RFE
    # = the size of 'grid_scores' of RFECV
    # = the number of iterations of the for loop before optimization #4534
    # RFECV, n_features_to_select = 1
    # Case 1, n_features - 1 is divisible by step
    # Case 2, n_features - 1 is not divisible by step
    n_features_to_select = 1
    n_features_list = [11, 10]
    step_list = [2, 2]
    for n_features, step in zip(n_features_list, step_list):
        generator = check_random_state(43)
        X = generator.normal(size=(100, n_features))
        y = generator.rand(100).round()
        rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
        rfecv.fit(X, y)
        assert_equal(rfecv.grid_scores_.shape[0],
                     formula1(n_features, n_features_to_select, step))
        assert_equal(rfecv.grid_scores_.shape[0],
                     formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
GuessWhoSamFoo/pandas | pandas/core/dtypes/dtypes.py | 1 | 31325 | """ define extension dtypes """
import re
import warnings
import numpy as np
import pytz
from pandas._libs.interval import Interval
from pandas._libs.tslibs import NaT, Period, Timestamp, timezones
from pandas.core.dtypes.generic import ABCCategoricalIndex, ABCIndexClass
from pandas import compat
from .base import ExtensionDtype, _DtypeOpsMixin
from .inference import is_list_like
def register_extension_dtype(cls):
    """Class decorator to register an ExtensionType with pandas.

    .. versionadded:: 0.24.0

    This enables operations like ``.astype(name)`` for the name
    of the ExtensionDtype.

    Examples
    --------
    >>> from pandas.api.extensions import register_extension_dtype
    >>> from pandas.api.extensions import ExtensionDtype
    >>> @register_extension_dtype
    ... class MyExtensionDtype(ExtensionDtype):
    ...     pass
    """
    # Record the dtype in the module-level registry, then hand the class
    # back unchanged so the decorator is transparent.
    registry.register(cls)
    return cls
class Registry(object):
    """
    Registry for dtype inference

    The registry allows one to map a string repr of a extension
    dtype to an extension dtype. The string alias can be used in several
    places, including

    * Series and Index constructors
    * :meth:`pandas.array`
    * :meth:`pandas.Series.astype`

    Multiple extension types can be registered.
    These are tried in order.
    """

    def __init__(self):
        # Registered dtype classes, probed in registration order.
        self.dtypes = []

    def register(self, dtype):
        """
        Parameters
        ----------
        dtype : ExtensionDtype
        """
        if not issubclass(dtype, (PandasExtensionDtype, ExtensionDtype)):
            raise ValueError("can only register pandas extension dtypes")
        self.dtypes.append(dtype)

    def find(self, dtype):
        """
        Parameters
        ----------
        dtype : PandasExtensionDtype or string

        Returns
        -------
        return the first matching dtype, otherwise return None
        """
        if not isinstance(dtype, compat.string_types):
            # Non-string input: pass extension dtypes (instances or
            # classes) straight through, reject everything else.
            dtype_type = dtype if isinstance(dtype, type) else type(dtype)
            if issubclass(dtype_type, ExtensionDtype):
                return dtype
            return None

        # String alias: probe each registered dtype in order; a dtype
        # signals "not mine" by raising TypeError.
        for dtype_type in self.dtypes:
            try:
                return dtype_type.construct_from_string(dtype)
            except TypeError:
                pass

        return None


registry = Registry()
class PandasExtensionDtype(_DtypeOpsMixin):
    """
    A np.dtype duck-typed class, suitable for holding a custom dtype.

    THIS IS NOT A REAL NUMPY DTYPE
    """
    # np.dtype-compatible class attributes; subclasses override as needed.
    type = None
    subdtype = None
    kind = None
    str = None
    num = 100
    shape = tuple()
    itemsize = 8
    base = None
    isbuiltin = 0
    isnative = 0
    # cache of constructed instances; excluded from pickling via
    # __getstate__ and cleared by reset_cache()
    _cache = {}

    def __unicode__(self):
        # Python 2 text representation: simply the dtype's name.
        return self.name

    def __str__(self):
        """
        Return a string representation for a particular Object

        Invoked by str(df) in both py2/py3.
        Yields Bytestring in Py2, Unicode String in py3.
        """
        if compat.PY3:
            return self.__unicode__()
        return self.__bytes__()

    def __bytes__(self):
        """
        Return a string representation for a particular object.

        Invoked by bytes(obj) in py3 only.
        Yields a bytestring in both py2/py3.
        """
        from pandas.core.config import get_option

        # Encode with the configured display encoding, replacing
        # characters it cannot represent.
        encoding = get_option("display.encoding")
        return self.__unicode__().encode(encoding, 'replace')

    def __repr__(self):
        """
        Return a string representation for a particular object.

        Yields Bytestring in Py2, Unicode String in py3.
        """
        return str(self)

    def __hash__(self):
        # Subclasses must supply a hash consistent with their __eq__.
        raise NotImplementedError("sub-classes should implement an __hash__ "
                                  "method")

    def __getstate__(self):
        # pickle support; we don't want to pickle the cache
        return {k: getattr(self, k, None) for k in self._metadata}

    @classmethod
    def reset_cache(cls):
        """ clear the cache """
        cls._cache = {}
class CategoricalDtypeType(type):
    """
    the type of CategoricalDtype, this metaclass determines subclass ability
    """
    # Marker metaclass only; adds no behaviour beyond plain ``type``.
    pass
@register_extension_dtype
class CategoricalDtype(PandasExtensionDtype, ExtensionDtype):
"""
Type for categorical data with the categories and orderedness
.. versionchanged:: 0.21.0
Parameters
----------
categories : sequence, optional
Must be unique, and must not contain any nulls.
ordered : bool, default False
Attributes
----------
categories
ordered
Methods
-------
None
See Also
--------
pandas.Categorical
Notes
-----
This class is useful for specifying the type of a ``Categorical``
independent of the values. See :ref:`categorical.categoricaldtype`
for more.
Examples
--------
>>> t = pd.CategoricalDtype(categories=['b', 'a'], ordered=True)
>>> pd.Series(['a', 'b', 'a', 'c'], dtype=t)
0 a
1 b
2 a
3 NaN
dtype: category
Categories (2, object): [b < a]
"""
# TODO: Document public vs. private API
name = 'category'
type = CategoricalDtypeType
kind = 'O'
str = '|O08'
base = np.dtype('O')
_metadata = ('categories', 'ordered')
_cache = {}
def __init__(self, categories=None, ordered=None):
self._finalize(categories, ordered, fastpath=False)
@classmethod
def _from_fastpath(cls, categories=None, ordered=None):
self = cls.__new__(cls)
self._finalize(categories, ordered, fastpath=True)
return self
@classmethod
def _from_categorical_dtype(cls, dtype, categories=None, ordered=None):
if categories is ordered is None:
return dtype
if categories is None:
categories = dtype.categories
if ordered is None:
ordered = dtype.ordered
return cls(categories, ordered)
@classmethod
def _from_values_or_dtype(cls, values=None, categories=None, ordered=None,
dtype=None):
"""
Construct dtype from the input parameters used in :class:`Categorical`.
This constructor method specifically does not do the factorization
step, if that is needed to find the categories. This constructor may
therefore return ``CategoricalDtype(categories=None, ordered=None)``,
which may not be useful. Additional steps may therefore have to be
taken to create the final dtype.
The return dtype is specified from the inputs in this prioritized
order:
1. if dtype is a CategoricalDtype, return dtype
2. if dtype is the string 'category', create a CategoricalDtype from
the supplied categories and ordered parameters, and return that.
3. if values is a categorical, use value.dtype, but override it with
categories and ordered if either/both of those are not None.
4. if dtype is None and values is not a categorical, construct the
dtype from categories and ordered, even if either of those is None.
Parameters
----------
values : list-like, optional
The list-like must be 1-dimensional.
categories : list-like, optional
Categories for the CategoricalDtype.
ordered : bool, optional
Designating if the categories are ordered.
dtype : CategoricalDtype or the string "category", optional
If ``CategoricalDtype``, cannot be used together with
`categories` or `ordered`.
Returns
-------
CategoricalDtype
Examples
--------
>>> CategoricalDtype._from_values_or_dtype()
CategoricalDtype(categories=None, ordered=None)
>>> CategoricalDtype._from_values_or_dtype(categories=['a', 'b'],
... ordered=True)
CategoricalDtype(categories=['a', 'b'], ordered=True)
>>> dtype1 = CategoricalDtype(['a', 'b'], ordered=True)
>>> dtype2 = CategoricalDtype(['x', 'y'], ordered=False)
>>> c = Categorical([0, 1], dtype=dtype1, fastpath=True)
>>> CategoricalDtype._from_values_or_dtype(c, ['x', 'y'], ordered=True,
... dtype=dtype2)
ValueError: Cannot specify `categories` or `ordered` together with
`dtype`.
The supplied dtype takes precedence over values' dtype:
>>> CategoricalDtype._from_values_or_dtype(c, dtype=dtype2)
CategoricalDtype(['x', 'y'], ordered=False)
"""
from pandas.core.dtypes.common import is_categorical
if dtype is not None:
# The dtype argument takes precedence over values.dtype (if any)
if isinstance(dtype, compat.string_types):
if dtype == 'category':
dtype = CategoricalDtype(categories, ordered)
else:
msg = "Unknown dtype {dtype!r}"
raise ValueError(msg.format(dtype=dtype))
elif categories is not None or ordered is not None:
raise ValueError("Cannot specify `categories` or `ordered` "
"together with `dtype`.")
elif is_categorical(values):
# If no "dtype" was passed, use the one from "values", but honor
# the "ordered" and "categories" arguments
dtype = values.dtype._from_categorical_dtype(values.dtype,
categories, ordered)
else:
# If dtype=None and values is not categorical, create a new dtype.
# Note: This could potentially have categories=None and
# ordered=None.
dtype = CategoricalDtype(categories, ordered)
return dtype
def _finalize(self, categories, ordered, fastpath=False):
if ordered is not None:
self.validate_ordered(ordered)
if categories is not None:
categories = self.validate_categories(categories,
fastpath=fastpath)
self._categories = categories
self._ordered = ordered
def __setstate__(self, state):
self._categories = state.pop('categories', None)
self._ordered = state.pop('ordered', False)
def __hash__(self):
# _hash_categories returns a uint64, so use the negative
# space for when we have unknown categories to avoid a conflict
if self.categories is None:
if self.ordered:
return -1
else:
return -2
# We *do* want to include the real self.ordered here
return int(self._hash_categories(self.categories, self.ordered))
def __eq__(self, other):
"""
Rules for CDT equality:
1) Any CDT is equal to the string 'category'
2) Any CDT is equal to itself
3) Any CDT is equal to a CDT with categories=None regardless of ordered
4) A CDT with ordered=True is only equal to another CDT with
ordered=True and identical categories in the same order
5) A CDT with ordered={False, None} is only equal to another CDT with
ordered={False, None} and identical categories, but same order is
not required. There is no distinction between False/None.
6) Any other comparison returns False
"""
if isinstance(other, compat.string_types):
return other == self.name
elif other is self:
return True
elif not (hasattr(other, 'ordered') and hasattr(other, 'categories')):
return False
elif self.categories is None or other.categories is None:
# We're forced into a suboptimal corner thanks to math and
# backwards compatibility. We require that `CDT(...) == 'category'`
# for all CDTs **including** `CDT(None, ...)`. Therefore, *all*
# CDT(., .) = CDT(None, False) and *all*
# CDT(., .) = CDT(None, True).
return True
elif self.ordered or other.ordered:
# At least one has ordered=True; equal if both have ordered=True
# and the same values for categories in the same order.
return ((self.ordered == other.ordered) and
self.categories.equals(other.categories))
else:
# Neither has ordered=True; equal if both have the same categories,
# but same order is not necessary. There is no distinction between
# ordered=False and ordered=None: CDT(., False) and CDT(., None)
# will be equal if they have the same categories.
return hash(self) == hash(other)
def __repr__(self):
tpl = u'CategoricalDtype(categories={}ordered={})'
if self.categories is None:
data = u"None, "
else:
data = self.categories._format_data(name=self.__class__.__name__)
return tpl.format(data, self.ordered)
@staticmethod
def _hash_categories(categories, ordered=True):
from pandas.core.util.hashing import (
hash_array, _combine_hash_arrays, hash_tuples
)
from pandas.core.dtypes.common import is_datetime64tz_dtype, _NS_DTYPE
if len(categories) and isinstance(categories[0], tuple):
# assumes if any individual category is a tuple, then all our. ATM
# I don't really want to support just some of the categories being
# tuples.
categories = list(categories) # breaks if a np.array of categories
cat_array = hash_tuples(categories)
else:
if categories.dtype == 'O':
types = [type(x) for x in categories]
if not len(set(types)) == 1:
# TODO: hash_array doesn't handle mixed types. It casts
# everything to a str first, which means we treat
# {'1', '2'} the same as {'1', 2}
# find a better solution
hashed = hash((tuple(categories), ordered))
return hashed
if is_datetime64tz_dtype(categories.dtype):
# Avoid future warning.
categories = categories.astype(_NS_DTYPE)
cat_array = hash_array(np.asarray(categories), categorize=False)
if ordered:
cat_array = np.vstack([
cat_array, np.arange(len(cat_array), dtype=cat_array.dtype)
])
else:
cat_array = [cat_array]
hashed = _combine_hash_arrays(iter(cat_array),
num_items=len(cat_array))
return np.bitwise_xor.reduce(hashed)
@classmethod
def construct_array_type(cls):
    """Return the array class (``Categorical``) backing this dtype.

    Returns
    -------
    type
    """
    # Local import avoids a circular dependency at module load time.
    from pandas import Categorical
    return Categorical
@classmethod
def construct_from_string(cls, string):
    """
    Attempt to construct this type from a string.

    Parameters
    ----------
    string : object
        Only the exact alias ``'category'`` is accepted.

    Returns
    -------
    CategoricalDtype

    Raises
    ------
    TypeError
        If ``string`` is anything other than ``'category'``.
    """
    if string == 'category':
        return cls()
    # The previous implementation wrapped this in a try/except whose
    # ``except AttributeError: pass`` was unreachable for normal inputs
    # but could make the method fall through and implicitly return None;
    # always raising keeps the contract consistent for callers.
    raise TypeError("cannot construct a CategoricalDtype")
@staticmethod
def validate_ordered(ordered):
"""
Validates that we have a valid ordered parameter. If
it is not a boolean, a TypeError will be raised.
Parameters
----------
ordered : object
The parameter to be verified.
Raises
------
TypeError
If 'ordered' is not a boolean.
"""
from pandas.core.dtypes.common import is_bool
if not is_bool(ordered):
raise TypeError("'ordered' must either be 'True' or 'False'")
@staticmethod
def validate_categories(categories, fastpath=False):
    """
    Validates that we have good categories.

    Parameters
    ----------
    categories : array-like
    fastpath : bool
        Whether to skip nan and uniqueness checks

    Returns
    -------
    categories : Index

    Raises
    ------
    TypeError
        If ``categories`` is not list-like.
    ValueError
        If ``categories`` contains nulls or duplicates (only checked
        when ``fastpath`` is False).
    """
    from pandas import Index

    if not fastpath and not is_list_like(categories):
        msg = "Parameter 'categories' must be list-like, was {!r}"
        raise TypeError(msg.format(categories))
    elif not isinstance(categories, ABCIndexClass):
        # coerce plain sequences to an Index; tuples stay single elements
        categories = Index(categories, tupleize_cols=False)

    if not fastpath:
        if categories.hasnans:
            # fixed typo in the error message: "Categorial" -> "Categorical"
            raise ValueError('Categorical categories cannot be null')

        if not categories.is_unique:
            raise ValueError('Categorical categories must be unique')

    if isinstance(categories, ABCCategoricalIndex):
        # unwrap a CategoricalIndex down to its plain categories Index
        categories = categories.categories

    return categories
def update_dtype(self, dtype):
    """
    Return a CategoricalDtype whose categories/ordered come from ``dtype``
    where specified, falling back to ``self`` where unspecified.

    Parameters
    ----------
    dtype : CategoricalDtype

    Returns
    -------
    new_dtype : CategoricalDtype

    Raises
    ------
    ValueError
        If ``dtype`` is not a CategoricalDtype (or the 'category' alias).
    """
    if isinstance(dtype, compat.string_types) and dtype == 'category':
        # dtype='category' should not change anything
        return self
    elif not self.is_dtype(dtype):
        msg = ('a CategoricalDtype must be passed to perform an update, '
               'got {dtype!r}').format(dtype=dtype)
        raise ValueError(msg)
    elif dtype.categories is not None and dtype.ordered is self.ordered:
        # nothing would change -- reuse the incoming dtype as-is
        return dtype

    # dtype is a CategoricalDtype: keep our categories/ordered for any
    # field the incoming dtype leaves as None
    new_categories = (self.categories if dtype.categories is None
                      else dtype.categories)
    new_ordered = self.ordered if dtype.ordered is None else dtype.ordered
    return CategoricalDtype(new_categories, new_ordered)
@property
def categories(self):
    """
    An ``Index`` containing the unique categories allowed.
    """
    return self._categories

@property
def ordered(self):
    """
    Whether the categories have an ordered relationship.
    """
    return self._ordered

@property
def _is_boolean(self):
    # True when every category is boolean-typed, letting this dtype be
    # treated as boolean-like by dtype introspection helpers.
    from pandas.core.dtypes.common import is_bool_dtype

    return is_bool_dtype(self.categories)
@register_extension_dtype
class DatetimeTZDtype(PandasExtensionDtype, ExtensionDtype):
    """
    A np.dtype duck-typed class, suitable for holding a custom datetime with tz
    dtype.

    THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of
    np.datetime64[ns]
    """

    # np.dtype-compatible class attributes so this object can stand in
    # for a numpy dtype in most dispatch code.
    type = Timestamp
    kind = 'M'
    str = '|M8[ns]'
    num = 101
    base = np.dtype('M8[ns]')
    na_value = NaT
    _metadata = ('unit', 'tz')
    # parses aliases such as "datetime64[ns, UTC]" / "M8[ns, UTC]"
    _match = re.compile(r"(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]")
    _cache = {}

    def __init__(self, unit="ns", tz=None):
        """
        An ExtensionDtype for timezone-aware datetime data.

        Parameters
        ----------
        unit : str, default "ns"
            The precision of the datetime data. Currently limited
            to ``"ns"``.
        tz : str, int, or datetime.tzinfo
            The timezone.

        Raises
        ------
        pytz.UnknownTimeZoneError
            When the requested timezone cannot be found.

        Examples
        --------
        >>> pd.core.dtypes.dtypes.DatetimeTZDtype(tz='UTC')
        datetime64[ns, UTC]

        >>> pd.core.dtypes.dtypes.DatetimeTZDtype(tz='dateutil/US/Central')
        datetime64[ns, tzfile('/usr/share/zoneinfo/US/Central')]
        """
        if isinstance(unit, DatetimeTZDtype):
            # copy-constructor: reuse the unit/tz of an existing instance
            unit, tz = unit.unit, unit.tz

        if unit != 'ns':
            if isinstance(unit, compat.string_types) and tz is None:
                # maybe a string like datetime64[ns, tz], which we support for
                # now.
                result = type(self).construct_from_string(unit)
                unit = result.unit
                tz = result.tz
                msg = (
                    "Passing a dtype alias like 'datetime64[ns, {tz}]' "
                    "to DatetimeTZDtype is deprecated. Use "
                    "'DatetimeTZDtype.construct_from_string()' instead."
                )
                warnings.warn(msg.format(tz=tz), FutureWarning, stacklevel=2)
            else:
                raise ValueError("DatetimeTZDtype only supports ns units")

        if tz:
            tz = timezones.maybe_get_tz(tz)
        elif tz is not None:
            # falsy-but-not-None (e.g. 0 or "") is not a valid timezone
            raise pytz.UnknownTimeZoneError(tz)
        elif tz is None:
            raise TypeError("A 'tz' is required.")

        self._unit = unit
        self._tz = tz

    @property
    def unit(self):
        """The precision of the datetime data."""
        return self._unit

    @property
    def tz(self):
        """The timezone."""
        return self._tz

    @classmethod
    def construct_array_type(cls):
        """
        Return the array type associated with this dtype

        Returns
        -------
        type
        """
        # imported lazily to avoid a circular import at module load time
        from pandas.core.arrays import DatetimeArray
        return DatetimeArray

    @classmethod
    def construct_from_string(cls, string):
        """
        Construct a DatetimeTZDtype from a string.

        Parameters
        ----------
        string : str
            The string alias for this DatetimeTZDtype.
            Should be formatted like ``datetime64[ns, <tz>]``,
            where ``<tz>`` is the timezone name.

        Examples
        --------
        >>> DatetimeTZDtype.construct_from_string('datetime64[ns, UTC]')
        datetime64[ns, UTC]
        """
        if isinstance(string, compat.string_types):
            msg = "Could not construct DatetimeTZDtype from '{}'"
            try:
                match = cls._match.match(string)
                if match:
                    d = match.groupdict()
                    return cls(unit=d['unit'], tz=d['tz'])
            except Exception:
                # any constructor failure is normalised to TypeError below
                # TODO(py3): Change this pass to `raise TypeError(msg) from e`
                pass
            raise TypeError(msg.format(string))
        raise TypeError("Could not construct DatetimeTZDtype")

    def __unicode__(self):
        return "datetime64[{unit}, {tz}]".format(unit=self.unit, tz=self.tz)

    @property
    def name(self):
        """A string representation of the dtype."""
        return str(self)

    def __hash__(self):
        # make myself hashable
        # TODO: update this.
        return hash(str(self))

    def __eq__(self, other):
        # a matching string alias also compares equal
        if isinstance(other, compat.string_types):
            return other == self.name

        return (isinstance(other, DatetimeTZDtype) and
                self.unit == other.unit and
                str(self.tz) == str(other.tz))

    def __setstate__(self, state):
        # for pickle compat.
        self._tz = state['tz']
        self._unit = state['unit']
@register_extension_dtype
class PeriodDtype(ExtensionDtype, PandasExtensionDtype):
    """
    A Period duck-typed class, suitable for holding a period with freq dtype.

    THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of np.int64.
    """

    # np.dtype-compatible attributes; from numpy's point of view the
    # values are stored as objects.
    type = Period
    kind = 'O'
    str = '|O08'
    base = np.dtype('O')
    num = 102
    _metadata = ('freq',)
    # parses aliases such as "period[D]" / "Period[D]"
    _match = re.compile(r"(P|p)eriod\[(?P<freq>.+)\]")
    # instances are interned per frequency string (see __new__)
    _cache = {}

    def __new__(cls, freq=None):
        """
        Parameters
        ----------
        freq : frequency
        """
        if isinstance(freq, PeriodDtype):
            return freq
        elif freq is None:
            # empty constructor for pickle compat
            return object.__new__(cls)

        from pandas.tseries.offsets import DateOffset
        if not isinstance(freq, DateOffset):
            freq = cls._parse_dtype_strict(freq)

        try:
            # reuse the interned instance for this frequency if one exists
            return cls._cache[freq.freqstr]
        except KeyError:
            u = object.__new__(cls)
            u.freq = freq
            cls._cache[freq.freqstr] = u
            return u

    @classmethod
    def _parse_dtype_strict(cls, freq):
        # Accept either a "period[<freq>]"/"Period[<freq>]" alias or a
        # bare frequency string; returns a DateOffset or raises ValueError.
        if isinstance(freq, compat.string_types):
            if freq.startswith('period[') or freq.startswith('Period['):
                m = cls._match.search(freq)
                if m is not None:
                    freq = m.group('freq')
            from pandas.tseries.frequencies import to_offset
            freq = to_offset(freq)
            if freq is not None:
                return freq

        raise ValueError("could not construct PeriodDtype")

    @classmethod
    def construct_from_string(cls, string):
        """
        Strict construction from a string, raise a TypeError if not
        possible
        """
        from pandas.tseries.offsets import DateOffset
        if (isinstance(string, compat.string_types) and
            (string.startswith('period[') or
             string.startswith('Period[')) or
                isinstance(string, DateOffset)):
            # do not parse string like U as period[U]
            # avoid tuple to be regarded as freq
            try:
                return cls(freq=string)
            except ValueError:
                pass
        raise TypeError("could not construct PeriodDtype")

    def __unicode__(self):
        return compat.text_type(self.name)

    @property
    def name(self):
        # str() keeps the native str type on both Python 2 and 3
        return str("period[{freq}]".format(freq=self.freq.freqstr))

    @property
    def na_value(self):
        # periods use NaT as their missing-value marker
        return NaT

    def __hash__(self):
        # make myself hashable
        return hash(str(self))

    def __eq__(self, other):
        if isinstance(other, compat.string_types):
            # accept both "period[D]" and "Period[D]" spellings
            return other == self.name or other == self.name.title()

        return isinstance(other, PeriodDtype) and self.freq == other.freq

    @classmethod
    def is_dtype(cls, dtype):
        """
        Return a boolean if we if the passed type is an actual dtype that we
        can match (via string or type)
        """
        if isinstance(dtype, compat.string_types):
            # PeriodDtype can be instantiated from freq string like "U",
            # but doesn't regard freq str like "U" as dtype.
            if dtype.startswith('period[') or dtype.startswith('Period['):
                try:
                    if cls._parse_dtype_strict(dtype) is not None:
                        return True
                    else:
                        return False
                except ValueError:
                    return False
            else:
                return False
        return super(PeriodDtype, cls).is_dtype(dtype)

    @classmethod
    def construct_array_type(cls):
        # backing array class; imported lazily to avoid circular imports
        from pandas.core.arrays import PeriodArray
        return PeriodArray
@register_extension_dtype
class IntervalDtype(PandasExtensionDtype, ExtensionDtype):
    """
    A Interval duck-typed class, suitable for holding an interval

    THIS IS NOT A REAL NUMPY DTYPE
    """

    # np.dtype-compatible attributes; intervals are object-backed.
    name = 'interval'
    kind = None
    str = '|O08'
    base = np.dtype('O')
    num = 103
    _metadata = ('subtype',)
    # parses aliases such as "interval[int64]" / "Interval[int64]"
    _match = re.compile(r"(I|i)nterval\[(?P<subtype>.+)\]")
    # instances are interned per subtype (see __new__)
    _cache = {}

    def __new__(cls, subtype=None):
        """
        Parameters
        ----------
        subtype : the dtype of the Interval
        """
        from pandas.core.dtypes.common import (
            is_categorical_dtype, is_string_dtype, pandas_dtype)

        if isinstance(subtype, IntervalDtype):
            return subtype
        elif subtype is None:
            # we are called as an empty constructor
            # generally for pickle compat
            u = object.__new__(cls)
            u.subtype = None
            return u
        elif (isinstance(subtype, compat.string_types) and
              subtype.lower() == 'interval'):
            # the bare alias means "interval with unspecified subtype"
            subtype = None
        else:
            if isinstance(subtype, compat.string_types):
                m = cls._match.search(subtype)
                if m is not None:
                    subtype = m.group('subtype')

            try:
                subtype = pandas_dtype(subtype)
            except TypeError:
                raise TypeError("could not construct IntervalDtype")

        if is_categorical_dtype(subtype) or is_string_dtype(subtype):
            # GH 19016
            msg = ('category, object, and string subtypes are not supported '
                   'for IntervalDtype')
            raise TypeError(msg)

        try:
            # reuse the interned instance for this subtype if one exists
            return cls._cache[str(subtype)]
        except KeyError:
            u = object.__new__(cls)
            u.subtype = subtype
            cls._cache[str(subtype)] = u
            return u

    @classmethod
    def construct_array_type(cls):
        """
        Return the array type associated with this dtype

        Returns
        -------
        type
        """
        # imported lazily to avoid a circular import at module load time
        from pandas.core.arrays import IntervalArray
        return IntervalArray

    @classmethod
    def construct_from_string(cls, string):
        """
        attempt to construct this type from a string, raise a TypeError
        if its not possible
        """
        if (isinstance(string, compat.string_types) and
                (string.startswith('interval') or
                 string.startswith('Interval'))):
            return cls(string)

        msg = "a string needs to be passed, got type {typ}"
        raise TypeError(msg.format(typ=type(string)))

    @property
    def type(self):
        return Interval

    def __unicode__(self):
        if self.subtype is None:
            return "interval"
        return "interval[{subtype}]".format(subtype=self.subtype)

    def __hash__(self):
        # make myself hashable
        return hash(str(self))

    def __eq__(self, other):
        if isinstance(other, compat.string_types):
            # accept "interval"/"Interval"/"interval[...]" aliases
            return other.lower() in (self.name.lower(), str(self).lower())
        elif not isinstance(other, IntervalDtype):
            return False
        elif self.subtype is None or other.subtype is None:
            # None should match any subtype
            return True
        else:
            from pandas.core.dtypes.common import is_dtype_equal
            return is_dtype_equal(self.subtype, other.subtype)

    @classmethod
    def is_dtype(cls, dtype):
        """
        Return a boolean if we if the passed type is an actual dtype that we
        can match (via string or type)
        """
        if isinstance(dtype, compat.string_types):
            if dtype.lower().startswith('interval'):
                try:
                    if cls.construct_from_string(dtype) is not None:
                        return True
                    else:
                        return False
                except (TypeError, ValueError):
                    # BUGFIX: construct_from_string/__new__ raise TypeError
                    # (not ValueError) for an unparseable subtype such as
                    # "interval[foo]"; previously only ValueError was caught
                    # so is_dtype crashed instead of returning False.
                    return False
            else:
                return False
        return super(IntervalDtype, cls).is_dtype(dtype)
| bsd-3-clause |
OpenPHDGuiding/phd2 | contributions/MPI_IS_gaussian_process/tools/plot_gp_data.py | 1 | 1589 | #!/usr/bin/env python
from numpy import genfromtxt
import matplotlib.pyplot as plt
def read_data():
    """Load the measurement samples and GP predictions from CSV files.

    Returns
    -------
    tuple of 1-D arrays
        ``(location, output, x_range, mean, std)``: the first two columns
        come from ``measurement_data.csv``, the three others from
        ``gp_data.csv``.  The header row of each file is dropped.
    """
    measurements = genfromtxt('measurement_data.csv', delimiter=',')[1:, :]
    predictions = genfromtxt('gp_data.csv', delimiter=',')[1:, :]

    location, output = measurements[:, 0], measurements[:, 1]
    x_range, mean, std = (predictions[:, 0],
                          predictions[:, 1],
                          predictions[:, 2])
    return location, output, x_range, mean, std
def update_plot(fig, axes, p1, p2, p3, p4):
    """Refresh the four line artists with the latest data read from disk.

    Parameters
    ----------
    fig : matplotlib.figure.Figure
        Figure whose canvas is redrawn after the data are swapped in.
    axes : matplotlib.axes.Axes
        Axes whose limits are rescaled to the new data.
    p1, p2, p3, p4 : matplotlib.lines.Line2D
        Artists for the measurements, the GP mean, and the upper/lower
        two-standard-deviation bounds, respectively.
    """
    location, output, x_range, mean, std = read_data()
    p1.set_data(location, output)
    p2.set_data(x_range, mean)
    p3.set_data(x_range, mean + 2 * std)  # upper 2-sigma bound
    p4.set_data(x_range, mean - 2 * std)  # lower 2-sigma bound
    # Recompute the data limits and rescale so the new points stay visible.
    axes.relim()
    axes.autoscale_view(True, True, True)
    fig.canvas.draw()
def main():
    """Continuously re-plot the GP data files, refreshing once per second."""
    print("main function started")
    plt.ion()  # interactive mode keeps the window responsive between draws
    fig = plt.figure()
    axes = fig.add_subplot(111)
    # Create the artists once with empty data; update_plot() only swaps
    # their data arrays on each refresh.
    p1, = plt.plot([], [], 'r+')   # measurements
    p2, = plt.plot([], [], '-b')   # GP mean
    p3, = plt.plot([], [], ':b')   # mean + 2*std
    p4, = plt.plot([], [], ':b')   # mean - 2*std
    while True:
        try:
            update_plot(fig, axes, p1, p2, p3, p4)
        except Exception:
            # The CSV files may be mid-write; skip this refresh and retry.
            # (The previous bare ``except:`` also swallowed KeyboardInterrupt
            # and SystemExit, making the loop hard to stop.)
            pass
        plt.pause(1.0)


if __name__ == "__main__":
    main()
| bsd-3-clause |
equialgo/scikit-learn | sklearn/utils/setup.py | 77 | 2993 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
    """numpy.distutils configuration for the ``sklearn.utils`` package.

    Declares the Cython/C extension modules of the package together with
    their CBLAS and libm link requirements.
    """
    import numpy
    from numpy.distutils.misc_util import Configuration

    config = Configuration('utils', parent_package, top_path)
    config.add_subpackage('sparsetools')

    # Locate CBLAS; the remaining ``blas_info`` keys are forwarded to the
    # extensions that link against it.
    cblas_libs, blas_info = get_blas_info()
    cblas_compile_args = blas_info.pop('extra_compile_args', [])
    cblas_includes = [join('..', 'src', 'cblas'),
                      numpy.get_include(),
                      blas_info.pop('include_dirs', [])]
    # NOTE(review): the third entry above is itself a list, so
    # ``cblas_includes`` is nested -- presumably numpy.distutils tolerates
    # this; confirm before flattening.

    libraries = []
    if os.name == 'posix':
        # link libm for the C math functions on POSIX systems
        libraries.append('m')
        cblas_libs.append('m')

    config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.pyx'],
                         libraries=libraries)

    config.add_extension('arrayfuncs',
                         sources=['arrayfuncs.pyx'],
                         depends=[join('src', 'cholesky_delete.h')],
                         libraries=cblas_libs,
                         include_dirs=cblas_includes,
                         extra_compile_args=cblas_compile_args,
                         **blas_info
                         )

    config.add_extension('murmurhash',
                         sources=['murmurhash.pyx', join(
                             'src', 'MurmurHash3.cpp')],
                         include_dirs=['src'])

    config.add_extension('lgamma',
                         sources=['lgamma.pyx', join('src', 'gamma.c')],
                         include_dirs=['src'],
                         libraries=libraries)

    config.add_extension('graph_shortest_path',
                         sources=['graph_shortest_path.pyx'],
                         include_dirs=[numpy.get_include()])

    config.add_extension('fast_dict',
                         sources=['fast_dict.pyx'],
                         language="c++",
                         include_dirs=[numpy.get_include()],
                         libraries=libraries)

    config.add_extension('seq_dataset',
                         sources=['seq_dataset.pyx'],
                         include_dirs=[numpy.get_include()])

    config.add_extension('weight_vector',
                         sources=['weight_vector.pyx'],
                         include_dirs=cblas_includes,
                         libraries=cblas_libs,
                         **blas_info)

    config.add_extension("_random",
                         sources=["_random.pyx"],
                         include_dirs=[numpy.get_include()],
                         libraries=libraries)

    config.add_extension("_logistic_sigmoid",
                         sources=["_logistic_sigmoid.pyx"],
                         include_dirs=[numpy.get_include()],
                         libraries=libraries)

    config.add_subpackage('tests')

    return config
if __name__ == '__main__':
    # Allows building this subpackage standalone, e.g.
    # ``python setup.py build_ext --inplace``.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| bsd-3-clause |
ycaihua/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 142 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latents vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)

# Each dataset sees the same two latent signals (duplicated) plus noise.
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))

# Use integer (floor) division for the split index: ``n / 2`` is a float
# under Python 3 and raises TypeError when used as a slice index.
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noize
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coefs with B
print("Estimated B")
print(np.round(pls2.coefs, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of compements exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coefs, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
# Transform with the freshly fitted CCA model.  Previously these two lines
# mistakenly reused the earlier ``plsca`` (PLSCanonical) estimator, so the
# CCA fit above had no effect on the transformed scores.
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
rknLA/sms-tools | lectures/05-Sinusoidal-model/plots-code/sineModel-anal-synth.py | 24 | 1483 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import sineModel as SM
import utilFunctions as UF
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/bendir.wav'))
x1 = x[0:50000]
# Analysis/synthesis parameters for the sinusoidal model.
w = np.blackman(2001)   # analysis window
N = 2048                # analysis FFT size
t = -90                 # peak magnitude threshold in dB
minSineDur = .01        # minimum duration of a sinusoidal track (seconds)
maxnSines = 150         # maximum number of simultaneous sinusoids
freqDevOffset = 20      # allowed frequency deviation at 0 Hz
freqDevSlope = 0.02     # slope of the allowed deviation with frequency
Ns = 512                # synthesis FFT size
# Hop size.  Floor division keeps H an int under Python 3 (``Ns / 4``
# would be a float); the earlier dead assignment ``H = 500``, which was
# immediately overwritten, has been removed.
H = Ns // 4
tfreq, tmag, tphase = SM.sineModelAnal(x1, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
y = SM.sineModelSynth(tfreq, tmag, tphase, Ns, H, fs)
numFrames = int(tfreq[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
maxplotfreq = 3000.0
plt.figure(1, figsize=(9, 7))
plt.subplot(3,1,1)
plt.plot(np.arange(x1.size)/float(fs), x1, 'b', lw=1.5)
plt.axis([0,x1.size/float(fs),min(x1),max(x1)])
plt.title('x (bendir.wav)')
plt.subplot(3,1,2)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('f_t, sine frequencies')
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y, 'b', lw=1.5)
plt.axis([0,y.size/float(fs),min(y),max(y)])
plt.title('y')
plt.tight_layout()
UF.wavwrite(y, fs, 'bendir-sine-synthesis.wav')
plt.savefig('sineModel-anal-synth.png')
plt.show()
| agpl-3.0 |
yorkerlin/shogun | examples/undocumented/python_modular/graphical/inverse_covariance_estimation_demo.py | 26 | 2520 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from pylab import show, imshow
def simulate_data(n, p):
    """Draw ``n`` samples of ``p`` correlated Gaussian variables.

    Parameters
    ----------
    n : int
        Number of observations.
    p : int
        Number of variables.

    Returns
    -------
    numpy.ndarray of shape (n, p)
        Samples from ``np.random.multivariate_normal`` with zero mean.
    """
    # The previously present ``from modshogun import ...`` was unused and
    # has been removed, so this helper no longer requires shogun.
    import numpy as np

    # create a random pxp covariance matrix
    # NOTE(review): this matrix is generally not symmetric positive
    # semi-definite, so numpy may warn that it is not a valid covariance.
    cov = np.random.normal(size=(p, p))

    # generate data set with multivariate Gaussian distribution
    mean = [0] * p
    data = np.random.multivariate_normal(mean, cov, n)

    return data
def inverse_covariance(data, lc):
    """Estimate a sparse inverse covariance (precision) matrix.

    Parameters
    ----------
    data : numpy.ndarray of shape (n, p)
        Observations in rows, variables in columns.
    lc : float
        Penalty passed to the shogun estimator; larger values yield a
        sparser estimate.

    Returns
    -------
    The estimated sparse inverse covariance matrix.
    """
    from modshogun import SparseInverseCovariance

    sic = SparseInverseCovariance()

    # by default cov() expects each row to represent a variable, with
    # observations in the columns, hence the transpose
    cov = np.cov(data.T)

    # compute inverse covariance matrix
    # (unused locals ``max_cov``/``min_cov`` and the unused
    # ``from numpy import dot`` were removed)
    return sic.estimate(cov, lc)
def draw_graph(sic, subplot):
    """Draw the dependency graph implied by a sparse inverse covariance.

    A nonzero entry (i, j) of ``sic`` marks variables i and j as
    conditionally dependent given all the others; a zero entry marks
    conditional independence.

    :param sic: (p, p) inverse covariance matrix
    :param subplot: matplotlib axes the networkx graph is drawn into
    :return: array of (i, j) index pairs for the nonzero entries
    """
    import numpy as np
    import networkx as nx

    # create list of edges
    # an edge means there is a dependency between variables
    # 0 value in sic matrix mean independent variables given all the other variables
    p = sic.shape[0]
    X, Y = np.meshgrid(range(p), range(p))
    graph = np.array((X[sic != 0], Y[sic != 0])).T

    # extract nodes from graph
    nodes = set([n1 for n1, n2 in graph] + [n2 for n1, n2 in graph])

    # create networkx graph
    G = nx.Graph()

    # add nodes
    for node in nodes:
        G.add_node(node)

    # add edges
    for edge in graph:
        G.add_edge(edge[0], edge[1])

    # draw graph
    nx.draw(G, ax=subplot)

    # return the raw edge list; note it contains both (i, j) and (j, i)
    # as well as the diagonal self-pairs
    return graph
if __name__ == '__main__':
    # edit here for your own simulation
    num_observations = 100
    num_variables = 11
    penalties = [0.00001, 0.05, 0.1, 0.5, 1, 2]

    columns = len(penalties)

    # plot the heat map and the graphs of dependency between variables
    # for different penalty values
    f, axarr = plt.subplots(2, columns)
    f.suptitle('Inverse Covariance Estimation\nfor ' + str(num_variables) +
               ' variables and ' + str(num_observations) + ' observations',
               fontsize=20)

    data = simulate_data(num_observations, num_variables)
    # print() call form works on both Python 2 and 3 (the original used
    # the Python-2-only ``print data.shape`` statement)
    print(data.shape)

    column = -1
    for p in penalties:
        column = column + 1
        sic = inverse_covariance(data, p)
        i = axarr[0, column].imshow(sic, cmap="hot", interpolation='nearest')
        axarr[0, column].set_title('penalty=' + str(p), fontsize=10)
        graph = draw_graph(sic, plt.subplot(2, columns, column + columns + 1))
        # graph holds both (i, j) and (j, i) plus the p diagonal entries,
        # hence the subtraction and halving; ``//`` keeps it an int on
        # Python 3.  "depedences" typo in the label fixed.
        axarr[1, column].set_title(
            str((len(graph) - num_variables) // 2) + ' dependences',
            fontsize=10)

    f.subplots_adjust(right=0.8)
    cbar_ax = f.add_axes([0.85, 0.15, 0.05, 0.7])
    f.colorbar(i, cax=cbar_ax)

    show()
| gpl-3.0 |
DigasNikas/PyRecommender | recommender/content/description_based.py | 1 | 3536 | """
# By Diogo Nicolau
"""
import pandas as pd
import time
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
from nltk.corpus import stopwords
import datetime
import re
clean_r = re.compile('<.*?>')
stops = set(stopwords.words("english"))
def main(data_source, output, number_recs):
    """Load the dataset, clean the descriptions, and train the recommender.

    Parameters
    ----------
    data_source : str
        Path of a CSV file with at least ``description`` and ``id`` columns.
    output : str
        Path of the file the recommendations are written to.
    number_recs : int or str
        Number of similar items kept per item (forwarded to ``_train``).
    """
    start = time.time()
    # ``with`` guarantees the log file is closed even if training fails
    # (previously the handle was opened and never closed).
    with open('log.txt', 'a') as logfile:
        print('\nProcess started....')
        logfile.write('\nProcess started....\n')
        print('Loading input file....')
        logfile.write('Loading input file....\n')
        ds = pd.read_csv(data_source)
        ds['description'] = ds['description'].apply(clean)
        print("Training data ingested in %s seconds." % (time.time() - start))
        start = time.time()
        _train(ds, output, number_recs, logfile)
    print("Engine trained in %s seconds." % (time.time() - start))
def clean(raw):
    """
    :param raw: the raw description (may contain HTML markup)
    :return: the description lower-cased, with HTML tags and English
        stopwords removed; every kept word is prefixed by a single space,
        matching the original output format exactly
    """
    # strip HTML tags first
    clean_text = re.sub(clean_r, '', raw)
    # str.join replaces the previous quadratic ``result += ...`` loop and
    # lower-cases each word only once instead of twice
    words = (word.lower() for word in clean_text.split())
    return ''.join(' ' + word for word in words if word not in stops)
def _train(ds, output, number_recs, logfile):
    """
    Train the engine.

    Create a TF-IDF matrix of unigrams, bigrams, and trigrams
    for each product. The 'stop_words' param tells the TF-IDF
    module to ignore common english words like 'the', etc.

    Then we compute similarity between all products using
    SciKit Learn's linear_kernel (which in this case is
    equivalent to cosine similarity).

    Iterate through each item's similar items and store the
    n most-similar.

    Similarities and their scores are stored in txt as a
    Sorted Set, with one set for each item.

    :param ds: A pandas dataset containing two fields: description & id
    :param output: path of the text file the recommendations are written to
    :param number_recs: number of similar items to keep per item
    :param logfile: open file handle progress messages are appended to
    :return: txt file with recommendations per item
    """
    dt = datetime.datetime
    print('Training started....')
    logfile.write('Training started....\n')
    print('TF-IDF Vectorizing....')
    logfile.write('TF-IDF Vectorizing....\n')
    tf = TfidfVectorizer(analyzer='word',
                         ngram_range=(1, 3),
                         min_df=0,
                         stop_words='english')
    print('Classification of text.... [{}]'.format(dt.now()))
    logfile.write('Classification of text.... [{}]\n'.format(dt.now()))
    tfidf_matrix = tf.fit_transform(ds['description'])
    print('Extract similarity matrix...[{}]'.format(dt.now()))
    logfile.write('Extract similarity matrix...[{}]\n'.format(dt.now()))
    # Pairwise similarity of all items; linear_kernel on L2-normalised
    # TF-IDF rows equals cosine similarity.
    # NOTE(review): this materialises an n x n matrix, so memory grows
    # quadratically with the catalogue size.
    cosine_similarities = linear_kernel(tfidf_matrix, tfidf_matrix)
    print('Finding most similar items started....[{}]'.format(dt.now()))
    logfile.write('Finding most similar items started....[{}]\n'.format(dt.now()))
    with open(output, 'w') as outf:
        for idx, row in ds.iterrows():
            # argsort ascending, walked backwards -> indices of the
            # highest-similarity items (the item itself is filtered below)
            similar_indices = cosine_similarities[idx].argsort()[:-int(number_recs):-1]
            similar_items = [(cosine_similarities[idx][i], ds['id'][i])
                             for i in similar_indices]
            # one line per item: "id",("similar_id",score),...
            result_string = '"{}"'.format(ds['id'][idx])
            for similar_item in similar_items:
                if similar_item[1] != ds['id'][idx]:
                    result_string += ',("{}",{})'.format(str(similar_item[1]), str(similar_item[0]))
            outf.write(result_string+"\n")
if __name__ == '__main__':
    # CLI usage: description_based.py <input_csv> <output_file> <n_recs>
    main(sys.argv[1], sys.argv[2], sys.argv[3])
| mit |
IshankGulati/scikit-learn | sklearn/neighbors/__init__.py | 71 | 1025 | """
The :mod:`sklearn.neighbors` module implements the k-nearest neighbors
algorithm.
"""
from .ball_tree import BallTree
from .kd_tree import KDTree
from .dist_metrics import DistanceMetric
from .graph import kneighbors_graph, radius_neighbors_graph
from .unsupervised import NearestNeighbors
from .classification import KNeighborsClassifier, RadiusNeighborsClassifier
from .regression import KNeighborsRegressor, RadiusNeighborsRegressor
from .nearest_centroid import NearestCentroid
from .kde import KernelDensity
from .approximate import LSHForest
from .lof import LocalOutlierFactor
__all__ = ['BallTree',
'DistanceMetric',
'KDTree',
'KNeighborsClassifier',
'KNeighborsRegressor',
'NearestCentroid',
'NearestNeighbors',
'RadiusNeighborsClassifier',
'RadiusNeighborsRegressor',
'kneighbors_graph',
'radius_neighbors_graph',
'KernelDensity',
'LSHForest',
'LocalOutlierFactor']
| bsd-3-clause |
tjflexic/psb-adr | src/ensemble.py | 1 | 3468 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from concept_matching import run_cm
from maxent_tfidf import run_tfidf
from maxent_nblcr import run_nblcr
from maxent_we import run_we
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import f1_score, classification_report
def find_best_weights(train, test, clf, thresh=0.5):
    """Grid-search the convex-combination weights of the four base models.

    Each base model is run once; then every weight triple
    ``(alpha1, alpha2, alpha3)`` over [0, 1] in steps of 0.05 is scanned
    (the fourth weight is the remainder ``1 - alpha1 - alpha2 - alpha3``)
    and the combination maximising the F1 score on ``test`` is kept.

    :param train: training tweets DataFrame
    :param test: labelled evaluation DataFrame (uses column 'label')
    :param clf: scikit-learn classifier used by the probabilistic models
    :param thresh: ensemble score threshold for predicting class 1
    :return: ([alpha1, alpha2, alpha3], best F1 score)
    """
    # run the four base models once, up front
    y_pred_cm = run_cm(train, test, '../data/ADR-lexicon.txt')
    _, y_prob_tfidf = run_tfidf(train, test, grams='123', n_dim=40000, clf=clf)
    _, y_prob_nblcr = run_nblcr(train, test, '../data/nblcr', grams='123', clf=clf)
    _, y_prob_we = run_we(train, test, '../data/w2v_150.txt', 150, clf=clf)

    # 21 points -> step of 0.05 over [0, 1]
    alphas = np.float32(np.linspace(0, 1, 21))
    max_f1 = 0
    best_weights = [0, 0, 0]
    for alpha1 in alphas:
        for alpha2 in alphas:
            for alpha3 in alphas:
                # the four weights must stay a convex combination
                if alpha1 + alpha2 + alpha3 > 1: continue
                y_pred = []
                for i in xrange(len(y_pred_cm)):
                    val = alpha1*y_pred_cm[i] + alpha2*y_prob_tfidf[i,1] + alpha3*y_prob_nblcr[i,1] + (1-alpha1-alpha2-alpha3)*y_prob_we[i,1]
                    if val >= thresh: y_pred.append(1)
                    else: y_pred.append(0)
                f1 = f1_score(test['label'], y_pred)
                if f1 > max_f1:
                    best_weights = [alpha1, alpha2, alpha3]
                    max_f1 = f1
    return best_weights, max_f1
def run_ensemble(train, test, weights, clf, thresh=0.5):
    """Combine the four base classifiers with fixed weights.

    Each sample's ensemble score is a convex combination of the
    concept-matching prediction and the three class-1 probabilities;
    samples whose score reaches ``thresh`` get label 1, otherwise 0.

    :param weights: [alpha1, alpha2, alpha3]; the fourth weight is the
        remainder 1 - alpha1 - alpha2 - alpha3
    :return: list of 0/1 predicted labels for ``test``
    """
    cm_pred = run_cm(train, test, '../data/ADR-lexicon.txt')
    _, tfidf_prob = run_tfidf(train, test, grams='123', n_dim=40000, clf=clf)
    _, nblcr_prob = run_nblcr(train, test, '../data/nblcr', grams='123', clf=clf)
    _, we_prob = run_we(train, test, '../data/w2v_150.txt', 150, clf=clf)

    labels = []
    for idx in xrange(len(cm_pred)):
        score = (weights[0] * cm_pred[idx] +
                 weights[1] * tfidf_prob[idx, 1] +
                 weights[2] * nblcr_prob[idx, 1] +
                 (1 - weights[0] - weights[1] - weights[2]) * we_prob[idx, 1])
        labels.append(1 if score >= thresh else 0)
    return labels
if __name__ == '__main__':
    # NOTE: Python 2 script (print statements here, xrange above).
    print "Predict for validation data..."
    train = pd.read_csv('../data/tweets-train.txt', names=['id','label','text'], sep='\t', quotechar='\t')
    dev = pd.read_csv('../data/tweets-dev.txt', names=['id','label','text'], sep='\t', quotechar='\t')

    clf = LogisticRegression(class_weight="auto")
    # Alternative base classifiers that were experimented with:
    #clf = SVC(kernel='linear', class_weight="auto", random_state=0)
    #clf = KNeighborsClassifier(n_neighbors=25, weights='distance')
    #clf = DecisionTreeClassifier(criterion='gini', class_weight="auto", max_features='sqrt', random_state=0)

    # tune the ensemble weights on the dev split
    weights, f1 = find_best_weights(train, dev, clf)
    print "Best weights:", weights
    print "Max f1-score:", f1

    print "\nPredict for test data..."
    test = pd.read_csv('../data/tweets-test.txt', names=['id','label','text'], sep='\t', quotechar='\t')
    y_pred_test = run_ensemble(train, test, weights, clf)
    output = pd.DataFrame(data={"id":test["id"], "label":y_pred_test})
    # quoting=3 is csv.QUOTE_NONE
    output.to_csv('../data/test_result.txt', sep='\t', header=False, index=False, quoting=3)
    print "Save results for test data."
| gpl-2.0 |
ElDeveloper/scikit-learn | examples/linear_model/plot_ridge_path.py | 254 | 1655 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model

# X is the 10x10 Hilbert matrix, a classic ill-conditioned design matrix.
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)

###############################################################################
# Compute the coefficient path over a log-spaced grid of alphas.
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
ridge = linear_model.Ridge(fit_intercept=False)

coefs = []
for alpha in alphas:
    ridge.set_params(alpha=alpha)
    ridge.fit(X, y)
    coefs.append(ridge.coef_)

###############################################################################
# Display results: one curve per coefficient, alpha decreasing to the right.
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])

ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1])  # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
nicproulx/mne-python | examples/inverse/plot_compute_mne_inverse_volume.py | 40 | 1748 | """
=======================================================================
Compute MNE-dSPM inverse solution on evoked data in volume source space
=======================================================================
Compute dSPM inverse solution on MNE evoked dataset in a volume source
space and stores the solution in a nifti file for visualisation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
from nilearn.plotting import plot_stat_map
from nilearn.image import index_img
from mne.datasets import sample
from mne import read_evokeds
from mne.minimum_norm import apply_inverse, read_inverse_operator
print(__doc__)

# Paths into the MNE sample dataset.
data_path = sample.data_path()
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-vol-7-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
t1_fname = data_path + '/subjects/sample/mri/T1.mgz'

# Regularization: lambda2 = 1 / SNR^2.
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)

# Load the evoked response and the precomputed inverse operator.
evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inv_op = read_inverse_operator(fname_inv)
src = inv_op['src']

# Compute the inverse solution and keep only the 0-200 ms window.
stc = apply_inverse(evoked, inv_op, lambda2, method)
stc.crop(0.0, 0.2)

# Export result as a 4D nifti object (set mri_resolution=True for the
# full MRI grid). It can be saved with e.g.:
# nib.save(img, 'mne_%s_inverse.nii.gz' % method)
img = stc.as_volume(src, mri_resolution=False)

# Plotting with nilearn ######################################################
plot_stat_map(index_img(img, 61), t1_fname, threshold=8.,
              title='%s (t=%.1f s.)' % (method, stc.times[61]))
plt.show()
| bsd-3-clause |
fengzhyuan/scikit-learn | sklearn/tree/tests/test_tree.py | 57 | 47417 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
# Split criteria exercised for classifiers and regressors.
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )

# Registries of estimator factories, keyed by a human-readable name that
# is interpolated into failure messages throughout this module.
CLF_TREES = {
    "DecisionTreeClassifier": DecisionTreeClassifier,
    "Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
                                              splitter="presort-best"),
    "ExtraTreeClassifier": ExtraTreeClassifier,
}

REG_TREES = {
    "DecisionTreeRegressor": DecisionTreeRegressor,
    "Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
                                             splitter="presort-best"),
    "ExtraTreeRegressor": ExtraTreeRegressor,
}

# Union of both registries, used by tests that apply to every tree type.
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)

# Names of trees whose default splitter supports sparse input.
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
                if Tree().splitter in SPARSE_SPLITTERS]
# Small hand-crafted dataset with 14 features, shared by classification
# (y_small) and regression (y_small_reg) checks.
X_small = np.array([
    [0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
    [0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
    [-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
    [-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
    [-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
    [-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
    [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
    [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
    [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
    [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
    [2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
    [2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
    [2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
    [1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
    [3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
    [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
    [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
    [2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
    [2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
    [2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
    [2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
    [1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
    [3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])

# Binary class targets for X_small.
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
           0, 0]
# Real-valued regression targets for X_small.
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
               0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]

# toy sample: two linearly separable clusters in 2-D
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
# test points and their expected predictions for the toy sample
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]

# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]

# digits dataset, permuted with the same RNG stream
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]

# Synthetic datasets used mainly by the sparse-input checks.
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
    random_state=0, n_samples=30, n_features=10)

# Sparse positive data: roughly 20% of entries remain nonzero.
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)


# Registry of named datasets; each entry also gets a CSC "X_sparse" copy
# added by the loop below.
DATASETS = {
    "iris": {"X": iris.data, "y": iris.target},
    "boston": {"X": boston.data, "y": boston.target},
    "digits": {"X": digits.data, "y": digits.target},
    "toy": {"X": X, "y": y},
    "clf_small": {"X": X_small, "y": y_small},
    "reg_small": {"X": X_small, "y": y_small_reg},
    "multilabel": {"X": X_multilabel, "y": y_multilabel},
    "sparse-pos": {"X": X_sparse_pos, "y": y_random},
    "sparse-neg": {"X": - X_sparse_pos, "y": y_random},
    "sparse-mix": {"X": X_sparse_mix, "y": y_random},
    "zeros": {"X": np.zeros((20, 3)), "y": y_random}
}

for name in DATASETS:
    DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    """Assert that two fitted ``Tree`` structures are identical.

    ``d`` and ``s`` are typically the trees grown on dense and sparse
    versions of the same data; ``message`` prefixes every failure message.
    """
    assert_equal(s.node_count, d.node_count,
                 "{0}: inequal number of node ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))

    assert_array_equal(d.children_right, s.children_right,
                       message + ": inequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": inequal children_left")

    # Split features/thresholds only exist on internal nodes; node values
    # are only compared on the leaves (external nodes).
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)

    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": inequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": inequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": inequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": inequal n_node_samples")

    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": inequal impurity")

    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": inequal value")
def test_classification_toy():
    # Check classification on a toy dataset.
    for name, Tree in CLF_TREES.items():
        # Default splitter, then a randomized single-feature variant.
        for params in ({"random_state": 0},
                       {"max_features": 1, "random_state": 1}):
            clf = Tree(**params)
            clf.fit(X, y)
            assert_array_equal(clf.predict(T), true_result,
                               "Failed with {0}".format(name))
def test_weighted_classification_toy():
    # Check classification on a weighted toy dataset: scaling all sample
    # weights by a constant must not change the learned tree.
    for name, Tree in CLF_TREES.items():
        clf = Tree(random_state=0)
        for w in (np.ones(len(X)), np.ones(len(X)) * 0.5):
            clf.fit(X, y, sample_weight=w)
            assert_array_equal(clf.predict(T), true_result,
                               "Failed with {0}".format(name))
def test_regression_toy():
    # Check regression on a toy dataset.
    for name, Tree in REG_TREES.items():
        reg = Tree(random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))

        # BUG FIX: the second assertion used to query the first estimator
        # (``reg``) while the ``max_features=1`` model was fitted into an
        # unused ``clf``, so that configuration was never actually tested.
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
def test_xor():
    # Check on a XOR problem: a 10x10 grid whose quadrants alternate label.
    y = np.zeros((10, 10))
    y[:5, :5] = 1
    y[5:, 5:] = 1

    gridx, gridy = np.indices(y.shape)

    X = np.vstack([gridx.ravel(), gridy.ravel()]).T
    y = y.ravel()

    for name, Tree in CLF_TREES.items():
        # Both the default and the single-feature splitter must separate
        # the quadrants perfectly.
        for params in ({"random_state": 0},
                       {"random_state": 0, "max_features": 1}):
            clf = Tree(**params)
            clf.fit(X, y)
            assert_equal(clf.score(X, y), 1.0,
                         "Failed with {0}".format(name))
def test_iris():
    # Check consistency on dataset iris.
    for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
        # Full-feature trees should fit iris nearly perfectly; with only
        # two candidate features per split a weaker floor applies.
        for extra, floor in (({}, 0.9), ({"max_features": 2}, 0.5)):
            clf = Tree(criterion=criterion, random_state=0, **extra)
            clf.fit(iris.data, iris.target)
            score = accuracy_score(clf.predict(iris.data), iris.target)
            assert_greater(score, floor,
                           "Failed with {0}, criterion = {1} and score = {2}"
                           "".format(name, criterion, score))
def test_boston():
    # Check consistency on dataset boston house prices.
    for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
        # All features, then a 6-feature tree: using fewer features reduces
        # the learning ability of this tree (higher MSE ceiling) but
        # reduces training time.
        for extra, ceiling in (({}, 1), ({"max_features": 6}, 2)):
            reg = Tree(criterion=criterion, random_state=0, **extra)
            reg.fit(boston.data, boston.target)
            score = mean_squared_error(boston.target, reg.predict(boston.data))
            assert_less(score, ceiling,
                        "Failed with {0}, criterion = {1} and score = {2}"
                        "".format(name, criterion, score))
def test_probability():
    # Predict probabilities using DecisionTreeClassifier.
    for name, Tree in CLF_TREES.items():
        clf = Tree(max_depth=1, max_features=1, random_state=42)
        clf.fit(iris.data, iris.target)

        proba = clf.predict_proba(iris.data)
        msg = "Failed with {0}".format(name)
        # Each row of predict_proba must sum to one.
        assert_array_almost_equal(proba.sum(axis=1),
                                  np.ones(iris.data.shape[0]),
                                  err_msg=msg)
        # The argmax of the probabilities matches the hard predictions.
        assert_array_equal(proba.argmax(axis=1),
                           clf.predict(iris.data),
                           err_msg=msg)
        # predict_log_proba is the (element-wise) log of predict_proba.
        assert_almost_equal(proba,
                            np.exp(clf.predict_log_proba(iris.data)), 8,
                            err_msg=msg)
def test_arrayrepr():
    # Check the array representation: a regression fit on 10000 distinct
    # targets forces repeated internal array resizing.
    y = np.arange(10000)
    X = np.arange(10000).reshape(-1, 1)

    for name, Tree in REG_TREES.items():
        reg = Tree(max_depth=None, random_state=0)
        reg.fit(X, y)
def test_pure_set():
    # Check when y is pure: a single-class target must be reproduced
    # exactly by every tree type.
    X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
    y = [1, 1, 1, 1, 1, 1]

    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(random_state=0)
        clf.fit(X, y)
        assert_array_equal(clf.predict(X), y,
                           err_msg="Failed with {0}".format(name))

    for name, TreeRegressor in REG_TREES.items():
        reg = TreeRegressor(random_state=0)
        reg.fit(X, y)
        # BUG FIX: this used to call ``clf.predict`` (the last classifier
        # left over from the loop above), so the regressors' predictions
        # were never actually checked.
        assert_almost_equal(reg.predict(X), y,
                            err_msg="Failed with {0}".format(name))
def test_numerical_stability():
    # Check numerical stability: fitting must not trigger any floating
    # point warning under np.errstate(all="raise").
    X = np.array([
        [152.08097839, 140.40744019, 129.75102234, 159.90493774],
        [142.50700378, 135.81935120, 117.82884979, 162.75781250],
        [127.28772736, 140.40744019, 129.75102234, 159.90493774],
        [132.37025452, 143.71923828, 138.35694885, 157.84558105],
        [103.10237122, 143.71928406, 138.35696411, 157.84559631],
        [127.71276855, 143.71923828, 138.35694885, 157.84558105],
        [120.91514587, 140.40744019, 129.75102234, 159.90493774]])

    y = np.array(
        [1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])

    with np.errstate(all="raise"):
        for name, Tree in REG_TREES.items():
            reg = Tree(random_state=0)
            # Fit under every sign combination of the data.
            for sx, sy in product((1, -1), (1, -1)):
                reg.fit(sx * X, sy * y)
def test_importances():
    # Check variable importances.
    X, y = datasets.make_classification(n_samples=2000,
                                        n_features=10,
                                        n_informative=3,
                                        n_redundant=0,
                                        n_repeated=0,
                                        shuffle=False,
                                        random_state=0)

    for name, Tree in CLF_TREES.items():
        clf = Tree(random_state=0)
        clf.fit(X, y)
        importances = clf.feature_importances_
        msg = "Failed with {0}".format(name)

        # Ten features total; exactly the three informative ones should
        # receive substantial importance.
        assert_equal(importances.shape[0], 10, msg)
        assert_equal(np.sum(importances > 0.1), 3, msg)

        # Thresholding at the mean keeps at least one, but not all, features.
        X_new = clf.transform(X, threshold="mean")
        assert_less(0, X_new.shape[1], msg)
        assert_less(X_new.shape[1], X.shape[1], msg)

    # Check on iris that importances are the same for all builders
    clf = DecisionTreeClassifier(random_state=0)
    clf.fit(iris.data, iris.target)
    clf2 = DecisionTreeClassifier(random_state=0,
                                  max_leaf_nodes=len(iris.data))
    clf2.fit(iris.data, iris.target)

    assert_array_equal(clf.feature_importances_,
                       clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
    # Check if variable importance before fit raises ValueError.
    clf = DecisionTreeClassifier()
    # Accessing the property on an unfitted estimator must raise.
    clf.feature_importances_
def test_importances_gini_equal_mse():
    # Check that gini is equivalent to mse for binary output variable
    X, y = datasets.make_classification(n_samples=2000,
                                        n_features=10,
                                        n_informative=3,
                                        n_redundant=0,
                                        n_repeated=0,
                                        shuffle=False,
                                        random_state=0)

    # The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occurs at
    # high tree depth, we restrict this maximal depth.
    clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
                                 random_state=0).fit(X, y)
    reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
                                random_state=0).fit(X, y)

    assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
    # The two trees must share exactly the same structure.
    for attr in ("feature", "children_left", "children_right",
                 "n_node_samples"):
        assert_array_equal(getattr(clf.tree_, attr),
                           getattr(reg.tree_, attr))
def test_max_features():
    # Check max_features.
    for name, TreeRegressor in REG_TREES.items():
        # "auto" means all features for regressors.
        reg = TreeRegressor(max_features="auto")
        reg.fit(boston.data, boston.target)
        assert_equal(reg.max_features_, boston.data.shape[1])

    for name, TreeClassifier in CLF_TREES.items():
        # "auto" means sqrt(n_features) for classifiers (iris: sqrt(4) = 2).
        clf = TreeClassifier(max_features="auto")
        clf.fit(iris.data, iris.target)
        assert_equal(clf.max_features_, 2)

    n_features = iris.data.shape[1]
    # (max_features value, expected resolved max_features_) pairs.
    valid_cases = [
        ("sqrt", int(np.sqrt(n_features))),
        ("log2", int(np.log2(n_features))),
        (1, 1),
        (3, 3),
        (0.01, 1),  # fractions resolve to at least one feature
        (0.5, int(0.5 * n_features)),
        (1.0, n_features),
        (None, n_features),
    ]
    for name, TreeEstimator in ALL_TREES.items():
        for value, expected in valid_cases:
            est = TreeEstimator(max_features=value)
            est.fit(iris.data, iris.target)
            assert_equal(est.max_features_, expected)

        # use values of max_features that are invalid
        for value in (10, -1, 0.0, 1.5, "foobar"):
            est = TreeEstimator(max_features=value)
            assert_raises(ValueError, est.fit, X, y)
def test_error():
    # Test that it gives proper exception on deficient input.
    for name, TreeEstimator in CLF_TREES.items():
        # predict before fit
        est = TreeEstimator()
        assert_raises(NotFittedError, est.predict_proba, X)

        est.fit(X, y)
        X2 = [-2, -1, 1]  # wrong feature shape for sample
        assert_raises(ValueError, est.predict_proba, X2)

    for name, TreeEstimator in ALL_TREES.items():
        # Invalid values for parameters
        assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
        assert_raises(ValueError,
                      TreeEstimator(min_weight_fraction_leaf=-1).fit,
                      X, y)
        assert_raises(ValueError,
                      TreeEstimator(min_weight_fraction_leaf=0.51).fit,
                      X, y)
        assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
                      X, y)
        assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
        assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)

        # Wrong dimensions: y shorter than X must be rejected.
        est = TreeEstimator()
        y2 = y[:-1]
        assert_raises(ValueError, est.fit, X, y2)

        # Test with arrays that are non-contiguous.
        Xf = np.asfortranarray(X)
        est = TreeEstimator()
        est.fit(Xf, y)
        assert_almost_equal(est.predict(T), true_result)

        # predict before fitting
        est = TreeEstimator()
        assert_raises(NotFittedError, est.predict, T)

        # predict on vector with different dims
        est.fit(X, y)
        t = np.asarray(T)
        assert_raises(ValueError, est.predict, t[:, 1:])

        # wrong sample shape
        Xt = np.array(X).T

        # A tree fit on the 6x6 Gram-like matrix cannot score 2-feature rows.
        est = TreeEstimator()
        est.fit(np.dot(X, Xt), y)
        assert_raises(ValueError, est.predict, X)
        assert_raises(ValueError, est.apply, X)

        clf = TreeEstimator()
        clf.fit(X, y)
        assert_raises(ValueError, clf.predict, Xt)
        assert_raises(ValueError, clf.apply, Xt)

        # apply before fitting
        est = TreeEstimator()
        assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
    # Test if leaves contain more than leaf_count training examples
    X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
    y = iris.target

    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    for max_leaf_nodes, (name, TreeEstimator) in product(
            (None, 1000), ALL_TREES.items()):
        est = TreeEstimator(min_samples_leaf=5,
                            max_leaf_nodes=max_leaf_nodes,
                            random_state=0)
        est.fit(X, y)
        out = est.tree_.apply(X)
        node_counts = np.bincount(out)
        # drop inner nodes (they receive no samples from apply())
        leaf_count = node_counts[node_counts != 0]
        assert_greater(np.min(leaf_count), 4,
                       "Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
    """Test if leaves contain at least min_weight_fraction_leaf of the
    training set"""
    # NOTE(review): the ``datasets`` parameter is a key into the DATASETS
    # registry and shadows the sklearn ``datasets`` module imported above.
    if sparse:
        X = DATASETS[datasets]["X_sparse"].astype(np.float32)
    else:
        X = DATASETS[datasets]["X"].astype(np.float32)
    y = DATASETS[datasets]["y"]

    weights = rng.rand(X.shape[0])
    total_weight = np.sum(weights)

    TreeEstimator = ALL_TREES[name]

    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
        est = TreeEstimator(min_weight_fraction_leaf=frac,
                            max_leaf_nodes=max_leaf_nodes,
                            random_state=0)
        est.fit(X, y, sample_weight=weights)

        if sparse:
            out = est.tree_.apply(X.tocsr())
        else:
            out = est.tree_.apply(X)

        node_weights = np.bincount(out, weights=weights)
        # drop inner nodes
        leaf_weights = node_weights[node_weights != 0]
        # Every leaf must carry at least the requested weight fraction.
        assert_greater_equal(
            np.min(leaf_weights),
            total_weight * est.min_weight_fraction_leaf,
            "Failed with {0} "
            "min_weight_fraction_leaf={1}".format(
                name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
    # Dense input for every tree type; sparse input only where supported.
    cases = [(tree_name, "iris", False) for tree_name in ALL_TREES]
    cases += [(tree_name, "multilabel", True) for tree_name in SPARSE_TREES]
    for tree_name, dataset, sparse in cases:
        yield check_min_weight_fraction_leaf, tree_name, dataset, sparse
def test_pickle():
    # Check that tree estimators survive a pickle round-trip with the
    # same type and the same training score.
    for registry, data, task in ((CLF_TREES, iris, "classification"),
                                 (REG_TREES, boston, "regression")):
        for name, TreeEstimator in registry.items():
            est = TreeEstimator(random_state=0)
            est.fit(data.data, data.target)
            score = est.score(data.data, data.target)

            est2 = pickle.loads(pickle.dumps(est))
            assert_equal(type(est2), est.__class__)
            score2 = est2.score(data.data, data.target)
            assert_equal(score, score2,
                         "Failed to generate same score "
                         "after pickling ({0}) "
                         "with {1}".format(task, name))
def test_multioutput():
    # Check estimators on multi-output problems.
    X = [[-2, -1],
         [-1, -1],
         [-1, -2],
         [1, 1],
         [1, 2],
         [2, 1],
         [-2, 1],
         [-1, 1],
         [-1, 2],
         [2, -1],
         [1, -1],
         [1, -2]]

    # Two output columns: first has 2 classes, second has 4 classes.
    y = [[-1, 0],
         [-1, 0],
         [-1, 0],
         [1, 1],
         [1, 1],
         [1, 1],
         [-1, 2],
         [-1, 2],
         [-1, 2],
         [1, 3],
         [1, 3],
         [1, 3]]

    T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]

    # toy classification problem
    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(random_state=0)
        y_hat = clf.fit(X, y).predict(T)
        assert_array_equal(y_hat, y_true)
        assert_equal(y_hat.shape, (4, 2))

        # predict_proba returns one array per output, shaped
        # (n_samples, n_classes_of_that_output).
        proba = clf.predict_proba(T)
        assert_equal(len(proba), 2)
        assert_equal(proba[0].shape, (4, 2))
        assert_equal(proba[1].shape, (4, 4))

        log_proba = clf.predict_log_proba(T)
        assert_equal(len(log_proba), 2)
        assert_equal(log_proba[0].shape, (4, 2))
        assert_equal(log_proba[1].shape, (4, 4))

    # toy regression problem
    for name, TreeRegressor in REG_TREES.items():
        reg = TreeRegressor(random_state=0)
        y_hat = reg.fit(X, y).predict(T)
        assert_almost_equal(y_hat, y_true)
        assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
    # Test that n_classes_ and classes_ have proper shape.
    # Two-output target built from the toy labels and their doubles.
    y_multi = np.vstack((y, np.array(y) * 2)).T

    for name, TreeClassifier in CLF_TREES.items():
        # Classification, single output: scalar / 1-d attributes.
        classifier = TreeClassifier(random_state=0)
        classifier.fit(X, y)
        assert_equal(classifier.n_classes_, 2)
        assert_array_equal(classifier.classes_, [-1, 1])

        # Classification, multi-output: one entry per output column.
        classifier = TreeClassifier(random_state=0)
        classifier.fit(X, y_multi)
        assert_equal(len(classifier.n_classes_), 2)
        assert_equal(len(classifier.classes_), 2)
        assert_array_equal(classifier.n_classes_, [2, 2])
        assert_array_equal(classifier.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
    # Check class rebalancing: with balancing sample weights the tree
    # should fit the truncated (class-imbalanced) iris data perfectly.
    X_unbal = iris.data[:125]
    y_unbal = iris.target[:125]
    balancing_weights = _balance_weights(y_unbal)

    for name, TreeClassifier in CLF_TREES.items():
        model = TreeClassifier(random_state=0)
        model.fit(X_unbal, y_unbal, sample_weight=balancing_weights)
        assert_almost_equal(model.predict(X_unbal), y_unbal)
def test_memory_layout():
    # Check that it works no matter the memory layout
    for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
                                                [np.float64, np.float32]):
        est = TreeEstimator(random_state=0)
        y = iris.target

        # Dense inputs under various layouts / contiguity, plus sparse
        # formats where the splitter supports them.
        builders = [
            lambda: np.asarray(iris.data, dtype=dtype),              # default
            lambda: np.asarray(iris.data, order="C", dtype=dtype),   # C-order
            lambda: np.asarray(iris.data, order="F", dtype=dtype),   # F-order
            lambda: np.ascontiguousarray(iris.data, dtype=dtype),    # contiguous
        ]
        if est.splitter in SPARSE_SPLITTERS:
            builders.append(lambda: csr_matrix(iris.data, dtype=dtype))
            builders.append(lambda: csc_matrix(iris.data, dtype=dtype))

        for build in builders:
            X = build()
            assert_array_equal(est.fit(X, y).predict(X), y)

        # Strided input.
        X = np.asarray(iris.data[::3], dtype=dtype)
        y = iris.target[::3]
        assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
    # Check sample weighting.
    # Test that zero-weighted samples are not taken into account
    X = np.arange(100)[:, np.newaxis]
    y = np.ones(100)
    y[:50] = 0.0

    sample_weight = np.ones(100)
    sample_weight[y == 0] = 0.0

    clf = DecisionTreeClassifier(random_state=0)
    clf.fit(X, y, sample_weight=sample_weight)
    # Only class-1 samples carry weight, so everything is predicted 1.
    assert_array_equal(clf.predict(X), np.ones(100))

    # Test that low weighted samples are not taken into account at low depth
    X = np.arange(200)[:, np.newaxis]
    y = np.zeros(200)
    y[50:100] = 1
    y[100:200] = 2
    X[100:200, 0] = 200  # class 2 is separable at x = 200

    sample_weight = np.ones(200)

    sample_weight[y == 2] = .51  # Samples of class '2' are still weightier
    clf = DecisionTreeClassifier(max_depth=1, random_state=0)
    clf.fit(X, y, sample_weight=sample_weight)
    # With class 2 heaviest, the depth-1 split isolates it at 149.5.
    assert_equal(clf.tree_.threshold[0], 149.5)

    sample_weight[y == 2] = .5  # Samples of class '2' are no longer weightier
    clf = DecisionTreeClassifier(max_depth=1, random_state=0)
    clf.fit(X, y, sample_weight=sample_weight)
    assert_equal(clf.tree_.threshold[0], 49.5)  # Threshold should have moved

    # Test that sample weighting is the same as having duplicates
    X = iris.data
    y = iris.target

    duplicates = rng.randint(0, X.shape[0], 200)

    clf = DecisionTreeClassifier(random_state=1)
    clf.fit(X[duplicates], y[duplicates])

    # Weighting each sample by its duplication count must grow the same
    # internal splits as training on the duplicated rows directly.
    sample_weight = np.bincount(duplicates, minlength=X.shape[0])
    clf2 = DecisionTreeClassifier(random_state=1)
    clf2.fit(X, y, sample_weight=sample_weight)

    internal = clf.tree_.children_left != tree._tree.TREE_LEAF
    assert_array_almost_equal(clf.tree_.threshold[internal],
                              clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
    # Check sample weighting raises errors.
    X = np.arange(100)[:, np.newaxis]
    y = np.ones(100)
    y[:50] = 0.0

    clf = DecisionTreeClassifier(random_state=0)

    # 2-d weights, a scalar weight, and length mismatches must all raise.
    for bad_weight in (np.random.rand(100, 1),
                       np.array(0),
                       np.ones(101),
                       np.ones(99)):
        assert_raises(ValueError, clf.fit, X, y, sample_weight=bad_weight)
def check_class_weights(name):
    """Check class_weights resemble sample_weights behavior."""
    TreeClassifier = CLF_TREES[name]

    # Iris is balanced, so no effect expected for using 'balanced' weights
    clf1 = TreeClassifier(random_state=0)
    clf1.fit(iris.data, iris.target)
    clf2 = TreeClassifier(class_weight='balanced', random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)

    # Make a multi-output problem with three copies of Iris
    iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
    # Create user-defined weights that should balance over the outputs
    clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
                                        {0: 2., 1: 1., 2: 2.},
                                        {0: 1., 1: 2., 2: 2.}],
                          random_state=0)
    clf3.fit(iris.data, iris_multi)
    assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output "auto" which should also have no effect
    clf4 = TreeClassifier(class_weight='balanced', random_state=0)
    clf4.fit(iris.data, iris_multi)
    assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)

    # Inflate importance of class 1, check against user-defined weights
    sample_weight = np.ones(iris.target.shape)
    sample_weight[iris.target == 1] *= 100
    class_weight = {0: 1., 1: 100., 2: 1.}
    clf1 = TreeClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight)
    clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)

    # Check that sample_weight and class_weight are multiplicative:
    # weight^2 via sample_weight == weight * weight via both mechanisms.
    clf1 = TreeClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight ** 2)
    clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target, sample_weight)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
    # Run the class_weight checks for every classifier variant.
    for clf_name in CLF_TREES:
        yield check_class_weights, clf_name
def check_class_weight_errors(name):
    """Invalid class_weight values must raise ValueError at fit time."""
    TreeClassifier = CLF_TREES[name]
    y_multi = np.vstack((y, np.array(y) * 2)).T

    # An unknown preset string is rejected for single and multi-output.
    bad_preset = TreeClassifier(class_weight='the larch', random_state=0)
    assert_raises(ValueError, bad_preset.fit, X, y)
    assert_raises(ValueError, bad_preset.fit, X, y_multi)

    # Multi-output requires a preset or a list of dicts of matching
    # length: a bare scalar and a too-short list both fail.
    for bad_weight in (1, [{-1: 0.5, 1: 1.}]):
        clf = TreeClassifier(class_weight=bad_weight, random_state=0)
        assert_raises(ValueError, clf.fit, X, y_multi)
def test_class_weight_errors():
    # Run the class_weight error checks for every classifier variant.
    for clf_name in CLF_TREES:
        yield check_class_weight_errors, clf_name
def test_max_leaf_nodes():
    # Greedy (best-first) growth with max_leaf_nodes=k+1 yields k+1 leaves.
    from sklearn.tree._tree import TREE_LEAF
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    for name, TreeEstimator in ALL_TREES.items():
        fitted = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
        n_leaves = (fitted.tree_.children_left == TREE_LEAF).sum()
        assert_equal(n_leaves, k + 1)

        # max_leaf_nodes in (0, 1) is meaningless and must raise ValueError.
        for invalid in (0, 1, 0.1):
            est = TreeEstimator(max_depth=None, max_leaf_nodes=invalid)
            assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # max_leaf_nodes takes precedence over max_depth: even with
    # max_depth=1 the best-first builder grows deeper to reach k leaves.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    for name, TreeEstimator in ALL_TREES.items():
        fitted = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
        assert_greater(fitted.tree_.max_depth, 1)
def test_arrays_persist():
    # Property arrays must keep the underlying memory alive even after the
    # Tree object that produced them is garbage: non-regression for #2726.
    attrs = ('n_classes', 'value', 'children_left', 'children_right',
             'threshold', 'impurity', 'feature', 'n_node_samples')
    for attr in attrs:
        # The fitted tree is a temporary; only the property array survives.
        value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
        # Freed memory would hold arbitrary garbage, not small sane values.
        assert_true(-2 <= value.flat[0] < 2,
                    'Array points to arbitrary memory')
def test_only_constant_features():
    # With all-constant features no split is possible: depth stays 0.
    rng = check_random_state(0)
    X = np.zeros((10, 20))
    y = rng.randint(0, 2, (10, ))
    for name, TreeEstimator in ALL_TREES.items():
        est = TreeEstimator(random_state=0)
        est.fit(X, y)
        assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
    # One informative binary feature among 1000 constant ones: the tree
    # makes exactly one split, and the labels are 50/50 within each leaf.
    X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
                   np.zeros((4, 1000))])
    y = np.array([0., 1., 0., 1.0])

    for name, TreeEstimator in CLF_TREES.items():
        clf = TreeEstimator(random_state=0, max_features=1)
        clf.fit(X, y)
        assert_equal(clf.tree_.max_depth, 1)
        assert_array_equal(clf.predict_proba(X), 0.5 * np.ones((4, 2)))

    for name, TreeEstimator in REG_TREES.items():
        reg = TreeEstimator(random_state=0, max_features=1)
        reg.fit(X, y)
        assert_equal(reg.tree_.max_depth, 1)
        assert_array_equal(reg.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
    # Values too large for float32 must raise a ValueError whose message
    # mentions "float32" so the user understands the internal dtype limit.
    X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
    clf = DecisionTreeClassifier()
    try:
        clf.fit(X, [0, 1, 0, 1])
    except ValueError as e:
        assert_in("float32", str(e))
    else:
        # Previously the test passed silently when no error was raised at
        # all; make the missing exception an explicit failure.
        raise AssertionError("fit did not raise ValueError on overflow input")
def test_realloc():
    # The Cython _realloc helper must surface allocation failure as a
    # Python MemoryError instead of silently returning a bad pointer.
    from sklearn.tree._tree import _realloc_test
    assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
    # Absurd max_leaf_nodes requests must fail loudly, never silently.
    n_bits = int(platform.architecture()[0].rstrip('bit'))
    X = np.random.randn(10, 2)
    y = np.random.randint(0, 2, 10)

    # More nodes than the address space can represent; currently raises
    # OverflowError before any allocation is attempted.
    beyond_address_space = 2 ** (n_bits + 1)
    clf = DecisionTreeClassifier(splitter='best',
                                 max_leaf_nodes=beyond_address_space)
    assert_raises(Exception, clf.fit, X, y)

    # Non-regression: MemoryError used to be swallowed in Cython because
    # of a missing "except *" declaration.
    barely_representable = 2 ** (n_bits - 1) - 1
    clf = DecisionTreeClassifier(splitter='best',
                                 max_leaf_nodes=barely_representable)
    assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
    # Fitting on dense vs. sparse input must produce identical trees and
    # identical predictions, for every sparse format, on dataset `dataset`.
    TreeEstimator = ALL_TREES[tree]
    X = DATASETS[dataset]["X"]
    X_sparse = DATASETS[dataset]["X_sparse"]
    y = DATASETS[dataset]["y"]

    # Gain testing time: subsample the two largest datasets.
    if dataset in ["digits", "boston"]:
        n_samples = X.shape[0] // 5
        X = X[:n_samples]
        X_sparse = X_sparse[:n_samples]
        y = y[:n_samples]

    # NOTE: X_sparse is rebound each iteration, so after the first pass the
    # conversion is format-to-format rather than from the original matrix.
    for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
        X_sparse = sparse_format(X_sparse)

        # Check the default (depth first search) builder.
        d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
        s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)

        assert_tree_equal(d.tree_, s.tree_,
                          "{0} with dense and sparse format gave different "
                          "trees".format(tree))

        # Reference predictions from the densely-fitted estimator.
        y_pred = d.predict(X)
        if tree in CLF_TREES:
            y_proba = d.predict_proba(X)
            y_log_proba = d.predict_log_proba(X)

        # Prediction on every sparse format must match the dense reference.
        for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
            X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)

            assert_array_almost_equal(s.predict(X_sparse_test), y_pred)

            if tree in CLF_TREES:
                assert_array_almost_equal(s.predict_proba(X_sparse_test),
                                          y_proba)
                assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
                                          y_log_proba)
def test_sparse_input():
    # Classification-style datasets: unrestricted depth except for digits.
    small_datasets = ("clf_small", "toy", "digits", "multilabel",
                      "sparse-pos", "sparse-neg", "sparse-mix", "zeros")
    for tree_type, dataset in product(SPARSE_TREES, small_datasets):
        max_depth = 3 if dataset == "digits" else None
        yield (check_sparse_input, tree_type, dataset, max_depth)

    # Regression: MSE is numerically unstable and the test is strict, so
    # cap the depth at 2.
    for tree_type, dataset in product(REG_TREES, ["boston", "reg_small"]):
        if tree_type in SPARSE_TREES:
            yield (check_sparse_input, tree_type, dataset, 2)
def check_sparse_parameters(tree, dataset):
    # Dense and sparse fits must agree under each hyper-parameter setting:
    # max_features, min_samples_split, min_samples_leaf and best-first
    # (max_leaf_nodes) growth.
    TreeEstimator = ALL_TREES[tree]
    X = DATASETS[dataset]["X"]
    X_sparse = DATASETS[dataset]["X_sparse"]
    y = DATASETS[dataset]["y"]

    parameter_sets = (
        dict(max_features=1, max_depth=2),                    # max_features
        dict(max_features=1, min_samples_split=10),           # min_samples_split
        dict(min_samples_leaf=X_sparse.shape[0] // 2),        # min_samples_leaf
        dict(max_leaf_nodes=3),                               # best-first search
    )
    for params in parameter_sets:
        d = TreeEstimator(random_state=0, **params).fit(X, y)
        s = TreeEstimator(random_state=0, **params).fit(X_sparse, y)
        assert_tree_equal(d.tree_, s.tree_,
                          "{0} with dense and sparse format gave different "
                          "trees".format(tree))
        assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
    # Parameter checks only make sense on the purely sparse datasets.
    sparse_datasets = ("sparse-pos", "sparse-neg", "sparse-mix", "zeros")
    for tree_type, dataset in product(SPARSE_TREES, sparse_datasets):
        yield (check_sparse_parameters, tree_type, dataset)
def check_sparse_criterion(tree, dataset):
    # Dense and sparse fits must agree for every available split criterion.
    TreeEstimator = ALL_TREES[tree]
    X = DATASETS[dataset]["X"]
    X_sparse = DATASETS[dataset]["X_sparse"]
    y = DATASETS[dataset]["y"]

    # Pick the criteria relevant to this estimator's task.
    criteria = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
    for criterion in criteria:
        dense_est = TreeEstimator(random_state=0, max_depth=3,
                                  criterion=criterion).fit(X, y)
        sparse_est = TreeEstimator(random_state=0, max_depth=3,
                                   criterion=criterion).fit(X_sparse, y)
        assert_tree_equal(dense_est.tree_, sparse_est.tree_,
                          "{0} with dense and sparse format gave different "
                          "trees".format(tree))
        assert_array_almost_equal(sparse_est.predict(X), dense_est.predict(X))
def test_sparse_criterion():
    # Criterion checks only make sense on the purely sparse datasets.
    sparse_datasets = ("sparse-pos", "sparse-neg", "sparse-mix", "zeros")
    for tree_type, dataset in product(SPARSE_TREES, sparse_datasets):
        yield (check_sparse_criterion, tree_type, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
                                n_features=10):
    # Explicitly-stored zeros in a sparse matrix must not change the fitted
    # tree or its predictions compared to the dense equivalent.
    TreeEstimator = ALL_TREES[tree]

    # n_samples set n_feature to ease construction of a simultaneous
    # construction of a csr and csc matrix
    n_samples = n_features
    samples = np.arange(n_samples)

    # Generate X, y: column-wise random indices and values in {-1, 0, 1},
    # so zeros end up explicitly stored in the sparse data array.
    random_state = check_random_state(0)
    indices = []
    data = []
    offset = 0
    indptr = [offset]
    for i in range(n_features):
        n_nonzero_i = random_state.binomial(n_samples, 0.5)
        indices_i = random_state.permutation(samples)[:n_nonzero_i]
        indices.append(indices_i)
        data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
        data.append(data_i)
        offset += n_nonzero_i
        indptr.append(offset)

    indices = np.concatenate(indices)
    data = np.array(np.concatenate(data), dtype=np.float32)
    # The same (data, indices, indptr) triplet is read as CSC for training
    # and as CSR for testing, giving two different (but valid) matrices.
    X_sparse = csc_matrix((data, indices, indptr),
                          shape=(n_samples, n_features))
    X = X_sparse.toarray()
    X_sparse_test = csr_matrix((data, indices, indptr),
                               shape=(n_samples, n_features))
    X_test = X_sparse_test.toarray()
    y = random_state.randint(0, 3, size=(n_samples, ))

    # Ensure that X_sparse_test owns its data, indices and indptr array
    X_sparse_test = X_sparse_test.copy()

    # Ensure that we have explicit zeros
    assert_greater((X_sparse.data == 0.).sum(), 0)
    assert_greater((X_sparse_test.data == 0.).sum(), 0)

    # Perform the comparison
    d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
    s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)

    assert_tree_equal(d.tree_, s.tree_,
                      "{0} with dense and sparse format gave different "
                      "trees".format(tree))

    # Every dense/sparse combination of inputs must agree on apply() and
    # predict(), both via the public API and the low-level tree_.
    Xs = (X_test, X_sparse_test)
    for X1, X2 in product(Xs, Xs):
        assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
        assert_array_almost_equal(s.apply(X1), d.apply(X2))
        assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
        assert_array_almost_equal(s.predict(X1), d.predict(X2))

        if tree in CLF_TREES:
            assert_array_almost_equal(s.predict_proba(X1),
                                      d.predict_proba(X2))
def test_explicit_sparse_zeros():
    # Explicit-zero handling check for every sparse-capable tree type.
    for tree_type in SPARSE_TREES:
        yield (check_explicit_sparse_zeros, tree_type)
def check_raise_error_on_1d_input(name):
    # 1d feature arrays must be rejected at both fit and predict time.
    TreeEstimator = ALL_TREES[name]

    X_1d = iris.data[:, 0].ravel()
    X_2d = iris.data[:, 0].reshape((-1, 1))
    y = iris.target

    assert_raises(ValueError, TreeEstimator(random_state=0).fit, X_1d, y)

    fitted = TreeEstimator(random_state=0)
    fitted.fit(X_2d, y)
    assert_raises(ValueError, fitted.predict, X_1d)
def test_1d_input():
    # 1d-input rejection check for every tree type.
    for tree_name in ALL_TREES.keys():
        yield check_raise_error_on_1d_input, tree_name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
    # Private function to keep pretty printing in nose yielded tests.
    # Without the weight constraint the lone positive sample is split off.
    unconstrained = TreeEstimator(random_state=0)
    unconstrained.fit(X, y, sample_weight=sample_weight)
    assert_equal(unconstrained.tree_.max_depth, 1)

    # Requiring 40% of total weight per leaf forbids that split entirely.
    constrained = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
    constrained.fit(X, y, sample_weight=sample_weight)
    assert_equal(constrained.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
    # min_weight_fraction_leaf must gate splits on dense and sparse input.
    TreeEstimator = ALL_TREES[name]
    X = np.array([[0], [0], [0], [0], [1]])
    y = [0, 0, 0, 0, 1]
    weights = [0.2, 0.2, 0.2, 0.2, 0.2]

    _check_min_weight_leaf_split_level(TreeEstimator, X, y, weights)

    if TreeEstimator().splitter in SPARSE_SPLITTERS:
        _check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
                                           weights)
def test_min_weight_leaf_split_level():
    # Weighted-leaf split-gating check for every tree type.
    for tree_name in ALL_TREES.keys():
        yield check_min_weight_leaf_split_level, tree_name
def check_public_apply(name):
    # Public apply() must match the low-level tree_.apply() on the same
    # data cast to the internal dtype.
    X_internal_dtype = X_small.astype(tree._tree.DTYPE)
    estimator = ALL_TREES[name]()
    estimator.fit(X_small, y_small)
    assert_array_equal(estimator.apply(X_small),
                       estimator.tree_.apply(X_internal_dtype))
def check_public_apply_sparse(name):
    # Same as check_public_apply, but the low-level call gets CSR input.
    X_sparse_internal = csr_matrix(X_small.astype(tree._tree.DTYPE))
    estimator = ALL_TREES[name]()
    estimator.fit(X_small, y_small)
    assert_array_equal(estimator.apply(X_small),
                       estimator.tree_.apply(X_sparse_internal))
def test_public_apply():
    # Dense apply() for every tree; sparse apply() where supported.
    for tree_name in ALL_TREES.keys():
        yield (check_public_apply, tree_name)

    for tree_name in SPARSE_TREES:
        yield (check_public_apply_sparse, tree_name)
| bsd-3-clause |
ville-k/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/arithmetic_transform_test.py | 62 | 2343 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for arithmetic transforms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe import tensorflow_dataframe as df
from tensorflow.python.platform import test
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class SumTestCase(test.TestCase):
  """Tests for the `Sum` transform (element-wise column addition)."""

  def testSum(self):
    """`frame["a"] + frame["b"]` matches pandas element-wise addition."""
    if not HAS_PANDAS:
      return
    n = 100
    source_df = pd.DataFrame({"a": np.arange(n),
                              "b": np.arange(n, 2 * n)})
    frame = df.TensorFlowDataFrame.from_pandas(
        source_df, shuffle=False, batch_size=n)
    frame["a+b"] = frame["a"] + frame["b"]
    np.testing.assert_array_equal(source_df["a"] + source_df["b"],
                                  frame.run_one_batch()["a+b"])
class DifferenceTestCase(test.TestCase):
  """Tests for the `Difference` transform (element-wise column subtraction)."""

  def testDifference(self):
    """`frame["a"] - frame["b"]` matches pandas element-wise subtraction."""
    if not HAS_PANDAS:
      return
    n = 100
    source_df = pd.DataFrame({"a": np.arange(n),
                              "b": np.arange(n, 2 * n)})
    frame = df.TensorFlowDataFrame.from_pandas(
        source_df, shuffle=False, batch_size=n)
    frame["a-b"] = frame["a"] - frame["b"]
    np.testing.assert_array_equal(source_df["a"] - source_df["b"],
                                  frame.run_one_batch()["a-b"])
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
soulmachine/scikit-learn | sklearn/ensemble/tests/test_base.py | 28 | 1334 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
    """Check BaseEnsemble methods."""
    ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
    iris = load_iris()
    ensemble.fit(iris.data, iris.target)

    # Rebuild the estimator list by hand through _make_estimator: three
    # appended clones plus one that is created but not stored.
    ensemble.estimators_ = []
    for _ in range(3):
        ensemble._make_estimator()
    ensemble._make_estimator(append=False)

    assert_equal(3, len(ensemble))
    assert_equal(3, len(ensemble.estimators_))
    assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
    """Check that instantiating a BaseEnsemble with n_estimators<=0 raises
    a ValueError."""
    zero_ensemble = BaggingClassifier(base_estimator=Perceptron(),
                                      n_estimators=0)
    iris = load_iris()
    assert_raise_message(ValueError,
                         "n_estimators must be greater than zero, got 0.",
                         zero_ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
wronk/mne-python | mne/viz/misc.py | 3 | 19647 | """Functions to make simple plots with M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Cathy Nangini <cnangini@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
import copy
from glob import glob
from itertools import cycle
import os.path as op
import numpy as np
from scipy import linalg
from ..surface import read_surface
from ..io.proj import make_projector
from ..utils import logger, verbose, get_subjects_dir, warn
from ..io.pick import pick_types
from .utils import tight_layout, COLORS, _prepare_trellis, plt_show
@verbose
def plot_cov(cov, info, exclude=[], colorbar=True, proj=False, show_svd=True,
             show=True, verbose=None):
    """Plot Covariance data.

    Shows one image per available channel type (EEG, gradiometers,
    magnetometers) and, optionally, the singular-value spectrum of each
    sub-covariance.

    Parameters
    ----------
    cov : instance of Covariance
        The covariance matrix.
    info: dict
        Measurement info.
    exclude : list of string | str
        List of channels to exclude. If empty do not exclude any channel.
        If 'bads', exclude info['bads'].
    colorbar : bool
        Show colorbar or not.
    proj : bool
        Apply projections or not.
    show_svd : bool
        Plot also singular values of the noise covariance for each sensor
        type. We show square roots ie. standard deviations.
    show : bool
        Show figure if True.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    fig_cov : instance of matplotlib.pyplot.Figure
        The covariance plot.
    fig_svd : instance of matplotlib.pyplot.Figure | None
        The SVD spectra plot of the covariance.
    """
    if exclude == 'bads':
        exclude = info['bads']
    # Channels kept from the covariance, and their indices into cov.data.
    ch_names = [n for n in cov.ch_names if n not in exclude]
    ch_idx = [cov.ch_names.index(n) for n in ch_names]
    info_ch_names = info['ch_names']
    sel_eeg = pick_types(info, meg=False, eeg=True, ref_meg=False,
                         exclude=exclude)
    sel_mag = pick_types(info, meg='mag', eeg=False, ref_meg=False,
                         exclude=exclude)
    sel_grad = pick_types(info, meg='grad', eeg=False, ref_meg=False,
                          exclude=exclude)
    # Map each info pick to its position in the covariance channel list.
    idx_eeg = [ch_names.index(info_ch_names[c])
               for c in sel_eeg if info_ch_names[c] in ch_names]
    idx_mag = [ch_names.index(info_ch_names[c])
               for c in sel_mag if info_ch_names[c] in ch_names]
    idx_grad = [ch_names.index(info_ch_names[c])
                for c in sel_grad if info_ch_names[c] in ch_names]

    # (indices, subplot title, display unit, scaling to that unit);
    # channel types with no channels are dropped.
    idx_names = [(idx_eeg, 'EEG covariance', 'uV', 1e6),
                 (idx_grad, 'Gradiometers', 'fT/cm', 1e13),
                 (idx_mag, 'Magnetometers', 'fT', 1e15)]
    idx_names = [(idx, name, unit, scaling)
                 for idx, name, unit, scaling in idx_names if len(idx) > 0]

    C = cov.data[ch_idx][:, ch_idx]

    if proj:
        projs = copy.deepcopy(info['projs'])

        # Activate the projection items
        for p in projs:
            p['active'] = True

        P, ncomp, _ = make_projector(projs, ch_names)
        if ncomp > 0:
            logger.info('    Created an SSP operator (subspace dimension'
                        ' = %d)' % ncomp)
            # Project the covariance: C <- P C P^T.
            C = np.dot(P, np.dot(C, P.T))
        else:
            logger.info('    The projection vectors do not apply to these '
                        'channels.')

    import matplotlib.pyplot as plt
    # One image subplot per channel type.
    fig_cov = plt.figure(figsize=(2.5 * len(idx_names), 2.7))
    for k, (idx, name, _, _) in enumerate(idx_names):
        plt.subplot(1, len(idx_names), k + 1)
        plt.imshow(C[idx][:, idx], interpolation="nearest", cmap='RdBu_r')
        plt.title(name)
    plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26)
    tight_layout(fig=fig_cov)

    fig_svd = None
    if show_svd:
        fig_svd = plt.figure()
        for k, (idx, name, unit, scaling) in enumerate(idx_names):
            # Singular values only; sqrt turns variances into std devs.
            s = linalg.svd(C[idx][:, idx], compute_uv=False)
            plt.subplot(1, len(idx_names), k + 1)
            plt.ylabel('Noise std (%s)' % unit)
            plt.xlabel('Eigenvalue index')
            plt.semilogy(np.sqrt(s) * scaling)
            plt.title(name)
        tight_layout(fig=fig_svd)

    plt_show(show)
    return fig_cov, fig_svd
def plot_source_spectrogram(stcs, freq_bins, tmin=None, tmax=None,
                            source_index=None, colorbar=False, show=True):
    """Plot source power in a time-frequency grid.

    Parameters
    ----------
    stcs : list of SourceEstimate
        Source power for consecutive time windows, one SourceEstimate object
        should be provided for each frequency bin.
    freq_bins : list of tuples of float
        Start and end points of frequency bins of interest.
    tmin : float
        Minimum time instant to show.
    tmax : float
        Maximum time instant to show.
    source_index : int | None
        Index of source for which the spectrogram will be plotted. If None,
        the source with the largest activation will be selected.
    colorbar : bool
        If true, a colorbar will be added to the plot.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : matplotlib.figure.Figure
        The figure containing the spectrogram.
    """
    import matplotlib.pyplot as plt

    # Input checks
    if len(stcs) == 0:
        raise ValueError('cannot plot spectrogram if len(stcs) == 0')

    stc = stcs[0]
    if tmin is not None and tmin < stc.times[0]:
        raise ValueError('tmin cannot be smaller than the first time point '
                         'provided in stcs')
    if tmax is not None and tmax > stc.times[-1] + stc.tstep:
        raise ValueError('tmax cannot be larger than the sum of the last time '
                         'point and the time step, which are provided in stcs')

    # Preparing time-frequency cell boundaries for plotting
    if tmin is None:
        tmin = stc.times[0]
    if tmax is None:
        tmax = stc.times[-1] + stc.tstep
    time_bounds = np.arange(tmin, tmax + stc.tstep, stc.tstep)
    freq_bounds = sorted(set(np.ravel(freq_bins)))
    # Keep a copy for tick labels: freq_bounds is mutated below when
    # frequency gaps are removed.
    freq_ticks = copy.deepcopy(freq_bounds)

    # Reject time points that will not be plotted and gather results
    source_power = []
    for stc in stcs:
        stc = stc.copy()  # copy since crop modifies inplace
        stc.crop(tmin, tmax - stc.tstep)
        source_power.append(stc.data)
    source_power = np.array(source_power)

    # Finding the source with maximum source power
    if source_index is None:
        source_index = np.unravel_index(source_power.argmax(),
                                        source_power.shape)[1]

    # If there is a gap in the frequency bins record its locations so that it
    # can be covered with a gray horizontal bar
    gap_bounds = []
    for i in range(len(freq_bins) - 1):
        lower_bound = freq_bins[i][1]
        upper_bound = freq_bins[i + 1][0]
        if lower_bound != upper_bound:
            freq_bounds.remove(lower_bound)
            gap_bounds.append((lower_bound, upper_bound))

    # Preparing time-frequency grid for plotting
    time_grid, freq_grid = np.meshgrid(time_bounds, freq_bounds)

    # Plotting the results
    fig = plt.figure(figsize=(9, 6))
    plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :],
               cmap='Reds')
    ax = plt.gca()

    plt.title('Time-frequency source power')
    plt.xlabel('Time (s)')
    plt.ylabel('Frequency (Hz)')

    # Thin out x tick labels so at most ~10 remain readable.
    time_tick_labels = [str(np.round(t, 2)) for t in time_bounds]
    n_skip = 1 + len(time_bounds) // 10
    for i in range(len(time_bounds)):
        if i % n_skip != 0:
            time_tick_labels[i] = ''

    ax.set_xticks(time_bounds)
    ax.set_xticklabels(time_tick_labels)
    plt.xlim(time_bounds[0], time_bounds[-1])
    plt.yscale('log')
    ax.set_yticks(freq_ticks)
    ax.set_yticklabels([np.round(freq, 2) for freq in freq_ticks])
    plt.ylim(freq_bounds[0], freq_bounds[-1])
    plt.grid(True, ls='-')
    if colorbar:
        plt.colorbar()
    tight_layout(fig=fig)

    # Covering frequency gaps with horizontal bars
    for lower_bound, upper_bound in gap_bounds:
        plt.barh(lower_bound, time_bounds[-1] - time_bounds[0], upper_bound -
                 lower_bound, time_bounds[0], color='#666666')

    plt_show(show)
    return fig
def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
                       slices=None, show=True):
    """Plot BEM contours on anatomical slices.

    Parameters
    ----------
    mri_fname : str
        The name of the file containing anatomical data.
    surf_fnames : list of str
        The filenames for the BEM surfaces in the format
        ['inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf'].
    orientation : str
        'coronal' or 'axial' or 'sagittal'
    slices : list of int
        Slice indices.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : Instance of matplotlib.figure.Figure
        The figure.
    """
    import matplotlib.pyplot as plt
    import nibabel as nib

    if orientation not in ['coronal', 'axial', 'sagittal']:
        raise ValueError("Orientation must be 'coronal', 'axial' or "
                         "'sagittal'. Got %s." % orientation)

    # Load the T1 data
    nim = nib.load(mri_fname)
    data = nim.get_data()
    affine = nim.get_affine()

    n_sag, n_axi, n_cor = data.shape
    orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
    orientation_axis = orientation_name2axis[orientation]

    # Default: 12 evenly-spaced slices along the requested axis.
    if slices is None:
        n_slices = data.shape[orientation_axis]
        slices = np.linspace(0, n_slices, 12, endpoint=False).astype(np.int)

    # create of list of surfaces
    surfs = list()

    trans = linalg.inv(affine)
    # XXX : next line is a hack don't ask why
    trans[:3, -1] = [n_sag // 2, n_axi // 2, n_cor // 2]

    for surf_fname in surf_fnames:
        surf = dict()
        surf['rr'], surf['tris'] = read_surface(surf_fname)
        # move back surface to MRI coordinate system
        surf['rr'] = nib.affines.apply_affine(trans, surf['rr'])
        surfs.append(surf)

    fig, axs = _prepare_trellis(len(slices), 4)

    for ax, sl in zip(axs, slices):
        # adjust the orientations for good view
        if orientation == 'coronal':
            dat = data[:, :, sl].transpose()
        elif orientation == 'axial':
            dat = data[:, sl, :]
        elif orientation == 'sagittal':
            dat = data[sl, :, :]

        # First plot the anatomical data
        ax.imshow(dat, cmap=plt.cm.gray)
        ax.axis('off')

        # and then plot the contours on top: the two in-plane coordinate
        # columns vary with the orientation, the third is contoured at
        # the slice level.
        for surf in surfs:
            if orientation == 'coronal':
                ax.tricontour(surf['rr'][:, 0], surf['rr'][:, 1],
                              surf['tris'], surf['rr'][:, 2],
                              levels=[sl], colors='yellow', linewidths=2.0)
            elif orientation == 'axial':
                ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 0],
                              surf['tris'], surf['rr'][:, 1],
                              levels=[sl], colors='yellow', linewidths=2.0)
            elif orientation == 'sagittal':
                ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 1],
                              surf['tris'], surf['rr'][:, 0],
                              levels=[sl], colors='yellow', linewidths=2.0)

    plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
                        hspace=0.)
    plt_show(show)
    return fig
def plot_bem(subject=None, subjects_dir=None, orientation='coronal',
             slices=None, show=True):
    """Plot BEM contours on anatomical slices.

    Locates the subject's T1 MRI and BEM surface files on disk, then
    delegates the drawing to :func:`_plot_mri_contours`.

    Parameters
    ----------
    subject : str
        Subject name.
    subjects_dir : str | None
        Path to the SUBJECTS_DIR. If None, the path is obtained by using
        the environment variable SUBJECTS_DIR.
    orientation : str
        'coronal' or 'axial' or 'sagittal'.
    slices : list of int
        Slice indices.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : Instance of matplotlib.figure.Figure
        The figure.
    """
    subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)

    # The anatomical volume the contours are drawn over.
    mri_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
    if not op.isfile(mri_fname):
        raise IOError('MRI file "%s" does not exist' % mri_fname)

    bem_path = op.join(subjects_dir, subject, 'bem')
    if not op.isdir(bem_path):
        raise IOError('Subject bem directory "%s" does not exist' % bem_path)

    # Collect whichever of the three standard BEM surfaces are present,
    # taking the first match per pattern.
    surf_fnames = []
    for pattern in ('*inner_skull', '*outer_skull', '*outer_skin'):
        matches = glob(op.join(bem_path, pattern + '.surf'))
        if len(matches) > 0:
            logger.info("Using surface: %s" % matches[0])
            surf_fnames.append(matches[0])
    if len(surf_fnames) == 0:
        raise IOError('No surface files found. Surface files must end with '
                      'inner_skull.surf, outer_skull.surf or outer_skin.surf')

    # Plot the contours
    return _plot_mri_contours(mri_fname, surf_fnames, orientation=orientation,
                              slices=slices, show=show)
def plot_events(events, sfreq=None, first_samp=0, color=None, event_id=None,
                axes=None, equal_spacing=True, show=True):
    """Plot events to get a visual display of the paradigm.

    Parameters
    ----------
    events : array, shape (n_events, 3)
        The events.
    sfreq : float | None
        The sample frequency. If None, data will be displayed in samples (not
        seconds).
    first_samp : int
        The index of the first sample. Typically the raw.first_samp
        attribute. It is needed for recordings on a Neuromag
        system as the events are defined relative to the system
        start and not to the beginning of the recording.
    color : dict | None
        Dictionary of event_id value and its associated color. If None,
        colors are automatically drawn from a default list (cycled through if
        number of events longer than list of default colors).
    event_id : dict | None
        Dictionary of event label (e.g. 'aud_l') and its associated
        event_id value. Label used to plot a legend. If None, no legend is
        drawn.
    axes : instance of matplotlib.axes.AxesSubplot
        The subplot handle.
    equal_spacing : bool
        Use equal spacing between events in y-axis.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : matplotlib.figure.Figure
        The figure object containing the plot.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    if sfreq is None:
        # No sampling rate: keep the x axis in raw sample indices.
        sfreq = 1.0
        xlabel = 'samples'
    else:
        xlabel = 'Time (s)'

    events = np.asarray(events)
    unique_events = np.unique(events[:, 2])

    if event_id is not None:
        # get labels and unique event ids from event_id dict,
        # sorted by value
        event_id_rev = dict((v, k) for k, v in event_id.items())
        conditions, unique_events_id = zip(*sorted(event_id.items(),
                                                   key=lambda x: x[1]))

        # Every id named in event_id must actually occur in the data...
        for this_event in unique_events_id:
            if this_event not in unique_events:
                raise ValueError('%s from event_id is not present in events.'
                                 % this_event)

        # ...while ids present in the data but absent from event_id are
        # merely skipped with a warning.
        for this_event in unique_events:
            if this_event not in unique_events_id:
                warn('event %s missing from event_id will be ignored'
                     % this_event)
    else:
        unique_events_id = unique_events

    if color is None:
        if len(unique_events) > len(COLORS):
            warn('More events than colors available. You should pass a list '
                 'of unique colors.')
        colors = cycle(COLORS)
        color = dict()
        for this_event, this_color in zip(unique_events_id, colors):
            color[this_event] = this_color
    else:
        # User-supplied colors must reference known event ids; ids
        # without a color fall back to matplotlib defaults.
        for this_event in color:
            if this_event not in unique_events_id:
                raise ValueError('%s from color is not present in events '
                                 'or event_id.' % this_event)

        for this_event in unique_events_id:
            if this_event not in color:
                warn('Color is not available for event %d. Default colors '
                     'will be used.' % this_event)

    import matplotlib.pyplot as plt

    fig = None
    if axes is None:
        fig = plt.figure()
    ax = axes if axes else plt.gca()

    unique_events_id = np.array(unique_events_id)
    min_event = np.min(unique_events_id)
    max_event = np.max(unique_events_id)

    # One scatter series per event id; y is either the series rank
    # (equal_spacing) or the raw event id value.
    for idx, ev in enumerate(unique_events_id):
        ev_mask = events[:, 2] == ev
        kwargs = {}
        if event_id is not None:
            event_label = '{0} ({1})'.format(event_id_rev[ev],
                                             np.sum(ev_mask))
            kwargs['label'] = event_label
        if ev in color:
            kwargs['color'] = color[ev]
        if equal_spacing:
            ax.plot((events[ev_mask, 0] - first_samp) / sfreq,
                    (idx + 1) * np.ones(ev_mask.sum()), '.', **kwargs)
        else:
            ax.plot((events[ev_mask, 0] - first_samp) / sfreq,
                    events[ev_mask, 2], '.', **kwargs)

    if equal_spacing:
        ax.set_ylim(0, unique_events_id.size + 1)
        ax.set_yticks(1 + np.arange(unique_events_id.size))
        ax.set_yticklabels(unique_events_id)
    else:
        ax.set_ylim([min_event - 1, max_event + 1])

    ax.set_xlabel(xlabel)
    ax.set_ylabel('Events id')

    ax.grid('on')

    fig = fig if fig is not None else plt.gcf()
    if event_id is not None:
        # Shrink the axes to make room for the legend on the right.
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
        ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    fig.canvas.draw()
    plt_show(show)
    return fig
def _get_presser(fig):
    """Return the partial-wrapped button_press_event callback of ``fig``."""
    registered = fig.canvas.callbacks.callbacks['button_press_event']
    func = None
    for cb in registered.values():
        # Our press handler is the only one registered via functools.partial.
        if cb.func.__class__.__name__ == 'partial':
            func = cb.func
            break
    assert func is not None
    return func
def plot_dipole_amplitudes(dipoles, colors=None, show=True):
    """Plot the amplitude traces of a set of dipoles.

    Parameters
    ----------
    dipoles : list of instance of Dipoles
        The dipoles whose amplitudes should be shown.
    colors: list of colors | None
        Color to plot with each dipole. If None default colors are used.
    show : bool
        Show figure if True.

    Returns
    -------
    fig : matplotlib.figure.Figure
        The figure object containing the plot.

    Notes
    -----
    .. versionadded:: 0.9.0
    """
    import matplotlib.pyplot as plt
    if colors is None:
        colors = cycle(COLORS)
    fig, ax = plt.subplots(1, 1)

    # Track the overall time span across all dipoles for the x limits.
    t_min, t_max = np.inf, -np.inf
    for dipole, line_color in zip(dipoles, colors):
        ax.plot(dipole.times, dipole.amplitude, color=line_color,
                linewidth=1.5)
        t_min = min(t_min, dipole.times[0])
        t_max = max(t_max, dipole.times[-1])

    ax.set_xlim([t_min, t_max])
    ax.set_xlabel('Time (sec)')
    ax.set_ylabel('Amplitude (nAm)')
    if show:
        fig.show(warn=False)
    return fig
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/sklearn/neighbors/regression.py | 5 | 11000 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
                          SupervisedFloatMixin,
                          RegressorMixin):
    """Regression based on k-nearest neighbors.

    The prediction for a query point is the (optionally distance-weighted)
    average of the targets of its ``n_neighbors`` nearest training samples.

    Read more in the :ref:`User Guide <regression>`.

    Parameters
    ----------
    n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`k_neighbors`
        queries.

    weights : str or callable
        Weight function used in prediction.  Possible values:

        - 'uniform' : all points in each neighborhood are weighted
          equally (the default).
        - 'distance' : weight points by the inverse of their distance,
          so closer neighbors have a greater influence.
        - [callable] : a user-defined function that maps an array of
          distances to an array of weights of the same shape.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors.  'auto' picks
        the most appropriate algorithm from the values passed to
        :meth:`fit`.  Note: fitting on sparse input overrides this
        setting and uses brute force.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree; affects construction and
        query speed as well as the memory required to store the tree.

    metric : string or DistanceMetric object (default='minkowski')
        Distance metric to use for the tree.  The default minkowski
        metric with p=2 is the standard Euclidean metric.  See the
        DistanceMetric class documentation for the available metrics.

    p : integer, optional (default = 2)
        Power parameter for the Minkowski metric.  p=1 is equivalent to
        manhattan_distance (l1), p=2 to euclidean_distance (l2);
        arbitrary p uses minkowski_distance (l_p).

    metric_params : dict, optional (default = None)
        Additional keyword arguments for the metric function.

    n_jobs : int, optional (default = 1)
        Number of parallel jobs for the neighbors search (``-1`` uses
        all CPU cores).  Doesn't affect :meth:`fit`.

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import KNeighborsRegressor
    >>> neigh = KNeighborsRegressor(n_neighbors=2)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    KNeighborsRegressor(...)
    >>> print(neigh.predict([[1.5]]))
    [ 0.5]

    See also
    --------
    NearestNeighbors
    RadiusNeighborsRegressor
    KNeighborsClassifier
    RadiusNeighborsClassifier

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    .. warning::

       If neighbors `k` and `k+1` have identical distances but different
       labels, the result depends on the ordering of the training data.

    https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, n_neighbors=5, weights='uniform',
                 algorithm='auto', leaf_size=30,
                 p=2, metric='minkowski', metric_params=None, n_jobs=1,
                 **kwargs):
        # Neighbor-search configuration is delegated to the shared base.
        self._init_params(n_neighbors=n_neighbors,
                          algorithm=algorithm,
                          leaf_size=leaf_size, metric=metric, p=p,
                          metric_params=metric_params, n_jobs=n_jobs,
                          **kwargs)
        # Validate the weighting scheme eagerly so bad values fail fast.
        self.weights = _check_weights(weights)

    def predict(self, X):
        """Predict the target for the provided data.

        Parameters
        ----------
        X : array-like, shape (n_query, n_features), \
                or (n_query, n_indexed) if metric == 'precomputed'
            Test samples.

        Returns
        -------
        y : array of int, shape = [n_samples] or [n_samples, n_outputs]
            Target values
        """
        X = check_array(X, accept_sparse='csr')

        dist, neigh_ind = self.kneighbors(X)
        sample_weights = _get_weights(dist, self.weights)

        targets = self._y
        single_output = (targets.ndim == 1)
        if single_output:
            # Treat single-output regression as one-column multi-output.
            targets = targets.reshape((-1, 1))

        if sample_weights is None:
            pred = np.mean(targets[neigh_ind], axis=1)
        else:
            pred = np.empty((X.shape[0], targets.shape[1]),
                            dtype=np.float64)
            norm = np.sum(sample_weights, axis=1)
            for col in range(targets.shape[1]):
                pred[:, col] = np.sum(
                    targets[neigh_ind, col] * sample_weights,
                    axis=1) / norm

        if single_output:
            pred = pred.ravel()

        return pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
                               SupervisedFloatMixin,
                               RegressorMixin):
    """Regression based on neighbors within a fixed radius.

    The prediction for a query point is the (optionally distance-weighted)
    average of the targets of all training samples within ``radius``.

    Read more in the :ref:`User Guide <regression>`.

    Parameters
    ----------
    radius : float, optional (default = 1.0)
        Range of parameter space used by default for
        :meth:`radius_neighbors` queries.

    weights : str or callable
        Weight function used in prediction.  Possible values:

        - 'uniform' : all points in each neighborhood are weighted
          equally (the default).
        - 'distance' : weight points by the inverse of their distance,
          so closer neighbors have a greater influence.
        - [callable] : a user-defined function that maps an array of
          distances to an array of weights of the same shape.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors.  'auto' picks
        the most appropriate algorithm from the values passed to
        :meth:`fit`.  Note: fitting on sparse input overrides this
        setting and uses brute force.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree; affects construction and
        query speed as well as the memory required to store the tree.

    metric : string or DistanceMetric object (default='minkowski')
        Distance metric to use for the tree.  The default minkowski
        metric with p=2 is the standard Euclidean metric.  See the
        DistanceMetric class documentation for the available metrics.

    p : integer, optional (default = 2)
        Power parameter for the Minkowski metric.  p=1 is equivalent to
        manhattan_distance (l1), p=2 to euclidean_distance (l2);
        arbitrary p uses minkowski_distance (l_p).

    metric_params : dict, optional (default = None)
        Additional keyword arguments for the metric function.

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import RadiusNeighborsRegressor
    >>> neigh = RadiusNeighborsRegressor(radius=1.0)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    RadiusNeighborsRegressor(...)
    >>> print(neigh.predict([[1.5]]))
    [ 0.5]

    See also
    --------
    NearestNeighbors
    KNeighborsRegressor
    KNeighborsClassifier
    RadiusNeighborsClassifier

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, radius=1.0, weights='uniform',
                 algorithm='auto', leaf_size=30,
                 p=2, metric='minkowski', metric_params=None, **kwargs):
        # Neighbor-search configuration is delegated to the shared base.
        self._init_params(radius=radius,
                          algorithm=algorithm,
                          leaf_size=leaf_size,
                          p=p, metric=metric, metric_params=metric_params,
                          **kwargs)
        # Validate the weighting scheme eagerly so bad values fail fast.
        self.weights = _check_weights(weights)

    def predict(self, X):
        """Predict the target for the provided data.

        Parameters
        ----------
        X : array-like, shape (n_query, n_features), \
                or (n_query, n_indexed) if metric == 'precomputed'
            Test samples.

        Returns
        -------
        y : array of int, shape = [n_samples] or [n_samples, n_outputs]
            Target values
        """
        X = check_array(X, accept_sparse='csr')

        dist, neigh_ind = self.radius_neighbors(X)
        sample_weights = _get_weights(dist, self.weights)

        targets = self._y
        single_output = (targets.ndim == 1)
        if single_output:
            # Treat single-output regression as one-column multi-output.
            targets = targets.reshape((-1, 1))

        rows = []
        if sample_weights is None:
            for ind in neigh_ind:
                rows.append(np.mean(targets[ind, :], axis=0))
        else:
            for i, ind in enumerate(neigh_ind):
                rows.append(np.average(targets[ind, :], axis=0,
                                       weights=sample_weights[i]))
        y_pred = np.array(rows)

        if single_output:
            y_pred = y_pred.ravel()

        return y_pred
| mit |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/applications/plot_model_complexity_influence.py | 25 | 6378 | """
==========================
Model Complexity Influence
==========================

Demonstrate how model complexity influences both prediction accuracy and
computational performance.

The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).

For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics.metrics import hamming_loss
###############################################################################
# Routines

# Fix the global NumPy RNG seed so repeated runs are reproducible.
np.random.seed(0)
def generate_data(case, sparse=False):
    """Generate regression/classification data.

    *case* selects the dataset ('regression' -> Boston housing,
    'classification' -> vectorized 20 Newsgroups); *sparse* converts the
    feature matrices to CSR.  Returns a dict with X_train/X_test/
    y_train/y_test after an 80/20 shuffled split.
    """
    bunch = None
    if case == 'regression':
        bunch = datasets.load_boston()
    elif case == 'classification':
        bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
    X, y = shuffle(bunch.data, bunch.target)
    split = int(X.shape[0] * 0.8)
    X_train, y_train = X[:split], y[:split]
    X_test, y_test = X[split:], y[split:]
    if sparse:
        X_train = csr_matrix(X_train)
        X_test = csr_matrix(X_test)
    else:
        X_train = np.array(X_train)
        X_test = np.array(X_test)
        y_test = np.array(y_test)
        y_train = np.array(y_train)
    return {'X_train': X_train, 'X_test': X_test,
            'y_train': y_train, 'y_test': y_test}
def benchmark_influence(conf):
    """
    Benchmark influence of :changing_param: on both MSE and latency.

    For every value in ``conf['changing_param_values']`` an estimator is
    built, fitted and timed over ``conf['n_samples']`` predict calls.
    Returns (prediction scores, mean latencies, complexity values).
    """
    pred_scores = []
    latencies = []
    complexity_values = []
    for value in conf['changing_param_values']:
        conf['tuned_params'][conf['changing_param']] = value
        estimator = conf['estimator'](**conf['tuned_params'])
        print("Benchmarking %s" % estimator)
        estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
        conf['postfit_hook'](estimator)
        complexity = conf['complexity_computer'](estimator)
        complexity_values.append(complexity)
        tic = time.time()
        for _ in range(conf['n_samples']):
            y_pred = estimator.predict(conf['data']['X_test'])
        # Average the wall-clock time over the repeated predict calls.
        per_call = (time.time() - tic) / float(conf['n_samples'])
        latencies.append(per_call)
        score = conf['prediction_performance_computer'](
            conf['data']['y_test'], y_pred)
        pred_scores.append(score)
        print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
            complexity, conf['prediction_performance_label'], score,
            per_call))
    return pred_scores, latencies, complexity_values
def plot_influence(conf, mse_values, prediction_times, complexities):
    """
    Plot influence of model complexity on both accuracy and latency.

    Left axis: prediction error; right axis: per-call latency, both
    against the model-complexity values.
    """
    plt.figure(figsize=(12, 6))
    host = host_subplot(111, axes_class=Axes)
    plt.subplots_adjust(right=0.75)
    # Second y-axis sharing the same x-axis for latency.
    par1 = host.twinx()
    host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
    left_label = conf['prediction_performance_label']
    right_label = "Time (s)"
    host.set_ylabel(left_label)
    par1.set_ylabel(right_label)
    err_line, = host.plot(complexities, mse_values, 'b-',
                          label="prediction error")
    lat_line, = par1.plot(complexities, prediction_times, 'r-',
                          label="latency")
    host.legend(loc='upper right')
    # Color each axis label like its curve for readability.
    host.axis["left"].label.set_color(err_line.get_color())
    par1.axis["right"].label.set_color(lat_line.get_color())
    plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
    plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code

# Shared datasets: dense Boston housing for the regressors, sparse
# 20 Newsgroups for the text classifier.
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)

# One benchmark configuration per model family.  Each entry names the
# estimator, the hyper-parameter being swept ('changing_param' over
# 'changing_param_values'), how model complexity is computed, and which
# score benchmark_influence() reports.
configurations = [
    # Elastic-net SGD text classifier; complexity = number of non-zero
    # coefficients after sparsification.
    {'estimator': SGDClassifier,
     'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
                      'modified_huber', 'fit_intercept': True},
     'changing_param': 'l1_ratio',
     'changing_param_values': [0.25, 0.5, 0.75, 0.9],
     'complexity_label': 'non_zero coefficients',
     'complexity_computer': _count_nonzero_coefficients,
     'prediction_performance_computer': hamming_loss,
     'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
     'postfit_hook': lambda x: x.sparsify(),
     'data': classification_data,
     'n_samples': 30},
    # Nu-SVR regressor; complexity = number of support vectors.
    {'estimator': NuSVR,
     'tuned_params': {'C': 1e3, 'gamma': 2**-15},
     'changing_param': 'nu',
     'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
     'complexity_label': 'n_support_vectors',
     'complexity_computer': lambda x: len(x.support_vectors_),
     'data': regression_data,
     'postfit_hook': lambda x: x,
     'prediction_performance_computer': mean_squared_error,
     'prediction_performance_label': 'MSE',
     'n_samples': 30},
    # Gradient-boosted trees; complexity = ensemble size.
    {'estimator': GradientBoostingRegressor,
     'tuned_params': {'loss': 'ls'},
     'changing_param': 'n_estimators',
     'changing_param_values': [10, 50, 100, 200, 500],
     'complexity_label': 'n_trees',
     'complexity_computer': lambda x: x.n_estimators,
     'data': regression_data,
     'postfit_hook': lambda x: x,
     'prediction_performance_computer': mean_squared_error,
     'prediction_performance_label': 'MSE',
     'n_samples': 30},
]

# Run every benchmark and plot accuracy/latency against complexity.
for conf in configurations:
    prediction_performances, prediction_times, complexities = \
        benchmark_influence(conf)
    plot_influence(conf, prediction_performances, prediction_times,
                   complexities)
| bsd-3-clause |
Dapid/scipy | doc/source/tutorial/examples/normdiscr_plot2.py | 84 | 1642 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# Build a discrete approximation of a truncated normal on an integer grid,
# draw a sample from it, and plot the sample's cumulative frequencies
# against the true CDF.
npoints = 20  # number of integer support points of the distribution minus 1
# FIX: integer floor division — Python-2 era `/` would yield a float under
# Python 3, turning the integer grid into a float grid.
npointsh = npoints // 2
npointsf = float(npoints)
nbound = 4  # bounds for the truncated normal
normbound = (1 + 1 / npointsf) * nbound  # actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh + 2, 1)  # integer grid
gridlimitsnorm = (grid - 0.5) / npointsh * nbound  # bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
# Probability mass per integer point = CDF difference over its bin.
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid

normdiscrete = stats.rv_discrete(
    values=(gridint, np.round(probs, decimals=7)),
    name='normdiscrete')

n_sample = 500
np.random.seed(87655678)  # fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
rvsnd = rvs
f, l = np.histogram(rvs, bins=gridlimits)
sfreq = np.vstack([gridint, f, probs * n_sample]).T
# Cumulative relative frequencies (sample) and cumulative probabilities
# (true).  The non-cumulative versions previously computed here were dead
# assignments — they were immediately overwritten — and have been removed.
fs = sfreq[:, 1].cumsum() / float(n_sample)
ft = sfreq[:, 2].cumsum() / float(n_sample)
nd_std = np.sqrt(normdiscrete.stats(moments='v'))

ind = gridint  # the x locations for the groups
width = 0.35  # the width of the bars

plt.figure()
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind + width, fs, width, color='r')
normline = plt.plot(ind + width / 2.0, stats.norm.cdf(ind + 0.5, scale=nd_std),
                    color='b')
plt.ylabel('cdf')
plt.title('Cumulative Frequency and CDF of normdiscrete')
plt.xticks(ind + width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
| bsd-3-clause |
APPIAN-PET/APPIAN | src/qc.py | 1 | 42641 | # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 mouse=a
import matplotlib
matplotlib.rcParams['figure.facecolor'] = '1.'
matplotlib.use('Agg')
import ants
import numpy as np
import pandas as pd
import os
import imageio
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as niu
import nibabel as nib
import shutil
import ntpath
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as niu
import nipype.interfaces.io as nio
import matplotlib.pyplot as plt
import seaborn as sns
import inspect
import json
import re
import time
import matplotlib.animation as animation
from skimage.feature import canny
from nibabel.processing import resample_to_output
from sklearn.metrics import normalized_mutual_info_score
from sklearn.ensemble import IsolationForest
from sklearn.cluster import DBSCAN
from sklearn.neighbors import LocalOutlierFactor
from sklearn.svm import OneClassSVM
from skimage.filters import threshold_otsu
from math import sqrt, log, ceil
from os import getcwd
from os.path import basename
from sys import argv, exit
from glob import glob
from src.outlier import kde, MAD
from sklearn.neighbors import LocalOutlierFactor
from src.utils import concat_df
from nipype.interfaces.base import (TraitedSpec, File, traits, InputMultiPath,
BaseInterface, OutputMultiPath, BaseInterfaceInputSpec, isdefined)
from scipy.ndimage.filters import gaussian_filter
from nipype.utils.filemanip import (load_json, save_json, split_filename, fname_presuffix, copyfile)
# Directory containing this module (the filename part `fn` is unused here).
_file_dir, fn =os.path.split( os.path.abspath(__file__) )
def load_3d(fn, t=0):
    """Load a NIfTI file and return (Nifti1Image, ndarray) for frame *t*.

    4D inputs are reduced to the single 3D frame *t*; 3D inputs are
    returned unchanged apart from being rewrapped in a new image.
    """
    print('Reading Frame %d'%t,'from', fn)
    src = nib.load(fn)
    data = src.get_fdata()
    hd = src.header
    if data.ndim == 4:
        # Extract the requested time frame and drop the time axis.
        data = data[:, :, :, t]
        data = data.reshape(data.shape[0:3])
    out = nib.Nifti1Image(data, src.affine)
    return out, data
def get_spacing(aff, i):
    """Signed voxel spacing along output axis *i* of affine *aff*.

    Returns the row-*i* entry (among the first three columns) with the
    largest magnitude, i.e. the dominant direction of that axis.
    """
    dominant = np.argmax(np.abs(aff[i, 0:3]))
    return aff[i, dominant]
######################
# Group-level QC #
######################
#datasink for dist metrics
#check how the calc outlier measure node is implemented, may need to be reimplemented
# Name of the output sub-directory used by the group-level QC datasink.
final_dir="qc"
def group_level_qc(opts, args):
    """Build and run the group-level QC nipype workflow.

    Gathers the scan-level QC metric CSVs (coregistration, kinetic/TKA and
    PVC), concatenates and plots them, computes outlier measures, and sinks
    everything under ``<opts.targetDir>/qc``.

    Parameters
    ----------
    opts :
        Options namespace; must provide targetDir, preproc_dir and the
        optional pvc/quant/results *_label_name attributes.
    args :
        Unused; kept for interface compatibility with the caller.
    """
    # BUG FIX: build the node-name prefix *before* any node is created.
    # The original assigned qc_err only after the workflow/datasink nodes
    # that referenced it, raising UnboundLocalError at runtime.
    qc_err = ''
    if opts.pvc_label_name != None :
        qc_err += "_" + opts.pvc_label_name
    if opts.quant_label_name != None :
        qc_err += "_" + opts.quant_label_name
    if opts.results_label_name != None :
        qc_err += "_" + opts.results_label_name
    qc_err += "_"

    #setup workflow
    workflow = pe.Workflow(name=qc_err+opts.preproc_dir)
    workflow.base_dir = opts.targetDir

    #Datasink
    datasink=pe.Node(interface=nio.DataSink(), name=qc_err+"output")
    datasink.inputs.base_directory= opts.targetDir +os.sep +"qc"
    datasink.inputs.substitutions = [('_cid_', ''), ('sid_', '')]

    outfields=['coreg_metrics','tka_metrics','pvc_metrics']
    paths={'coreg_metrics':"*/coreg_qc_metrics/*_metric.csv", 'tka_metrics':"*/results_tka/*_3d.csv",'pvc_metrics':"*/pvc_qc_metrics/*qc_metric.csv"}

    #If any one of the sets of metrics does not exist because it has not been
    #run at the scan level, then remove it from the list of outfields and
    #paths that the datagrabber will look for.
    # BUG FIX: iterate over a snapshot — paths is mutated inside the loop,
    # and mutating a dict while iterating items() raises RuntimeError on
    # Python 3.
    for outfield, path in list(paths.items()):
        full_path = opts.targetDir + os.sep + opts.preproc_dir + os.sep + path
        print(full_path)
        if len(glob(full_path)) == 0 :
            outfields.remove(outfield)
            paths.pop(outfield)

    #Datagrabber
    datasource = pe.Node( interface=nio.DataGrabber( outfields=outfields, raise_on_empty=True, sort_filelist=False), name=qc_err+"datasource")
    datasource.inputs.base_directory = opts.targetDir + os.sep +opts.preproc_dir
    datasource.inputs.template = '*'
    datasource.inputs.field_template = paths

    ##################
    # Coregistration #
    ##################
    if 'coreg_metrics' in outfields:
        #Concatenate distance metrics
        concat_coreg_metricsNode=pe.Node(interface=concat_df(), name=qc_err+"concat_coreg_metrics")
        concat_coreg_metricsNode.inputs.out_file="coreg_qc_metrics.csv"
        workflow.connect(datasource, 'coreg_metrics', concat_coreg_metricsNode, 'in_list')
        workflow.connect(concat_coreg_metricsNode, "out_file", datasink, 'coreg/metrics')
        #Plot Coregistration Metrics
        plot_coreg_metricsNode=pe.Node(interface=plot_qcCommand(), name=qc_err+"plot_coreg_metrics")
        workflow.connect(concat_coreg_metricsNode, "out_file", plot_coreg_metricsNode, 'in_file')
        workflow.connect(plot_coreg_metricsNode, "out_file", datasink, 'coreg/metrics_plot')
        #Calculate Coregistration outlier measures
        outlier_measureNode = pe.Node(interface=outlier_measuresCommand(), name=qc_err+"coregistration_outlier_measure")
        workflow.connect(concat_coreg_metricsNode, 'out_file', outlier_measureNode, 'in_file')
        workflow.connect(outlier_measureNode, "out_file", datasink, 'coreg/outlier')
        #Plot coregistration outlier measures
        plot_coreg_measuresNode=pe.Node(interface=plot_qcCommand(),name=qc_err+"plot_coreg_measures")
        workflow.connect(outlier_measureNode,"out_file",plot_coreg_measuresNode,'in_file')
        workflow.connect(plot_coreg_measuresNode,"out_file",datasink,'coreg/measures_plot')

    #######
    # PVC #
    #######
    if 'pvc_metrics' in outfields:
        #Concatenate PVC metrics
        concat_pvc_metricsNode=pe.Node(interface=concat_df(), name=qc_err+"concat_pvc_metrics")
        concat_pvc_metricsNode.inputs.out_file="pvc_qc_metrics.csv"
        workflow.connect(datasource, 'pvc_metrics', concat_pvc_metricsNode, 'in_list')
        workflow.connect(concat_pvc_metricsNode, "out_file", datasink, 'pvc/metrics')
        #Plot PVC Metrics
        plot_pvc_metricsNode=pe.Node(interface=plot_qcCommand(), name=qc_err+"plot_pvc_metrics")
        workflow.connect(concat_pvc_metricsNode, "out_file", plot_pvc_metricsNode, 'in_file')
        workflow.connect(plot_pvc_metricsNode, "out_file", datasink, 'pvc/metrics_plot')
        #Calculate PVC outlier measures
        pvc_outlier_measureNode = pe.Node(interface=outlier_measuresCommand(), name=qc_err+"pvc_outlier_measure")
        workflow.connect(concat_pvc_metricsNode, 'out_file', pvc_outlier_measureNode, 'in_file')
        workflow.connect(pvc_outlier_measureNode, "out_file", datasink, 'pvc/outlier')
        #Plot PVC outlier measures
        plot_pvc_measuresNode=pe.Node(interface=plot_qcCommand(), name=qc_err+"plot_pvc_measures")
        workflow.connect(pvc_outlier_measureNode,"out_file",plot_pvc_measuresNode,'in_file')
        workflow.connect(plot_pvc_measuresNode, "out_file", datasink, 'pvc/measures_plot')

    #######
    # TKA #
    #######
    if 'tka_metrics' in outfields:
        #Concatenate TKA metrics
        concat_tka_metricsNode=pe.Node(interface=concat_df(), name=qc_err+"concat_tka_metrics")
        concat_tka_metricsNode.inputs.out_file="tka_qc_metrics.csv"
        workflow.connect(datasource, 'tka_metrics', concat_tka_metricsNode, 'in_list')
        workflow.connect(concat_tka_metricsNode, "out_file", datasink, 'tka/metrics')
        #Plot TKA Metrics
        plot_tka_metricsNode=pe.Node(interface=plot_qcCommand(), name=qc_err+"plot_tka_metrics")
        workflow.connect(concat_tka_metricsNode, "out_file", plot_tka_metricsNode, 'in_file')
        workflow.connect(plot_tka_metricsNode, "out_file", datasink, 'tka/metrics_plot')
        #Calculate TKA outlier measures
        tka_outlier_measureNode = pe.Node(interface=outlier_measuresCommand(), name=qc_err+"tka_outlier_measure")
        workflow.connect(concat_tka_metricsNode, 'out_file', tka_outlier_measureNode, 'in_file')
        workflow.connect(tka_outlier_measureNode, "out_file", datasink, 'tka/outlier')
        #Plot PVC outlier measures
        plot_tka_measuresNode=pe.Node(interface=plot_qcCommand(), name=qc_err+"plot_tka_measures")
        workflow.connect(tka_outlier_measureNode,"out_file",plot_tka_measuresNode,'in_file')
        workflow.connect(plot_tka_measuresNode, "out_file", datasink, 'tka/measures_plot')

    workflow.run()
####################
# Distance Metrics #
####################
# NOTE(review): __NBINS and the `copy` import appear unused in the visible
# part of this module — confirm before removing.
__NBINS=-1
import copy
def pvc_mse(pvc_fn, pve_fn, fwhm):
    """Agreement metric between a PVC-corrected PET volume and its PVE image.

    The PVC volume is re-blurred with the scanner *fwhm* and compared
    voxel-wise to the PVE volume.  The negative, PVE-normalized sum of
    absolute differences is returned, so values closer to 0 indicate
    better agreement.  For 4D inputs only the middle time frame is scored.

    NOTE(review): despite the name, `sqrt((a-b)**2)` is an absolute
    difference, so this is an L1-style error, not a squared error —
    confirm intent before renaming.
    """
    pvc = nib.load(pvc_fn)
    pvc.data = pvc.get_data()
    pve = nib.load(pve_fn)
    pve.data = pve.get_data()
    mse = 0
    if len(pvc.data.shape) > 3:  # 4D volume: score the middle time frame
        t = int(pvc.data.shape[3] / 2)
        pve_frame = pve.data[:, :, :, t]
        pvc_frame = pvc.data[:, :, :, t]
        # BUG FIX: the normalizer previously summed pve.data[t,:,:,:],
        # which indexes the *first spatial* axis with a time index; use
        # the same middle frame that is being compared.
        n = np.sum(pve_frame)
        pvc_blur = gaussian_filter(pvc_frame, fwhm)
        m = np.sum(np.sqrt((pve_frame - pvc_blur) ** 2))
        mse += m
        print(t, m)
    else:  # volume has 3 dimensions
        n = np.sum(pve.data)
        pvc_blur = gaussian_filter(pvc.data, fwhm)
        m = np.sum(np.sqrt((pve.data - pvc_blur) ** 2))
        mse += m
    mse = -mse / n
    print("PVC MSE:", mse)
    return mse
####################
# Outlier Measures #
####################
def _IsolationForest(X):
    """Fit an IsolationForest on X; return +1 (inlier) / -1 (outlier) labels."""
    X = np.array(X)
    if X.ndim == 1:
        # sklearn expects a 2D (n_samples, n_features) array.
        X = X.reshape(-1, 1)
    rng = np.random.RandomState(42)
    forest = IsolationForest(max_samples=X.shape[0], random_state=rng)
    return forest.fit(X).predict(X)
def _LocalOutlierFactor(X):
    """Return LOF negative outlier factors (more negative = more outlying)."""
    X = np.array(X)
    if X.ndim == 1:
        # sklearn expects a 2D (n_samples, n_features) array.
        X = X.reshape(-1, 1)
    # Neighborhood size: 20% of the samples, rounded to an int.
    n_neigh = int(round(X.shape[0] * 0.2))
    lof = LocalOutlierFactor(n_neighbors=n_neigh)
    lof.fit_predict(X)
    return lof.negative_outlier_factor_
def _OneClassSVM(X):
    """Fit a one-class SVM on X; return +1 (inlier) / -1 (outlier) labels.

    FIX: like the sibling outlier helpers (_IsolationForest,
    _LocalOutlierFactor), reshape 1-D input to a 2D column — sklearn
    rejects 1-D arrays, so the previous version crashed on them.
    """
    X = np.array(X)
    if X.ndim == 1:
        X = X.reshape(-1, 1)
    clf = OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
    clf.fit(X)
    return clf.predict(X)
def _dbscan(X):
    """Cluster X with DBSCAN; label -1 marks noise/outlier samples."""
    return DBSCAN(eps=0.3).fit_predict(X)
###########
# Globals #
###########
# NOTE(review): `global` at module level is a no-op; these declarations
# document intent only.
global distance_metrics
global outlier_measures
global metric_columns
global outlier_columns
# Registry of outlier-measure callables applied to each metric column.
outlier_measures={"KDE":kde, "LOF": _LocalOutlierFactor, "IsolationForest":_IsolationForest, "MAD":MAD} #, "DBSCAN":_dbscan, "OneClassSVM":_OneClassSVM }
# Column layouts for the metric and outlier-measure CSV files.
metric_columns = ['analysis', 'sub','ses','task','run','acq','rec','roi','metric','value']
outlier_columns = ['analysis', 'sub','ses','task','roi','metric','measure','value']
#######################
### Outlier Metrics ###
#######################
### PVC Metrics
class pvc_qc_metricsOutput(TraitedSpec):
    """Output spec for pvc_qc_metrics."""
    # Path of the CSV holding the computed PVC QC metrics.
    out_file = traits.File(desc="Output file")
class pvc_qc_metricsInput(BaseInterfaceInputSpec):
    """Input spec for pvc_qc_metrics."""
    pve = traits.File(exists=True, mandatory=True, desc="Input PVE PET image")
    pvc = traits.File(exists=True, mandatory=True, desc="Input PVC PET")
    fwhm = traits.List(desc='FWHM of the scanner')
    # NOTE(review): the strings below are passed positionally, so traits
    # treats them as *default values* rather than descriptions (contrast
    # with coreg_qc_metricsInput, which uses desc=...) — confirm intent.
    sub = traits.Str("Subject ID")
    task = traits.Str("Task")
    ses = traits.Str("Ses")
    run = traits.Str("Run")
    rec = traits.Str("Reconstruction")
    acq = traits.Str("Acquisition")
    out_file = traits.File(desc="Output file")
class pvc_qc_metrics(BaseInterface):
    """Compute QC metrics for partial-volume correction (PVC).

    Compares the PVC-corrected PET volume (re-blurred to scanner
    resolution) with the PVE image via pvc_mse and writes the result to a
    BIDS-style CSV.
    """
    input_spec = pvc_qc_metricsInput
    output_spec = pvc_qc_metricsOutput

    def _gen_output(self, sid, ses, task, run, acq, rec, fname="pvc_qc_metric.csv"):
        """Build the default output CSV path from the BIDS entities."""
        dname = os.getcwd()
        fn = dname + os.sep + 'sub-' + sid + '_ses-' + ses + '_task-' + task
        if isdefined(run):
            fn += '_run-' + str(run)
        fn += "_acq-" + str(acq) + "_rec-" + str(rec) + fname
        return fn

    def _run_interface(self, runtime):
        """Compute every registered PVC metric and write the results CSV."""
        sub = self.inputs.sub
        ses = self.inputs.ses
        task = self.inputs.task
        fwhm = self.inputs.fwhm
        run = self.inputs.run
        rec = self.inputs.rec
        acq = self.inputs.acq
        df = pd.DataFrame([], columns=metric_columns)
        pvc_metrics = {'mse': pvc_mse}
        for metric_name, metric_function in pvc_metrics.items():
            value = metric_function(self.inputs.pvc, self.inputs.pve, fwhm)
            temp = pd.DataFrame([['pvc', sub, ses, task, run, acq, rec, '02',
                                  metric_name, value]],
                                columns=metric_columns)
            df = pd.concat([df, temp])
        df.fillna(0, inplace=True)
        if not isdefined(self.inputs.out_file):
            self.inputs.out_file = self._gen_output(
                self.inputs.sub, self.inputs.ses, self.inputs.task,
                self.inputs.run, self.inputs.acq, self.inputs.rec)
        df.to_csv(self.inputs.out_file, index=False)
        return runtime

    def _list_outputs(self):
        """Report the output CSV path, generating the default if unset."""
        outputs = self.output_spec().get()
        if not isdefined(self.inputs.out_file):
            # BUG FIX: this previously called self.inputs._gen_output(...)
            # (the input spec has no such method) with a nonexistent
            # `sid` trait; use the interface's own helper and the `sub`
            # trait, mirroring _run_interface.
            self.inputs.out_file = self._gen_output(
                self.inputs.sub, self.inputs.ses, self.inputs.task,
                self.inputs.run, self.inputs.acq, self.inputs.rec)
        outputs["out_file"] = self.inputs.out_file
        return outputs
### Coregistration Metrics
class coreg_qc_metricsOutput(TraitedSpec):
    """Output spec for coreg_qc_metricsCommand."""
    # Path of the CSV holding the coregistration similarity metrics.
    out_file = traits.File(desc="Output file")
class coreg_qc_metricsInput(BaseInterfaceInputSpec):
    """Input spec for coreg_qc_metricsCommand."""
    pet = traits.File(exists=True, mandatory=True, desc="Input PET image")
    # NOTE(review): the three desc strings below all read "Input T1 MRI",
    # which looks copy-pasted for the two mask inputs — confirm.
    t1 = traits.File(exists=True, mandatory=True, desc="Input T1 MRI")
    brain_mask_space_mri = traits.File(exists=True, mandatory=True, desc="Input T1 MRI")
    pet_brain_mask = traits.File(exists=True, mandatory=True, desc="Input T1 MRI")
    sid = traits.Str(desc="Subject")
    ses = traits.Str(desc="Session")
    task = traits.Str(desc="Task")
    run = traits.Str(desc="Run")
    rec = traits.Str(desc="Reconstruction")
    acq = traits.Str(desc="Acquisition")
    study_prefix = traits.Str(desc="Study Prefix")
    out_file = traits.File(desc="Output file")
    clobber = traits.Bool(desc="Overwrite output file", default=False)
class coreg_qc_metricsCommand(BaseInterface):
    """Compute PET-to-MRI coregistration QC metrics with ANTs.

    Evaluates similarity metrics (currently Mattes mutual information)
    between the PET and T1 images within their brain masks and writes the
    values to a BIDS-style CSV.
    """
    input_spec = coreg_qc_metricsInput
    output_spec = coreg_qc_metricsOutput

    def _gen_output(self, sid, ses, task, run, rec, acq, fname="distance_metric.csv"):
        """Build the default output CSV path from the BIDS entities."""
        dname = os.getcwd()
        fn = dname + os.sep + 'sub-' + sid + '_ses-' + ses + '_task-' + task
        if isdefined(run):
            fn += '_run-' + str(run)
        fn += "_acq-" + str(acq) + "_rec-" + str(rec) + fname
        return fn

    def _run_interface(self, runtime):
        """Compute each coregistration similarity metric and save the CSV."""
        sub_df = pd.DataFrame(columns=metric_columns)
        pet = self.inputs.pet
        t1 = self.inputs.t1
        sid = self.inputs.sid
        ses = self.inputs.ses
        task = self.inputs.task
        run = self.inputs.run
        rec = self.inputs.rec
        acq = self.inputs.acq
        brain_mask_space_mri = self.inputs.brain_mask_space_mri
        pet_brain_mask = self.inputs.pet_brain_mask
        coreg_metrics = ['MattesMutualInformation']
        # (Removed unused locals parsed from the PET filename:
        #  path/base/param/param_type were never referenced.)

        def image_read(fn):
            # Wrap a (possibly 4D) NIfTI volume as an ANTs image, carrying
            # over the origin and per-axis spacing from the affine.
            img, vol = load_3d(fn)
            vol = vol.astype(float)
            aff = img.affine
            origin = [aff[0, 3], aff[1, 3], aff[2, 3]]
            spacing = [get_spacing(aff, 0), get_spacing(aff, 1),
                       get_spacing(aff, 2)]
            return ants.from_numpy(vol, origin=origin, spacing=spacing)

        for metric in coreg_metrics:
            print("t1 ", t1)
            fixed = image_read(t1)
            moving = image_read(pet)
            try:
                metric_val = ants.create_ants_metric(
                    fixed=fixed,
                    moving=moving,
                    fixed_mask=ants.image_read(brain_mask_space_mri),
                    moving_mask=ants.image_read(pet_brain_mask),
                    metric_type=metric).get_value()
            except RuntimeError:
                # ANTs can fail on degenerate inputs; record NaN rather
                # than aborting the whole pipeline.
                metric_val = np.NaN
            temp = pd.DataFrame([['coreg', sid, ses, task, run, acq, rec,
                                  '01', metric, metric_val]],
                                columns=metric_columns)
            sub_df = pd.concat([sub_df, temp])

        if not isdefined(self.inputs.out_file):
            self.inputs.out_file = self._gen_output(
                self.inputs.sid, self.inputs.ses, self.inputs.task,
                self.inputs.run, self.inputs.rec, self.inputs.acq)
        sub_df.to_csv(self.inputs.out_file, index=False)
        return runtime

    def _list_outputs(self):
        """Report the output CSV path, generating the default if unset."""
        outputs = self.output_spec().get()
        if not isdefined(self.inputs.out_file):
            self.inputs.out_file = self._gen_output(
                self.inputs.sid, self.inputs.ses, self.inputs.task,
                self.inputs.run, self.inputs.rec, self.inputs.acq)
        outputs["out_file"] = self.inputs.out_file
        return outputs
### Plot Metrics
# analysis sub ses task metric roi value
# 0 coreg 19 F 1 CC 1 0.717873
class plot_qcOutput(TraitedSpec):
    """Output spec for plot_qcCommand."""
    # Path of the rendered PNG plot.
    out_file = traits.File(desc="Output file")
class plot_qcInput(BaseInterfaceInputSpec):
    """Input spec for plot_qcCommand."""
    # CSV of QC metrics or outlier measures to plot.
    in_file = traits.File(desc="Input file")
    # Destination PNG path; defaults to <cwd>/metrics.png when unset.
    out_file = traits.File(desc="Output file")
class plot_qcCommand (BaseInterface):
    """Render swarm plots of QC metrics or outlier measures from a CSV."""
    input_spec = plot_qcInput
    output_spec = plot_qcOutput

    def _gen_output(self, basefile="metrics.png"):
        """Default output path: <cwd>/<basename of basefile>."""
        fname = ntpath.basename(basefile)
        dname = os.getcwd()
        return dname + os.sep + fname

    def _parse_inputs(self, skip=None):
        if skip is None:
            skip = []
        if not isdefined(self.inputs.out_file):
            # BUG FIX: previously called self._gen_output(self.inputs.in_file,
            # self._suffix) — `self._suffix` does not exist and _gen_output
            # takes a single base filename.  Use the default PNG name, as
            # _run_interface and _list_outputs do.
            self.inputs.out_file = self._gen_output()
        return super(plot_qcCommand, self)._parse_inputs(skip=skip)

    def _run_interface(self, runtime):
        """Build the swarm plots per ROI/metric and save a PNG."""
        df = pd.read_csv(self.inputs.in_file)
        # The input may hold raw metrics or derived outlier measures;
        # detect which from the columns present.
        if "measure" in df.columns:
            plot_type = "measure"
        elif "metric" in df.columns:
            plot_type = "metric"
        else:
            print("Unrecognized data frame")
            exit(1)
        # Collapse the identifying columns into a single hue label.
        df["sub"] = "sub: " + df["sub"].map(str) + " task: " + df["task"].map(str) + " ses: " + df["ses"].map(str)
        print(df)
        plt.clf()
        fig, ax = plt.subplots()
        plt.figure(1)
        nROI = len(np.unique(df.roi))
        if plot_type == "measure":
            unique_measure = np.unique(df.measure)
            nMeasure = np.unique(unique_measure)
        unique_metric = np.unique(df.metric)
        nMetric = len(unique_metric)
        for roi, i in zip(np.unique(df.roi), range(nROI)):
            df0 = df[(df.roi == roi)]
            for metric in unique_metric:
                # BUG FIX: removed a dead assignment that indexed
                # df0.value with a boolean mask built on the full df
                # (misaligned indices); its result was never used.
                if plot_type == "measure":
                    sns.factorplot(x="metric", col="measure", y="value", kind="swarm", data=df0, legend=False, hue="sub")
                else:
                    sns.factorplot(x="metric", y="value", data=df0, kind="swarm", hue="sub")
        plt.ylabel('')
        plt.xlabel('')
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        plt.ylim([-0.05, 1.05])
        plt.legend(bbox_to_anchor=(1.05, 1), loc="upper right", ncol=1, prop={'size': 6})
        if not isdefined(self.inputs.out_file):
            self.inputs.out_file = self._gen_output()
        print('Out file:', self.inputs.out_file)
        #plt.tight_layout()
        plt.savefig(self.inputs.out_file, bbox_inches="tight", dpi=300, width=2000)
        plt.clf()
        return runtime

    def _list_outputs(self):
        """Report the output PNG path, generating the default if unset."""
        outputs = self.output_spec().get()
        if not isdefined(self.inputs.out_file):
            self.inputs.out_file = self._gen_output()
        outputs["out_file"] = self.inputs.out_file
        return outputs
#########################
### Outlier measures ###
#########################
# Output trait spec for outlier_measuresCommand: CSV of per-scan outlier measures.
class outlier_measuresOutput(TraitedSpec):
out_file = traits.File(desc="Output file")
# Input trait spec for outlier_measuresCommand.
class outlier_measuresInput(BaseInterfaceInputSpec):
# Long-format CSV of QC metrics (sub/ses/task/roi/metric/value columns).
in_file = traits.File(desc="Input file")
# Destination CSV; generated in the working directory when undefined.
out_file = traits.File(desc="Output file")
# NOTE(review): this flag is not read anywhere in the command class below --
# confirm whether callers rely on it.
clobber = traits.Bool(desc="Overwrite output file", default=False)
class outlier_measuresCommand(BaseInterface):
    """Apply every configured outlier measure to each metric of the QC table.

    For each (ses, task, metric) group of the input CSV, the metric values
    are run through every function in the module-level ``outlier_measures``
    dict; results are appended as new "measure" rows of the output CSV.
    """
    input_spec = outlier_measuresInput
    output_spec = outlier_measuresOutput

    def _gen_output(self, fname="measures.csv"):
        """Return *fname* inside the current working directory."""
        dname = os.getcwd() + os.sep + fname
        return dname

    def _run_interface(self, runtime):
        df = pd.read_csv(self.inputs.in_file)
        out_columns = ['sub', 'ses', 'task', 'roi', 'metric', 'measure', 'value']
        df_out = pd.DataFrame(columns=out_columns)
        for ses, ses_df in df.groupby(['ses']):
            for task, task_df in ses_df.groupby(['task']):
                # Same pairing as zip(values(), keys()) but clearer.
                for measure_name, measure in outlier_measures.items():
                    for metric_name, metric_df in task_df.groupby(['metric']):
                        metricValues = metric_df.value.values
                        if len(metricValues.shape) == 1:
                            metricValues = metricValues.reshape(-1, 1)
                        # FIX: getargspec() was removed in Python 3.11;
                        # getfullargspec exposes the same .args attribute.
                        if 'cdf' in inspect.getfullargspec(measure).args:
                            # FIX: `'coreg' or 'pvc' in ...` was always truthy
                            # (non-empty string literal).  Intended meaning:
                            # any row from the coreg or pvc analysis -> cdf=True.
                            cdf = bool(metric_df.analysis.isin(['coreg', 'pvc']).any())
                            m = np.array(measure(metricValues, cdf=cdf))
                        else:
                            m = np.array(measure(metricValues))
                        if len(m.shape) > 1:
                            m = m.flatten()
                        r = pd.Series(m)
                        # Reindex df from 0 so the measure series aligns by position.
                        # NOTE(review): `r` has one entry per row of this metric
                        # group while `df` holds all rows; the pre-existing
                        # alignment (NaN padding) is kept -- confirm upstream intent.
                        df.index = range(df.shape[0])
                        df['value'] = r
                        df['measure'] = [measure_name] * df.shape[0]
                        df_out = pd.concat([df_out, df], axis=0)
        if not isdefined(self.inputs.out_file):
            self.inputs.out_file = self._gen_output()
        df_out.to_csv(self.inputs.out_file, index=False)
        return runtime

    def _list_outputs(self):
        """Report ``out_file``, generating the default path if still unset."""
        outputs = self.output_spec().get()
        if not isdefined(self.inputs.out_file):
            self.inputs.out_file = self._gen_output()
        outputs["out_file"] = self.inputs.out_file
        return outputs
#############
# Visual QC #
#############
def groupLevel_visual_qc(opts, args):
    """Collect every scan's visual-QC summary JSON and build the HTML dashboard.

    Creates ``<targetDir>/html`` (and its ``data`` subdirectory), chdirs into
    it, then hands the summary files to :class:`QCHTML`.

    :param opts: options object; must provide ``targetDir`` and ``preproc_dir``
    :param args: unused; kept so the stage-function signature stays uniform
    """
    # FIX: dropped the unused file_dir/fn/html_fn locals the old code computed.
    if not os.path.exists(opts.targetDir + '/html'):
        os.makedirs(opts.targetDir + '/html')
    os.chdir(opts.targetDir + '/html')
    print('Writing html dashboard', opts.targetDir + '/html')
    if not os.path.exists('data'):
        os.makedirs('data')
    fn_list = glob(opts.targetDir+os.sep+opts.preproc_dir+os.sep+'*/visual_qc/*_summary.json')
    # initialize and run class for building html dashboard
    QCHTML(opts.targetDir, fn_list).build()
# Output trait spec for visual_qcCommand: one gif per QC view plus a JSON
# summary consumed by the group-level html dashboard (QCHTML).
class visual_qcOutput(TraitedSpec):
pet_3d_gif = traits.File(desc="Output file")
pet_coreg_gif = traits.File(desc="Output file")
pet_coreg_edge_2_gif = traits.File(desc="Output file")
quant_labels_gif = traits.File( desc="Output File")
results_labels_gif = traits.File(exists=True, mandatory=False, desc="Output File")
pvc_labels_gif = traits.File( desc="Output File")
# List because 4D PVC volumes yield one gif per time frame.
pvc_gif = traits.List(desc="Output file")
quant_gif = traits.File(desc="Output file")
out_json = traits.File(desc="Output file")
template_alignment_gif = traits.File(desc="Output file")
# Input trait spec for visual_qcCommand.  The trailing gif/json fields mirror
# the output spec: _run_interface assigns them on self.inputs so that
# _list_outputs can report the generated paths.
class visual_qcInput(BaseInterfaceInputSpec):
targetDir = traits.File(mandatory=True, desc="Target directory")
sourceDir = traits.File(mandatory=True, desc="Source directory")
pvc_method = traits.Str(desc="PVC method")
quant_method = traits.Str(desc="TKA method")
analysis_space = traits.Str(desc="Analysis Space")
pet_3d = traits.File(exists=True, mandatory=True, desc="PET image")
pet = traits.File(exists=True, mandatory=True, desc="PET image")
pet_space_mri = traits.File(exists=True, mandatory=True, desc="Output PETMRI image")
pet_brain_mask = traits.File(exists=True, mandatory=True, desc="Output PET Brain Mask")
mri_space_nat = traits.File(exists=True, mandatory=True, desc="Output T1 native space image")
template_space_mri = traits.File(exists=True, mandatory=True, desc="Output T1 native space image")
mri_brain_mask = traits.File(exists=True, mandatory=False, desc="MRI brain mask (t1 native space)")
results_labels = traits.File(exists=True, mandatory=False, desc="Label volume used for results stage")
quant_labels = traits.File( desc="Label volume used for quant stage")
pvc_labels = traits.File( desc="Label volume used for pvc stage")
t1_analysis_space = traits.File(exists=True, mandatory=True, desc="Output T1 in analysis space image")
quant_plot = traits.File(exists=True, mandatory=False, desc="Quantification Plot")
pvc = traits.File(exists=True, desc="Output PVC image")
quant = traits.File(exists=True, desc="Output TKA image")
# BIDS-like entities used to build output file names ('NA' means absent).
sub =traits.Str(default_value='NA', mandatory=True)
ses=traits.Str(default_value='NA',usedefault=True)
task=traits.Str(default_value='NA',usedefault=True)
run=traits.Str(default_value='NA',usedefault=True)
# Generated outputs (set by _run_interface, echoed by _list_outputs).
pet_3d_gif = traits.File(desc="Output file")
pet_coreg_edge_2_gif = traits.File(desc="Output file")
pet_coreg_gif = traits.File(desc="Output file")
pvc_gif = traits.List(desc="Output file")
quant_gif = traits.File(desc="Output file")
template_alignment_gif = traits.File(desc="Output file")
quant_labels_gif = traits.File(desc="Output File")
results_labels_gif = traits.File(desc="Output File")
pvc_labels_gif = traits.File(desc="Output File")
out_json = traits.File(desc="Output file")
class visual_qcCommand(BaseInterface):
    """Generate the per-scan QC gifs plus the JSON summary used by the dashboard.

    Each gif blends a base volume with an overlay (mask, MRI, labels, ...);
    which gifs are produced depends on whether the optional ``pvc`` and
    ``quant`` inputs are defined.
    """
    input_spec = visual_qcInput
    output_spec = visual_qcOutput

    def _gen_output(self, fname):
        """Return a cwd path assembled from the sub/ses/task/run entities plus *fname*."""
        out_str = 'sub-' + self.inputs.sub
        if self.inputs.ses != 'NA' and self.inputs.ses != '':
            out_str += '_' + 'ses-' + self.inputs.ses
        if self.inputs.task != 'NA' and self.inputs.task != '':
            out_str += '_' + 'task-' + self.inputs.task
        if self.inputs.run != 'NA' and self.inputs.run != '':
            out_str += '_' + 'run-' + self.inputs.run
        dname = os.getcwd() + os.sep + out_str + fname
        return dname

    def _run_interface(self, runtime):
        # Set the output paths up front; they double as the summary entries.
        self.inputs.pet_3d_gif = self._gen_output('_pet_3d.gif')
        self.inputs.pet_coreg_gif = self._gen_output('_coreg.gif')
        self.inputs.pet_coreg_edge_2_gif = self._gen_output('_coreg_edge_2.gif')
        self.inputs.results_labels_gif = self._gen_output('_results_labels.gif')
        self.inputs.template_alignment_gif = self._gen_output('_template_alignment.gif')
        self.inputs.out_json = self._gen_output('_summary.json')
        d = {'sub': self.inputs.sub, 'ses': self.inputs.ses,
             'task': self.inputs.task, 'run': self.inputs.run,
             'base': self._gen_output('')}
        d['pet_3d'] = self.inputs.pet_3d_gif
        d['results_labels_gif'] = self.inputs.results_labels_gif
        d['coreg'] = self.inputs.pet_coreg_gif
        d['coreg_edge_2'] = self.inputs.pet_coreg_edge_2_gif
        visual_qc_images = [
            ImageParam(self.inputs.pet_3d, self.inputs.pet_3d_gif, self.inputs.pet_brain_mask, cmap1=plt.cm.Greys, cmap2=plt.cm.Reds, alpha=[1.3], duration=300),
            ImageParam(self.inputs.pet_space_mri, self.inputs.pet_coreg_gif, self.inputs.mri_space_nat, alpha=[1.55, 1.70, 1.85], duration=400, nframes=15),
            ImageParam(self.inputs.pet_space_mri, self.inputs.pet_coreg_edge_2_gif, self.inputs.mri_space_nat, alpha=[1.4], duration=300, edge_2=1, cmap1=plt.cm.Greys, cmap2=plt.cm.Reds),
            # Results Labels
            ImageParam(self.inputs.t1_analysis_space, self.inputs.results_labels_gif, self.inputs.results_labels, alpha=[1.4], duration=300, cmap1=plt.cm.Greys, cmap2=plt.cm.nipy_spectral),
            ImageParam(self.inputs.mri_space_nat, self.inputs.template_alignment_gif, self.inputs.template_space_mri, alpha=[1.4], duration=300, cmap1=plt.cm.Greys, cmap2=plt.cm.Reds)
        ]
        if isdefined(self.inputs.pvc):
            dims = nib.load(self.inputs.pvc).shape
            time_frames = 1 if len(dims) == 3 else dims[3]
            # One gif per time frame for 4D PVC volumes.
            self.inputs.pvc_gif = [self._gen_output('_%d_pvc.gif' % f) for f in range(time_frames)]
            visual_qc_images.append(ImageParam(self.inputs.pvc, self.inputs.pvc_gif, self.inputs.t1_analysis_space, alpha=[1.25], duration=300, time_frames=time_frames, ndim=len(dims), nframes=15, colorbar=True))
            d['pvc'] = self.inputs.pvc_gif
            # PVC Labels
            self.inputs.pvc_labels_gif = self._gen_output('_pvc_labels.gif')
            visual_qc_images.append(ImageParam(self.inputs.t1_analysis_space, self.inputs.pvc_labels_gif, self.inputs.pvc_labels, alpha=[1.4], duration=300, cmap1=plt.cm.Greys, cmap2=plt.cm.nipy_spectral))
            d['pvc_labels_gif'] = self.inputs.pvc_labels_gif
        if isdefined(self.inputs.quant):
            self.inputs.quant_gif = self._gen_output('_quant.gif')
            visual_qc_images.append(ImageParam(self.inputs.quant, self.inputs.quant_gif, self.inputs.t1_analysis_space, alpha=[1.35], duration=300, colorbar=True))
            d['quant'] = self.inputs.quant_gif
            d['quant_plot'] = self.inputs.quant_plot
            # Quant (reference region) labels.
            # FIX: removed leftover debug prints ("QUANT LABELS", path existence).
            self.inputs.quant_labels_gif = self._gen_output('_quant_labels.gif')
            visual_qc_images.append(ImageParam(self.inputs.t1_analysis_space, self.inputs.quant_labels_gif, self.inputs.quant_labels, alpha=[1.4], duration=300, cmap1=plt.cm.Greys, cmap2=plt.cm.nipy_spectral))
            d['quant_labels_gif'] = self.inputs.quant_labels_gif
        for image in visual_qc_images:
            image.volume2gif()
        # FIX: close the summary file deterministically instead of leaking the
        # handle returned by open() inside the json.dump() call.
        with open(self.inputs.out_json, 'w+') as json_fh:
            json.dump(d, json_fh)
        return runtime

    def _list_outputs(self):
        """Expose the gif/json paths set in _run_interface (pvc/quant only when defined)."""
        outputs = self.output_spec().get()
        outputs["pet_3d_gif"] = self.inputs.pet_3d_gif
        outputs["pet_coreg_gif"] = self.inputs.pet_coreg_gif
        outputs["pet_coreg_edge_2_gif"] = self.inputs.pet_coreg_edge_2_gif
        outputs["template_alignment_gif"] = self.inputs.template_alignment_gif
        outputs["results_labels_gif"] = self.inputs.results_labels_gif
        outputs["pvc_labels_gif"] = self.inputs.pvc_labels_gif
        outputs["quant_labels_gif"] = self.inputs.quant_labels_gif
        outputs["out_json"] = self.inputs.out_json
        if isdefined(self.inputs.pvc):
            outputs["pvc_gif"] = self.inputs.pvc_gif
        if isdefined(self.inputs.quant):
            outputs["quant_gif"] = self.inputs.quant_gif
        return outputs
def get_slices(vol, dim, i):
    """Return the 2-D slice of 3-D array *vol* at index *i* along axis *dim*.

    dim 0 -> vol[i, :, :], dim 1 -> vol[:, i, :], any other value falls
    through to vol[:, :, i] (matching the original behaviour).
    """
    if dim == 0:
        return vol[i, :, :]
    if dim == 1:
        return vol[:, i, :]
    return vol[:, :, i]
class ImageParam():
    """Parameters and renderer for one QC gif.

    Sweeps through the slices of a (resampled, isotropic) base volume along
    each of the three axes, optionally blending an overlay volume on top,
    and writes the animation with matplotlib.
    """

    def __init__(self, in_fn, out_fn, overlay_fn=None, alpha=None, dpi=100, duration=100,
                 cmap1=plt.cm.nipy_spectral, cmap2=plt.cm.gray, colorbar=False,
                 edge_1=-1, edge_2=-1, nframes=15, time_frames=1, ndim=3):
        # FIX: `alpha` used a shared mutable default ([1.]); use None + fallback.
        if alpha is None:
            alpha = [1.]
        self.in_fn = in_fn
        self.out_fn = out_fn          # str, or list of str for 4D inputs (one gif per frame)
        self.alpha = alpha            # overlay opacities; one slice sweep per value
        self.dpi = dpi
        self.overlay_fn = overlay_fn
        self.duration = duration      # ms between animation frames
        self.cmap1 = cmap1            # base-volume colormap
        self.cmap2 = cmap2            # overlay colormap
        self.colorbar = colorbar
        self.edge_1 = edge_1          # smoothing sigma for edge-detecting the base (<0 = off)
        self.edge_2 = edge_2          # smoothing sigma for edge-detecting the overlay (<0 = off)
        self.nframes = nframes        # slices shown per alpha level
        self.ndim = ndim
        self.time_frames = time_frames

    def load_isotropic(self, in_fn, t=0):
        """Load frame *t* of *in_fn* resampled to isotropic voxels (linear interp)."""
        vol_img, vol = load_3d(in_fn, t)
        sep = [get_spacing(vol_img.affine, i) for i in range(3)]
        min_unit = np.min(np.abs(sep))
        vol_img = resample_to_output(vol_img, [min_unit] * 3, order=1)
        vol = vol_img.get_fdata()
        return vol_img, vol

    def volume2gif(self):
        """Render the gif(s) described by this instance."""
        in_fn = self.in_fn
        out_fn = self.out_fn
        overlay_fn = self.overlay_fn
        alpha = self.alpha
        duration = self.duration
        cmap1 = self.cmap1
        cmap2 = self.cmap2

        def apply_tfm(img, sigma):
            # Edge-detect: smooth, gradient magnitude, Otsu threshold.
            if sigma >= 0:
                img = gaussian_filter(img, sigma)
                img = np.sqrt(np.sum(np.abs(np.gradient(img)), axis=0))
                img[img < threshold_otsu(img)] = 0
            return img

        img = nib.load(in_fn)
        ndim = len(img.shape)
        # FIX: get_data() is deprecated in nibabel; use get_fdata() for
        # consistency with load_isotropic above.
        full_vol = img.get_fdata()
        vmin, vmax = (np.min(full_vol) * .02, np.max(full_vol) * 0.98)
        tmax = 1
        if ndim == 4:
            tmax = nib.load(in_fn).shape[3]
        for t in range(tmax):
            vol_img, vol = self.load_isotropic(in_fn, t)
            vol = apply_tfm(vol, self.edge_1)
            if overlay_fn is not None:
                overlay_img, overlay_vol = self.load_isotropic(overlay_fn)
                overlay_vol = apply_tfm(overlay_vol, self.edge_2)
                omin, omax = (np.min(overlay_vol), np.max(overlay_vol))
            plt.clf()
            fig = plt.figure()
            axes = [fig.add_subplot(1, 3, ii) for ii in [1, 2, 3]]
            axes[0].axis("off")
            axes[1].axis("off")
            axes[2].axis("off")
            frame = [axes[ii].imshow(get_slices(vol, ii, 0), cmap=cmap1, animated=True, origin='lower', vmin=vmin, vmax=vmax, interpolation='gaussian') for ii in [0, 1, 2]]
            nframes_per_alpha = self.nframes
            total_frames = nframes_per_alpha * len(alpha)

            def animate(i):
                # Frame index i selects both the alpha level and slice position.
                alpha_level = int(i / nframes_per_alpha)
                ii = i % nframes_per_alpha
                for dim in [0, 1, 2]:
                    idx = np.round(vol.shape[dim] * ii / (self.nframes + 0.0)).astype(int)
                    r = get_slices(vol, dim, idx)
                    frame[dim] = axes[dim].imshow(r.T, cmap=cmap1, animated=True, origin='lower', vmin=vmin, vmax=vmax, interpolation='gaussian')
                    if overlay_fn is not None:
                        m = get_slices(overlay_vol, dim, idx)
                        frame[dim] = axes[dim].imshow(m.T, alpha=alpha[alpha_level], cmap=cmap2, vmin=omin, vmax=omax, interpolation='gaussian', origin='lower', animated=True)
                return frame

            if self.colorbar:
                fig.colorbar(frame[2], shrink=0.35)
            plt.tight_layout()
            ani = animation.FuncAnimation(fig, animate, frames=total_frames, interval=duration, blit=True, repeat_delay=1000)
            if ndim == 4:
                # For 4D inputs out_fn is a list with one gif per time frame.
                out_fn = self.out_fn[t]
            ani.save(out_fn, dpi=self.dpi)
            print('Writing', out_fn)
class QCHTML():
    '''
    This class serves to create an html file with visual qc. It requires extracting images and gifs from the output data produced by a run of APPIAN.
    Inputs:
    fn_list : list of json files that contains paths to images/gifs used for qc
    targetDir : output directory where html files will be saved
    '''

    def __init__(self, targetDir, fn_list):
        self.fn_list = fn_list
        self.targetDir = targetDir
        # dictionary that keeps track of the html file created for each scan
        self.d = {}
        for fn in fn_list:
            # populate dictionary with the scan's summary json
            # FIX: close the json file instead of leaking the handle
            with open(fn, 'r') as json_fh:
                self.d[fn] = json.load(json_fh)
            # set the html filename for this scan
            self.d[fn]['html_fn'] = targetDir + '/html/' + os.path.basename(self.d[fn]['base']) + '.html'

    def sidebar(self, vol_list):
        '''
        Create a sidebar with the names of the subjects. Allows user to switch between subjects.
        '''
        out_str = ''
        for i, fn in enumerate(self.fn_list):
            base = os.path.basename(self.d[fn]['base'])
            stage_string = self.get_stage_list(vol_list, base)
            subject_string = re.sub('_', ' ', re.sub('-', ': ', base))
            # FIX: the accordion button was emitted as "<buttonclass=..."
            # (missing space), producing an invalid HTML tag.
            out_str += '<div><button class="w3-button w3-block w3-left-align" onclick="myAccFunc(\'accordion%d\')">%s </button>\n' % (i, subject_string)
            out_str += '<div id=\'accordion%d\' class="w3-bar-block w3-hide w3-black w3-card-4">\n' % i
            out_str += '%s\n' % stage_string
            out_str += '\t\t\t</div>\n\t\t</div>\n'
        out_str += '\t</div>\n'
        return out_str

    def get_stage_list(self, vol_list, base):
        'read list of stages that were run based on vol_list'
        stage_list = ''
        for i, (ID, H1, H2) in enumerate(vol_list):
            valid_id = False
            for fn in self.fn_list:
                try:
                    self.d[fn][ID]  # check if valid entry in dictionary
                    valid_id = True
                    break
                except KeyError:
                    continue
            if valid_id and H1 is not None:
                var = './' + base + '.html#' + H1
                stage_list += '\t\t<a href="%s" class="w3-bar-item w3-button">%s</a>\n' % (var, H1)
        return stage_list

    def build(self):
        '''
        This method creates an html file for each scan in self.d. To do this it looks at which qc images/gifs
        are defined for this scan.
        '''
        # qc_stages: (ID, H1 header, H2 subheader) per qc stage; H1=None means
        # the stage is a subsection of the previous header.
        qc_stages = (
            ('pet_3d', 'Initialization', '3D Volume + Brain Mask'),
            ('template_alignment_gif', 'Template Alignment', 'MRI Vs aligned template'),
            ('coreg', 'Coregistration', 'PET + MRI Overlap'),
            ('coreg_edge_2', None, 'PET + MRI Edge'),
            ('results_labels_gif', 'Labels', 'Results'),
            ('pvc_labels_gif', None, 'PVC'),
            ('quant_labels_gif', None, 'Reference Region'),
            ('pvc', 'Partial-volume Correction', 'Volume'),
            ('quant', 'Quantification', 'Volume'),
            ('quant_plot', None, 'Time Activity Curves'))
        # copy some core css files to target directory so that we can use their formatting
        # NOTE(review): `_file_dir` is a module-level global defined elsewhere
        # in this file -- confirm it points at the css directory.
        shutil.copy(_file_dir + '/w3.css', self.targetDir + '/html/w3.css')
        shutil.copy(_file_dir + '/font-awesome.min.css', self.targetDir + '/html/font-awesome.min.css')
        # for each scan in the dictionary create the output html file
        for fn, scan_dict in self.d.items():
            with open(scan_dict['html_fn'], 'w') as html_file:
                # boilerplate common to all scans
                html_file.writelines(self.start())
                # the sidebar lists which scans and stages were run
                html_file.writelines(self.sidebar(qc_stages))
                html_file.writelines('<div style="margin-left:260px">\n')
                # one section per qc stage
                for ID, H1, H2 in qc_stages:
                    self.vol(ID, scan_dict, html_file, h1=H1, h2=H2)
                self.end(html_file)

    def start(self):
        """Return the common html header: doctype through the open sidebar div."""
        out_str = '''<!DOCTYPE html>
<html lang="en">
<head>
<title>APPIAN</title>
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="stylesheet" href="w3.css">
<meta charset="UTF-8">
<style>
body { font-family: Verdana, Helvetica, sans-serif; }
.w3-button{white-space:normal; padding:4px 8px }
.mySlides {display:none;}
</style>
</head>
<body>
<div class="w3-sidebar w3-bar-block w3-collapse w3-card w3-animate-left w3-black" style="width:200px;" id="mySidebar">
'''
        return out_str

    def vol(self, src, d, html_file, h1=None, h2=None):
        """Write the html section for qc entry *src* of scan dict *d*; skip silently when absent."""
        try:
            out_str = ''
            if h1 is not None:
                out_str += '<div id=%s> <h1>%s</h1>\n' % (h1, h1)
                out_str += '<hr class="dashed">\n'
            if h2 is not None:
                out_str += '<h2>' + h2 + '</h2>\n'
            # QC gifs can either be a filepath or a list of filepaths;
            # lists (multi-frame volumes) are rendered as a slideshow.
            if not type(d[src]) == list:
                out_str += '<img src="' + 'data/' + os.path.basename(d[src]) + '" style="width:50%">\n'
                shutil.copy(d[src], 'data/' + os.path.basename(d[src]))
            else:
                out_str += '<div class="w3-content w3-display-container">\n'
                for fn in d[src]:
                    shutil.copy(fn, 'data/' + os.path.basename(fn))
                    out_str += '<img class=mySlides src=\"data/%s\" style="width:100">\n' % os.path.basename(fn)
                out_str += '<button class="w3-button w3-black w3-display-left" onclick="plusDivs(-1)">❮</button>\n'
                out_str += '<button class=\"w3-button w3-black w3-display-right\" onclick=\"plusDivs(1)\">❯</button>\n'
                out_str += '</div>'
            html_file.writelines(out_str)
        except KeyError:
            pass

    def end(self, html_file):
        """Write the closing html: accordion/slideshow javascript plus body/html tags."""
        out_str = '''</div>
<script>
function myAccFunc(id) {
console.log(id)
var x = document.getElementById(id);
console.log(x)
console.log(x.className.indexOf("w3-show"))
if (x.className.indexOf("w3-show") == -1) {
x.className += " w3-show";
x.previousElementSibling.className =
x.previousElementSibling.className.replace("w3-black", "w3-red");
}
else {
x.previousElementSibling.className =
x.previousElementSibling.className.replace("w3-red", "w3-black");
}
}
var slideIndex = 1;
showDivs(slideIndex);
function plusDivs(n) {
showDivs(slideIndex += n);
}
function showDivs(n) {
var i;
var x = document.getElementsByClassName("mySlides");
if (n > x.length) {slideIndex = 1}
if (n < 1) {slideIndex = x.length} ;
for (i = 0; i < x.length; i++) {
x[i].style.display = "none";
}
x[slideIndex-1].style.display = "block";
}
</script>
</body>
</html>'''
        html_file.writelines(out_str)
| mit |
Vimos/scikit-learn | sklearn/_build_utils/__init__.py | 80 | 2644 | """
Utilities useful during the build.
"""
# author: Andy Mueller, Gael Varoquaux
# license: BSD
from __future__ import division, print_function, absolute_import
import os
from distutils.version import LooseVersion
from numpy.distutils.system_info import get_info
DEFAULT_ROOT = 'sklearn'
CYTHON_MIN_VERSION = '0.23'
def get_blas_info():
    """Return build info for linking against the available (C)BLAS.

    :return: ``(cblas_libs, blas_info)`` -- the list of libraries to link
        against and the remaining build info from numpy's
        ``get_info('blas_opt')``.  Falls back to the bundled 'cblas' when no
        usable (ATLAS) BLAS is detected.
    """
    def atlas_not_found(blas_info_):
        # FIX: the helper previously ignored its `blas_info_` parameter and
        # closed over the outer `blas_info` variable; it now inspects the
        # argument it is given.
        def_macros = blas_info_.get('define_macros', [])
        for x in def_macros:
            if x[0] == "NO_ATLAS_INFO":
                # if x[1] != 1 we should have lapack
                # how do we do that now?
                return True
            if x[0] == "ATLAS_INFO":
                if "None" in x[1]:
                    # this one turned up on FreeBSD
                    return True
        return False

    blas_info = get_info('blas_opt', 0)
    if (not blas_info) or atlas_not_found(blas_info):
        # No usable system BLAS: build the bundled cblas sources instead.
        cblas_libs = ['cblas']
        blas_info.pop('libraries', None)
    else:
        cblas_libs = blas_info.pop('libraries', [])
    return cblas_libs, blas_info
def build_from_c_and_cpp_files(extensions):
    """Rewrite extension sources to use the pre-generated .c/.cpp files.

    Useful for releases: cython is not required to run
    ``python setup.py install`` because every ``.pyx``/``.py`` source is
    swapped for its already-cythonized ``.c`` counterpart (``.cpp`` for
    c++ extensions).
    """
    for extension in extensions:
        rewritten = []
        for src in extension.sources:
            root, suffix = os.path.splitext(src)
            if suffix in ('.pyx', '.py'):
                src = root + ('.cpp' if extension.language == 'c++' else '.c')
            rewritten.append(src)
        extension.sources = rewritten
def maybe_cythonize_extensions(top_path, config):
    """Tweaks for building extensions between release and development mode.

    In a release tarball (detected via PKG-INFO) the pre-generated C/C++
    sources are used; in a development checkout the .pyx sources are
    cythonized, failing with a helpful message when Cython is missing or
    too old.
    """
    is_release = os.path.exists(os.path.join(top_path, 'PKG-INFO'))
    if is_release:
        build_from_c_and_cpp_files(config.ext_modules)
        return

    message = ('Please install cython with a version >= {0} in order '
               'to build a scikit-learn development version.').format(
                   CYTHON_MIN_VERSION)
    try:
        import Cython
        if LooseVersion(Cython.__version__) < CYTHON_MIN_VERSION:
            message += ' Your version of Cython was {0}.'.format(
                Cython.__version__)
            raise ValueError(message)
        from Cython.Build import cythonize
    except ImportError as exc:
        # Attach the install hint to the original ImportError.
        exc.args += (message,)
        raise
    config.ext_modules = cythonize(config.ext_modules)
| bsd-3-clause |
impactlab/jps-handoff | webapp/viewer/models/datapoints.py | 1 | 3939 | from django.db import models
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.contenttypes import generic
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.core.signals import request_finished
import string, os, fnmatch, csv, datetime, pytz, json, math
import pandas as pd
import numpy as np
# A discrete event reported by a meter at a point in time.
class EventDataPoint(models.Model):
# Owning meter; reverse accessor: meter.events
meter = models.ForeignKey('Meter', related_name='events')
# Timestamp of the event
ts = models.DateTimeField()
# Event description/code as reported by the meter
event = models.CharField(max_length=200)
# One interval (load-profile) reading for a meter.
class ProfileDataPoint(models.Model):
# Owning meter; reverse accessor: meter.profile_points
meter = models.ForeignKey('Meter', related_name='profile_points')
# Interval timestamp
ts = models.DateTimeField()
# Energy reading for the interval, in kWh
kwh = models.FloatField()
# Unconverted reading as reported -- units unverified (NOTE(review): confirm)
raw = models.FloatField()
# A full diagnostic snapshot from one meter interrogation: per-phase
# electrical readings, status/error flags, and diagnostic counters.
# Nullable fields are values not reported by every meter model.
class MeasurementDataPoint(models.Model):
# Owning meter; reverse accessor: meter.measurement_points
meter = models.ForeignKey('Meter', related_name='measurement_points')
# Timestamp of the snapshot
ts = models.DateTimeField()
time_of_last_interrogation = models.DateTimeField(null=True)
time_of_last_outage = models.DateTimeField(null=True)
# --- Per-phase electrical readings (phase A carries no voltage-angle field) ---
phase_a_voltage = models.FloatField()
phase_a_current = models.FloatField()
phase_a_current_angle = models.FloatField()
phase_a_dc_detect = models.FloatField(null=True)
phase_b_voltage = models.FloatField()
phase_b_voltage_angle = models.FloatField()
phase_b_current = models.FloatField()
phase_b_current_angle = models.FloatField()
phase_b_dc_detect = models.FloatField(null=True)
phase_c_voltage = models.FloatField()
phase_c_voltage_angle = models.FloatField()
phase_c_current = models.FloatField()
phase_c_current_angle = models.FloatField()
phase_c_dc_detect = models.FloatField(null=True)
abc_phase_rotation = models.IntegerField(null=True)
# --- Status and error flags (NullBooleanField: True / False / unknown) ---
daylight_savings_time_configured = models.NullBooleanField()
low_battery_error = models.NullBooleanField()
metrology_communications_fatal_error = models.NullBooleanField()
inactive_phase = models.NullBooleanField()
file_system_fatal_error = models.NullBooleanField()
voltage_deviation = models.NullBooleanField()
phase_angle_displacement = models.NullBooleanField()
slc_error = models.NullBooleanField()
tou_schedule_error = models.NullBooleanField()
reverse_power_flow_error = models.NullBooleanField()
register_full_scale_exceeded_error = models.NullBooleanField()
epf_data_fatal_error = models.NullBooleanField()
demand_threshold_exceeded_error = models.NullBooleanField()
metrology_communications_error = models.NullBooleanField()
ram_fatal_error = models.NullBooleanField()
phase_loss_error = models.NullBooleanField()
mass_memory_error = models.NullBooleanField()
cross_phase_flow = models.NullBooleanField()
# NOTE(review): "distorsion" is a typo kept for schema/migration compatibility.
current_waveform_distorsion = models.NullBooleanField()
mcu_flash_fatal_error = models.NullBooleanField()
data_flash_fatal_error = models.NullBooleanField()
clock_sync_error = models.NullBooleanField()
site_scan_error = models.NullBooleanField()
# --- Diagnostic counters and meter state ---
diag_count_2 = models.IntegerField()
diag_count_3 = models.IntegerField()
diag_count_4 = models.IntegerField()
diag_5_phase_b_count = models.IntegerField()
diag_5_phase_a_count = models.IntegerField()
diag_5_phase_c_count = models.IntegerField()
times_programmed_count = models.IntegerField(null=True)
early_power_fail_count = models.IntegerField(null=True)
power_outage_count = models.IntegerField()
good_battery_reading = models.IntegerField(null=True)
demand_reset_count = models.IntegerField()
demand_interval_length = models.IntegerField(null=True)
current_battery_reading = models.IntegerField(null=True)
current_season = models.IntegerField(null=True)
days_since_demand_reset = models.IntegerField(null=True)
days_since_last_test = models.IntegerField(null=True)
days_on_battery = models.IntegerField(null=True)
service_type_detected = models.IntegerField(null=True)
diag_count_1 = models.IntegerField()
diag_count_5 = models.IntegerField()
diag_count_6 = models.IntegerField(null=True)
| mit |
JaviMerino/trappy | trappy/stats/grammar.py | 1 | 17033 | # Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Grammar module allows the user to easily define relations
between data events and perform basic logical and arithmetic
operations on the data. The parser also handles super-indexing
and variable forwarding.
"""
from pyparsing import Literal, delimitedList, Optional, oneOf, nums,\
alphas, alphanums, Forward, Word, opAssoc, operatorPrecedence, Combine, Group
import importlib
import pandas as pd
import types
import numpy as np
from trappy.stats.Topology import Topology
from trappy.stats import StatConf
from trappy.utils import handle_duplicate_index
def parse_num(tokens):
    """pyparsing parse action: convert the matched numeric text to ``float``.

    :param tokens: the grammar tokens; ``tokens[0]`` holds the matched text
    :type tokens: list
    :return: the numeric value of ``tokens[0]`` as a float
    """
    text = tokens[0]
    return float(text)
# Suppressed Literals (matched but dropped from the parse results)
LPAREN = Literal("(").suppress()
RPAREN = Literal(")").suppress()
COLON = Literal(":").suppress()
EXP_START = Literal("[").suppress()
EXP_END = Literal("]").suppress()
# Grammar Tokens
# DataFrame Accessor
# Signed integer literal, converted to float by parse_num
INTEGER = Combine(Optional(oneOf("+ -")) + Word(nums))\
.setParseAction(parse_num)
# Signed decimal with optional fractional part and exponent
REAL = Combine(Optional(oneOf("+ -")) + Word(nums) + "." +
Optional(Word(nums)) +
Optional(oneOf("e E") + Optional(oneOf("+ -")) + Word(nums)))\
.setParseAction(parse_num)
# Generic Identifier
IDENTIFIER = Word(alphas + '_', alphanums + '_')
# Python Like Function Name (dotted identifier, e.g. "pkg.mod.func")
FUNC_NAME = delimitedList(IDENTIFIER, delim=".", combine=True)
# Exponentiation operators
EXPONENTIATION_OPS = "**"
# Unary Operators
UNARY_OPS = oneOf("+ -")
# Multiplication/Division Operators
MULT_OPS = oneOf("* / // %")
# Addition/Subtraction Operators
SUM_OPS = oneOf("+ -")
# Relational Operators
REL_OPS = oneOf("> < >= <= == !=")
# Logical Operators
LOGICAL_OPS = oneOf("&& || & |")
# Operator to function mapping.
# NOTE: "&&"/"||" use Python's `and`/`or` (truthiness of the whole operand),
# while "&"/"|" dispatch to the operands' bitwise/elementwise operators.
OPERATOR_MAP = {
"+": lambda a, b: a + b,
"-": lambda a, b: a - b,
"*": lambda a, b: a * b,
"/": lambda a, b: a / b,
"//": lambda a, b: a // b,
"%": lambda a, b: a % b,
"**": lambda a, b: a ** b,
">": lambda a, b: a > b,
"<": lambda a, b: a < b,
">=": lambda a, b: a >= b,
"<=": lambda a, b: a <= b,
"||": lambda a, b: a or b,
"&&": lambda a, b: a and b,
"|": lambda a, b: a | b,
"==": lambda a, b: a == b,
"!=": lambda a, b: a != b,
"&": lambda a, b: a & b
}
def eval_unary_op(tokens):
    """Evaluate a parsed unary +/- expression.

    ``tokens[0]`` is ``[op, operand]``; a leading "-" negates the operand,
    any other operator (i.e. "+") returns it unchanged.

    :param tokens: The grammar tokens
    :type tokens: list
    """
    op_group = tokens[0]
    sign, operand = op_group[0], op_group[1]
    if sign == "-":
        return -1 * operand
    return operand
def iterate_binary_ops(tokens):
    """Yield ``(operator, operand)`` pairs from a flat token list.

    A trailing operator without an operand is silently dropped.

    :param tokens: alternating operator/operand tokens
    :type tokens: list
    """
    itr = iter(tokens)
    while True:
        try:
            # FIX: `itr.next()` is Python-2-only; the next() builtin works on
            # both Python 2.6+ and Python 3.
            yield (next(itr), next(itr))
        except StopIteration:
            break
def eval_binary_op(tokens):
    """Left-fold a parsed binary expression.

    ``tokens[0]`` is ``[operand, op, operand, op, operand, ...]``; each
    operator is applied left-to-right via OPERATOR_MAP.

    :param tokens: The grammar tokens
    :type tokens: list
    """
    expr = tokens[0]
    accumulated = expr[0]
    for op_sym, operand in iterate_binary_ops(expr[1:]):
        accumulated = OPERATOR_MAP[op_sym](accumulated, operand)
    return accumulated
def str_to_attr(cls_str):
    """Bring the attr specified into current scope
    and return a handler.

    A dotted name ("pkg.mod.attr") is resolved by importing the module part
    and reading the attribute; an undotted name is looked up in this
    module's globals().

    :param cls_str: A string representing the class
    :type cls_str: str
    :return: A class object
    """
    parts = cls_str.rsplit(".", 1)
    if len(parts) == 1:
        return globals()[parts[0]]
    module_name, attr_name = parts
    module = importlib.import_module(module_name)
    return getattr(module, attr_name)
def get_parse_expression(parse_func, parse_var_id):
"""Build the pyparsing arithmetic-expression grammar.

:param parse_func: parse action invoked for function-call tokens
:param parse_var_id: parse action invoked for variable/accessor tokens
:return: the top-level pyparsing expression with operator precedence applied
"""
# An operand is a "FUNC_NAME:column" accessor, a number, or an identifier.
var_id = Group(
FUNC_NAME + COLON + IDENTIFIER) | REAL | INTEGER | IDENTIFIER
var_id.setParseAction(parse_var_id)
# Forward declaration for an Arithmetic Expression
arith_expr = Forward()
# A function call: FUNC_NAME(arg, arg, ...) with optional argument list;
# each argument is itself a full arithmetic expression (recursive).
func_call = Group(
FUNC_NAME +
LPAREN +
Optional(
Group(
delimitedList(arith_expr))) +
RPAREN)
# An Arithmetic expression can have a var_id or
# a function call as an operand
# pylint: disable=expression-not-assigned
# Precedence levels are listed tightest-binding first (**, unary, */, +-,
# relational, logical), mirroring the operator table in the Parser docstring.
arith_expr << operatorPrecedence(func_call | var_id,
[
(EXPONENTIATION_OPS, 2, opAssoc.LEFT,
eval_binary_op),
(UNARY_OPS, 1,
opAssoc.RIGHT, eval_unary_op),
(MULT_OPS, 2, opAssoc.LEFT,
eval_binary_op),
(SUM_OPS, 2, opAssoc.LEFT,
eval_binary_op),
(REL_OPS, 2, opAssoc.LEFT,
eval_binary_op),
(LOGICAL_OPS, 2,
opAssoc.LEFT, eval_binary_op)
])
# pylint: enable=expression-not-assigned
# Argument expression for a function call
# An argument to a function can be an
# IDENTIFIER, Arithmetic expression, REAL number, INTEGER or a
# Function call itself
func_call.setParseAction(parse_func)
return arith_expr
class Parser(object):
    """A parser class for solving simple
    data accesses and super-indexing data

    :param data: Trace Object
    :type data: instance of :mod:`trappy.ftrace.BareTrace` or a child
        class (like :mod:`trappy.ftrace.FTrace`)

    :param pvars: A dictionary of variables that need to be
        accessed from within the grammar
    :type pvars: dict

    :param method: The method to be used for reindexing data
        This can be one of the standard :mod:`pandas.DataFrame`
        methods (eg. pad, bfill, nearest). The default is pad
        or use the last valid observation.
    :type method: str

    :param limit: The number of indices a value will be propagated
        when reindexing. The default is None
    :type limit: int

    :param fill: Whether to fill the NaNs in the data.
        The default value is True.
    :type fill: bool

    :param window: A window of time in which to apply the data
        accesses. By default the data accesses happen accross the
        whole trace. With the window parameter you can limit it to a
        window of time inside the trace. The first element of the
        tuple is the starting time and the second the ending time (set
        to None for end of trace).
    :type window: tuple

    - **Operators**

        +----------------+----------------------+---------------+
        | Operation      | operator             | Associativity |
        +================+======================+===============+
        | Exponentiation | \*\*                 | Left          |
        +----------------+----------------------+---------------+
        |Unary           | \-                   | Right         |
        +----------------+----------------------+---------------+
        | Multiply/Divide| \*, /, //, %         | Left          |
        +----------------+----------------------+---------------+
        | Add/Subtract   | +, \-,               | Left          |
        +----------------+----------------------+---------------+
        | Comparison     | >, <, >=, <=, ==, != | Left          |
        +----------------+----------------------+---------------+
        | Logical        | &&, ||, \|, &        | Left          |
        +----------------+----------------------+---------------+

    - **Data Accessors**

        Since the goal of the grammar is to provide an
        easy language to access and compare data
        from a :mod:`trappy.trace.FTrace` object. The parser provides
        a simple notation to access this data.

        *Statically Defined Events*
        ::

            import trappy
            from trappy.stats.grammar import Parser

            trace = trappy.FTrace("path/to/trace/file")
            parser = Parser(trace)
            parser.solve("trappy.thermal.Thermal:temp * 2")

        *Aliasing*
        ::

            import trappy
            from trappy.stats.grammar import Parser

            pvars = {}
            pvars["THERMAL"] = trappy.thermal.Thermal
            trace = trappy.FTrace("path/to/trace/file")
            parser = Parser(trace, pvars)
            parser.solve("THERMAL:temp * 2")

        *Using Event Name*
        ::

            import trappy
            from trappy.stats.grammar import Parser

            trace = trappy.FTrace("path/to/trace/file")
            parser = Parser(trace)
            parser.solve("thermal:temp * 2")

        The event :mod:`trappy.thermal.Thermal` is aliased
        as **THERMAL** in the grammar

        *Dynamic Events*
        ::

            import trappy
            from trappy.stats.grammar import Parser

            # Register Dynamic Event
            cls = trappy.register_dynamic_ftrace("my_unique_word", "event_name")

            pvars = {}
            pvars["CUSTOM"] = cls
            trace = trappy.FTrace("path/to/trace/file")
            parser = Parser(trace, pvars)
            parser.solve("CUSTOM:col * 2")

    .. seealso:: :mod:`trappy.dynamic.register_dynamic_ftrace`
    """

    def __init__(self, data, pvars=None, window=(0, None), **kwargs):
        if pvars is None:
            pvars = {}

        self.data = data
        self._pvars = pvars
        # Both grammars match "<event>:<column>"; _pre_process builds up the
        # aggregated frame while _parse_for_info only reports metadata.
        self._accessor = Group(
            FUNC_NAME + COLON + IDENTIFIER).setParseAction(self._pre_process)
        self._inspect = Group(
            FUNC_NAME + COLON + IDENTIFIER).setParseAction(self._parse_for_info)
        self._parse_expr = get_parse_expression(
            self._parse_func, self._parse_var_id)
        self._agg_df = pd.DataFrame()
        self._pivot_set = set()
        self._limit = kwargs.get("limit", StatConf.REINDEX_LIMIT_DEFAULT)
        self._method = kwargs.get("method", StatConf.REINDEX_METHOD_DEFAULT)
        self._fill = kwargs.get("fill", StatConf.NAN_FILL_DEFAULT)
        self._window = window

    def solve(self, expr):
        """Parses and solves the input expression

        :param expr: The input expression
        :type expr: str

        :return: The return type may vary depending on
            the expression. For example:

            **Vector**
            ::

                import trappy
                from trappy.stats.grammar import Parser

                trace = trappy.FTrace("path/to/trace/file")
                parser = Parser(trace)
                parser.solve("trappy.thermal.Thermal:temp * 2")

            **Scalar**
            ::

                import trappy
                from trappy.stats.grammar import Parser

                trace = trappy.FTrace("path/to/trace/file")
                parser = Parser(trace)
                parser.solve("numpy.mean(trappy.thermal.Thermal:temp)")

            **Vector Mask**
            ::

                import trappy
                from trappy.stats.grammar import Parser

                trace = trappy.FTrace("path/to/trace/file")
                parser = Parser(trace)
                parser.solve("trappy.thermal.Thermal:temp > 65000")
        """
        # Pre-process accessors for indexing (populates self._agg_df).
        # NOTE: the original contained two duplicated, unreachable copies of
        # these two lines after the return; they have been removed.
        self._accessor.searchString(expr)
        return self._parse_expr.parseString(expr)[0]

    def _pivot(self, cls, column):
        """Pivot Data for concatenation"""
        data_frame = self._get_data_frame(cls)
        data_frame = handle_duplicate_index(data_frame)
        new_index = self._agg_df.index.union(data_frame.index)

        if hasattr(cls, "pivot") and cls.pivot:
            pivot = cls.pivot
            pivot_vals = list(np.unique(data_frame[pivot].values))
            data = {}

            # One sub-frame per pivot value, reindexed onto the union index
            # so all series line up in the aggregated frame.
            for val in pivot_vals:
                data[val] = data_frame[data_frame[pivot] == val][[column]]
                if len(self._agg_df):
                    data[val] = data[val].reindex(
                        index=new_index,
                        method=self._method,
                        limit=self._limit)

            return pd.concat(data, axis=1).swaplevel(0, 1, axis=1)

        if len(self._agg_df):
            data_frame = data_frame.reindex(
                index=new_index,
                method=self._method,
                limit=self._limit)

        # No pivot column: store everything under a single default pivot key.
        return pd.concat({StatConf.GRAMMAR_DEFAULT_PIVOT: data_frame[
            [column]]}, axis=1).swaplevel(0, 1, axis=1)

    def _pre_process(self, tokens):
        """Pre-process accessors for super-indexing"""
        params = tokens[0]

        # Already aggregated: return the cached column directly.
        if params[1] in self._agg_df.columns:
            return self._agg_df[params[1]]

        cls = params[0]
        column = params[1]

        # Resolve the event: alias from pvars, trace-registered event name,
        # or a fully qualified "module.Class" path.
        if cls in self._pvars:
            cls = self._pvars[cls]
        elif cls in self.data.class_definitions:
            cls = self.data.class_definitions[cls]
        else:
            cls = str_to_attr(cls)

        data_frame = self._pivot(cls, column)
        self._agg_df = pd.concat(
            [self._agg_df, data_frame], axis=1)
        if self._fill:
            # Forward-fill the NaNs introduced by the union reindex.
            self._agg_df = self._agg_df.fillna(method="pad")

        return self._agg_df[params[1]]

    def _parse_for_info(self, tokens):
        """Parse Action for inspecting data accessors"""
        params = tokens[0]
        cls = params[0]
        column = params[1]
        info = {}
        info["pivot"] = None
        info["pivot_values"] = None

        if cls in self._pvars:
            cls = self._pvars[cls]
        elif cls in self.data.class_definitions:
            cls = self.data.class_definitions[cls]
        else:
            cls = str_to_attr(cls)

        data_frame = self._get_data_frame(cls)

        info["class"] = cls
        info["length"] = len(data_frame)
        if hasattr(cls, "pivot") and cls.pivot:
            info["pivot"] = cls.pivot
            info["pivot_values"] = list(np.unique(data_frame[cls.pivot]))
        info["column"] = column
        info["column_present"] = column in data_frame.columns
        return info

    def _parse_var_id(self, tokens):
        """A function to parse a variable identifier:
        a numeric literal, a pvars entry, or an aggregated column.
        """
        params = tokens[0]
        try:
            return float(params)
        except (ValueError, TypeError):
            try:
                return self._pvars[params]
            except KeyError:
                # NOTE(review): indexes with params[1], i.e. the second
                # element/character of the token -- confirm against the
                # grammar's token structure.
                return self._agg_df[params[1]]

    def _parse_func(self, tokens):
        """A function to parse a function string"""
        params = tokens[0]
        func_name = params[0]
        # A pvars entry that is a plain function shadows attribute lookup.
        if func_name in self._pvars and isinstance(
                self._pvars[func_name],
                types.FunctionType):
            func = self._pvars[func_name]
        else:
            func = str_to_attr(params[0])
        return func(*params[1])

    def _get_data_frame(self, cls):
        """Get the data frame from the BareTrace object, applying the window
        if set"""
        data_frame = getattr(self.data, cls.name).data_frame

        if self._window[1] is None:
            data_frame = data_frame.loc[self._window[0]:]
        else:
            data_frame = data_frame.loc[self._window[0]:self._window[1]]

        return data_frame

    def ref(self, mask):
        """Reference super indexed data with a boolean mask

        :param mask: A boolean :mod:`pandas.Series` that
            can be used to reference the aggregated data in
            the parser
        :type mask: :mod:`pandas.Series`

        :return: aggregated_data[mask]
        """
        return self._agg_df[mask]

    def inspect(self, accessor):
        """A function to inspect the accessor for information

        :param accessor: A data accessor of the format
            <event>:<column>
        :type accessor: str

        :return: A dictionary of information
        """
        return self._inspect.parseString(accessor)[0]
| apache-2.0 |
zooniverse/aggregation | experimental/serengeti/IAAI/weight.py | 2 | 1437 | #!/usr/bin/env python
import csv
#ASG000pt52,merxator wildebeest
photos = {}
beta = 1
def weight(TP, TN, FP, FN, beta=1):
    """Return the beta-weighted agreement score (TP + beta*TN) / total.

    :param TP: true positive count
    :param TN: true negative count
    :param FP: false positive count
    :param FN: false negative count
    :param beta: weight applied to true negatives (generalized from the
        module-level constant; default 1 preserves the old behavior)
    :return: score in [0, 1], or -1 when no observations have been seen
        (all four counts are zero), used as a sentinel by the caller
    """
    denominator = TP + beta * TN + FP + FN
    if denominator == 0:
        return -1
    # float() keeps true division under Python 2 as well.
    return (TP + beta * TN) / float(denominator)
# Build a gold-standard lookup for one user's classifications: photoID -> bool
# ("did user 'pjeversman' tag the searched-for species in this photo?").
searchFor = "zebra"
with open("/home/greg/Databases/goldMergedSerengeti.csv") as f:
    reader = csv.reader(f,delimiter="\t")
    for meta,speciesList in reader:
        photoID,userID = meta.split(",")
        # speciesList entries look like "<species>:<count>"; keep the species.
        animals = [s.split(":")[0] for s in speciesList.split(",")]
        if userID == "pjeversman":
            if searchFor in animals:
                photos[photoID] = True
            else:
                photos[photoID] = False
# Running confusion-matrix counts (floats so weight() divides cleanly).
TP = 0.
TN = 0.
FP = 0.
FN = 0.
weightValues = []
# Compare the user's answers against the expert classifications and record
# the running weight score after each expert-labelled photo.
with open("/home/greg/Downloads/Expert_Classifications_For_4149_S4_Captures.csv") as f:
    reader = csv.reader(f)
    next(reader, None)  # skip the header row
    for photoID,image,count,s1,s2,s3 in reader:
        if photoID in photos:
            if (searchFor in [s1,s2,s3]):
                # expert says present
                if photos[photoID]:
                    TP += 1
                else:
                    FN += 1
            else:
                # expert says absent
                if photos[photoID]:
                    FP += 1
                else:
                    TN += 1
            # assumes one score sample per expert-labelled photo -- the
            # original indentation was lost in extraction; TODO confirm.
            weightValues.append(weight(TP,TN,FP,FN))
# Python 2 print statements (file targets Python 2).
print TP,TN,FP,FN
print photos
import matplotlib.pyplot as plt
# Plot the convergence of the weight score over processed photos.
plt.plot(range(len(weightValues)),weightValues)
plt.ylim(0.5,1.1)
plt.xlabel(str(beta))
plt.show() | apache-2.0 |
simon-pepin/scikit-learn | sklearn/ensemble/gradient_boosting.py | 126 | 65552 | """Gradient Boosted Regression Trees
This module contains methods for fitting gradient boosted regression trees for
both classification and regression.
The module structure is the following:
- The ``BaseGradientBoosting`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ in the concrete ``LossFunction`` used.
- ``GradientBoostingClassifier`` implements gradient boosting for
classification problems.
- ``GradientBoostingRegressor`` implements gradient boosting for
regression problems.
"""
# Authors: Peter Prettenhofer, Scott White, Gilles Louppe, Emanuele Olivetti,
# Arnaud Joly
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
from abc import ABCMeta, abstractmethod
from time import time
import numbers
import numpy as np
from scipy import stats
from .base import BaseEnsemble
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..utils import check_random_state, check_array, check_X_y, column_or_1d
from ..utils import check_consistent_length, deprecated
from ..utils.extmath import logsumexp
from ..utils.fixes import expit, bincount
from ..utils.stats import _weighted_percentile
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..tree.tree import DecisionTreeRegressor
from ..tree._tree import DTYPE, TREE_LEAF
from ..tree._tree import PresortBestSplitter
from ..tree._tree import FriedmanMSE
from ._gradient_boosting import predict_stages
from ._gradient_boosting import predict_stage
from ._gradient_boosting import _random_sample_mask
class QuantileEstimator(BaseEstimator):
    """An estimator predicting the alpha-quantile of the training targets."""

    def __init__(self, alpha=0.9):
        # Validate the quantile level up front.
        if not 0 < alpha < 1.0:
            raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
        self.alpha = alpha

    def fit(self, X, y, sample_weight=None):
        """Store the (weighted) alpha-quantile of ``y``."""
        percentile = self.alpha * 100.0
        if sample_weight is not None:
            self.quantile = _weighted_percentile(y, sample_weight, percentile)
        else:
            self.quantile = stats.scoreatpercentile(y, percentile)

    def predict(self, X):
        """Return the stored quantile for every row of ``X``."""
        check_is_fitted(self, 'quantile')

        return np.repeat(self.quantile,
                         X.shape[0]).astype(np.float64).reshape(X.shape[0], 1)
class MeanEstimator(BaseEstimator):
    """An estimator predicting the mean of the training targets."""

    def fit(self, X, y, sample_weight=None):
        """Store the (weighted) mean of ``y``."""
        if sample_weight is not None:
            self.mean = np.average(y, weights=sample_weight)
        else:
            self.mean = np.mean(y)

    def predict(self, X):
        """Return the stored mean for every row of ``X``."""
        check_is_fitted(self, 'mean')

        return np.repeat(self.mean,
                         X.shape[0]).astype(np.float64).reshape(X.shape[0], 1)
class LogOddsEstimator(BaseEstimator):
    """An estimator predicting the log odds ratio."""
    scale = 1.0

    def fit(self, X, y, sample_weight=None):
        """Store ``scale * log(pos / neg)`` from the (weighted) class counts.

        Pre-condition: positives and negatives are encoded as 1 and 0.
        """
        if sample_weight is not None:
            n_pos = np.sum(sample_weight * y)
            n_neg = np.sum(sample_weight * (1 - y))
        else:
            n_pos = np.sum(y)
            n_neg = y.shape[0] - n_pos

        # Both classes must be present, otherwise the log odds diverge.
        if n_neg == 0 or n_pos == 0:
            raise ValueError('y contains non binary labels.')
        self.prior = self.scale * np.log(n_pos / n_neg)

    def predict(self, X):
        """Return the stored prior log odds for every row of ``X``."""
        check_is_fitted(self, 'prior')

        return np.repeat(self.prior,
                         X.shape[0]).astype(np.float64).reshape(X.shape[0], 1)
class ScaledLogOddsEstimator(LogOddsEstimator):
    """Log odds ratio scaled by 0.5 -- for exponential loss. """
    # Halving the prior log odds is the scaling the exponential
    # (AdaBoost-style) loss expects for its initial estimator.
    scale = 0.5
class PriorProbabilityEstimator(BaseEstimator):
    """An estimator predicting the probability of each
    class in the training data.
    """
    def fit(self, X, y, sample_weight=None):
        """Store the (weighted) empirical class frequencies of ``y``."""
        if sample_weight is None:
            # Unweighted fit == unit weights.
            sample_weight = np.ones_like(y, dtype=np.float64)
        counts = bincount(y, weights=sample_weight)
        self.priors = counts / counts.sum()

    def predict(self, X):
        """Return one row of class priors per row of ``X``."""
        check_is_fitted(self, 'priors')

        return np.tile(self.priors, (X.shape[0], 1))
class ZeroEstimator(BaseEstimator):
    """An estimator that simply predicts zero. """

    def fit(self, X, y, sample_weight=None):
        """Record how many output columns the zero prediction needs."""
        if np.issubdtype(y.dtype, int):
            # classification: one column per class, collapsed to a single
            # column in the binary case
            n_unique = np.unique(y).shape[0]
            self.n_classes = 1 if n_unique == 2 else n_unique
        else:
            # regression: always a single output column
            self.n_classes = 1

    def predict(self, X):
        """Return an all-zero array of shape (n_samples, n_classes)."""
        check_is_fitted(self, 'n_classes')

        return np.zeros((X.shape[0], self.n_classes), dtype=np.float64)
class LossFunction(six.with_metaclass(ABCMeta, object)):
    """Abstract base class for various loss functions.

    Attributes
    ----------
    K : int
        The number of regression trees to be induced;
        1 for regression and binary classification;
        ``n_classes`` for multi-class classification.
    """

    # Subclasses fitting one tree per class set this to True.
    is_multi_class = False

    def __init__(self, n_classes):
        self.K = n_classes

    def init_estimator(self):
        """Default ``init`` estimator for loss function. """
        raise NotImplementedError()

    @abstractmethod
    def __call__(self, y, pred, sample_weight=None):
        """Compute the loss of prediction ``pred`` and ``y``. """

    @abstractmethod
    def negative_gradient(self, y, y_pred, **kargs):
        """Compute the negative gradient.

        Parameters
        ---------
        y : np.ndarray, shape=(n,)
            The target labels.
        y_pred : np.ndarray, shape=(n,):
            The predictions.
        """

    def update_terminal_regions(self, tree, X, y, residual, y_pred,
                                sample_weight, sample_mask,
                                learning_rate=1.0, k=0):
        """Update the terminal regions (=leaves) of the given tree and
        updates the current predictions of the model. Traverses tree
        and invokes template method `_update_terminal_region`.

        Parameters
        ----------
        tree : tree.Tree
            The tree object.
        X : ndarray, shape=(n, m)
            The data array.
        y : ndarray, shape=(n,)
            The target labels.
        residual : ndarray, shape=(n,)
            The residuals (usually the negative gradient).
        y_pred : ndarray, shape=(n,)
            The predictions.
        sample_weight : ndarray, shape=(n,)
            The weight of each sample.
        sample_mask : ndarray, shape=(n,)
            The sample mask to be used.
        learning_rate : float, default=0.1
            learning rate shrinks the contribution of each tree by
             ``learning_rate``.
        k : int, default 0
            The index of the estimator being updated.
        """
        # compute leaf for each sample in ``X``.
        terminal_regions = tree.apply(X)

        # mask all which are not in sample mask: out-of-bag samples get
        # region -1 so the line search only uses in-bag samples.
        masked_terminal_regions = terminal_regions.copy()
        masked_terminal_regions[~sample_mask] = -1

        # update each leaf (= perform line search)
        for leaf in np.where(tree.children_left == TREE_LEAF)[0]:
            self._update_terminal_region(tree, masked_terminal_regions,
                                         leaf, X, y, residual,
                                         y_pred[:, k], sample_weight)

        # update predictions (both in-bag and out-of-bag): note the
        # unmasked terminal_regions are used here on purpose.
        y_pred[:, k] += (learning_rate
                         * tree.value[:, 0, 0].take(terminal_regions, axis=0))

    @abstractmethod
    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Template method for updating terminal regions (=leaves). """
class RegressionLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
    """Base class for regression loss functions. """

    def __init__(self, n_classes):
        # Regression always induces exactly one tree per boosting stage.
        if n_classes == 1:
            super(RegressionLossFunction, self).__init__(n_classes)
        else:
            raise ValueError("``n_classes`` must be 1 for regression but "
                             "was %r" % n_classes)
class LeastSquaresError(RegressionLossFunction):
    """Loss function for least squares (LS) estimation.
    Terminal regions need not to be updated for least squares. """

    def init_estimator(self):
        return MeanEstimator()

    def __call__(self, y, pred, sample_weight=None):
        """(Weighted) mean of the squared residuals."""
        squared_residual = (y - pred.ravel()) ** 2.0
        if sample_weight is None:
            return np.mean(squared_residual)
        else:
            return (1.0 / sample_weight.sum() *
                    np.sum(sample_weight * squared_residual))

    def negative_gradient(self, y, pred, **kargs):
        """For squared error the negative gradient is the plain residual."""
        return y - pred.ravel()

    def update_terminal_regions(self, tree, X, y, residual, y_pred,
                                sample_weight, sample_mask,
                                learning_rate=1.0, k=0):
        """Least squares does not need to update terminal regions.

        But it has to update the predictions.
        """
        # Add the shrunken tree predictions straight into column k.
        y_pred[:, k] += learning_rate * tree.predict(X).ravel()

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        # The tree's mean prediction is already optimal for squared error.
        pass
class LeastAbsoluteError(RegressionLossFunction):
    """Loss function for least absolute deviation (LAD) regression. """

    def init_estimator(self):
        return QuantileEstimator(alpha=0.5)

    def __call__(self, y, pred, sample_weight=None):
        """(Weighted) mean of the absolute residuals."""
        abs_residual = np.abs(y - pred.ravel())
        if sample_weight is None:
            return abs_residual.mean()
        else:
            return (1.0 / sample_weight.sum() *
                    np.sum(sample_weight * abs_residual))

    def negative_gradient(self, y, pred, **kargs):
        """1.0 if y - pred > 0.0 else -1.0"""
        pred = pred.ravel()
        return np.where(y - pred > 0.0, 1.0, -1.0)

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """LAD updates terminal regions to median estimates. """
        region = np.where(terminal_regions == leaf)[0]
        region_weight = sample_weight.take(region, axis=0)
        region_diff = y.take(region, axis=0) - pred.take(region, axis=0)
        tree.value[leaf, 0, 0] = _weighted_percentile(region_diff,
                                                      region_weight,
                                                      percentile=50)
class HuberLossFunction(RegressionLossFunction):
    """Huber loss function for robust regression.

    M-Regression proposed in Friedman 2001.

    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.
    """

    def __init__(self, n_classes, alpha=0.9):
        super(HuberLossFunction, self).__init__(n_classes)
        # alpha: quantile of |residual| at which the loss switches from
        # quadratic to linear; gamma caches that threshold across calls.
        self.alpha = alpha
        self.gamma = None

    def init_estimator(self):
        return QuantileEstimator(alpha=0.5)

    def __call__(self, y, pred, sample_weight=None):
        # Reuses the gamma computed by the latest negative_gradient() call
        # when available, otherwise derives it from the current residuals.
        pred = pred.ravel()
        diff = y - pred
        gamma = self.gamma
        if gamma is None:
            if sample_weight is None:
                gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
            else:
                gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)

        # Quadratic inside the +/- gamma band, linear outside it.
        gamma_mask = np.abs(diff) <= gamma
        if sample_weight is None:
            sq_loss = np.sum(0.5 * diff[gamma_mask] ** 2.0)
            lin_loss = np.sum(gamma * (np.abs(diff[~gamma_mask]) - gamma / 2.0))
            loss = (sq_loss + lin_loss) / y.shape[0]
        else:
            sq_loss = np.sum(0.5 * sample_weight[gamma_mask] * diff[gamma_mask] ** 2.0)
            lin_loss = np.sum(gamma * sample_weight[~gamma_mask] *
                              (np.abs(diff[~gamma_mask]) - gamma / 2.0))
            loss = (sq_loss + lin_loss) / sample_weight.sum()
        return loss

    def negative_gradient(self, y, pred, sample_weight=None, **kargs):
        # Residuals are kept as-is inside the band and clipped to
        # +/- gamma outside it.
        pred = pred.ravel()
        diff = y - pred
        if sample_weight is None:
            gamma = stats.scoreatpercentile(np.abs(diff), self.alpha * 100)
        else:
            gamma = _weighted_percentile(np.abs(diff), sample_weight, self.alpha * 100)

        gamma_mask = np.abs(diff) <= gamma
        residual = np.zeros((y.shape[0],), dtype=np.float64)
        residual[gamma_mask] = diff[gamma_mask]
        residual[~gamma_mask] = gamma * np.sign(diff[~gamma_mask])
        # Cache gamma for the subsequent loss evaluation and leaf updates.
        self.gamma = gamma
        return residual

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        # Leaf value: weighted median of the residuals plus the mean of the
        # gamma-clipped deviations from that median (Friedman 2001).
        terminal_region = np.where(terminal_regions == leaf)[0]
        sample_weight = sample_weight.take(terminal_region, axis=0)
        gamma = self.gamma
        diff = (y.take(terminal_region, axis=0)
                - pred.take(terminal_region, axis=0))
        median = _weighted_percentile(diff, sample_weight, percentile=50)
        diff_minus_median = diff - median
        tree.value[leaf, 0] = median + np.mean(
            np.sign(diff_minus_median) *
            np.minimum(np.abs(diff_minus_median), gamma))
class QuantileLossFunction(RegressionLossFunction):
    """Loss function for quantile regression.

    Quantile regression allows to estimate the percentiles
    of the conditional distribution of the target.
    """

    def __init__(self, n_classes, alpha=0.9):
        super(QuantileLossFunction, self).__init__(n_classes)
        # Validate with an explicit exception instead of ``assert``:
        # asserts are stripped under ``python -O`` and would silently
        # accept an invalid quantile level.
        if not 0 < alpha < 1.0:
            raise ValueError("`alpha` must be in (0, 1.0) but was %r" % alpha)
        self.alpha = alpha
        self.percentile = alpha * 100.0

    def init_estimator(self):
        return QuantileEstimator(self.alpha)

    def __call__(self, y, pred, sample_weight=None):
        """Compute the pinball (quantile) loss.

        For residual d = y - pred the per-sample loss is
        ``alpha * d`` if d > 0 else ``(1 - alpha) * (-d)``.
        """
        pred = pred.ravel()
        diff = y - pred
        alpha = self.alpha

        mask = y > pred
        # Bug fix: the under-prediction term must enter with a minus sign --
        # diff[~mask] is non-positive, so subtracting it adds its magnitude.
        # The previous "+" made the loss value wrong (gradient was correct).
        if sample_weight is None:
            loss = (alpha * diff[mask].sum() -
                    (1.0 - alpha) * diff[~mask].sum()) / y.shape[0]
        else:
            loss = ((alpha * np.sum(sample_weight[mask] * diff[mask]) -
                     (1.0 - alpha) * np.sum(sample_weight[~mask] * diff[~mask])) /
                    sample_weight.sum())
        return loss

    def negative_gradient(self, y, pred, **kargs):
        """Negative gradient: alpha above the prediction, alpha-1 below."""
        alpha = self.alpha
        pred = pred.ravel()
        mask = y > pred
        return (alpha * mask) - ((1.0 - alpha) * ~mask)

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        # Leaf value: weighted alpha-percentile of the residuals.
        terminal_region = np.where(terminal_regions == leaf)[0]
        diff = (y.take(terminal_region, axis=0)
                - pred.take(terminal_region, axis=0))
        sample_weight = sample_weight.take(terminal_region, axis=0)

        val = _weighted_percentile(diff, sample_weight, self.percentile)
        tree.value[leaf, 0] = val
class ClassificationLossFunction(six.with_metaclass(ABCMeta, LossFunction)):
    """Base class for classification loss functions. """

    def _score_to_proba(self, score):
        """Template method to convert scores to probabilities.

        Losses that do not support probabilities keep this default
        implementation, which raises TypeError.
        """
        raise TypeError('%s does not support predict_proba' % type(self).__name__)

    @abstractmethod
    def _score_to_decision(self, score):
        """Template method to convert scores to decisions.

        Returns int arrays.
        """
class BinomialDeviance(ClassificationLossFunction):
    """Binomial deviance loss function for binary classification.

    Binary classification is a special case; here, we only need to
    fit one tree instead of ``n_classes`` trees.
    """
    def __init__(self, n_classes):
        if n_classes != 2:
            raise ValueError("{0:s} requires 2 classes.".format(
                self.__class__.__name__))
        # we only need to fit one tree for binary clf.
        super(BinomialDeviance, self).__init__(1)

    def init_estimator(self):
        return LogOddsEstimator()

    def __call__(self, y, pred, sample_weight=None):
        """Compute the deviance (= 2 * negative log-likelihood). """
        # logaddexp(0, v) == log(1.0 + exp(v))
        pred = pred.ravel()
        if sample_weight is None:
            return -2.0 * np.mean((y * pred) - np.logaddexp(0.0, pred))
        else:
            return (-2.0 / sample_weight.sum() *
                    np.sum(sample_weight * ((y * pred) - np.logaddexp(0.0, pred))))

    def negative_gradient(self, y, pred, **kargs):
        """Compute the residual (= negative gradient). """
        # expit(pred) is the predicted probability of the positive class.
        return y - expit(pred.ravel())

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Make a single Newton-Raphson step.

        our node estimate is given by:

            sum(w * (y - prob)) / sum(w * prob * (1 - prob))

        we take advantage that: y - prob = residual
        """
        terminal_region = np.where(terminal_regions == leaf)[0]
        residual = residual.take(terminal_region, axis=0)
        y = y.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region, axis=0)

        numerator = np.sum(sample_weight * residual)
        # prob = y - residual, so prob * (1 - prob) expands as below.
        denominator = np.sum(sample_weight * (y - residual) * (1 - y + residual))

        # Guard against division by zero in pure leaves.
        if denominator == 0.0:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator

    def _score_to_proba(self, score):
        # Column 1: sigmoid of the score; column 0: its complement.
        proba = np.ones((score.shape[0], 2), dtype=np.float64)
        proba[:, 1] = expit(score.ravel())
        proba[:, 0] -= proba[:, 1]
        return proba

    def _score_to_decision(self, score):
        proba = self._score_to_proba(score)
        return np.argmax(proba, axis=1)
class MultinomialDeviance(ClassificationLossFunction):
    """Multinomial deviance loss function for multi-class classification.

    For multi-class classification we need to fit ``n_classes`` trees at
    each stage.
    """

    is_multi_class = True

    def __init__(self, n_classes):
        if n_classes < 3:
            raise ValueError("{0:s} requires more than 2 classes.".format(
                self.__class__.__name__))
        super(MultinomialDeviance, self).__init__(n_classes)

    def init_estimator(self):
        return PriorProbabilityEstimator()

    def __call__(self, y, pred, sample_weight=None):
        """Compute the multinomial deviance (negative log-likelihood).

        Parameters
        ----------
        y : ndarray, shape=(n,)
            Integer class labels in [0, K).
        pred : ndarray, shape=(n, K)
            Per-class scores.
        sample_weight : ndarray, shape=(n,), optional
            Per-sample weights.
        """
        # create one-hot label encoding
        Y = np.zeros((y.shape[0], self.K), dtype=np.float64)
        for k in range(self.K):
            Y[:, k] = y == k

        # Per-sample deviance: -score(true class) + log sum_k exp(score_k).
        sample_deviance = (-1 * (Y * pred).sum(axis=1) +
                           logsumexp(pred, axis=1))
        if sample_weight is None:
            return np.sum(sample_deviance)
        else:
            # Bug fix: the weight must scale the whole per-sample deviance.
            # Previously only the ``(Y * pred)`` term was weighted while
            # the ``logsumexp`` normalizer was left unweighted, giving a
            # wrong weighted loss (affects train_score_/verbose output).
            return np.sum(sample_weight * sample_deviance)

    def negative_gradient(self, y, pred, k=0, **kwargs):
        """Compute negative gradient for the ``k``-th class. """
        # ``y`` here is the 0/1 indicator for class k; the subtracted term
        # is the softmax probability, computed in a numerically stable way.
        return y - np.nan_to_num(np.exp(pred[:, k] -
                                        logsumexp(pred, axis=1)))

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        """Make a single Newton-Raphson step. """
        terminal_region = np.where(terminal_regions == leaf)[0]
        residual = residual.take(terminal_region, axis=0)
        y = y.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region, axis=0)

        numerator = np.sum(sample_weight * residual)
        # Friedman's multi-class correction factor (K - 1) / K.
        numerator *= (self.K - 1) / self.K

        denominator = np.sum(sample_weight * (y - residual) *
                             (1.0 - y + residual))

        # Guard against division by zero in pure leaves.
        if denominator == 0.0:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator

    def _score_to_proba(self, score):
        # Numerically stable softmax over the class axis.
        return np.nan_to_num(
            np.exp(score - (logsumexp(score, axis=1)[:, np.newaxis])))

    def _score_to_decision(self, score):
        proba = self._score_to_proba(score)
        return np.argmax(proba, axis=1)
class ExponentialLoss(ClassificationLossFunction):
    """Exponential loss function for binary classification.

    Same loss as AdaBoost.

    References
    ----------
    Greg Ridgeway, Generalized Boosted Models: A guide to the gbm package, 2007
    """
    def __init__(self, n_classes):
        if n_classes != 2:
            raise ValueError("{0:s} requires 2 classes.".format(
                self.__class__.__name__))
        # we only need to fit one tree for binary clf.
        super(ExponentialLoss, self).__init__(1)

    def init_estimator(self):
        return ScaledLogOddsEstimator()

    def __call__(self, y, pred, sample_weight=None):
        # Labels y in {0, 1} are mapped to {-1, 1} via (2y - 1).
        pred = pred.ravel()
        if sample_weight is None:
            return np.mean(np.exp(-(2. * y - 1.) * pred))
        else:
            return (1.0 / sample_weight.sum() *
                    np.sum(sample_weight * np.exp(-(2 * y - 1) * pred)))

    def negative_gradient(self, y, pred, **kargs):
        # y_ is the negated {-1, 1} label.
        y_ = -(2. * y - 1.)
        return y_ * np.exp(y_ * pred.ravel())

    def _update_terminal_region(self, tree, terminal_regions, leaf, X, y,
                                residual, pred, sample_weight):
        # Leaf value: weighted exponential-loss line search on this region.
        terminal_region = np.where(terminal_regions == leaf)[0]
        pred = pred.take(terminal_region, axis=0)
        y = y.take(terminal_region, axis=0)
        sample_weight = sample_weight.take(terminal_region, axis=0)

        y_ = 2. * y - 1.

        numerator = np.sum(y_ * sample_weight * np.exp(-y_ * pred))
        denominator = np.sum(sample_weight * np.exp(-y_ * pred))

        # Guard against division by zero in pure leaves.
        if denominator == 0.0:
            tree.value[leaf, 0, 0] = 0.0
        else:
            tree.value[leaf, 0, 0] = numerator / denominator

    def _score_to_proba(self, score):
        # The factor 2 converts the exponential-loss score to log-odds scale.
        proba = np.ones((score.shape[0], 2), dtype=np.float64)
        proba[:, 1] = expit(2.0 * score.ravel())
        proba[:, 0] -= proba[:, 1]
        return proba

    def _score_to_decision(self, score):
        return (score.ravel() >= 0.0).astype(np.int)
# Map of the public ``loss`` parameter values to their implementing classes.
# 'deviance' maps to None because the concrete class (binomial vs.
# multinomial) is only chosen at fit time, based on the number of classes.
LOSS_FUNCTIONS = {'ls': LeastSquaresError,
                  'lad': LeastAbsoluteError,
                  'huber': HuberLossFunction,
                  'quantile': QuantileLossFunction,
                  'deviance': None,    # for both, multinomial and binomial
                  'exponential': ExponentialLoss,
                  }

# ``init='zero'`` resolves to an estimator that always predicts zero.
INIT_ESTIMATORS = {'zero': ZeroEstimator}
class VerboseReporter(object):
    """Reports verbose output to stdout.

    If ``verbose==1`` output is printed once in a while (when iteration mod
    verbose_mod is zero).; if larger than 1 then output is printed for
    each update.
    """

    def __init__(self, verbose):
        # verbose: integer verbosity level taken from the estimator.
        self.verbose = verbose

    def init(self, est, begin_at_stage=0):
        """Print the header row and initialise timing/layout state.

        ``est`` is the gradient boosting estimator being fitted;
        ``begin_at_stage`` is the first stage index (non-zero when
        warm-starting).
        """
        # header fields and line format str
        header_fields = ['Iter', 'Train Loss']
        verbose_fmt = ['{iter:>10d}', '{train_score:>16.4f}']
        # do oob? (out-of-bag estimates only exist when subsampling)
        if est.subsample < 1:
            header_fields.append('OOB Improve')
            verbose_fmt.append('{oob_impr:>16.4f}')
        header_fields.append('Remaining Time')
        verbose_fmt.append('{remaining_time:>16s}')

        # print the header line
        print(('%10s ' + '%16s ' *
               (len(header_fields) - 1)) % tuple(header_fields))

        self.verbose_fmt = ' '.join(verbose_fmt)
        # plot verbose info each time i % verbose_mod == 0
        self.verbose_mod = 1
        self.start_time = time()
        self.begin_at_stage = begin_at_stage

    def update(self, j, est):
        """Update reporter with new iteration. """
        do_oob = est.subsample < 1
        # we need to take into account if we fit additional estimators.
        i = j - self.begin_at_stage  # iteration relative to the start iter
        if (i + 1) % self.verbose_mod == 0:
            oob_impr = est.oob_improvement_[j] if do_oob else 0
            # Estimate time left by extrapolating the mean per-stage time.
            remaining_time = ((est.n_estimators - (j + 1)) *
                              (time() - self.start_time) / float(i + 1))
            if remaining_time > 60:
                remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
            else:
                remaining_time = '{0:.2f}s'.format(remaining_time)
            print(self.verbose_fmt.format(iter=j + 1,
                                          train_score=est.train_score_[j],
                                          oob_impr=oob_impr,
                                          remaining_time=remaining_time))
            if self.verbose == 1 and ((i + 1) // (self.verbose_mod * 10) > 0):
                # adjust verbose frequency (powers of 10)
                self.verbose_mod *= 10
class BaseGradientBoosting(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Abstract base class for Gradient Boosting. """
@abstractmethod
def __init__(self, loss, learning_rate, n_estimators, min_samples_split,
min_samples_leaf, min_weight_fraction_leaf,
max_depth, init, subsample, max_features,
random_state, alpha=0.9, verbose=0, max_leaf_nodes=None,
warm_start=False):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.subsample = subsample
self.max_features = max_features
self.max_depth = max_depth
self.init = init
self.random_state = random_state
self.alpha = alpha
self.verbose = verbose
self.max_leaf_nodes = max_leaf_nodes
self.warm_start = warm_start
self.estimators_ = np.empty((0, 0), dtype=np.object)
def _fit_stage(self, i, X, y, y_pred, sample_weight, sample_mask,
criterion, splitter, random_state):
"""Fit another stage of ``n_classes_`` trees to the boosting model. """
assert sample_mask.dtype == np.bool
loss = self.loss_
original_y = y
for k in range(loss.K):
if loss.is_multi_class:
y = np.array(original_y == k, dtype=np.float64)
residual = loss.negative_gradient(y, y_pred, k=k,
sample_weight=sample_weight)
# induce regression tree on residuals
tree = DecisionTreeRegressor(
criterion=criterion,
splitter=splitter,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0:
# no inplace multiplication!
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, residual, sample_weight=sample_weight,
check_input=False)
# update tree leaves
loss.update_terminal_regions(tree.tree_, X, y, residual, y_pred,
sample_weight, sample_mask,
self.learning_rate, k=k)
# add tree to ensemble
self.estimators_[i, k] = tree
return y_pred
def _check_params(self):
"""Check validity of parameters and raise ValueError if not valid. """
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if (self.loss not in self._SUPPORTED_LOSS
or self.loss not in LOSS_FUNCTIONS):
raise ValueError("Loss '{0:s}' not supported. ".format(self.loss))
if self.loss == 'deviance':
loss_class = (MultinomialDeviance
if len(self.classes_) > 2
else BinomialDeviance)
else:
loss_class = LOSS_FUNCTIONS[self.loss]
if self.loss in ('huber', 'quantile'):
self.loss_ = loss_class(self.n_classes_, self.alpha)
else:
self.loss_ = loss_class(self.n_classes_)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if self.init is not None:
if isinstance(self.init, six.string_types):
if self.init not in INIT_ESTIMATORS:
raise ValueError('init="%s" is not supported' % self.init)
else:
if (not hasattr(self.init, 'fit')
or not hasattr(self.init, 'predict')):
raise ValueError("init=%r must be valid BaseEstimator "
"and support both fit and "
"predict" % self.init)
if not (0.0 < self.alpha < 1.0):
raise ValueError("alpha must be in (0.0, 1.0) but "
"was %r" % self.alpha)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
# if is_classification
if self.n_classes_ > 1:
max_features = max(1, int(np.sqrt(self.n_features)))
else:
# is regression
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0, n_features]")
self.max_features_ = max_features
def _init_state(self):
"""Initialize model state and allocate model state data structures. """
if self.init is None:
self.init_ = self.loss_.init_estimator()
elif isinstance(self.init, six.string_types):
self.init_ = INIT_ESTIMATORS[self.init]()
else:
self.init_ = self.init
self.estimators_ = np.empty((self.n_estimators, self.loss_.K),
dtype=np.object)
self.train_score_ = np.zeros((self.n_estimators,), dtype=np.float64)
# do oob?
if self.subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators),
dtype=np.float64)
def _clear_state(self):
"""Clear the state of the gradient boosting model. """
if hasattr(self, 'estimators_'):
self.estimators_ = np.empty((0, 0), dtype=np.object)
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
if hasattr(self, 'init_'):
del self.init_
def _resize_state(self):
"""Add additional ``n_estimators`` entries to all attributes. """
# self.n_estimators is the number of additional est to fit
total_n_estimators = self.n_estimators
if total_n_estimators < self.estimators_.shape[0]:
raise ValueError('resize with smaller n_estimators %d < %d' %
(total_n_estimators, self.estimators_[0]))
self.estimators_.resize((total_n_estimators, self.loss_.K))
self.train_score_.resize(total_n_estimators)
if (self.subsample < 1 or hasattr(self, 'oob_improvement_')):
# if do oob resize arrays or create new if not available
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_.resize(total_n_estimators)
else:
self.oob_improvement_ = np.zeros((total_n_estimators,),
dtype=np.float64)
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def fit(self, X, y, sample_weight=None, monitor=None):
    """Fit the gradient boosting model.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]
        Training vectors, where n_samples is the number of samples
        and n_features is the number of features.
    y : array-like, shape = [n_samples]
        Target values (integers in classification, real numbers in
        regression)
        For classification, labels must correspond to classes.
    sample_weight : array-like, shape = [n_samples] or None
        Sample weights. If None, then samples are equally weighted. Splits
        that would create child nodes with net zero or negative weight are
        ignored while searching for a split in each node. In the case of
        classification, splits are also ignored if they would result in any
        single class carrying a negative weight in either child node.
    monitor : callable, optional
        The monitor is called after each iteration with the current
        iteration, a reference to the estimator and the local variables of
        ``_fit_stages`` as keyword arguments ``callable(i, self,
        locals())``. If the callable returns ``True`` the fitting procedure
        is stopped. The monitor can be used for various things such as
        computing held-out estimates, early stopping, model introspect, and
        snapshoting.

    Returns
    -------
    self : object
        Returns self.
    """
    # if not warmstart - clear the estimator state
    if not self.warm_start:
        self._clear_state()

    # Check input
    X, y = check_X_y(X, y, dtype=DTYPE)
    n_samples, self.n_features = X.shape
    if sample_weight is None:
        sample_weight = np.ones(n_samples, dtype=np.float32)
    else:
        sample_weight = column_or_1d(sample_weight, warn=True)

    check_consistent_length(X, y, sample_weight)

    # subclasses may encode labels here (classifier) or just cast (regressor)
    y = self._validate_y(y)

    random_state = check_random_state(self.random_state)
    self._check_params()

    if not self._is_initialized():
        # init state
        self._init_state()

        # fit initial model - FIXME make sample_weight optional
        self.init_.fit(X, y, sample_weight)

        # init predictions
        y_pred = self.init_.predict(X)
        begin_at_stage = 0
    else:
        # add more estimators to fitted model
        # invariant: warm_start = True
        if self.n_estimators < self.estimators_.shape[0]:
            raise ValueError('n_estimators=%d must be larger or equal to '
                             'estimators_.shape[0]=%d when '
                             'warm_start==True'
                             % (self.n_estimators,
                                self.estimators_.shape[0]))
        # resume boosting from the already-fitted stages
        begin_at_stage = self.estimators_.shape[0]
        y_pred = self._decision_function(X)
        self._resize_state()

    # fit the boosting stages
    n_stages = self._fit_stages(X, y, y_pred, sample_weight, random_state,
                                begin_at_stage, monitor)
    # change shape of arrays after fit (early-stopping or additional ests)
    if n_stages != self.estimators_.shape[0]:
        self.estimators_ = self.estimators_[:n_stages]
        self.train_score_ = self.train_score_[:n_stages]
        if hasattr(self, 'oob_improvement_'):
            self.oob_improvement_ = self.oob_improvement_[:n_stages]

    return self
def _fit_stages(self, X, y, y_pred, sample_weight, random_state,
                begin_at_stage=0, monitor=None):
    """Iteratively fits the stages.

    For each stage it computes the progress (OOB, train score)
    and delegates to ``_fit_stage``.
    Returns the number of stages fit; might differ from ``n_estimators``
    due to early stopping.
    """
    n_samples = X.shape[0]
    do_oob = self.subsample < 1.0
    sample_mask = np.ones((n_samples, ), dtype=np.bool)
    n_inbag = max(1, int(self.subsample * n_samples))
    loss_ = self.loss_

    # Set min_weight_leaf from min_weight_fraction_leaf
    if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
        min_weight_leaf = (self.min_weight_fraction_leaf *
                           np.sum(sample_weight))
    else:
        min_weight_leaf = 0.

    # init criterion and splitter
    criterion = FriedmanMSE(1)
    splitter = PresortBestSplitter(criterion,
                                   self.max_features_,
                                   self.min_samples_leaf,
                                   min_weight_leaf,
                                   random_state)

    if self.verbose:
        verbose_reporter = VerboseReporter(self.verbose)
        verbose_reporter.init(self, begin_at_stage)

    # perform boosting iterations
    # ``i`` is pre-bound so the return value is correct even when the
    # loop body never runs (n_estimators == begin_at_stage).
    i = begin_at_stage
    for i in range(begin_at_stage, self.n_estimators):

        # subsampling
        if do_oob:
            sample_mask = _random_sample_mask(n_samples, n_inbag,
                                              random_state)
            # OOB score before adding this stage
            old_oob_score = loss_(y[~sample_mask],
                                  y_pred[~sample_mask],
                                  sample_weight[~sample_mask])

        # fit next stage of trees; returns the updated raw predictions
        y_pred = self._fit_stage(i, X, y, y_pred, sample_weight,
                                 sample_mask, criterion, splitter,
                                 random_state)

        # track deviance (= loss)
        if do_oob:
            self.train_score_[i] = loss_(y[sample_mask],
                                         y_pred[sample_mask],
                                         sample_weight[sample_mask])
            self.oob_improvement_[i] = (
                old_oob_score - loss_(y[~sample_mask],
                                      y_pred[~sample_mask],
                                      sample_weight[~sample_mask]))
        else:
            # no need to fancy index w/ no subsampling
            self.train_score_[i] = loss_(y, y_pred, sample_weight)

        if self.verbose > 0:
            verbose_reporter.update(i, self)

        if monitor is not None:
            # monitor may request early stopping based on the loop locals
            early_stopping = monitor(i, self, locals())
            if early_stopping:
                break
    return i + 1
def _make_estimator(self, append=True):
# we don't need _make_estimator
raise NotImplementedError()
def _init_decision_function(self, X):
"""Check input and compute prediction of ``init``. """
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit`"
" before making predictions`.")
if X.shape[1] != self.n_features:
raise ValueError("X.shape[1] should be {0:d}, not {1:d}.".format(
self.n_features, X.shape[1]))
score = self.init_.predict(X).astype(np.float64)
return score
def _decision_function(self, X):
    # for use in inner loop, not raveling the output in single-class case,
    # not doing input validation.
    # ``predict_stages`` (Cython) accumulates each stage's contribution,
    # scaled by ``learning_rate``, into ``score`` in place.
    score = self._init_decision_function(X)
    predict_stages(self.estimators_, X, self.learning_rate, score)
    return score
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
score : array, shape = [n_samples, n_classes] or [n_samples]
The decision function of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
Regression and binary classification produce an array of shape
[n_samples].
"""
X = check_array(X, dtype=DTYPE, order="C")
score = self._decision_function(X)
if score.shape[1] == 1:
return score.ravel()
return score
def _staged_decision_function(self, X):
    """Compute decision function of ``X`` for each iteration.

    This method allows monitoring (i.e. determine error on testing set)
    after each stage.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    score : generator of array, shape = [n_samples, k]
        The decision function of the input samples. The order of the
        classes corresponds to that in the attribute `classes_`.
        Regression and binary classification are special cases with
        ``k == 1``, otherwise ``k==n_classes``.
    """
    X = check_array(X, dtype=DTYPE, order="C")
    score = self._init_decision_function(X)
    for i in range(self.estimators_.shape[0]):
        # predict_stage (Cython) adds stage i's contribution to ``score``
        # in place, so a defensive copy is yielded to callers.
        predict_stage(self.estimators_, i, X, self.learning_rate, score)
        yield score.copy()
@deprecated(" and will be removed in 0.19")
def staged_decision_function(self, X):
    """Compute decision function of ``X`` for each iteration.

    This method allows monitoring (i.e. determine error on testing set)
    after each stage.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    score : generator of array, shape = [n_samples, k]
        The decision function of the input samples. The order of the
        classes corresponds to that in the attribute `classes_`.
        Regression and binary classification are special cases with
        ``k == 1``, otherwise ``k==n_classes``.
    """
    # Deprecated public wrapper that delegates to the private generator.
    for dec in self._staged_decision_function(X):
        # no yield from in Python2.X
        yield dec
@property
def feature_importances_(self):
    """Return the feature importances (the higher, the more important the
    feature).

    Importances are averaged first over the trees of each boosting stage
    and then over all stages.

    Returns
    -------
    feature_importances_ : array, shape = [n_features]
    """
    if self.estimators_ is None or len(self.estimators_) == 0:
        raise NotFittedError("Estimator not fitted, call `fit` before"
                             " `feature_importances_`.")
    total = np.zeros((self.n_features, ), dtype=np.float64)
    for stage in self.estimators_:
        # mean importance across the K trees of this stage
        total += sum(tree.feature_importances_ for tree in stage) / len(stage)
    return total / len(self.estimators_)
def _validate_y(self, y):
self.n_classes_ = 1
if y.dtype.kind == 'O':
y = y.astype(np.float64)
# Default implementation
return y
class GradientBoostingClassifier(BaseGradientBoosting, ClassifierMixin):
    """Gradient Boosting for classification.

    GB builds an additive model in a
    forward stage-wise fashion; it allows for the optimization of
    arbitrary differentiable loss functions. In each stage ``n_classes_``
    regression trees are fit on the negative gradient of the
    binomial or multinomial deviance loss function. Binary classification
    is a special case where only a single regression tree is induced.

    Read more in the :ref:`User Guide <gradient_boosting>`.

    Parameters
    ----------
    loss : {'deviance', 'exponential'}, optional (default='deviance')
        loss function to be optimized. 'deviance' refers to
        deviance (= logistic regression) for classification
        with probabilistic outputs. For loss 'exponential' gradient
        boosting recovers the AdaBoost algorithm.
    learning_rate : float, optional (default=0.1)
        learning rate shrinks the contribution of each tree by `learning_rate`.
        There is a trade-off between learning_rate and n_estimators.
    n_estimators : int (default=100)
        The number of boosting stages to perform. Gradient boosting
        is fairly robust to over-fitting so a large number usually
        results in better performance.
    max_depth : integer, optional (default=3)
        maximum depth of the individual regression estimators. The maximum
        depth limits the number of nodes in the tree. Tune this parameter
        for best performance; the best value depends on the interaction
        of the input variables.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
    subsample : float, optional (default=1.0)
        The fraction of samples to be used for fitting the individual base
        learners. If smaller than 1.0 this results in Stochastic Gradient
        Boosting. `subsample` interacts with the parameter `n_estimators`.
        Choosing `subsample < 1.0` leads to a reduction of variance
        and an increase in bias.
    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Choosing `max_features < n_features` leads to a reduction of variance
        and an increase in bias.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
    init : BaseEstimator, None, optional (default=None)
        An estimator object that is used to compute the initial
        predictions. ``init`` has to provide ``fit`` and ``predict``.
        If None it uses ``loss.init_estimator``.
    verbose : int, default: 0
        Enable verbose output. If 1 then it prints progress and performance
        once in a while (the more trees the lower the frequency). If greater
        than 1 then it prints progress and performance for every tree.
    warm_start : bool, default: False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just erase the
        previous solution.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    feature_importances_ : array, shape = [n_features]
        The feature importances (the higher, the more important the feature).
    oob_improvement_ : array, shape = [n_estimators]
        The improvement in loss (= deviance) on the out-of-bag samples
        relative to the previous iteration.
        ``oob_improvement_[0]`` is the improvement in
        loss of the first stage over the ``init`` estimator.
    train_score_ : array, shape = [n_estimators]
        The i-th score ``train_score_[i]`` is the deviance (= loss) of the
        model at iteration ``i`` on the in-bag sample.
        If ``subsample == 1`` this is the deviance on the training data.
    loss_ : LossFunction
        The concrete ``LossFunction`` object.
    init : BaseEstimator
        The estimator that provides the initial predictions.
        Set via the ``init`` argument or ``loss.init_estimator``.
    estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, loss_.K]
        The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
        classification, otherwise n_classes.

    See also
    --------
    sklearn.tree.DecisionTreeClassifier, RandomForestClassifier
    AdaBoostClassifier

    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.

    J. Friedman, Stochastic Gradient Boosting, 1999

    T. Hastie, R. Tibshirani and J. Friedman.
    Elements of Statistical Learning Ed. 2, Springer, 2009.
    """

    _SUPPORTED_LOSS = ('deviance', 'exponential')

    def __init__(self, loss='deviance', learning_rate=0.1, n_estimators=100,
                 subsample=1.0, min_samples_split=2,
                 min_samples_leaf=1, min_weight_fraction_leaf=0.,
                 max_depth=3, init=None, random_state=None,
                 max_features=None, verbose=0,
                 max_leaf_nodes=None, warm_start=False):

        super(GradientBoostingClassifier, self).__init__(
            loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_depth=max_depth, init=init, subsample=subsample,
            max_features=max_features,
            random_state=random_state, verbose=verbose,
            max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)

    def _validate_y(self, y):
        # encode labels as 0..n_classes-1 integers for the loss functions
        self.classes_, y = np.unique(y, return_inverse=True)
        self.n_classes_ = len(self.classes_)
        return y

    def decision_function(self, X):
        """Compute the decision function of ``X``.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        score : array, shape = [n_samples, n_classes] or [n_samples]
            The decision function of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
            Regression and binary classification produce an array of shape
            [n_samples].
        """
        X = check_array(X, dtype=DTYPE, order="C")
        score = self._decision_function(X)
        if score.shape[1] == 1:
            return score.ravel()
        return score

    def staged_decision_function(self, X):
        """Compute decision function of ``X`` for each iteration.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        score : generator of array, shape = [n_samples, k]
            The decision function of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
            Regression and binary classification are special cases with
            ``k == 1``, otherwise ``k==n_classes``.
        """
        for dec in self._staged_decision_function(X):
            # no yield from in Python2.X
            yield dec

    def predict(self, X):
        """Predict class for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted values.
        """
        score = self.decision_function(X)
        # map raw scores to class indices, then back to original labels
        decisions = self.loss_._score_to_decision(score)
        return self.classes_.take(decisions, axis=0)

    def staged_predict(self, X):
        """Predict class at each stage for X.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : generator of array of shape = [n_samples]
            The predicted value of the input samples.
        """
        for score in self._staged_decision_function(X):
            decisions = self.loss_._score_to_decision(score)
            yield self.classes_.take(decisions, axis=0)

    def predict_proba(self, X):
        """Predict class probabilities for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Raises
        ------
        AttributeError
            If the ``loss`` does not support probabilities.

        Returns
        -------
        p : array of shape = [n_samples]
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        score = self.decision_function(X)
        try:
            return self.loss_._score_to_proba(score)
        except NotFittedError:
            # re-raise unfitted errors unchanged; only translate the
            # missing-method case below into a clearer message
            raise
        except AttributeError:
            raise AttributeError('loss=%r does not support predict_proba' %
                                 self.loss)

    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Raises
        ------
        AttributeError
            If the ``loss`` does not support probabilities.

        Returns
        -------
        p : array of shape = [n_samples]
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)
        return np.log(proba)

    def staged_predict_proba(self, X):
        """Predict class probabilities at each stage for X.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : generator of array of shape = [n_samples]
            The predicted value of the input samples.
        """
        try:
            for score in self._staged_decision_function(X):
                yield self.loss_._score_to_proba(score)
        except NotFittedError:
            raise
        except AttributeError:
            raise AttributeError('loss=%r does not support predict_proba' %
                                 self.loss)
class GradientBoostingRegressor(BaseGradientBoosting, RegressorMixin):
    """Gradient Boosting for regression.

    GB builds an additive model in a forward stage-wise fashion;
    it allows for the optimization of arbitrary differentiable loss functions.
    In each stage a regression tree is fit on the negative gradient of the
    given loss function.

    Read more in the :ref:`User Guide <gradient_boosting>`.

    Parameters
    ----------
    loss : {'ls', 'lad', 'huber', 'quantile'}, optional (default='ls')
        loss function to be optimized. 'ls' refers to least squares
        regression. 'lad' (least absolute deviation) is a highly robust
        loss function solely based on order information of the input
        variables. 'huber' is a combination of the two. 'quantile'
        allows quantile regression (use `alpha` to specify the quantile).
    learning_rate : float, optional (default=0.1)
        learning rate shrinks the contribution of each tree by `learning_rate`.
        There is a trade-off between learning_rate and n_estimators.
    n_estimators : int (default=100)
        The number of boosting stages to perform. Gradient boosting
        is fairly robust to over-fitting so a large number usually
        results in better performance.
    max_depth : integer, optional (default=3)
        maximum depth of the individual regression estimators. The maximum
        depth limits the number of nodes in the tree. Tune this parameter
        for best performance; the best value depends on the interaction
        of the input variables.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : integer, optional (default=2)
        The minimum number of samples required to split an internal node.
    min_samples_leaf : integer, optional (default=1)
        The minimum number of samples required to be at a leaf node.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
    subsample : float, optional (default=1.0)
        The fraction of samples to be used for fitting the individual base
        learners. If smaller than 1.0 this results in Stochastic Gradient
        Boosting. `subsample` interacts with the parameter `n_estimators`.
        Choosing `subsample < 1.0` leads to a reduction of variance
        and an increase in bias.
    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Choosing `max_features < n_features` leads to a reduction of variance
        and an increase in bias.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
    alpha : float (default=0.9)
        The alpha-quantile of the huber loss function and the quantile
        loss function. Only if ``loss='huber'`` or ``loss='quantile'``.
    init : BaseEstimator, None, optional (default=None)
        An estimator object that is used to compute the initial
        predictions. ``init`` has to provide ``fit`` and ``predict``.
        If None it uses ``loss.init_estimator``.
    verbose : int, default: 0
        Enable verbose output. If 1 then it prints progress and performance
        once in a while (the more trees the lower the frequency). If greater
        than 1 then it prints progress and performance for every tree.
    warm_start : bool, default: False
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just erase the
        previous solution.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Attributes
    ----------
    feature_importances_ : array, shape = [n_features]
        The feature importances (the higher, the more important the feature).
    oob_improvement_ : array, shape = [n_estimators]
        The improvement in loss (= deviance) on the out-of-bag samples
        relative to the previous iteration.
        ``oob_improvement_[0]`` is the improvement in
        loss of the first stage over the ``init`` estimator.
    train_score_ : array, shape = [n_estimators]
        The i-th score ``train_score_[i]`` is the deviance (= loss) of the
        model at iteration ``i`` on the in-bag sample.
        If ``subsample == 1`` this is the deviance on the training data.
    loss_ : LossFunction
        The concrete ``LossFunction`` object.
    init : BaseEstimator
        The estimator that provides the initial predictions.
        Set via the ``init`` argument or ``loss.init_estimator``.
    estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
        The collection of fitted sub-estimators.

    See also
    --------
    DecisionTreeRegressor, RandomForestRegressor

    References
    ----------
    J. Friedman, Greedy Function Approximation: A Gradient Boosting
    Machine, The Annals of Statistics, Vol. 29, No. 5, 2001.

    J. Friedman, Stochastic Gradient Boosting, 1999

    T. Hastie, R. Tibshirani and J. Friedman.
    Elements of Statistical Learning Ed. 2, Springer, 2009.
    """

    _SUPPORTED_LOSS = ('ls', 'lad', 'huber', 'quantile')

    def __init__(self, loss='ls', learning_rate=0.1, n_estimators=100,
                 subsample=1.0, min_samples_split=2,
                 min_samples_leaf=1, min_weight_fraction_leaf=0.,
                 max_depth=3, init=None, random_state=None,
                 max_features=None, alpha=0.9, verbose=0, max_leaf_nodes=None,
                 warm_start=False):

        super(GradientBoostingRegressor, self).__init__(
            loss=loss, learning_rate=learning_rate, n_estimators=n_estimators,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_depth=max_depth, init=init, subsample=subsample,
            max_features=max_features,
            random_state=random_state, alpha=alpha, verbose=verbose,
            max_leaf_nodes=max_leaf_nodes, warm_start=warm_start)

    def predict(self, X):
        """Predict regression target for X.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape = [n_samples]
            The predicted values.
        """
        X = check_array(X, dtype=DTYPE, order="C")
        # regression always has a single output column; flatten it
        return self._decision_function(X).ravel()

    def staged_predict(self, X):
        """Predict regression target at each stage for X.

        This method allows monitoring (i.e. determine error on testing set)
        after each stage.

        Parameters
        ----------
        X : array-like of shape = [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : generator of array of shape = [n_samples]
            The predicted value of the input samples.
        """
        for y in self._staged_decision_function(X):
            yield y.ravel()
| bsd-3-clause |
synthicity/urbansim | urbansim/models/transition.py | 4 | 17258 | """
Use the ``TransitionModel`` class with the different transitioners to
add or remove agents based on growth rates or target totals.
"""
from __future__ import division
import logging
import numpy as np
import pandas as pd
from . import util
from ..utils.logutil import log_start_finish
from ..utils.sampling import sample_rows
logger = logging.getLogger(__name__)
def _empty_index():
return pd.Index([])
def add_rows(data, nrows, starting_index=None, accounting_column=None):
    """
    Add rows to a data table by sampling (with replacement) existing rows.

    Parameters
    ----------
    data : pandas.DataFrame
    nrows : int
        Number of rows (or accounting units) to add.
    starting_index : int, optional
        The starting index from which to calculate indexes for the new
        rows. If not given the max + 1 of the index of `data` will be used.
    accounting_column : string, optional
        Name of column with accounting totals/quantities to apply towards
        the control. If not provided then row counts will be used for
        accounting.

    Returns
    -------
    updated : pandas.DataFrame
        Table with rows added. New rows are assigned sequential index
        values starting at `starting_index`.
    added : pandas.Index
        New indexes of the rows that were added.
    copied : pandas.Index
        Indexes of rows that were copied. A row copied multiple times
        will have multiple entries.
    """
    logger.debug('start: adding {} rows in transition model'.format(nrows))
    if nrows == 0:
        return data, _empty_index(), _empty_index()

    # bug fix: use an explicit None test so a caller-supplied
    # starting_index of 0 is honored (truthiness discarded it before)
    if starting_index is None:
        starting_index = data.index.values.max() + 1

    new_rows = sample_rows(nrows, data, accounting_column=accounting_column)
    copied_index = new_rows.index
    # ``int`` replaces the removed NumPy ``np.int`` alias (same dtype)
    added_index = pd.Index(np.arange(
        starting_index, starting_index + len(new_rows.index), dtype=int))
    new_rows.index = added_index

    logger.debug(
        'finish: added {} rows in transition model'.format(len(new_rows)))
    return pd.concat([data, new_rows]), added_index, copied_index
def remove_rows(data, nrows, accounting_column=None):
    """
    Remove a randomly sampled set of rows from a table.

    Parameters
    ----------
    data : DataFrame
    nrows : float
        Number of rows (or accounting units) to remove; a negative
        value is treated as its absolute value.
    accounting_column : string, optional
        Name of column with accounting totals/quanties to apply towards the control. If not provided
        then row counts will be used for accounting.

    Returns
    -------
    updated : pandas.DataFrame
        Table with random rows removed.
    removed : pandas.Index
        Indexes of the rows removed from the table.
    """
    logger.debug('start: removing {} rows in transition model'.format(nrows))
    nrows = abs(nrows)  # in case a negative number came in

    # available units to remove: accounting totals if configured, else rows
    if accounting_column:
        unit_check = data[accounting_column].sum()
    else:
        unit_check = len(data)

    if nrows == 0:
        return data, _empty_index()
    if nrows > unit_check:
        raise ValueError('Number of rows to remove exceeds number of records in table.')

    # sample without replacement; keep a name that doesn't shadow this function
    sampled = sample_rows(nrows, data, accounting_column=accounting_column, replace=False)
    dropped_index = sampled.index

    logger.debug('finish: removed {} rows in transition model'.format(nrows))
    return data.loc[data.index.difference(dropped_index)], dropped_index
def add_or_remove_rows(data, nrows, starting_index=None, accounting_column=None):
    """
    Add or remove rows to/from a table. Rows are added
    for positive `nrows` and removed for negative `nrows`.

    Parameters
    ----------
    data : DataFrame
    nrows : float
        Number of rows to add or remove.
    starting_index : int, optional
        The starting index from which to calculate indexes for new rows.
        If not given the max + 1 of the index of `data` will be used.
        (Not applicable if rows are being removed.)
    accounting_column : string, optional
        Name of column with accounting totals/quantities to apply towards
        the control. If not provided then row counts will be used for
        accounting.

    Returns
    -------
    updated : pandas.DataFrame
        Table with rows added or removed.
    added : pandas.Index
        New indexes of the rows that were added.
    copied : pandas.Index
        Indexes of rows that were copied. A row copied multiple times
        will have multiple entries.
    removed : pandas.Index
        Index of rows that were removed.
    """
    if nrows > 0:
        updated, added, copied = add_rows(
            data, nrows, starting_index,
            accounting_column=accounting_column)
        removed = _empty_index()

    elif nrows < 0:
        updated, removed = remove_rows(data, nrows, accounting_column=accounting_column)
        added, copied = _empty_index(), _empty_index()

    else:
        # nrows == 0: no-op, return empty change sets
        updated, added, copied, removed = \
            data, _empty_index(), _empty_index(), _empty_index()

    return updated, added, copied, removed
class GrowthRateTransition(object):
    """
    Add or remove rows from tables using a single fixed growth rate.

    Parameters
    ----------
    growth_rate : float
        Proportional change applied per call; negative rates remove rows.
    accounting_column : string, optional
        Name of column with accounting totals/quanties to apply towards the control. If not provided
        then row counts will be used for accounting.
    """

    def __init__(self, growth_rate, accounting_column=None):
        self.growth_rate = growth_rate
        self.accounting_column = accounting_column

    def transition(self, data, year):
        """
        Add or remove rows to/from a table according to the prescribed
        growth rate for this model.

        Parameters
        ----------
        data : pandas.DataFrame
            Rows will be removed from or added to this table.
        year : None, optional
            Here for compatibility with other transition models,
            but ignored.

        Returns
        -------
        updated : pandas.DataFrame
            Table with rows removed or added.
        added : pandas.Index
            New indexes of the rows that were added.
        copied : pandas.Index
            Indexes of rows that were copied. A row copied multiple times
            will have multiple entries.
        removed : pandas.Index
            Index of rows that were removed.
        """
        # growth is applied to accounting totals when configured,
        # otherwise to the raw row count
        if self.accounting_column is None:
            base_total = len(data)
        else:
            base_total = data[self.accounting_column].sum()
        nrows = int(round(base_total * self.growth_rate))

        msg = 'adding {} rows via growth rate ({}) transition'.format(
            nrows, self.growth_rate)
        with log_start_finish(msg, logger):
            return add_or_remove_rows(
                data, nrows, accounting_column=self.accounting_column)

    def __call__(self, data, year):
        """
        Call `self.transition` with inputs.
        """
        return self.transition(data, year)
class TabularGrowthRateTransition(object):
    """
    Growth rate based transitions where the rates are stored in
    a table indexed by year with optional segmentation.
    Parameters
    ----------
    growth_rates : pandas.DataFrame
    rates_column : str
        Name of the column in `growth_rates` that contains the rates.
    accounting_column: string, optional
        Name of column with accounting totals/quantities to apply towards the control. If not provided
        then row counts will be used for accounting.
    """
    def __init__(self, growth_rates, rates_column, accounting_column=None):
        self.growth_rates = growth_rates
        self.rates_column = rates_column
        self.accounting_column = accounting_column
    @property
    def _config_table(self):
        """
        Table that has transition configuration.
        """
        return self.growth_rates
    @property
    def _config_column(self):
        """
        Non-filter column in config table.
        """
        return self.rates_column
    def _calc_nrows(self, len_data, growth_rate):
        """
        Calculate the number of rows to add to or remove from some data.
        Parameters
        ----------
        len_data : int
            The current number of rows in the data table.
        growth_rate : float
            Growth rate as a fraction. Positive for growth, negative
            for removing rows.
        """
        return int(round(len_data * growth_rate))
    def transition(self, data, year):
        """
        Add or remove rows to/from a table according to the prescribed
        growth rate for this model and year.
        Parameters
        ----------
        data : pandas.DataFrame
            Rows will be removed from or added to this table.
        year : None, optional
            Here for compatibility with other transition models,
            but ignored.
        Returns
        -------
        updated : pandas.DataFrame
            Table with rows removed or added.
        added : pandas.Index
            New indexes of the rows that were added.
        copied : pandas.Index
            Indexes of rows that were copied. A row copied multiple times
            will have multiple entries.
        removed : pandas.Index
            Index of rows that were removed.
        """
        logger.debug('start: tabular transition')
        if year not in self._config_table.index:
            raise ValueError('No targets for given year: {}'.format(year))
        # want this to be a DataFrame
        # .loc[[year]] (double brackets) keeps a DataFrame even when only
        # one config row matches the year.
        year_config = self._config_table.loc[[year]]
        logger.debug('transitioning {} segments'.format(len(year_config)))
        segments = []
        added_indexes = []
        copied_indexes = []
        removed_indexes = []
        # since we're looping over discrete segments we need to track
        # out here where their new indexes will begin
        starting_index = data.index.values.max() + 1
        for _, row in year_config.iterrows():
            # each config row defines one segment: its non-rate columns are
            # used as filters against `data`
            subset = util.filter_table(data, row, ignore={self._config_column})
            # Do not run on segment if it is empty
            if len(subset) == 0:
                logger.debug('empty segment encountered')
                continue
            if self.accounting_column is None:
                nrows = self._calc_nrows(len(subset), row[self._config_column])
            else:
                # control applies to the accounting column's total rather
                # than the row count
                nrows = self._calc_nrows(
                    subset[self.accounting_column].sum(),
                    row[self._config_column])
            updated, added, copied, removed = \
                add_or_remove_rows(subset, nrows, starting_index, self.accounting_column)
            if nrows > 0:
                # only update the starting index if rows were added
                # NOTE(review): with an accounting column the number of rows
                # actually added can differ from `nrows`; advancing by `nrows`
                # assumes they match — confirm against add_rows' behavior.
                starting_index = starting_index + nrows
            segments.append(updated)
            added_indexes.append(added)
            copied_indexes.append(copied)
            removed_indexes.append(removed)
        # stitch the per-segment results back into a single table/index set
        updated = pd.concat(segments)
        added_indexes = util.concat_indexes(added_indexes)
        copied_indexes = util.concat_indexes(copied_indexes)
        removed_indexes = util.concat_indexes(removed_indexes)
        logger.debug('finish: tabular transition')
        return updated, added_indexes, copied_indexes, removed_indexes
    def __call__(self, data, year):
        """
        Call `self.transition` with inputs.
        """
        return self.transition(data, year)
class TabularTotalsTransition(TabularGrowthRateTransition):
    """
    Transition data via control totals in pandas DataFrame with
    optional segmentation.

    Differs from the growth-rate parent only in how the per-segment
    row delta is computed: here the config column holds absolute
    target totals instead of fractional rates.

    Parameters
    ----------
    targets : pandas.DataFrame
    totals_column : str
        Name of the column in `targets` that contains the control totals.
    accounting_column : str, optional
        Name of column with accounting totals/quantities to apply towards
        the control. If not provided then row counts will be used for
        accounting.
    """
    def __init__(self, targets, totals_column, accounting_column=None):
        self.targets = targets
        self.totals_column = totals_column
        self.accounting_column = accounting_column

    @property
    def _config_table(self):
        """Control-totals table driving the transition."""
        return self.targets

    @property
    def _config_column(self):
        """Non-filter column in config table."""
        return self.totals_column

    def _calc_nrows(self, len_data, target_pop):
        """
        Number of rows to add (positive) or remove (negative) so the
        data reaches `target_pop`.

        Parameters
        ----------
        len_data : int
            The current number of rows in the data table.
        target_pop : int
            Target population.
        """
        return target_pop - len_data

    def transition(self, data, year):
        """
        Add or remove rows to/from a table according to the prescribed
        totals for this model and year.

        Parameters
        ----------
        data : pandas.DataFrame
            Rows will be removed from or added to this table.
        year : None, optional
            Here for compatibility with other transition models,
            but ignored.

        Returns
        -------
        updated : pandas.DataFrame
            Table with rows removed or added.
        added : pandas.Index
            New indexes of the rows that were added.
        copied : pandas.Index
            Indexes of rows that were copied. A row copied multiple times
            will have multiple entries.
        removed : pandas.Index
            Index of rows that were removed.
        """
        # Delegate the segment loop to the parent; only logging differs.
        with log_start_finish('tabular totals transition', logger):
            return super(TabularTotalsTransition, self).transition(data, year)
def _update_linked_table(table, col_name, added, copied, removed):
    """
    Copy and update rows in a table that has a column referencing another
    table that has had rows added via copying.

    Parameters
    ----------
    table : pandas.DataFrame
        Table to update with new or removed rows.
    col_name : str
        Name of column in `table` that corresponds to the index values
        in `copied` and `removed`.
    added : pandas.Index
        Indexes of rows that are new in the linked table.
    copied : pandas.Index
        Indexes of rows that were copied to make new rows in linked table.
    removed : pandas.Index
        Indexes of rows that were removed from the linked table.

    Returns
    -------
    updated : pandas.DataFrame
    """
    logger.debug('start: update linked table after transition')
    # handle removals: drop rows referencing removed linked-table rows
    table = table.loc[~table[col_name].isin(set(removed))]
    if (added is None or len(added) == 0):
        return table
    # map new IDs to the IDs from which they were copied; `copied` and
    # `added` are positionally aligned
    id_map = pd.concat(
        [pd.Series(copied, name=col_name), pd.Series(added, name='temp_id')],
        axis=1)
    # join to linked table and assign new id
    new_rows = id_map.merge(table, on=col_name)
    new_rows.drop(col_name, axis=1, inplace=True)
    new_rows.rename(columns={'temp_id': col_name}, inplace=True)
    # index the new rows after the current maximum.
    # Use the builtin ``int`` as the dtype: the ``np.int`` alias was
    # deprecated in NumPy 1.20 and removed in 1.24.
    starting_index = table.index.values.max() + 1
    new_rows.index = np.arange(
        starting_index, starting_index + len(new_rows), dtype=int)
    logger.debug('finish: update linked table after transition')
    return pd.concat([table, new_rows])
class TransitionModel(object):
    """
    Models things moving into or out of a region.

    Parameters
    ----------
    transitioner : callable
        A callable that takes a data table and a year number and returns
        and new data table, the indexes of rows added, the indexes
        of rows copied, and the indexes of rows removed.
    """
    def __init__(self, transitioner):
        self.transitioner = transitioner

    def transition(self, data, year, linked_tables=None):
        """
        Add or remove rows from a table based on population targets.

        Parameters
        ----------
        data : pandas.DataFrame
            Rows will be removed from or added to this table.
        year : int
            Year number that will be passed to `transitioner`.
        linked_tables : dict of tuple, optional
            Dictionary of (table, 'column name') pairs. The column name
            should match the index of `data`. Indexes in `data` that
            are copied or removed will also be copied and removed in
            linked tables. They dictionary keys are used in the
            returned `updated_links`.

        Returns
        -------
        updated : pandas.DataFrame
            Table with rows removed or added.
        added : pandas.Series
            Indexes of new rows in `updated`.
        updated_links : dict of pandas.DataFrame
        """
        logger.debug('start: transition')
        # run the configured transitioner first so linked tables can be
        # reconciled against the resulting added/copied/removed indexes
        with log_start_finish('add/remove rows', logger):
            updated, added, copied, removed = self.transitioner(data, year)
        updated_links = {}
        for table_name, (table, col) in (linked_tables or {}).items():
            logger.debug('updating linked table {}'.format(table_name))
            updated_links[table_name] = _update_linked_table(
                table, col, added, copied, removed)
        logger.debug('finish: transition')
        return updated, added, updated_links
| bsd-3-clause |
zhenv5/scikit-learn | examples/svm/plot_svm_scale_c.py | 223 | 5375 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different amount of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the amount of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different amount of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1`. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)


# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#         Jaques Grobler <jaques.grobler@inria.fr>
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn.svm import LinearSVC
from sklearn.cross_validation import ShuffleSplit
from sklearn.grid_search import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets

rnd = check_random_state(1)

# set up dataset
n_samples = 100
n_features = 300

# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
                                        n_features=n_features, n_informative=5,
                                        random_state=1)

# l2 data: non sparse, but fewer features
y_2 = np.sign(.5 - rnd.rand(n_samples))
# Use floor division so the feature count stays an integer: under
# Python 3, ``n_features / 5`` is a float and randn would raise.
# (Identical result under Python 2's integer division.)
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)

# Each entry: (estimator, grid of C values, data, targets)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
                       tol=1e-3),
             np.logspace(-2.3, -1.3, 10), X_1, y_1),
            (LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
                       tol=1e-4),
             np.logspace(-4.5, -2, 10), X_2, y_2)]

colors = ['b', 'g', 'r', 'c']

for fignum, (clf, cs, X, y) in enumerate(clf_sets):
    # set up the plot for each regressor
    plt.figure(fignum, figsize=(9, 10))

    for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
        param_grid = dict(C=cs)
        # To get nice curve, we need a large number of iterations to
        # reduce the variance
        grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
                            cv=ShuffleSplit(n=n_samples, train_size=train_size,
                                            n_iter=250, random_state=1))
        grid.fit(X, y)
        scores = [x[1] for x in grid.grid_scores_]

        # Plot each C grid twice: raw and rescaled by 1/n_samples
        scales = [(1, 'No scaling'),
                  ((n_samples * train_size), '1/n_samples'),
                  ]

        for subplotnum, (scaler, name) in enumerate(scales):
            plt.subplot(2, 1, subplotnum + 1)
            plt.xlabel('C')
            plt.ylabel('CV Score')
            grid_cs = cs * float(scaler)  # scale the C's
            plt.semilogx(grid_cs, scores, label="fraction %.2f" %
                         train_size)
            plt.title('scaling=%s, penalty=%s, loss=%s' %
                      (name, clf.penalty, clf.loss))

    plt.legend(loc="best")
plt.show()
| bsd-3-clause |
rohit21122012/DCASE2013 | runs/2016/dnn2016med_traps/traps39/src/dataset.py | 55 | 78980 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import locale
import socket
import tarfile
import urllib2
import zipfile
from sklearn.cross_validation import StratifiedShuffleSplit, KFold
from files import *
from general import *
from ui import *
class Dataset(object):
    """Dataset base class.
    The specific dataset classes are inherited from this class, and only needed methods are reimplemented.

    Handles downloading, extracting and indexing a dataset stored under a
    local path, and exposes meta data, labels and train/test fold splits.
    NOTE(review): this class uses Python 2-only constructs (`urllib2`,
    `except ..., e` syntax) and is not Python 3 compatible.
    """
    def __init__(self, data_path='data', name='dataset'):
        """__init__ method.
        Parameters
        ----------
        data_path : str
            Basepath where the dataset is stored.
            (Default value='data')
        name : str
            Folder name for the dataset under `data_path`.
            (Default value='dataset')
        """
        # Folder name for dataset
        self.name = name
        # Path to the dataset
        self.local_path = os.path.join(data_path, self.name)
        # Create the dataset path if does not exist
        if not os.path.isdir(self.local_path):
            os.makedirs(self.local_path)
        # Evaluation setup folder
        self.evaluation_setup_folder = 'evaluation_setup'
        # Path to the folder containing evaluation setup files
        self.evaluation_setup_path = os.path.join(self.local_path, self.evaluation_setup_folder)
        # Meta data file, csv-format
        self.meta_filename = 'meta.txt'
        # Path to meta data file
        self.meta_file = os.path.join(self.local_path, self.meta_filename)
        # Hash file to detect removed or added files
        self.filelisthash_filename = 'filelist.hash'
        # Number of evaluation folds
        self.evaluation_folds = 1
        # List containing dataset package items
        # Define this in the inherited class.
        # Format:
        # {
        #        'remote_package': download_url,
        #        'local_package': os.path.join(self.local_path, 'name_of_downloaded_package'),
        #        'local_audio_path': os.path.join(self.local_path, 'name_of_folder_containing_audio_files'),
        # }
        self.package_list = []
        # List of audio files (lazily populated by `audio_files`)
        self.files = None
        # List of meta data dict (lazily populated by `meta`)
        self.meta_data = None
        # Training meta data for folds
        self.evaluation_data_train = {}
        # Testing meta data for folds
        self.evaluation_data_test = {}
        # Recognized audio extensions
        self.audio_extensions = {'wav', 'flac'}
        # Info fields for dataset
        self.authors = ''
        self.name_remote = ''
        self.url = ''
        self.audio_source = ''
        self.audio_type = ''
        self.recording_device_model = ''
        self.microphone_model = ''
    @property
    def audio_files(self):
        """Get all audio files in the dataset
        Parameters
        ----------
        Nothing
        Returns
        -------
        filelist : list
            File list with absolute paths
        """
        # Scan the package audio folders once and cache the sorted result.
        if self.files is None:
            self.files = []
            for item in self.package_list:
                path = item['local_audio_path']
                if path:
                    l = os.listdir(path)
                    for f in l:
                        file_name, file_extension = os.path.splitext(f)
                        # splitext keeps the dot, hence the [1:] slice
                        if file_extension[1:] in self.audio_extensions:
                            self.files.append(os.path.abspath(os.path.join(path, f)))
            self.files.sort()
        return self.files
    @property
    def audio_file_count(self):
        """Get number of audio files in dataset
        Parameters
        ----------
        Nothing
        Returns
        -------
        filecount : int
            Number of audio files
        """
        return len(self.audio_files)
    @property
    def meta(self):
        """Get meta data for dataset. If not already read from disk, data is read and returned.
        Parameters
        ----------
        Nothing
        Returns
        -------
        meta_data : list
            List containing meta data as dict.
        Raises
        -------
        IOError
            meta file not found.
        """
        if self.meta_data is None:
            self.meta_data = []
            meta_id = 0
            if os.path.isfile(self.meta_file):
                f = open(self.meta_file, 'rt')
                try:
                    reader = csv.reader(f, delimiter='\t')
                    # Row layout determines the meta type: 2 columns for
                    # scene meta, 4 for audio tagging, 6 for event meta.
                    for row in reader:
                        if len(row) == 2:
                            # Scene meta
                            self.meta_data.append({'file': row[0], 'scene_label': row[1].rstrip()})
                        elif len(row) == 4:
                            # Audio tagging meta
                            self.meta_data.append(
                                {'file': row[0], 'scene_label': row[1].rstrip(), 'tag_string': row[2].rstrip(),
                                 'tags': row[3].split(';')})
                        elif len(row) == 6:
                            # Event meta
                            self.meta_data.append({'file': row[0],
                                                   'scene_label': row[1].rstrip(),
                                                   'event_onset': float(row[2]),
                                                   'event_offset': float(row[3]),
                                                   'event_label': row[4].rstrip(),
                                                   'event_type': row[5].rstrip(),
                                                   'id': meta_id
                                                   })
                        meta_id += 1
                finally:
                    f.close()
            else:
                raise IOError("Meta file not found [%s]" % self.meta_file)
        return self.meta_data
    @property
    def meta_count(self):
        """Number of meta data items.
        Parameters
        ----------
        Nothing
        Returns
        -------
        meta_item_count : int
            Meta data item count
        """
        return len(self.meta)
    @property
    def fold_count(self):
        """Number of fold in the evaluation setup.
        Parameters
        ----------
        Nothing
        Returns
        -------
        fold_count : int
            Number of folds
        """
        return self.evaluation_folds
    @property
    def scene_labels(self):
        """List of unique scene labels in the meta data.
        Parameters
        ----------
        Nothing
        Returns
        -------
        labels : list
            List of scene labels in alphabetical order.
        """
        labels = []
        for item in self.meta:
            if 'scene_label' in item and item['scene_label'] not in labels:
                labels.append(item['scene_label'])
        labels.sort()
        return labels
    @property
    def scene_label_count(self):
        """Number of unique scene labels in the meta data.
        Parameters
        ----------
        Nothing
        Returns
        -------
        scene_label_count : int
            Number of unique scene labels.
        """
        return len(self.scene_labels)
    @property
    def event_labels(self):
        """List of unique event labels in the meta data.
        Parameters
        ----------
        Nothing
        Returns
        -------
        labels : list
            List of event labels in alphabetical order.
        """
        labels = []
        for item in self.meta:
            if 'event_label' in item and item['event_label'].rstrip() not in labels:
                labels.append(item['event_label'].rstrip())
        labels.sort()
        return labels
    @property
    def event_label_count(self):
        """Number of unique event labels in the meta data.
        Parameters
        ----------
        Nothing
        Returns
        -------
        event_label_count : int
            Number of unique event labels
        """
        return len(self.event_labels)
    @property
    def audio_tags(self):
        """List of unique audio tags in the meta data.
        Parameters
        ----------
        Nothing
        Returns
        -------
        labels : list
            List of audio tags in alphabetical order.
        """
        tags = []
        for item in self.meta:
            if 'tags' in item:
                for tag in item['tags']:
                    if tag and tag not in tags:
                        tags.append(tag)
        tags.sort()
        return tags
    @property
    def audio_tag_count(self):
        """Number of unique audio tags in the meta data.
        Parameters
        ----------
        Nothing
        Returns
        -------
        audio_tag_count : int
            Number of unique audio tags
        """
        return len(self.audio_tags)
    def __getitem__(self, i):
        """Getting meta data item
        Parameters
        ----------
        i : int
            item id
        Returns
        -------
        meta_data : dict
            Meta data item, or None when `i` is out of range.
        """
        if i < len(self.meta):
            return self.meta[i]
        else:
            return None
    def __iter__(self):
        """Iterator for meta data items
        Parameters
        ----------
        Nothing
        Returns
        -------
        Nothing
        """
        i = 0
        meta = self[i]
        # yield window while it's valid
        while meta is not None:
            yield meta
            # get next item
            i += 1
            meta = self[i]
    @staticmethod
    def print_bytes(num_bytes):
        """Output number of bytes according to locale and with IEC binary prefixes
        Parameters
        ----------
        num_bytes : int > 0 [scalar]
            Bytes
        Returns
        -------
        bytes : str
            Human readable string
        """
        KiB = 1024
        MiB = KiB * KiB
        GiB = KiB * MiB
        TiB = KiB * GiB
        PiB = KiB * TiB
        EiB = KiB * PiB
        ZiB = KiB * EiB
        YiB = KiB * ZiB
        locale.setlocale(locale.LC_ALL, '')
        output = locale.format("%d", num_bytes, grouping=True) + ' bytes'
        # Append the largest matching IEC binary prefix for readability.
        if num_bytes > YiB:
            output += ' (%.4g YiB)' % (num_bytes / YiB)
        elif num_bytes > ZiB:
            output += ' (%.4g ZiB)' % (num_bytes / ZiB)
        elif num_bytes > EiB:
            output += ' (%.4g EiB)' % (num_bytes / EiB)
        elif num_bytes > PiB:
            output += ' (%.4g PiB)' % (num_bytes / PiB)
        elif num_bytes > TiB:
            output += ' (%.4g TiB)' % (num_bytes / TiB)
        elif num_bytes > GiB:
            output += ' (%.4g GiB)' % (num_bytes / GiB)
        elif num_bytes > MiB:
            output += ' (%.4g MiB)' % (num_bytes / MiB)
        elif num_bytes > KiB:
            output += ' (%.4g KiB)' % (num_bytes / KiB)
        return output
    def download(self):
        """Download dataset over the internet to the local path
        Parameters
        ----------
        Nothing
        Returns
        -------
        Nothing
        Raises
        -------
        IOError
            Download failed.
        """
        section_header('Download dataset')
        for item in self.package_list:
            try:
                # Skip packages with no remote URL or already downloaded.
                if item['remote_package'] and not os.path.isfile(item['local_package']):
                    data = None
                    req = urllib2.Request(item['remote_package'], data, {})
                    handle = urllib2.urlopen(req)
                    # NOTE(review): headers.items() is a list of (key, value)
                    # tuples, so this membership test against a bare string
                    # can never be True and `size` always stays None —
                    # confirm whether `"Content-Length" in handle.headers`
                    # was intended.
                    if "Content-Length" in handle.headers.items():
                        size = int(handle.info()["Content-Length"])
                    else:
                        size = None
                    actualSize = 0
                    blocksize = 64 * 1024
                    # Download into a temp file and rename on success so a
                    # partial download never masquerades as a full package.
                    tmp_file = os.path.join(self.local_path, 'tmp_file')
                    fo = open(tmp_file, "wb")
                    terminate = False
                    while not terminate:
                        block = handle.read(blocksize)
                        actualSize += len(block)
                        if size:
                            progress(title_text=os.path.split(item['local_package'])[1],
                                     percentage=actualSize / float(size),
                                     note=self.print_bytes(actualSize))
                        else:
                            progress(title_text=os.path.split(item['local_package'])[1],
                                     note=self.print_bytes(actualSize))
                        if len(block) == 0:
                            break
                        fo.write(block)
                    fo.close()
                    os.rename(tmp_file, item['local_package'])
            # Python 2-only `except ..., e` syntax.
            except (urllib2.URLError, socket.timeout), e:
                try:
                    fo.close()
                except:
                    raise IOError('Download failed [%s]' % (item['remote_package']))
                # NOTE(review): the download error is only surfaced when
                # fo.close() itself fails; otherwise the failure is silently
                # ignored and the loop continues — confirm intent.
        foot()
    def extract(self):
        """Extract the dataset packages
        Parameters
        ----------
        Nothing
        Returns
        -------
        Nothing
        """
        section_header('Extract dataset')
        for item_id, item in enumerate(self.package_list):
            if item['local_package']:
                if item['local_package'].endswith('.zip'):
                    with zipfile.ZipFile(item['local_package'], "r") as z:
                        # Trick to omit first level folder
                        parts = []
                        for name in z.namelist():
                            if not name.endswith('/'):
                                parts.append(name.split('/')[:-1])
                        prefix = os.path.commonprefix(parts) or ''
                        if prefix:
                            if len(prefix) > 1:
                                prefix_ = list()
                                prefix_.append(prefix[0])
                                prefix = prefix_
                            prefix = '/'.join(prefix) + '/'
                        offset = len(prefix)
                        # Start extraction
                        members = z.infolist()
                        file_count = 1
                        for i, member in enumerate(members):
                            if len(member.filename) > offset:
                                # strip the common top-level folder from the
                                # archived path before extracting
                                member.filename = member.filename[offset:]
                                if not os.path.isfile(os.path.join(self.local_path, member.filename)):
                                    z.extract(member, self.local_path)
                                progress(
                                    title_text='Extracting [' + str(item_id) + '/' + str(len(self.package_list)) + ']',
                                    percentage=(file_count / float(len(members))),
                                    note=member.filename)
                            file_count += 1
                elif item['local_package'].endswith('.tar.gz'):
                    tar = tarfile.open(item['local_package'], "r:gz")
                    for i, tar_info in enumerate(tar):
                        if not os.path.isfile(os.path.join(self.local_path, tar_info.name)):
                            tar.extract(tar_info, self.local_path)
                        progress(title_text='Extracting [' + str(item_id) + '/' + str(len(self.package_list)) + ']',
                                 note=tar_info.name)
                        # reset the member cache to keep memory bounded while
                        # streaming through large archives
                        tar.members = []
                    tar.close()
        foot()
    def on_after_extract(self):
        """Dataset meta data preparation, this will be overloaded in dataset specific classes
        Parameters
        ----------
        Nothing
        Returns
        -------
        Nothing
        """
        pass
    def get_filelist(self):
        """List of files under local_path
        Parameters
        ----------
        Nothing
        Returns
        -------
        filelist: list
            File list
        """
        filelist = []
        for path, subdirs, files in os.walk(self.local_path):
            for name in files:
                filelist.append(os.path.join(path, name))
        return filelist
    def check_filelist(self):
        """Generates hash from file list and check does it matches with one saved in filelist.hash.
        If some files have been deleted or added, checking will result False.
        Parameters
        ----------
        Nothing
        Returns
        -------
        result: bool
            Result
        """
        if os.path.isfile(os.path.join(self.local_path, self.filelisthash_filename)):
            # NOTE: local name `hash` shadows the builtin hash() within
            # this method.
            hash = load_text(os.path.join(self.local_path, self.filelisthash_filename))[0]
            if hash != get_parameter_hash(sorted(self.get_filelist())):
                return False
            else:
                return True
        else:
            return False
    def save_filelist_hash(self):
        """Generates file list hash, and saves it as filelist.hash under local_path.
        Parameters
        ----------
        Nothing
        Returns
        -------
        Nothing
        """
        filelist = self.get_filelist()
        # Include the hash file itself in the hashed list so a later
        # check_filelist() sees the same set of paths.
        filelist_hash_not_found = True
        for file in filelist:
            if self.filelisthash_filename in file:
                filelist_hash_not_found = False
        if filelist_hash_not_found:
            filelist.append(os.path.join(self.local_path, self.filelisthash_filename))
        save_text(os.path.join(self.local_path, self.filelisthash_filename), get_parameter_hash(sorted(filelist)))
    def fetch(self):
        """Download, extract and prepare the dataset.
        Parameters
        ----------
        Nothing
        Returns
        -------
        Nothing
        """
        # Skip all work when the stored file-list hash still matches.
        if not self.check_filelist():
            self.download()
            self.extract()
            self.on_after_extract()
            self.save_filelist_hash()
        return self
    def train(self, fold=0):
        """List of training items.
        Parameters
        ----------
        fold : int > 0 [scalar]
            Fold id, if zero all meta data is returned.
            (Default value=0)
        Returns
        -------
        list : list of dicts
            List containing all meta data assigned to training set for given fold.
        """
        if fold not in self.evaluation_data_train:
            self.evaluation_data_train[fold] = []
            if fold > 0:
                # Fold setup files use 2 columns for scene meta, 4 for audio
                # tagging and 5 for event meta (no event_type/id columns).
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'rt') as f:
                    for row in csv.reader(f, delimiter='\t'):
                        if len(row) == 2:
                            # Scene meta
                            self.evaluation_data_train[fold].append({
                                'file': self.relative_to_absolute_path(row[0]),
                                'scene_label': row[1]
                            })
                        elif len(row) == 4:
                            # Audio tagging meta
                            self.evaluation_data_train[fold].append({
                                'file': self.relative_to_absolute_path(row[0]),
                                'scene_label': row[1],
                                'tag_string': row[2],
                                'tags': row[3].split(';')
                            })
                        elif len(row) == 5:
                            # Event meta
                            self.evaluation_data_train[fold].append({
                                'file': self.relative_to_absolute_path(row[0]),
                                'scene_label': row[1],
                                'event_onset': float(row[2]),
                                'event_offset': float(row[3]),
                                'event_label': row[4]
                            })
            else:
                # fold 0: all meta data, converted to absolute paths
                data = []
                for item in self.meta:
                    if 'event_label' in item:
                        data.append({'file': self.relative_to_absolute_path(item['file']),
                                     'scene_label': item['scene_label'],
                                     'event_onset': item['event_onset'],
                                     'event_offset': item['event_offset'],
                                     'event_label': item['event_label'],
                                     })
                    else:
                        data.append({'file': self.relative_to_absolute_path(item['file']),
                                     'scene_label': item['scene_label']
                                     })
                self.evaluation_data_train[0] = data
        return self.evaluation_data_train[fold]
    def test(self, fold=0):
        """List of testing items.
        Parameters
        ----------
        fold : int > 0 [scalar]
            Fold id, if zero all meta data is returned.
            (Default value=0)
        Returns
        -------
        list : list of dicts
            List containing all meta data assigned to testing set for given fold.
        """
        if fold not in self.evaluation_data_test:
            self.evaluation_data_test[fold] = []
            if fold > 0:
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'rt') as f:
                    for row in csv.reader(f, delimiter='\t'):
                        self.evaluation_data_test[fold].append({'file': self.relative_to_absolute_path(row[0])})
            else:
                # fold 0: unique file list derived from all meta data
                data = []
                files = []
                for item in self.meta:
                    if self.relative_to_absolute_path(item['file']) not in files:
                        data.append({'file': self.relative_to_absolute_path(item['file'])})
                        files.append(self.relative_to_absolute_path(item['file']))
                self.evaluation_data_test[fold] = data
        return self.evaluation_data_test[fold]
    def folds(self, mode='folds'):
        """List of fold ids
        Parameters
        ----------
        mode : str {'folds','full'}
            Fold setup type, possible values are 'folds' and 'full'. In 'full' mode fold number is set 0 and all data is used for training.
            (Default value=folds)
        Returns
        -------
        list : list of integers
            Fold ids
        """
        # NOTE(review): any other mode value falls through and returns
        # None implicitly — confirm whether that should raise instead.
        if mode == 'folds':
            return range(1, self.evaluation_folds + 1)
        elif mode == 'full':
            return [0]
    def file_meta(self, file):
        """Meta data for given file
        Parameters
        ----------
        file : str
            File name
        Returns
        -------
        list : list of dicts
            List containing all meta data related to given file.
        """
        # Meta entries store relative paths; normalize before matching.
        file = self.absolute_to_relative(file)
        file_meta = []
        for item in self.meta:
            if item['file'] == file:
                file_meta.append(item)
        return file_meta
    def relative_to_absolute_path(self, path):
        """Converts relative path into absolute path.
        Parameters
        ----------
        path : str
            Relative path
        Returns
        -------
        path : str
            Absolute path
        """
        return os.path.abspath(os.path.join(self.local_path, path))
    def absolute_to_relative(self, path):
        """Converts absolute path into relative path.
        Parameters
        ----------
        path : str
            Absolute path
        Returns
        -------
        path : str
            Relative path
        """
        # Paths outside local_path are returned unchanged.
        if path.startswith(os.path.abspath(self.local_path)):
            return os.path.relpath(path, self.local_path)
        else:
            return path
# =====================================================
# DCASE 2016
# =====================================================
class TUTAcousticScenes_2016_DevelopmentSet(Dataset):
    """TUT Acoustic scenes 2016 development dataset

    This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
    """

    def __init__(self, data_path='data'):
        Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-development')
        self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
        self.name_remote = 'TUT Acoustic Scenes 2016, development dataset'
        self.url = 'https://zenodo.org/record/45739'
        self.audio_source = 'Field recording'
        self.audio_type = 'Natural'
        self.recording_device_model = 'Roland Edirol R-09'
        self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
        self.evaluation_folds = 4

        audio_path = os.path.join(self.local_path, 'audio')
        remote_base = 'https://zenodo.org/record/45739/files/'

        # Local-only entry first, then doc, meta and the eight audio packages.
        self.package_list = [
            {
                'remote_package': None,
                'local_package': None,
                'local_audio_path': audio_path,
            },
        ]
        package_names = ['TUT-acoustic-scenes-2016-development.doc.zip',
                         'TUT-acoustic-scenes-2016-development.meta.zip']
        package_names += ['TUT-acoustic-scenes-2016-development.audio.%d.zip' % i
                          for i in range(1, 9)]
        for package_name in package_names:
            self.package_list.append({
                'remote_package': remote_base + package_name,
                'local_package': os.path.join(self.local_path, package_name),
                'local_audio_path': audio_path,
            })

    def on_after_extract(self):
        """After dataset packages are downloaded and extracted, meta-files are checked.

        Collects (file, scene_label) pairs from the per-fold train/evaluate setup
        files and writes them into the dataset meta file.

        Parameters
        ----------
        nothing

        Returns
        -------
        nothing
        """
        if not os.path.isfile(self.meta_file):
            section_header('Generating meta file for dataset')
            meta_data = {}

            # BUG FIX: the original looped xrange(1, self.evaluation_folds) and
            # therefore never read the last fold's setup files.
            for fold in range(1, self.evaluation_folds + 1):
                for subset in ['train', 'evaluate']:
                    setup_filename = os.path.join(self.evaluation_setup_path,
                                                  'fold' + str(fold) + '_' + subset + '.txt')
                    with open(setup_filename, 'rt') as f:
                        for row in csv.reader(f, delimiter='\t'):
                            # First occurrence of a file defines its scene label.
                            if row[0] not in meta_data:
                                meta_data[row[0]] = row[1]

            with open(self.meta_file, 'wt') as f:
                writer = csv.writer(f, delimiter='\t')
                for file in meta_data:
                    raw_path, raw_filename = os.path.split(file)
                    relative_path = self.absolute_to_relative(raw_path)
                    writer.writerow((os.path.join(relative_path, raw_filename), meta_data[file]))

            foot()
class TUTAcousticScenes_2016_EvaluationSet(Dataset):
    """TUT Acoustic scenes 2016 evaluation dataset

    This dataset is used in DCASE2016 - Task 1, Acoustic scene classification
    """

    def __init__(self, data_path='data'):
        Dataset.__init__(self, data_path=data_path, name='TUT-acoustic-scenes-2016-evaluation')
        self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
        self.name_remote = 'TUT Acoustic Scenes 2016, evaluation dataset'
        self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
        self.audio_source = 'Field recording'
        self.audio_type = 'Natural'
        self.recording_device_model = 'Roland Edirol R-09'
        self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
        self.evaluation_folds = 1

        # No remote packages registered; audio is delivered separately.
        self.package_list = [
            {
                'remote_package': None,
                'local_package': None,
                'local_audio_path': os.path.join(self.local_path, 'audio'),
            },
        ]

    def on_after_extract(self):
        """After dataset packages are downloaded and extracted, meta-files are checked.

        The meta file is generated from evaluate.txt once ground truth is available.

        Parameters
        ----------
        nothing

        Returns
        -------
        nothing
        """
        eval_filename = os.path.join(self.evaluation_setup_path, 'evaluate.txt')
        if not os.path.isfile(self.meta_file) and os.path.isfile(eval_filename):
            section_header('Generating meta file for dataset')

            # First occurrence of a file defines its scene label.
            meta_data = {}
            with open(eval_filename, 'rt') as eval_file:
                for row in csv.reader(eval_file, delimiter='\t'):
                    if row[0] not in meta_data:
                        meta_data[row[0]] = row[1]

            with open(self.meta_file, 'wt') as meta_file:
                writer = csv.writer(meta_file, delimiter='\t')
                for file in meta_data:
                    raw_path, raw_filename = os.path.split(file)
                    relative_path = self.absolute_to_relative(raw_path)
                    writer.writerow((os.path.join(relative_path, raw_filename), meta_data[file]))

            foot()

    def train(self, fold=0):
        """Training material is not distributed with the evaluation set."""
        raise IOError('Train setup not available.')
# TUT Sound events 2016 development and evaluation sets
class TUTSoundEvents_2016_DevelopmentSet(Dataset):
    """TUT Sound events 2016 development dataset

    This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
    """

    def __init__(self, data_path='data'):
        Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-development')
        self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
        self.name_remote = 'TUT Sound Events 2016, development dataset'
        self.url = 'https://zenodo.org/record/45759'
        self.audio_source = 'Field recording'
        self.audio_type = 'Natural'
        self.recording_device_model = 'Roland Edirol R-09'
        self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
        self.evaluation_folds = 4

        audio_path = os.path.join(self.local_path, 'audio')
        remote_base = 'https://zenodo.org/record/45759/files/'

        # Local-only audio directories first, then the downloadable packages.
        self.package_list = [
            {
                'remote_package': None,
                'local_package': None,
                'local_audio_path': audio_path,
            },
            {
                'remote_package': None,
                'local_package': None,
                'local_audio_path': os.path.join(audio_path, 'residential_area'),
            },
            {
                'remote_package': None,
                'local_package': None,
                'local_audio_path': os.path.join(audio_path, 'home'),
            },
        ]
        for package_name in ['TUT-sound-events-2016-development.doc.zip',
                             'TUT-sound-events-2016-development.meta.zip',
                             'TUT-sound-events-2016-development.audio.zip']:
            self.package_list.append({
                'remote_package': remote_base + package_name,
                'local_package': os.path.join(self.local_path, package_name),
                'local_audio_path': audio_path,
            })

    def event_label_count(self, scene_label=None):
        """Number of distinct event labels, optionally restricted to one scene."""
        return len(self.event_labels(scene_label=scene_label))

    def event_labels(self, scene_label=None):
        """Sorted list of distinct event labels, optionally restricted to one scene."""
        labels = []
        for item in self.meta:
            if scene_label is None or item['scene_label'] == scene_label:
                # rstrip: annotation labels may carry trailing whitespace.
                if 'event_label' in item and item['event_label'].rstrip() not in labels:
                    labels.append(item['event_label'].rstrip())
        labels.sort()
        return labels

    def on_after_extract(self):
        """After dataset packages are downloaded and extracted, meta-files are checked.

        The meta file is generated from the per-recording .ann annotation files.

        Parameters
        ----------
        nothing

        Returns
        -------
        nothing
        """
        if not os.path.isfile(self.meta_file):
            with open(self.meta_file, 'wt') as meta_file_handle:
                writer = csv.writer(meta_file_handle, delimiter='\t')
                for filename in self.audio_files:
                    raw_path, raw_filename = os.path.split(filename)
                    relative_path = self.absolute_to_relative(raw_path)
                    # Scene label is encoded in the audio sub-directory name.
                    scene_label = relative_path.replace('audio', '')[1:]
                    base_filename, file_extension = os.path.splitext(raw_filename)
                    annotation_filename = os.path.join(self.local_path,
                                                       relative_path.replace('audio', 'meta'),
                                                       base_filename + '.ann')
                    if os.path.isfile(annotation_filename):
                        with open(annotation_filename, 'rt') as annotation_file_handle:
                            for annotation_file_row in csv.reader(annotation_file_handle, delimiter='\t'):
                                # Onset/offset may use a decimal comma; 'm' marks mixtures.
                                writer.writerow((os.path.join(relative_path, raw_filename),
                                                 scene_label,
                                                 float(annotation_file_row[0].replace(',', '.')),
                                                 float(annotation_file_row[1].replace(',', '.')),
                                                 annotation_file_row[2], 'm'))

    def train(self, fold=0, scene_label=None):
        """List of training items, cached per fold and grouped per scene.

        Parameters
        ----------
        fold : int
            Fold id; fold 0 uses all annotated material.
        scene_label : str or None
            Restrict the result to one scene; None returns all scenes merged.

        Returns
        -------
        list of dict
            Items with file, scene_label, event_onset, event_offset, event_label.
        """
        if fold not in self.evaluation_data_train:
            self.evaluation_data_train[fold] = {}
            for scene_label_ in self.scene_labels:
                if scene_label_ not in self.evaluation_data_train[fold]:
                    self.evaluation_data_train[fold][scene_label_] = []
                if fold > 0:
                    setup_filename = os.path.join(self.evaluation_setup_path,
                                                  scene_label_ + '_fold' + str(fold) + '_train.txt')
                    with open(setup_filename, 'rt') as f:
                        for row in csv.reader(f, delimiter='\t'):
                            if len(row) == 5:
                                # Event meta
                                self.evaluation_data_train[fold][scene_label_].append({
                                    'file': self.relative_to_absolute_path(row[0]),
                                    'scene_label': row[1],
                                    'event_onset': float(row[2]),
                                    'event_offset': float(row[3]),
                                    'event_label': row[4]
                                })
                else:
                    # Fold 0: all annotated material is used for training.
                    data = []
                    for item in self.meta:
                        if item['scene_label'] == scene_label_ and 'event_label' in item:
                            data.append({'file': self.relative_to_absolute_path(item['file']),
                                         'scene_label': item['scene_label'],
                                         'event_onset': item['event_onset'],
                                         'event_offset': item['event_offset'],
                                         'event_label': item['event_label'],
                                         })
                    self.evaluation_data_train[fold][scene_label_] = data

        if scene_label:
            return self.evaluation_data_train[fold][scene_label]

        data = []
        for scene_label_ in self.scene_labels:
            data.extend(self.evaluation_data_train[fold][scene_label_])
        return data

    def test(self, fold=0, scene_label=None):
        """List of test items, cached per fold and grouped per scene.

        Parameters
        ----------
        fold : int
            Fold id; fold 0 uses all files of the scene.
        scene_label : str or None
            Restrict the result to one scene; None returns all scenes merged.

        Returns
        -------
        list of dict
            Items with absolute 'file' paths.
        """
        if fold not in self.evaluation_data_test:
            self.evaluation_data_test[fold] = {}
            for scene_label_ in self.scene_labels:
                if scene_label_ not in self.evaluation_data_test[fold]:
                    self.evaluation_data_test[fold][scene_label_] = []
                if fold > 0:
                    setup_filename = os.path.join(self.evaluation_setup_path,
                                                  scene_label_ + '_fold' + str(fold) + '_test.txt')
                    with open(setup_filename, 'rt') as f:
                        for row in csv.reader(f, delimiter='\t'):
                            self.evaluation_data_test[fold][scene_label_].append(
                                {'file': self.relative_to_absolute_path(row[0])})
                else:
                    # BUG FIX: the original tested `scene_label_ in item`, which checks
                    # the dict KEYS of a meta item and never matches a scene label, so
                    # fold 0 always produced empty test lists. Compare the value instead.
                    data = []
                    files = []
                    for item in self.meta:
                        if item['scene_label'] == scene_label_:
                            absolute_file = self.relative_to_absolute_path(item['file'])
                            if absolute_file not in files:
                                data.append({'file': absolute_file})
                                files.append(absolute_file)
                    self.evaluation_data_test[fold][scene_label_] = data

        if scene_label:
            return self.evaluation_data_test[fold][scene_label]

        data = []
        for scene_label_ in self.scene_labels:
            data.extend(self.evaluation_data_test[fold][scene_label_])
        return data
class TUTSoundEvents_2016_EvaluationSet(Dataset):
    """TUT Sound events 2016 evaluation dataset

    This dataset is used in DCASE2016 - Task 3, Sound event detection in real life audio
    """

    def __init__(self, data_path='data'):
        Dataset.__init__(self, data_path=data_path, name='TUT-sound-events-2016-evaluation')
        self.authors = 'Annamaria Mesaros, Toni Heittola, and Tuomas Virtanen'
        self.name_remote = 'TUT Sound Events 2016, evaluation dataset'
        self.url = 'http://www.cs.tut.fi/sgn/arg/dcase2016/download/'
        self.audio_source = 'Field recording'
        self.audio_type = 'Natural'
        self.recording_device_model = 'Roland Edirol R-09'
        self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
        self.evaluation_folds = 1

        audio_path = os.path.join(self.local_path, 'audio')
        # No remote packages registered; audio is delivered separately.
        self.package_list = [
            {
                'remote_package': None,
                'local_package': None,
                'local_audio_path': audio_path,
            },
            {
                'remote_package': None,
                'local_package': None,
                'local_audio_path': os.path.join(audio_path, 'home'),
            },
            {
                'remote_package': None,
                'local_package': None,
                'local_audio_path': os.path.join(audio_path, 'residential_area'),
            },
        ]

    @property
    def scene_labels(self):
        """Sorted list of the scene labels present in this dataset."""
        labels = ['home', 'residential_area']
        labels.sort()
        return labels

    def event_label_count(self, scene_label=None):
        """Number of distinct event labels, optionally restricted to one scene."""
        return len(self.event_labels(scene_label=scene_label))

    def event_labels(self, scene_label=None):
        """Sorted list of distinct event labels, optionally restricted to one scene."""
        labels = []
        for item in self.meta:
            if scene_label is None or item['scene_label'] == scene_label:
                if 'event_label' in item and item['event_label'] not in labels:
                    labels.append(item['event_label'])
        labels.sort()
        return labels

    def on_after_extract(self):
        """After dataset packages are downloaded and extracted, meta-files are checked.

        The meta file is generated from the per-recording .ann annotation files
        once the meta directory is available.

        Parameters
        ----------
        nothing

        Returns
        -------
        nothing
        """
        if not os.path.isfile(self.meta_file) and os.path.isdir(os.path.join(self.local_path, 'meta')):
            with open(self.meta_file, 'wt') as meta_file_handle:
                writer = csv.writer(meta_file_handle, delimiter='\t')
                for filename in self.audio_files:
                    raw_path, raw_filename = os.path.split(filename)
                    relative_path = self.absolute_to_relative(raw_path)
                    # Scene label is encoded in the audio sub-directory name.
                    scene_label = relative_path.replace('audio', '')[1:]
                    base_filename, file_extension = os.path.splitext(raw_filename)
                    annotation_filename = os.path.join(self.local_path,
                                                       relative_path.replace('audio', 'meta'),
                                                       base_filename + '.ann')
                    if os.path.isfile(annotation_filename):
                        with open(annotation_filename, 'rt') as annotation_file_handle:
                            for annotation_file_row in csv.reader(annotation_file_handle, delimiter='\t'):
                                # Onset/offset may use a decimal comma; 'm' marks mixtures.
                                writer.writerow((os.path.join(relative_path, raw_filename),
                                                 scene_label,
                                                 float(annotation_file_row[0].replace(',', '.')),
                                                 float(annotation_file_row[1].replace(',', '.')),
                                                 annotation_file_row[2], 'm'))

    def train(self, fold=0, scene_label=None):
        """Training material is not distributed with the evaluation set."""
        raise IOError('Train setup not available.')

    def test(self, fold=0, scene_label=None):
        """List of test items, cached per fold and grouped per scene.

        Parameters
        ----------
        fold : int
            Fold id; fold 0 uses all audio files of the scene.
        scene_label : str or None
            Restrict the result to one scene; None returns all scenes merged.

        Returns
        -------
        list of dict
            Items with absolute 'file' paths.
        """
        if fold not in self.evaluation_data_test:
            self.evaluation_data_test[fold] = {}
            for scene_label_ in self.scene_labels:
                if scene_label_ not in self.evaluation_data_test[fold]:
                    self.evaluation_data_test[fold][scene_label_] = []
                if fold > 0:
                    # BUG FIX: the original built this filename from the `scene_label`
                    # argument (None when no scene restriction is given) instead of the
                    # loop variable, crashing or reading the wrong setup file.
                    setup_filename = os.path.join(self.evaluation_setup_path,
                                                  scene_label_ + '_fold' + str(fold) + '_test.txt')
                    with open(setup_filename, 'rt') as f:
                        for row in csv.reader(f, delimiter='\t'):
                            self.evaluation_data_test[fold][scene_label_].append(
                                {'file': self.relative_to_absolute_path(row[0])})
                else:
                    # Fold 0: scene label is part of the audio file path.
                    data = []
                    files = []
                    for audio_file in self.audio_files:
                        if scene_label_ in audio_file:
                            absolute_file = self.relative_to_absolute_path(audio_file)
                            if absolute_file not in files:
                                data.append({'file': absolute_file})
                                files.append(absolute_file)
                    self.evaluation_data_test[fold][scene_label_] = data

        if scene_label:
            return self.evaluation_data_test[fold][scene_label]

        data = []
        for scene_label_ in self.scene_labels:
            data.extend(self.evaluation_data_test[fold][scene_label_])
        return data
# CHIME home
class CHiMEHome_DomesticAudioTag_DevelopmentSet(Dataset):
    """CHiME-Home domestic audio tagging, development dataset.

    Annotated domestic environment audio recordings; used in DCASE2016 - Task 4.
    """

    def __init__(self, data_path=None):
        Dataset.__init__(self, data_path=data_path, name='CHiMeHome-audiotag-development')
        self.authors = 'Peter Foster, Siddharth Sigtia, Sacha Krstulovic, Jon Barker, and Mark Plumbley'
        self.name_remote = 'The CHiME-Home dataset is a collection of annotated domestic environment audio recordings.'
        self.url = ''
        self.audio_source = 'Field recording'
        self.audio_type = 'Natural'
        self.recording_device_model = 'Unknown'
        self.microphone_model = 'Unknown'
        self.evaluation_folds = 10
        self.package_list = [
            {
                'remote_package': 'https://archive.org/download/chime-home/chime_home.tar.gz',
                'local_package': os.path.join(self.local_path, 'chime_home.tar.gz'),
                'local_audio_path': os.path.join(self.local_path, 'chime_home', 'chunks'),
            },
        ]

    @property
    def audio_files(self):
        """Get all audio files in the dataset, use only file from CHime-Home-refined set.

        Parameters
        ----------
        nothing

        Returns
        -------
        files : list
            audio files
        """
        if self.files is None:
            # Chunk names listed in chunks_refined.csv (2nd column) define the refined subset.
            refined_files = []
            with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
                for row in csv.reader(f, delimiter=','):
                    refined_files.append(row[1])

            self.files = []
            for package in self.package_list:
                path = package['local_audio_path']
                if path:
                    for entry in os.listdir(path):
                        base_name, extension = os.path.splitext(entry)
                        if extension[1:] in self.audio_extensions and base_name in refined_files:
                            self.files.append(os.path.abspath(os.path.join(path, entry)))
            self.files.sort()
        return self.files

    def read_chunk_meta(self, meta_filename):
        """Read a chunk annotation CSV into a dict.

        Parameters
        ----------
        meta_filename : str
            Path to the per-chunk annotation CSV.

        Returns
        -------
        dict or None
            Mapping of annotation key to value; None when the file does not exist.
        """
        if not os.path.isfile(meta_filename):
            return None
        data = {}
        with open(meta_filename, 'rt') as meta_file_handle:
            for meta_file_row in csv.reader(meta_file_handle, delimiter=','):
                data[meta_file_row[0]] = meta_file_row[1]
        return data

    def tagcode_to_taglabel(self, tag):
        """Map a single-character CHiME-Home tag code to a readable label (None if unknown)."""
        return {'c': 'child speech',
                'm': 'adult male speech',
                'f': 'adult female speech',
                'v': 'video game/tv',
                'p': 'percussive sound',
                'b': 'broadband noise',
                'o': 'other',
                'S': 'silence/background',
                'U': 'unidentifiable'
                }.get(tag, None)

    def on_after_extract(self):
        """After dataset packages are downloaded and extracted, meta-files are checked.

        Legacy dataset meta files are converted to be compatible with current scheme.

        Parameters
        ----------
        nothing

        Returns
        -------
        nothing
        """
        if not os.path.isfile(self.meta_file):
            section_header('Generating meta file for dataset')
            scene_label = 'home'
            with open(self.meta_file, 'wt') as f:
                writer = csv.writer(f, delimiter='\t')
                for file in self.audio_files:
                    raw_path, raw_filename = os.path.split(file)
                    relative_path = self.absolute_to_relative(raw_path)
                    base_filename, file_extension = os.path.splitext(raw_filename)
                    annotation_filename = os.path.join(raw_path, base_filename + '.csv')
                    meta_data = self.read_chunk_meta(annotation_filename)
                    tags = []
                    # BUG FIX: the original used identity comparisons (`is` / `is not`)
                    # on string literals and contained a leftover debug print.
                    for tag in meta_data['majorityvote']:
                        # Skip 'S' (silence/background) and 'U' (unidentifiable).
                        if tag != 'S' and tag != 'U':
                            tags.append(self.tagcode_to_taglabel(tag))
                    tags = ';'.join(tags)
                    writer.writerow(
                        (os.path.join(relative_path, raw_filename), scene_label, meta_data['majorityvote'], tags))
            foot()

        all_folds_found = True
        # BUG FIX: the original looped xrange(1, evaluation_folds) and never
        # checked the last fold's setup files.
        for fold in range(1, self.evaluation_folds + 1):
            for target_tag in self.audio_tags:
                # NOTE(review): per-tag setup files are checked here but never written
                # below — verify against the data release whether they are expected
                # to be generated elsewhere.
                tag_part = target_tag.replace('/', '-').replace(' ', '_')
                if not os.path.isfile(os.path.join(self.evaluation_setup_path,
                                                   'fold' + str(fold) + '_' + tag_part + '_train.txt')):
                    all_folds_found = False
                if not os.path.isfile(os.path.join(self.evaluation_setup_path,
                                                   'fold' + str(fold) + '_' + tag_part + '_test.txt')):
                    all_folds_found = False

        if not all_folds_found:
            if not os.path.isdir(self.evaluation_setup_path):
                os.makedirs(self.evaluation_setup_path)

            # Fixed seed for reproducible fold assignment.
            numpy.random.seed(475686)
            kf = KFold(n=len(self.audio_files), n_folds=self.evaluation_folds, shuffle=True)

            refined_files = []
            with open(os.path.join(self.local_path, 'chime_home', 'chunks_refined.csv'), 'rt') as f:
                for row in csv.reader(f, delimiter=','):
                    refined_files.append(
                        self.relative_to_absolute_path(os.path.join('chime_home', 'chunks', row[1] + '.wav')))

            fold = 1
            files = numpy.array(refined_files)
            for train_index, test_index in kf:
                train_files = files[train_index]
                test_files = files[test_index]

                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in train_files:
                        raw_path, raw_filename = os.path.split(file)
                        relative_path = raw_path.replace(self.local_path + os.path.sep, '')
                        item = self.file_meta(file)[0]
                        writer.writerow(
                            [os.path.join(relative_path, raw_filename), item['scene_label'], item['tag_string'],
                             ';'.join(item['tags'])])

                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in test_files:
                        raw_path, raw_filename = os.path.split(file)
                        relative_path = raw_path.replace(self.local_path + os.path.sep, '')
                        writer.writerow([os.path.join(relative_path, raw_filename)])

                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in test_files:
                        raw_path, raw_filename = os.path.split(file)
                        relative_path = raw_path.replace(self.local_path + os.path.sep, '')
                        item = self.file_meta(file)[0]
                        writer.writerow(
                            [os.path.join(relative_path, raw_filename), item['scene_label'], item['tag_string'],
                             ';'.join(item['tags'])])

                fold += 1
# Legacy datasets
# =====================================================
# DCASE 2013
# =====================================================
class DCASE2013_Scene_DevelopmentSet(Dataset):
    """DCASE 2013 Acoustic scene classification, development dataset
    """

    def __init__(self, data_path='data'):
        Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-development')
        self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
        self.name_remote = 'IEEE AASP 2013 CASA Challenge - Public Dataset for Scene Classification Task'
        self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
        self.audio_source = 'Field recording'
        self.audio_type = 'Natural'
        self.recording_device_model = 'Unknown'
        self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
        self.evaluation_folds = 5
        self.package_list = [
            {
                'remote_package': 'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/29/scenes_stereo.zip?sequence=1',
                'local_package': os.path.join(self.local_path, 'scenes_stereo.zip'),
                'local_audio_path': os.path.join(self.local_path, 'scenes_stereo'),
            }
        ]

    def on_after_extract(self):
        """Make the legacy dataset compatible with the DCASE2016 dataset scheme.

        Generates the meta file from the file naming convention and, when
        missing, the per-fold train/test/evaluate setup files.
        """
        if not os.path.isfile(self.meta_file):
            section_header('Generating meta file for dataset')
            with open(self.meta_file, 'wt') as f:
                writer = csv.writer(f, delimiter='\t')
                for file in self.audio_files:
                    raw_path, raw_filename = os.path.split(file)
                    relative_path = self.absolute_to_relative(raw_path)
                    # Scene label is the filename stem minus the two-character take id.
                    label = os.path.splitext(raw_filename)[0][:-2]
                    writer.writerow((os.path.join(relative_path, raw_filename), label))
            foot()

        all_folds_found = True
        # BUG FIX: the original looped xrange(1, evaluation_folds) and never
        # checked the last fold's setup files.
        for fold in range(1, self.evaluation_folds + 1):
            if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
                all_folds_found = False
            if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
                all_folds_found = False

        if not all_folds_found:
            section_header('Generating evaluation setup files for dataset')
            if not os.path.isdir(self.evaluation_setup_path):
                os.makedirs(self.evaluation_setup_path)

            classes = []
            files = []
            for item in self.meta:
                classes.append(item['scene_label'])
                files.append(item['file'])
            files = numpy.array(files)

            sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)

            fold = 1
            for train_index, test_index in sss:
                train_files = files[train_index]
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in train_files:
                        raw_path, raw_filename = os.path.split(file)
                        label = self.file_meta(file)[0]['scene_label']
                        writer.writerow([os.path.join(raw_path, raw_filename), label])

                test_files = files[test_index]
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in test_files:
                        raw_path, raw_filename = os.path.split(file)
                        writer.writerow([os.path.join(raw_path, raw_filename)])

                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in test_files:
                        raw_path, raw_filename = os.path.split(file)
                        label = self.file_meta(file)[0]['scene_label']
                        writer.writerow([os.path.join(raw_path, raw_filename), label])

                fold += 1
            foot()
class DCASE2013_Scene_EvaluationSet(DCASE2013_Scene_DevelopmentSet):
    """DCASE 2013 Acoustic scene classification, evaluation dataset
    """

    def __init__(self, data_path='data'):
        Dataset.__init__(self, data_path=data_path, name='DCASE2013-scene-challenge')
        self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
        self.name_remote = 'IEEE AASP 2013 CASA Challenge - Private Dataset for Scene Classification Task'
        self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
        self.audio_source = 'Field recording'
        self.audio_type = 'Natural'
        self.recording_device_model = 'Unknown'
        self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
        self.evaluation_folds = 5
        self.package_list = [
            {
                'remote_package': 'https://archive.org/download/dcase2013_scene_classification_testset/scenes_stereo_testset.zip',
                'local_package': os.path.join(self.local_path, 'scenes_stereo_testset.zip'),
                'local_audio_path': os.path.join(self.local_path, 'scenes_stereo_testset'),
            }
        ]

    def on_after_extract(self):
        """Make the legacy dataset compatible with the DCASE2016 dataset scheme.

        Generates the meta file from the file naming convention and, when
        missing, the per-fold train/test/evaluate setup files.
        """
        # BUG FIX: the original condition ended with `or 1` (debug leftover),
        # regenerating the meta file on every run.
        if not os.path.isfile(self.meta_file):
            section_header('Generating meta file for dataset')
            with open(self.meta_file, 'wt') as f:
                writer = csv.writer(f, delimiter='\t')
                for file in self.audio_files:
                    raw_path, raw_filename = os.path.split(file)
                    relative_path = self.absolute_to_relative(raw_path)
                    # Scene label is the filename stem minus the two-character take id.
                    label = os.path.splitext(raw_filename)[0][:-2]
                    writer.writerow((os.path.join(relative_path, raw_filename), label))
            foot()

        all_folds_found = True
        # BUG FIX: the original looped xrange(1, evaluation_folds) and never
        # checked the last fold's setup files.
        for fold in range(1, self.evaluation_folds + 1):
            if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
                all_folds_found = False
            if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
                all_folds_found = False

        if not all_folds_found:
            section_header('Generating evaluation setup files for dataset')
            if not os.path.isdir(self.evaluation_setup_path):
                os.makedirs(self.evaluation_setup_path)

            classes = []
            files = []
            for item in self.meta:
                classes.append(item['scene_label'])
                files.append(item['file'])
            files = numpy.array(files)

            sss = StratifiedShuffleSplit(y=classes, n_iter=self.evaluation_folds, test_size=0.3, random_state=0)

            fold = 1
            for train_index, test_index in sss:
                train_files = files[train_index]
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in train_files:
                        raw_path, raw_filename = os.path.split(file)
                        label = self.file_meta(file)[0]['scene_label']
                        writer.writerow([os.path.join(raw_path, raw_filename), label])

                test_files = files[test_index]
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in test_files:
                        raw_path, raw_filename = os.path.split(file)
                        writer.writerow([os.path.join(raw_path, raw_filename)])

                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in test_files:
                        raw_path, raw_filename = os.path.split(file)
                        label = self.file_meta(file)[0]['scene_label']
                        writer.writerow([os.path.join(raw_path, raw_filename), label])

                fold += 1
            foot()
# Sound events
class DCASE2013_Event_DevelopmentSet(Dataset):
    """DCASE 2013 Sound event detection, development dataset
    """

    def __init__(self, data_path='data'):
        Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-development')
        self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
        self.name_remote = 'IEEE AASP CASA Challenge - Public Dataset for Event Detection Task'
        self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
        self.audio_source = 'Field recording'
        self.audio_type = 'Natural'
        self.recording_device_model = 'Unknown'
        self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'
        self.evaluation_folds = 5
        self.package_list = [
            {
                'remote_package': 'https://archive.org/download/dcase2013_event_detection_development_OS/events_OS_development_v2.zip',
                'local_package': os.path.join(self.local_path, 'events_OS_development_v2.zip'),
                'local_audio_path': os.path.join(self.local_path, 'events_OS_development_v2'),
            },
            # Optional isolated-sound packages, currently disabled:
            # {
            #     'remote_package': 'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_annotation.zip?sequence=9',
            #     'local_package': os.path.join(self.local_path, 'singlesounds_annotation.zip'),
            #     'local_audio_path': None,
            # },
            # {
            #     'remote_package': 'http://c4dm.eecs.qmul.ac.uk/rdr/bitstream/handle/123456789/28/singlesounds_stereo.zip?sequence=7',
            #     'local_package': os.path.join(self.local_path, 'singlesounds_stereo.zip'),
            #     'local_audio_path': os.path.join(self.local_path, 'singlesounds_stereo'),
            # },
        ]

    def on_after_extract(self):
        """Make the legacy dataset compatible with the DCASE2016 dataset scheme.

        Generates the meta file from the annotation text files and, when
        missing, the per-fold train/test/evaluate setup files.
        """
        scene_label = 'office'

        if not os.path.isfile(self.meta_file):
            with open(self.meta_file, 'wt') as meta_file_handle:
                writer = csv.writer(meta_file_handle, delimiter='\t')
                for file in self.audio_files:
                    raw_path, raw_filename = os.path.split(file)
                    relative_path = self.absolute_to_relative(raw_path)
                    base_filename, file_extension = os.path.splitext(raw_filename)

                    if file.find('singlesounds_stereo') != -1:
                        # Isolated sounds: event label comes from the filename stem, 'i' marks isolated.
                        annotation_filename = os.path.join(self.local_path, 'Annotation1', base_filename + '_bdm.txt')
                        label = base_filename[:-2]
                        if os.path.isfile(annotation_filename):
                            with open(annotation_filename, 'rt') as annotation_file_handle:
                                for annotation_file_row in csv.reader(annotation_file_handle, delimiter='\t'):
                                    writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
                                                     annotation_file_row[0], annotation_file_row[1], label, 'i'))
                    elif file.find('events_OS_development_v2') != -1:
                        # Polyphonic mixtures: annotations carry the event label, 'm' marks mixture.
                        annotation_filename = os.path.join(self.local_path, 'events_OS_development_v2',
                                                           base_filename + '_v2.txt')
                        if os.path.isfile(annotation_filename):
                            with open(annotation_filename, 'rt') as annotation_file_handle:
                                for annotation_file_row in csv.reader(annotation_file_handle, delimiter='\t'):
                                    writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
                                                     annotation_file_row[0], annotation_file_row[1],
                                                     annotation_file_row[2], 'm'))

        all_folds_found = True
        # BUG FIX: the original looped xrange(1, evaluation_folds) and never
        # checked the last fold's setup files.
        for fold in range(1, self.evaluation_folds + 1):
            if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
                all_folds_found = False
            if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
                all_folds_found = False

        if not all_folds_found:
            # Construct training and testing sets. Isolated sound are used for training and
            # polyphonic mixtures are used for testing.
            if not os.path.isdir(self.evaluation_setup_path):
                os.makedirs(self.evaluation_setup_path)

            files = []
            for item in self.meta:
                if item['file'] not in files:
                    files.append(item['file'])
            files = numpy.array(files)

            # Single dummy class makes the stratified split effectively random.
            # (Renamed from `f`, which the original later shadowed with a file handle.)
            dummy_classes = numpy.zeros(len(files))
            sss = StratifiedShuffleSplit(y=dummy_classes, n_iter=5, test_size=0.3, random_state=0)

            fold = 1
            for train_index, test_index in sss:
                train_files = files[train_index]
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in train_files:
                        raw_path, raw_filename = os.path.split(file)
                        relative_path = raw_path.replace(self.local_path + os.path.sep, '')
                        for item in self.meta:
                            if item['file'] == file:
                                writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
                                                 item['event_onset'], item['event_offset'], item['event_label']])

                test_files = files[test_index]
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in test_files:
                        raw_path, raw_filename = os.path.split(file)
                        relative_path = raw_path.replace(self.local_path + os.path.sep, '')
                        writer.writerow([os.path.join(relative_path, raw_filename)])

                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as f:
                    writer = csv.writer(f, delimiter='\t')
                    for file in test_files:
                        raw_path, raw_filename = os.path.split(file)
                        relative_path = raw_path.replace(self.local_path + os.path.sep, '')
                        for item in self.meta:
                            if item['file'] == file:
                                writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
                                                 item['event_onset'], item['event_offset'], item['event_label']])

                fold += 1
class DCASE2013_Event_EvaluationSet(Dataset):
    """DCASE 2013 Sound event detection, evaluation dataset

    Legacy DCASE 2013 event detection challenge data, exposed through the
    DCASE 2016 dataset interface: the meta file and the cross-validation
    setup files are regenerated after extraction when missing.
    """

    def __init__(self, data_path='data'):
        Dataset.__init__(self, data_path=data_path, name='DCASE2013-event-challenge')

        self.authors = 'Dimitrios Giannoulis, Emmanouil Benetos, Dan Stowell, and Mark Plumbley'
        self.name_remote = 'IEEE AASP CASA Challenge - Private Dataset for Event Detection Task'
        self.url = 'http://www.elec.qmul.ac.uk/digitalmusic/sceneseventschallenge/'
        self.audio_source = 'Field recording'
        self.audio_type = 'Natural'
        self.recording_device_model = 'Unknown'
        self.microphone_model = 'Soundman OKM II Klassik/studio A3 electret microphone'

        self.evaluation_folds = 5

        self.package_list = [
            {
                'remote_package': 'https://archive.org/download/dcase2013_event_detection_testset_OS/dcase2013_event_detection_testset_OS.zip',
                'local_package': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS.zip'),
                'local_audio_path': os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS'),
            }
        ]

    def _copy_annotations(self, writer, annotation_filename, relative_path, raw_filename, scene_label):
        """Append every (onset, offset, label) row of a legacy annotation file
        to the DCASE2016-style meta file.

        The trailing 'm' column is kept from the original code — presumably a
        mixture/annotation flag; confirm against the dataset documentation.
        """
        annotation_file_handle = open(annotation_filename, 'rt')
        try:
            annotation_file_reader = csv.reader(annotation_file_handle, delimiter='\t')
            for annotation_file_row in annotation_file_reader:
                writer.writerow((os.path.join(relative_path, raw_filename), scene_label,
                                 annotation_file_row[0], annotation_file_row[1],
                                 annotation_file_row[2], 'm'))
        finally:
            annotation_file_handle.close()

    def on_after_extract(self):
        """Generate the meta file and the per-fold train/test/evaluate setup
        files if they do not exist yet."""
        # Make legacy dataset compatible with DCASE2016 dataset scheme
        scene_label = 'office'

        if not os.path.isfile(self.meta_file):
            meta_file_handle = open(self.meta_file, 'wt')
            try:
                writer = csv.writer(meta_file_handle, delimiter='\t')
                for audio_file in self.audio_files:
                    raw_path, raw_filename = os.path.split(audio_file)
                    relative_path = self.absolute_to_relative(raw_path)
                    base_filename, file_extension = os.path.splitext(raw_filename)

                    # Files of this subset ship their annotations with a
                    # '_v2' suffix; anything else uses the plain name.
                    if audio_file.find('dcase2013_event_detection_testset_OS') != -1:
                        annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',
                                                           base_filename + '_v2.txt')
                    else:
                        annotation_filename = os.path.join(self.local_path, 'dcase2013_event_detection_testset_OS',
                                                           base_filename + '.txt')

                    if os.path.isfile(annotation_filename):
                        self._copy_annotations(writer, annotation_filename, relative_path,
                                               raw_filename, scene_label)
            finally:
                meta_file_handle.close()

        # BUG FIX: the loop previously ran xrange(1, self.evaluation_folds),
        # so a missing *last* fold never triggered regeneration of the setup.
        all_folds_found = True
        for fold in xrange(1, self.evaluation_folds + 1):
            if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt')):
                all_folds_found = False
            if not os.path.isfile(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt')):
                all_folds_found = False

        if not all_folds_found:
            # Construct training and testing sets. Isolated sound are used for
            # training and polyphonic mixtures are used for testing.
            if not os.path.isdir(self.evaluation_setup_path):
                os.makedirs(self.evaluation_setup_path)

            # Unique audio files, in meta-file order.
            files = []
            for item in self.meta:
                if item['file'] not in files:
                    files.append(item['file'])
            files = numpy.array(files)

            # Dummy single-class labels: the stratified splitter is used here
            # purely as a reproducible random 70/30 splitter.
            dummy_labels = numpy.zeros(len(files))
            sss = StratifiedShuffleSplit(y=dummy_labels, n_iter=self.evaluation_folds,
                                         test_size=0.3, random_state=0)

            fold = 1
            for train_index, test_index in sss:
                train_files = files[train_index]
                # Train list: one row per annotated event of each train file.
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_train.txt'), 'wt') as train_handle:
                    writer = csv.writer(train_handle, delimiter='\t')
                    for audio_file in train_files:
                        raw_path, raw_filename = os.path.split(audio_file)
                        relative_path = raw_path.replace(self.local_path + os.path.sep, '')
                        for item in self.meta:
                            if item['file'] == audio_file:
                                writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
                                                 item['event_onset'], item['event_offset'], item['event_label']])

                test_files = files[test_index]
                # Test list: file names only, no annotations.
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_test.txt'), 'wt') as test_handle:
                    writer = csv.writer(test_handle, delimiter='\t')
                    for audio_file in test_files:
                        raw_path, raw_filename = os.path.split(audio_file)
                        relative_path = raw_path.replace(self.local_path + os.path.sep, '')
                        writer.writerow([os.path.join(relative_path, raw_filename)])

                # Evaluate list: the same test files with their ground truth.
                with open(os.path.join(self.evaluation_setup_path, 'fold' + str(fold) + '_evaluate.txt'), 'wt') as eval_handle:
                    writer = csv.writer(eval_handle, delimiter='\t')
                    for audio_file in test_files:
                        raw_path, raw_filename = os.path.split(audio_file)
                        relative_path = raw_path.replace(self.local_path + os.path.sep, '')
                        for item in self.meta:
                            if item['file'] == audio_file:
                                writer.writerow([os.path.join(relative_path, raw_filename), item['scene_label'],
                                                 item['event_onset'], item['event_offset'], item['event_label']])
                fold += 1
| mit |
dinrker/Ray_FEM | Plots/python_scripts/paper2/ex1_NumRay_ConvRate.py | 1 | 2663 | import numpy as np
import math

# Angular frequencies (rad/s) at which the errors below were measured;
# converted to multiples of pi for the x-axis.
omega = np.array([376.991118430775,
                  502.654824574367,
                  753.982236861550,
                  1005.30964914873,
                  1507.96447372310,
                  2010.61929829747,
                  3015.92894744620])
omega = omega/np.pi

# Relative L2 errors for 4 points per wavelength (NPW).
err_NPW_4 = np.array([0.00270157770281540,
                      0.00219734201275802,
                      0.00170110076048316,
                      0.00141722055515402,
                      0.00116206600585611,
                      0.000834283365116496,
                      0.000760021129535664])

# Relative L2 errors for NPW = 6.
err_NPW_6 = np.array([0.00155179897461391,
                      0.00117260261040538,
                      0.000956857981728328,
                      0.000729819228791556,
                      0.000586008771195049,
                      0.000542605986265505,
                      0.000427214395423089])

# Relative L2 errors for NPW = 8.
err_NPW_8 = np.array([0.00103175668543789,
                      0.000777913307163148,
                      0.000617847788595371,
                      0.000492830804433761,
                      0.000408654829864075,
                      0.000354809212835210,
                      0.000264580813005265])

import matplotlib.pyplot as plt

# Golden-ratio figure proportions.
golden = 1.61803398875
width = 6
height = width/golden
fig = plt.figure(figsize=(width, height))

p1, = plt.loglog(omega[:len(err_NPW_4)], err_NPW_4, label=r'NPW = 4',
                 color='b', linewidth=2, linestyle='--', marker='o', markersize=8.0, zorder=2)
p2, = plt.loglog(omega[:len(err_NPW_6)], err_NPW_6, label=r'NPW = 6',
                 color='g', linewidth=2, linestyle='--', marker='o', markersize=8.0, zorder=2)
p3, = plt.loglog(omega[:len(err_NPW_8)], err_NPW_8, label=r'NPW = 8',
                 color='k', linewidth=2, linestyle='--', marker='o', markersize=8.0, zorder=2)
# Reference slope: the observed convergence rate O(omega^-0.65), anchored at
# the first NPW=4 data point and scaled by 0.75 so it sits below the curves.
p4, = plt.loglog(omega[:len(err_NPW_4)], 0.75*err_NPW_4[0]/((omega[:len(err_NPW_4)]/(omega[0]))**0.65),
                 label=r'$\mathcal{O}(\omega^{-0.65})$', color='r', linewidth=2, linestyle='solid', markersize=8.0, zorder=2)

# p1, = plt.loglog(omega, err_NPW_4, label=r'$\Vert u_{\mathbf{d}_{ex}} - u_{ex}\Vert_{L^2(\Omega)}$',
#                  color='g', linewidth=2, linestyle='--', marker='o', markersize=8.0, zorder=2)
# p2, = plt.loglog(omega, err_NPW_6, label=r'$\mathcal{O}(\omega^{-1})$', color='r', linewidth=2,
#                  linestyle= '--', markersize=8.0, zorder=2)  # linestyle= 'solid'
# plt.loglog(N_x**2, N_x**2 / 4.0e4, label=r' ', color='white', linewidth=0.0)

# Two separate legends: the data series top-right, the reference slope
# bottom-left (add_artist keeps the first legend alive after the second call).
first_legend = plt.legend(handles=[p1, p2, p3], loc=1, ncol=1, frameon=False, fontsize=15)
ax = plt.gca().add_artist(first_legend)
plt.legend(handles=[p4], loc=3, ncol=3, frameon=False, fontsize=18)
# plt.legend(loc=0, ncol=1, frameon=False, fontsize=26)
# plt.title('Exact Ray-FEM',fontsize=20)

plt.xlabel(r'$\omega/\pi$', fontsize=18)
plt.ylabel(r'Rel $L^2$ Err', fontsize=18)
plt.gca().tick_params(labelsize=14)

plt.autoscale(True, 'both', True)
# plt.xlim(0.9*omega[0], 1.1*omega[len(err_NPW_4)-1])
plt.xlim(100, 1100)
plt.ylim(0.8*err_NPW_8[-1], 1.4*err_NPW_4[0])

plt.tight_layout(pad=0.5)
fig.savefig('ex1_NumRay_ConvRate.pdf')
plt.show()
plt.close('all')
| mit |
cheral/orange3 | Orange/tests/test_linear_regression.py | 7 | 4736 | # Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
import unittest
import numpy as np
from Orange.data import Table
from Orange.preprocess import *
from Orange.regression import (LinearRegressionLearner,
RidgeRegressionLearner,
LassoRegressionLearner,
ElasticNetLearner,
ElasticNetCVLearner,
MeanLearner)
from Orange.evaluation import CrossValidation, RMSE
from sklearn import linear_model
class TestLinearRegressionLearner(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Shared regression dataset for all the tests below.
        cls.housing = Table("housing")

    def test_LinearRegression(self):
        # Fit on one random half of a synthetic linear problem and check the
        # held-out half is predicted within the noise margin.
        nrows = 1000
        ncols = 3
        x = np.random.randint(-20, 51, (nrows, ncols))
        c = np.random.rand(ncols, 1) * 10 - 3
        e = np.random.rand(nrows, 1) - 0.5
        y = np.dot(x, c) + e

        x1, x2 = np.split(x, 2)
        y1, y2 = np.split(y, 2)
        t = Table(x1, y1)
        learn = LinearRegressionLearner()
        clf = learn(t)
        z = clf(x2)
        self.assertTrue((abs(z.reshape(-1, 1) - y2) < 2.0).all())

    def test_Regression(self):
        # Every real learner should beat the mean predictor (last in list).
        ridge = RidgeRegressionLearner()
        lasso = LassoRegressionLearner()
        elastic = ElasticNetLearner()
        elasticCV = ElasticNetCVLearner()
        mean = MeanLearner()
        learners = [ridge, lasso, elastic, elasticCV, mean]
        res = CrossValidation(self.housing, learners, k=2)
        rmse = RMSE(res)
        for i in range(len(learners) - 1):
            self.assertLess(rmse[i], rmse[-1])

    def test_linear_scorer(self):
        # The top-scored feature of the housing data is expected to be LSTAT.
        learner = LinearRegressionLearner()
        scores = learner.score_data(self.housing)
        self.assertEqual(
            'LSTAT', self.housing.domain.attributes[np.argmax(scores[0])].name)
        self.assertEqual(scores.shape[1], len(self.housing.domain.attributes))

    def test_scorer(self):
        # Same expectation as above, across all regularized learners.
        learners = [LinearRegressionLearner(),
                    RidgeRegressionLearner(),
                    LassoRegressionLearner(alpha=0.01),
                    ElasticNetLearner(alpha=0.01)]
        for learner in learners:
            scores = learner.score_data(self.housing)
            self.assertEqual(
                'LSTAT',
                self.housing.domain.attributes[np.argmax(scores[0])].name)
            self.assertEqual(scores.shape[1],
                             len(self.housing.domain.attributes))

    def test_scorer_feature(self):
        # Scoring a single feature must match the corresponding column of
        # the all-features score matrix.
        learners = [LinearRegressionLearner(),
                    RidgeRegressionLearner(),
                    LassoRegressionLearner(alpha=0.01),
                    ElasticNetLearner(alpha=0.01)]
        for learner in learners:
            scores = learner.score_data(self.housing)
            for i, attr in enumerate(self.housing.domain.attributes):
                score = learner.score_data(self.housing, attr)
                np.testing.assert_array_almost_equal(score, scores[:, i])

    def test_coefficients(self):
        # y = x - 11 exactly, so intercept -11 and a single slope of 1.
        data = Table([[11], [12], [13]], [0, 1, 2])
        model = LinearRegressionLearner()(data)
        self.assertAlmostEqual(float(model.intercept), -11)
        self.assertEqual(len(model.coefficients), 1)
        self.assertAlmostEqual(float(model.coefficients[0]), 1)

    def test_comparison_with_sklearn(self):
        # Orange wrappers must reproduce scikit-learn coefficients exactly.
        alphas = [0.001, 0.1, 1, 10, 100]
        learners = [(LassoRegressionLearner, linear_model.Lasso),
                    (RidgeRegressionLearner, linear_model.Ridge),
                    (ElasticNetLearner, linear_model.ElasticNet)]
        for o_learner, s_learner in learners:
            for a in alphas:
                lr = o_learner(alpha=a)
                o_model = lr(self.housing)
                s_model = s_learner(alpha=a, fit_intercept=True)
                s_model.fit(self.housing.X, self.housing.Y)
                delta = np.sum(s_model.coef_ - o_model.coefficients)
                self.assertAlmostEqual(delta, 0.0)

    def test_comparison_elastic_net(self):
        # ElasticNet with l1_ratio=1 degenerates to the Lasso.
        alphas = [0.001, 0.1, 1, 10, 100]
        for a in alphas:
            lasso = LassoRegressionLearner(alpha=a)
            lasso_model = lasso(self.housing)
            elastic = ElasticNetLearner(alpha=a, l1_ratio=1)
            elastic_model = elastic(self.housing)
            d = np.sum(lasso_model.coefficients - elastic_model.coefficients)
            self.assertEqual(d, 0)

    def test_linear_regression_repr(self):
        # repr() must round-trip through eval back to a learner instance.
        learner = LinearRegressionLearner()
        repr_text = repr(learner)
        learner2 = eval(repr_text)
        self.assertIsInstance(learner2, LinearRegressionLearner)
| bsd-2-clause |
pkruskal/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)

# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#          Mathieu Blondel <mathieu@mblondel.org>
#          Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler

digits = datasets.load_digits()

X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)

# classify small against large digits
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin `int` is the documented drop-in replacement.
y = (y > 4).astype(int)

# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
    # turn down tolerance for short training time
    clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
    clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
    clf_l1_LR.fit(X, y)
    clf_l2_LR.fit(X, y)

    coef_l1_LR = clf_l1_LR.coef_.ravel()
    coef_l2_LR = clf_l2_LR.coef_.ravel()

    # coef_l1_LR contains zeros due to the
    # L1 sparsity inducing norm
    sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
    sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100

    print("C=%.2f" % C)
    print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
    print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
    print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
    print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))

    # One row of the 3x2 grid per C value: L1 model left, L2 model right.
    l1_plot = plt.subplot(3, 2, 2 * i + 1)
    l2_plot = plt.subplot(3, 2, 2 * (i + 1))
    if i == 0:
        l1_plot.set_title("L1 penalty")
        l2_plot.set_title("L2 penalty")

    l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
                   cmap='binary', vmax=1, vmin=0)
    l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
                   cmap='binary', vmax=1, vmin=0)
    plt.text(-8, 3, "C = %.2f" % C)

    l1_plot.set_xticks(())
    l1_plot.set_yticks(())
    l2_plot.set_xticks(())
    l2_plot.set_yticks(())

plt.show()
| bsd-3-clause |
MKridler/pyxley | examples/custom_react/project/app.py | 11 | 2196 | from flask import Flask
from flask import request, jsonify, render_template, make_response
import pandas as pd
import json
import sys
import glob
import numpy as np
import argparse
from react import jsx
from pyxley import UILayout
from pyxley.filters import SelectButton
from pyxley.charts import Chart
from collections import OrderedDict
from helper import NewChart
parser = argparse.ArgumentParser(description="Flask Template")
parser.add_argument("--env", help="production or local", default="local")
args = parser.parse_args()

TITLE = "Pyxley"

# Front-end assets loaded on every page.
scripts = [
    "./bower_components/jquery/dist/jquery.min.js",
    "./bower_components/d3/d3.min.js",
    "./bower_components/nvd3/build/nv.d3.js",
    "./chartfunc.js",
    "./bower_components/require/build/require.min.js",
    "./bower_components/react/react.js",
    "./bower_components/react-bootstrap/react-bootstrap.min.js",
    "./bower_components/pyxley/build/pyxley.js",
]

css = [
    "./bower_components/bootstrap/dist/css/bootstrap.min.css",
    "./bower_components/nvd3/build/nv.d3.min.css",
    "./css/main.css"
]

# Transpile every JSX source to plain JS before the app starts serving.
transformer = jsx.JSXTransformer()
jsx_input = "static/jsx/"
jsx_output = "static/js/"
for f in glob.glob(jsx_input + "*.js"):
    transformer.transform(f, js_path=jsx_output + f.split('/')[-1])

# Make a UI
ui = UILayout(
    "RunLayout",
    "./static/js/custom.js",
    "component_id")

# Read in the data and stack it, so that we can filter on columns
df = pd.read_csv("./static/formatted_run.csv")

# Make a Button
choices = ["Heart Rate", "Pace", "Distance"]
btn = SelectButton("Data", choices, "Data", "Heart Rate")
ui.add_filter(btn)

# Add our new chart
colors = ["#847c77", "#ff5c61"];
nc = NewChart("Seconds", "value", "Altitude", df,
              init_params={"Data": "Heart Rate"}, colors=colors)
ui.add_chart(nc)

app = Flask(__name__)
# Writes the assembled layout script to disk and wires up its routes.
sb = ui.render_layout(app, "./static/layout.js")


@app.route('/', methods=["GET"])
@app.route('/index', methods=["GET"])
def index():
    """Render the single-page dashboard with the generated layout script."""
    _scripts = ["./layout.js", "./js/navbar.js"]
    return render_template('index.html',
                           title=TITLE,
                           base_scripts=scripts,
                           page_scripts=_scripts,
                           css=css)
if __name__ == "__main__":
app.run(debug=True) | mit |
erh3cq/hyperspy | hyperspy/drawing/_markers/vertical_line_segment.py | 4 | 3486 | # -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from hyperspy.drawing.marker import MarkerBase
class VerticalLineSegment(MarkerBase):

    """Vertical line segment marker that can be added to the signal figure

    Parameters
    ----------
    x : array or float
        The position of line segment in x.
        If float, the marker is fixed.
        If array, the marker will be updated when navigating. The array should
        have the same dimensions in the navigation axes.
    y1 : array or float
        The position of the start of the line segment in x.
        see x1 arguments
    y2 : array or float
        The position of the start of the line segment in y.
        see x1 arguments
    kwargs :
        Keywords argument of axvline valid properties (i.e. recognized by
        mpl.plot).

    Example
    -------
    >>> im = hs.signals.Signal2D(np.zeros((100, 100)))
    >>> m = hs.plot.markers.vertical_line_segment(
    >>>     x=20, y1=30, y2=70, linewidth=4, color='red', linestyle='dotted')
    >>> im.add_marker(m)

    Add a marker permanently to a marker

    >>> im = hs.signals.Signal2D(np.zeros((60, 60)))
    >>> m = hs.plot.markers.vertical_line_segment(x=10, y1=20, y2=50)
    >>> im.add_marker(m, permanent=True)
    """

    def __init__(self, x, y1, y2, **kwargs):
        MarkerBase.__init__(self)
        # Default line properties; overridable through **kwargs below.
        lp = {'color': 'black', 'linewidth': 1}
        self.marker_properties = lp
        self.set_data(x1=x, y1=y1, y2=y2)
        self.set_marker_properties(**kwargs)
        self.name = 'vertical_line_segment'

    def __repr__(self):
        string = "<marker.{}, {} (x={},y1={},y2={},color={})>".format(
            self.__class__.__name__,
            self.name,
            self.get_data_position('x1'),
            self.get_data_position('y1'),
            self.get_data_position('y2'),
            self.marker_properties['color'],
        )
        return(string)

    def update(self):
        # Static markers (auto_update False) are never refreshed while
        # navigating.
        if self.auto_update is False:
            return
        self._update_segment()

    def _plot_marker(self):
        # Draw a placeholder segment first; the real coordinates are set by
        # _update_segment below.
        self.marker = self.ax.vlines(0, 0, 1, **self.marker_properties)
        self._update_segment()

    def _update_segment(self):
        segments = self.marker.get_segments()
        segments[0][0, 0] = self.get_data_position('x1')
        # Vertical line: both endpoints share the same x coordinate.
        segments[0][1, 0] = segments[0][0, 0]
        # A missing y endpoint falls back to the current axis limit, so the
        # segment spans the full visible height on that side.
        if self.get_data_position('y1') is None:
            segments[0][0, 1] = plt.getp(self.marker.axes, 'ylim')[0]
        else:
            segments[0][0, 1] = self.get_data_position('y1')
        if self.get_data_position('y2') is None:
            segments[0][1, 1] = plt.getp(self.marker.axes, 'ylim')[1]
        else:
            segments[0][1, 1] = self.get_data_position('y2')
        self.marker.set_segments(segments)
| gpl-3.0 |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/matplotlib/_pylab_helpers.py | 8 | 4008 | """
Manage figures for pyplot interface.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import sys
import gc
import atexit
def error_msg(msg):
    """Write *msg* to standard error, terminated by a newline."""
    sys.stderr.write("%s\n" % (msg,))
class Gcf(object):
    """
    Singleton to manage a set of integer-numbered figures.
    This class is never instantiated; it consists of two class
    attributes (a list and a dictionary), and a set of static
    methods that operate on those attributes, accessing them
    directly as class attributes.
    Attributes:
        *figs*:
          dictionary of the form {*num*: *manager*, ...}
        *_activeQue*:
          list of *managers*, with active one at the end
    """
    # Managers in activation order; the active manager is the last element.
    _activeQue = []
    # Maps figure number -> figure manager for every live figure.
    figs = {}

    @classmethod
    def get_fig_manager(cls, num):
        """
        If figure manager *num* exists, make it the active
        figure and return the manager; otherwise return *None*.
        """
        manager = cls.figs.get(num, None)
        if manager is not None:
            cls.set_active(manager)
        return manager

    @classmethod
    def destroy(cls, num):
        """
        Try to remove all traces of figure *num*.
        In the interactive backends, this is bound to the
        window "destroy" and "delete" events.
        """
        if not cls.has_fignum(num):
            return
        manager = cls.figs[num]
        manager.canvas.mpl_disconnect(manager._cidgcf)

        # There must be a good reason for the following careful
        # rebuilding of the activeQue; what is it?
        oldQue = cls._activeQue[:]
        cls._activeQue = []
        for f in oldQue:
            if f != manager:
                cls._activeQue.append(f)

        del cls.figs[num]
        manager.destroy()
        # gc.collect(1): collect only the young generations, not a full pass.
        gc.collect(1)

    @classmethod
    def destroy_fig(cls, fig):
        "*fig* is a Figure instance"
        # Find the manager owning *fig* by identity of its canvas figure.
        num = None
        for manager in six.itervalues(cls.figs):
            if manager.canvas.figure == fig:
                num = manager.num
                break
        if num is not None:
            cls.destroy(num)

    @classmethod
    def destroy_all(cls):
        # this is need to ensure that gc is available in corner cases
        # where modules are being torn down after install with easy_install
        import gc  # noqa
        for manager in list(cls.figs.values()):
            manager.canvas.mpl_disconnect(manager._cidgcf)
            manager.destroy()

        cls._activeQue = []
        cls.figs.clear()
        gc.collect(1)

    @classmethod
    def has_fignum(cls, num):
        """
        Return *True* if figure *num* exists.
        """
        return num in cls.figs

    @classmethod
    def get_all_fig_managers(cls):
        """
        Return a list of figure managers.
        """
        return list(cls.figs.values())

    @classmethod
    def get_num_fig_managers(cls):
        """
        Return the number of figures being managed.
        """
        return len(cls.figs)

    @classmethod
    def get_active(cls):
        """
        Return the manager of the active figure, or *None*.
        """
        if len(cls._activeQue) == 0:
            return None
        else:
            return cls._activeQue[-1]

    @classmethod
    def set_active(cls, manager):
        """
        Make the figure corresponding to *manager* the active one.
        """
        # Move *manager* to the end of the queue (most recently active),
        # preserving the relative order of all other managers.
        oldQue = cls._activeQue[:]
        cls._activeQue = []
        for m in oldQue:
            if m != manager:
                cls._activeQue.append(m)
        cls._activeQue.append(manager)
        cls.figs[manager.num] = manager

    @classmethod
    def draw_all(cls, force=False):
        """
        Redraw all figures registered with the pyplot
        state machine.
        """
        for f_mgr in cls.get_all_fig_managers():
            if force or f_mgr.canvas.figure.stale:
                f_mgr.canvas.draw_idle()
# Destroy all managed figures (and disconnect their canvases) on interpreter
# exit.
atexit.register(Gcf.destroy_all)
| mit |
jason-neal/equanimous-octo-tribble | Notebooks/Stride_testing.py | 1 | 6037 |
# coding: utf-8
# # Testing numpy Stride
# For snr calculation windowing
# In[21]:
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from astropy.io import fits
from numpy.lib import stride_tricks
get_ipython().magic('matplotlib inline')
# In[22]:
fname = "Test_spectra.fits"
data = fits.getdata(fname)
hdr = fits.getheader(fname)
wl = data["Wavelength"]
I = data["Extracted_DRACS"]
# print(type(I))
print(I.dtype)
wl = np.array(wl, dtype="float64") # Turn >f4 into float64
I = np.array(I, dtype="float64") # Turn >f4 into float64
print(I.dtype)
print(I)
# In[ ]:
binsize = 100
# Try using stride on np.array
# striding
nums = np.arange(len(I), dtype="int")
print("itemsize", nums.itemsize, "dtype", nums.dtype)
hop_length = 1
# stride_tests with numbers
frame_length = binsize
num_frames = 1 + (len(nums) - frame_length) / hop_length
row_stride = nums.itemsize * hop_length # *hopesize
print(frame_length)
print(num_frames)
print(row_stride)
col_stride = nums.itemsize
nums_strided = stride_tricks.as_strided(nums, shape=(num_frames, frame_length), strides=(row_stride, col_stride))
print("nums", nums)
print("nums_strided =", nums_strided)
# row wise transform
row_sum = np.sum(nums_strided, axis=1)
# print(row_sum)
snr = 1 / np.std(nums_strided, axis=1)
print(snr)
# In[ ]:
# with I
frame_length = binsize
num_frames = 1 + (len(I) - frame_length) / hop_length
row_stride = I.itemsize * hop_length # *hopesize
print(frame_length)
print(num_frames)
print(row_stride)
col_stride = I.itemsize
I_strided = stride_tricks.as_strided(I, shape=(num_frames, frame_length), strides=(row_stride, col_stride))
# print("nums", I)
# print("nums_strided =", I_strided)
snr = 1 / np.std(I_strided, axis=1)
print(snr)
# In[ ]:
plt.plot(snr)
plt.show()
# In[23]:
def strided_snr(data, frame_length, hop_length=1):
    """Rolling SNR estimate: 1/std over a sliding window of *frame_length*.

    The result is zero-padded so its length always equals ``len(data)``.

    BUG FIXES versus the original:
    - floor division for the frame count (true division handed a float to
      as_strided's shape);
    - ``missing_size % 2 is not 0`` identity test replaced by ``!= 0``;
    - odd deficits previously padded (m//2, m//2) and returned an array one
      sample short; the extra zero now goes at the end.
    """
    num_frames = 1 + (len(data) - frame_length) // hop_length
    row_stride = data.itemsize * hop_length
    col_stride = data.itemsize
    data_strided = stride_tricks.as_strided(
        data, shape=(num_frames, frame_length), strides=(row_stride, col_stride))

    snr = 1 / np.std(data_strided, axis=1)

    # Zero-pad (roughly symmetrically) back to the input length.
    missing_size = len(data) - len(snr)
    before = missing_size // 2
    after = missing_size - before
    padded_snr = np.pad(snr, (before, after), "constant")
    return padded_snr
def strided_sum(data, frame_length, hop_length=1):
    """Rolling sum over a sliding window of *frame_length* samples.

    The result is zero-padded so its length always equals ``len(data)``.

    BUG FIXES versus the original:
    - floor division for the frame count (true division handed a float to
      as_strided's shape);
    - odd deficits previously padded (m//2, m//2) and returned an array one
      sample short; the extra zero now goes at the end.
    """
    num_frames = 1 + (len(data) - frame_length) // hop_length
    row_stride = data.itemsize * hop_length
    col_stride = data.itemsize
    data_strided = stride_tricks.as_strided(
        data, shape=(num_frames, frame_length), strides=(row_stride, col_stride))

    total = np.sum(data_strided, axis=1)

    # Zero-pad (roughly symmetrically) back to the input length.
    missing_size = len(data) - len(total)
    before = missing_size // 2
    after = missing_size - before
    padded_total = np.pad(total, (before, after), "constant")
    return padded_total
# This doesn't seem to work that well with pandas not sure why
# store_array = np.empty((1024, len(bins)), dtype=data.dtype)
# for i, bin in enumerate(bins):
#     store_array[:, i] = strided_snr(I, bin)


# In[30]:

# Compute the rolling SNR for every odd bin size and collect the rows.
# loop over the different bin sizes
bins = np.arange(3, 51, 2)
hopper = 1
store_list = []
for i, b in enumerate(bins):
    store_list.append(strided_snr(I, b, hop_length=hopper))
print("done")


# In[31]:

# print(store_array)
print(store_list)


# In[32]:

# turn into a pandas dataframe
# dataframe = pd.DataFrame(data=store_array, columns=range(1024), index=bins)
# dataframe = pd.DataFrame(store_array, index=bins, columns=list(range(1024)))
# print(dataframe)
# print(dataframe.dtypes)


# In[33]:

# Rows indexed by bin size, columns labelled by (rounded) wavelength.
df_list = pd.DataFrame(store_list, index=bins, columns=np.round(wl, 2))
print(df_list)


# In[36]:

# Heatmap of SNR vs. bin size, straight from the list of rows.
sns.set()
cmap = sns.diverging_palette(220, 10, as_cmap=True)
ax = sns.heatmap(store_list, cmap=cmap, xticklabels=200, vmax=300, vmin=10)
# ax = sns.heatmap(df_list)
# plt.xticks(np.arange(int(np.min(wl)), int(np.max(wl) + 1), 1.0))
ax.set(ylabel="Binsize", xlabel="Wavelenght")


# In[37]:

# seaborn heatmap plot
sns.set()
cmap = sns.diverging_palette(220, 10, as_cmap=True)
ax = sns.heatmap(df_list, xticklabels=200, vmax=300, vmin=10)
# ax = sns.heatmap(df_list)
# plt.xticks(np.arange(int(np.min(wl)), int(np.max(wl) + 1), 1.0))
ax.set(ylabel="Binsize",
       xlabel="Wavelenght")


# In[35]:

# ax = sns.heatmap(store_list)
# Wavelength span of the first 50 samples (cell left over from exploration).
wl[50]-wl[0]


# In[ ]:

# # test on known data

# In[17]:

# Sanity-check strided_sum on a small integer ramp for a few bin sizes.
data = np.arange(20)
binsizes = range(1, 6, 2)
store = []
# opt = np.get_printoptions()
# np.set_printoptions(threshold='nan')
for b in binsizes:
    store.append(strided_sum(data, b))
# np.set_printoptions(**opt)


# In[18]:

SNRrand = pd.DataFrame(store, index=binsizes)
print(SNRrand)


# In[19]:

sns.set()
# cmap = sns.diverging_palette(220, 10, as_cmap=True)
ax = sns.heatmap(SNRrand, xticklabels=20)
# ax = sns.heatmap(df_list)
# plt.xticks(np.arange(int(np.min(wl)), int(np.max(wl) + 1), 1.0))
ax.set(ylabel="Binsize",
       xlabel="Wavelenght")


# In[ ]:


# In[ ]:
cmorgan/zipline | tests/test_tradesimulation.py | 21 | 2735 | #
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
from nose_parameterized import parameterized
from six.moves import range
from unittest import TestCase
from zipline import TradingAlgorithm
from zipline.test_algorithms import NoopAlgorithm
from zipline.utils import factory
class BeforeTradingAlgorithm(TradingAlgorithm):
    """Algorithm that records the simulation datetime of every
    before_trading_start call, so tests can verify when it fired."""

    def __init__(self, *args, **kwargs):
        # NOTE(review): the list is created *before* super().__init__ —
        # presumably so the attribute exists if anything fires during
        # initialization; keep this order.
        self.before_trading_at = []
        super(BeforeTradingAlgorithm, self).__init__(*args, **kwargs)

    def before_trading_start(self, data):
        # One timestamp appended per invocation.
        self.before_trading_at.append(self.datetime)
# Granularity ranking used to filter valid (data, emission) combinations:
# an emission rate may be at most as fine-grained as the data frequency.
FREQUENCIES = {'daily': 0, 'minute': 1}  # daily is less frequent than minute
class TestTradeSimulation(TestCase):

    def test_minutely_emissions_generate_performance_stats_for_last_day(self):
        # One simulated day at minute data/emission frequency must still be
        # counted as exactly one performance-tracked day.
        params = factory.create_simulation_parameters(num_days=1,
                                                      data_frequency='minute',
                                                      emission_rate='minute')
        algo = NoopAlgorithm(sim_params=params)
        algo.run(source=[], overwrite_sim_params=False)
        self.assertEqual(algo.perf_tracker.day_count, 1.0)

    @parameterized.expand([('%s_%s_%s' % (num_days, freq, emission_rate),
                            num_days, freq, emission_rate)
                           for freq in FREQUENCIES
                           for emission_rate in FREQUENCIES
                           for num_days in range(1, 4)
                           # Only combinations where the emission rate is no
                           # finer than the data frequency are valid.
                           if FREQUENCIES[emission_rate] <= FREQUENCIES[freq]])
    def test_before_trading_start(self, test_name, num_days, freq,
                                  emission_rate):
        params = factory.create_simulation_parameters(
            num_days=num_days, data_frequency=freq,
            emission_rate=emission_rate)
        algo = BeforeTradingAlgorithm(sim_params=params)
        algo.run(source=[], overwrite_sim_params=False)

        self.assertEqual(algo.perf_tracker.day_count, num_days)

        # before_trading_start must fire exactly once per trading day.
        self.assertTrue(params.trading_days.equals(
            pd.DatetimeIndex(algo.before_trading_at)),
            "Expected %s but was %s."
            % (params.trading_days, algo.before_trading_at))
| apache-2.0 |
flinz/nest-simulator | topology/doc/user_manual_scripts/connections.py | 3 | 18279 | # -*- coding: utf-8 -*-
#
# connections.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# create connectivity figures for topology manual
import nest
import nest.topology as tp
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
import numpy as np
def beautify_layer(l, fig=None, xlabel=None, ylabel=None,
                   xlim=None, ylim=None, xticks=None, yticks=None, dx=0, dy=0):
    """Decorate a layer plot: derive ticks/limits from the layer's topology.

    Assume either x and ylims/ticks given or none.

    BUG FIX: the default used to be ``fig=plt.gcf()``, which Python evaluates
    once at import time, permanently binding whatever figure existed when the
    module was loaded. Resolve the current figure per call instead.
    """
    if fig is None:
        fig = plt.gcf()

    top = nest.GetStatus(l)[0]['topology']
    ctr = top['center']
    ext = top['extent']

    if xticks is None:
        if 'rows' in top:
            # Grid layer: one tick per element row/column.
            dx = float(ext[0]) / top['columns']
            dy = float(ext[1]) / top['rows']
            xticks = ctr[0] - ext[0] / 2. + dx / 2. + dx * np.arange(
                top['columns'])
            yticks = ctr[1] - ext[1] / 2. + dy / 2. + dy * np.arange(
                top['rows'])
    if xlim is None:
        xlim = [ctr[0] - ext[0] / 2. - dx / 2., ctr[0] + ext[
            0] / 2. + dx / 2.]  # extra space so extent is visible
        ylim = [ctr[1] - ext[1] / 2. - dy / 2., ctr[1] + ext[1] / 2. + dy / 2.]
    else:
        ext = [xlim[1] - xlim[0], ylim[1] - ylim[0]]

    ax = fig.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.set_aspect('equal', 'box')
    ax.set_xticks(xticks)
    ax.set_yticks(yticks)
    ax.grid(True)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    return
def conn_figure(fig, layer, connd, targets=None, showmask=True, showkern=False,
                xticks=range(-5, 6), yticks=range(-5, 6),
                xlim=[-5.5, 5.5], ylim=[-5.5, 5.5]):
    """Plot *layer* and the targets of the given source nodes.

    Parameters
    ----------
    fig : matplotlib figure to draw into.
    layer : NEST layer GID tuple.
    connd : connection dictionary; its 'mask'/'kernel' entries are drawn
        when *showmask*/*showkern* are set.
    targets : iterable of (source node, color) pairs; defaults to the
        layer's center element in red.

    NOTE: the mutable list defaults (xlim/ylim) are shared across calls but
    are never mutated in this function.
    """
    if targets is None:
        targets = ((tp.FindCenterElement(layer), 'red'),)
    tp.PlotLayer(layer, fig=fig, nodesize=60)
    for src, clr in targets:
        # Only overlay the mask/kernel outlines on request.
        if showmask:
            mask = connd['mask']
        else:
            mask = None
        if showkern:
            kern = connd['kernel']
        else:
            kern = None
        tp.PlotTargets(src, layer, fig=fig, mask=mask, kernel=kern,
                       src_size=250, tgt_color=clr, tgt_size=20,
                       kernel_color='green')
    beautify_layer(layer, fig,
                   xlim=xlim, ylim=ylim, xticks=xticks, yticks=yticks,
                   xlabel='', ylabel='')
    fig.gca().grid(False)
# -----------------------------------------------
# Simple connection
# The `# { conn1 #}` ... `# { end #}` markers delimit the snippet that is
# extracted verbatim into the user manual; code between them must stay
# unchanged.
# { conn1 #}
l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
                    'elements': 'iaf_neuron'})
conndict = {'connection_type': 'divergent',
            'mask': {'rectangular': {'lower_left': [-2., -1.],
                                     'upper_right': [2., 1.]}}}
tp.ConnectLayers(l, l, conndict)
# { end #}
# Left panel: open boundary conditions, center and off-center sources.
fig = plt.figure()
fig.add_subplot(121)
conn_figure(fig, l, conndict,
            targets=((tp.FindCenterElement(l), 'red'),
                     (tp.FindNearestElement(l, [4., 5.]), 'yellow')))
# same another time, with periodic bcs
lpbc = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
                       'elements': 'iaf_neuron', 'edge_wrap': True})
tp.ConnectLayers(lpbc, lpbc, conndict)
fig.add_subplot(122)
conn_figure(fig, lpbc, conndict, showmask=False,
            targets=((tp.FindCenterElement(lpbc), 'red'),
                     (tp.FindNearestElement(lpbc, [4., 5.]), 'yellow')))
plt.savefig('../user_manual_figures/conn1.png', bbox_inches='tight')
# -----------------------------------------------
# free masks
def free_mask_fig(fig, loc, cdict):
    """Connect a fresh 11x11 grid layer with *cdict* and plot it in subplot *loc*."""
    # Reset so repeated calls do not accumulate connections in the kernel.
    nest.ResetKernel()
    l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
                        'elements': 'iaf_neuron'})
    tp.ConnectLayers(l, l, cdict)
    fig.add_subplot(loc)
    conn_figure(fig, l, cdict, xticks=range(-5, 6, 2), yticks=range(-5, 6, 2))
fig = plt.figure()
# { conn2r #}
conndict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-2., -1.],
'upper_right': [2., 1.]}}}
# { end #}
free_mask_fig(fig, 231, conndict)
# { conn2ro #}
conndict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-2., -1.],
'upper_right': [2., 1.]},
'anchor': [-1.5, -1.5]}}
# { end #}
free_mask_fig(fig, 234, conndict)
# { conn2c #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 2.0}}}
# { end #}
free_mask_fig(fig, 232, conndict)
# { conn2co #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 2.0},
'anchor': [-2.0, 0.0]}}
# { end #}
free_mask_fig(fig, 235, conndict)
# { conn2d #}
conndict = {'connection_type': 'divergent',
'mask': {'doughnut': {'inner_radius': 1.5,
'outer_radius': 3.}}}
# { end #}
free_mask_fig(fig, 233, conndict)
# { conn2do #}
conndict = {'connection_type': 'divergent',
'mask': {'doughnut': {'inner_radius': 1.5,
'outer_radius': 3.},
'anchor': [1.5, 1.5]}}
# { end #}
free_mask_fig(fig, 236, conndict)
plt.savefig('../user_manual_figures/conn2.png', bbox_inches='tight')
# -----------------------------------------------
# 3d masks
def conn_figure_3d(fig, layer, connd, targets=None, showmask=True,
                   showkern=False,
                   xticks=range(-5, 6), yticks=range(-5, 6),
                   xlim=[-5.5, 5.5], ylim=[-5.5, 5.5]):
    """3D variant of conn_figure: plot *layer* and the targets of the given
    (source, color) pairs; mask/kernel outlines are drawn on request.

    NOTE(review): xticks/yticks/xlim/ylim are accepted for signature parity
    with conn_figure but are not used in this 3D variant.
    """
    if targets is None:
        targets = ((tp.FindCenterElement(layer), 'red'),)
    tp.PlotLayer(layer, fig=fig, nodesize=20, nodecolor=(.5, .5, 1.))
    for src, clr in targets:
        if showmask:
            mask = connd['mask']
        else:
            mask = None
        if showkern:
            kern = connd['kernel']
        else:
            kern = None
        tp.PlotTargets(src, layer, fig=fig, mask=mask, kernel=kern,
                       src_size=250, tgt_color=clr, tgt_size=60,
                       kernel_color='green')
    ax = fig.gca()
    ax.set_aspect('equal', 'box')
    plt.draw()
def free_mask_3d_fig(fig, loc, cdict):
    """Connect a fresh 11x11x11 layer with *cdict* and draw it in 3D subplot *loc*."""
    nest.ResetKernel()
    l = tp.CreateLayer(
        {'rows': 11, 'columns': 11, 'layers': 11, 'extent': [11., 11., 11.],
         'elements': 'iaf_neuron'})
    tp.ConnectLayers(l, l, cdict)
    # projection='3d' is required for the Axes3D-based plotting below.
    fig.add_subplot(loc, projection='3d')
    conn_figure_3d(fig, l, cdict, xticks=range(-5, 6, 2),
                   yticks=range(-5, 6, 2))
fig = plt.figure()
# { conn_3d_a #}
conndict = {'connection_type': 'divergent',
'mask': {'box': {'lower_left': [-2., -1., -1.],
'upper_right': [2., 1., 1.]}}}
# { end #}
free_mask_3d_fig(fig, 121, conndict)
# { conn_3d_b #}
conndict = {'connection_type': 'divergent',
'mask': {'spherical': {'radius': 2.5}}}
# { end #}
free_mask_3d_fig(fig, 122, conndict)
plt.savefig('../user_manual_figures/conn_3d.png', bbox_inches='tight')
# -----------------------------------------------
# grid masks
def grid_mask_fig(fig, loc, cdict):
    """Like free_mask_fig, but with the mask outline suppressed.

    NOTE(review): showmask=False here -- presumably grid masks cannot be
    visualised by PlotTargets; confirm against the topology module docs.
    """
    nest.ResetKernel()
    l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
                        'elements': 'iaf_neuron'})
    tp.ConnectLayers(l, l, cdict)
    fig.add_subplot(loc)
    conn_figure(fig, l, cdict, xticks=range(-5, 6, 2), yticks=range(-5, 6, 2),
                showmask=False)
fig = plt.figure()
# { conn3 #}
conndict = {'connection_type': 'divergent',
'mask': {'grid': {'rows': 3, 'columns': 5}}}
# { end #}
grid_mask_fig(fig, 131, conndict)
# { conn3c #}
conndict = {'connection_type': 'divergent',
'mask': {'grid': {'rows': 3, 'columns': 5},
'anchor': {'row': 1, 'column': 2}}}
# { end #}
grid_mask_fig(fig, 132, conndict)
# { conn3x #}
conndict = {'connection_type': 'divergent',
'mask': {'grid': {'rows': 3, 'columns': 5},
'anchor': {'row': -1, 'column': 2}}}
# { end #}
grid_mask_fig(fig, 133, conndict)
plt.savefig('../user_manual_figures/conn3.png', bbox_inches='tight')
# -----------------------------------------------
# free masks
def kernel_fig(fig, loc, cdict, showkern=True):
    """Like free_mask_fig, additionally drawing the kernel (on by default)."""
    nest.ResetKernel()
    l = tp.CreateLayer({'rows': 11, 'columns': 11, 'extent': [11., 11.],
                        'elements': 'iaf_neuron'})
    tp.ConnectLayers(l, l, cdict)
    fig.add_subplot(loc)
    conn_figure(fig, l, cdict, xticks=range(-5, 6, 2), yticks=range(-5, 6, 2),
                showkern=showkern)
fig = plt.figure()
# { conn4cp #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}},
'kernel': 0.5}
# { end #}
kernel_fig(fig, 231, conndict)
# { conn4g #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}},
'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.}}}
# { end #}
kernel_fig(fig, 232, conndict)
# { conn4gx #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}, 'anchor': [1.5, 1.5]},
'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.,
'anchor': [1.5, 1.5]}}}
# { end #}
kernel_fig(fig, 233, conndict)
plt.draw()
# { conn4cut #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}},
'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 1.,
'cutoff': 0.5}}}
# { end #}
kernel_fig(fig, 234, conndict)
# { conn42d #}
conndict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 4.}},
'kernel': {'gaussian2D': {'p_center': 1.0,
'sigma_x': 1., 'sigma_y': 3.}}}
# { end #}
kernel_fig(fig, 235, conndict, showkern=False)
plt.savefig('../user_manual_figures/conn4.png', bbox_inches='tight')
# -----------------------------------------------
def wd_fig(fig, loc, ldict, cdict, what, rpos=None,
           xlim=[-1, 51], ylim=[0, 1], xticks=range(0, 51, 5),
           yticks=np.arange(0., 1.1, 0.2), clr='blue',
           label=''):
    """Scatter-plot the connection property *what* ('weight' or 'delay') of
    one reference node against the x-position of its targets.

    The reference node is the layer's first leaf, or the node nearest to
    *rpos* when given.  NOTE: the mutable list defaults are shared across
    calls but are never mutated here.
    """
    nest.ResetKernel()
    l = tp.CreateLayer(ldict)
    tp.ConnectLayers(l, l, cdict)
    ax = fig.add_subplot(loc)
    if rpos is None:
        rn = nest.GetLeaves(l)[0][:1]  # first node
    else:
        rn = tp.FindNearestElement(l, rpos)
    # Pull the requested property and target positions of every outgoing
    # connection of the reference node.
    conns = nest.GetConnections(rn)
    cstat = nest.GetStatus(conns)
    vals = np.array([sd[what] for sd in cstat])
    tgts = [sd['target'] for sd in cstat]
    locs = np.array(tp.GetPosition(tgts))
    ax.plot(locs[:, 0], vals, 'o', mec='none', mfc=clr, label=label)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.set_xticks(xticks)
    ax.set_yticks(yticks)
fig = plt.figure()
# { conn5lin #}
ldict = {'rows': 1, 'columns': 51,
'extent': [51., 1.], 'center': [25., 0.],
'elements': 'iaf_neuron'}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}},
'delays': {'linear': {'c': 0.1, 'a': 0.02}}}
# { end #}
wd_fig(fig, 311, ldict, cdict, 'weight', label='Weight')
wd_fig(fig, 311, ldict, cdict, 'delay', label='Delay', clr='red')
fig.gca().legend()
lpdict = {'rows': 1, 'columns': 51, 'extent': [51., 1.], 'center': [25., 0.],
'elements': 'iaf_neuron', 'edge_wrap': True}
# { conn5linpbc #}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}},
'delays': {'linear': {'c': 0.1, 'a': 0.02}}}
# { end #}
wd_fig(fig, 312, lpdict, cdict, 'weight', label='Weight')
wd_fig(fig, 312, lpdict, cdict, 'delay', label='Delay', clr='red')
fig.gca().legend()
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'linear': {'c': 1.0, 'a': -0.05, 'cutoff': 0.0}}}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Linear',
rpos=[25., 0.], clr='orange')
# { conn5exp #}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'exponential': {'a': 1., 'tau': 5.}}}
# { end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Exponential',
rpos=[25., 0.])
# { conn5gauss #}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'gaussian': {'p_center': 1., 'sigma': 5.}}}
# { end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Gaussian', clr='green',
rpos=[25., 0.])
# { conn5uniform #}
cdict = {'connection_type': 'divergent',
'mask': {'rectangular': {'lower_left': [-25.5, -0.5],
'upper_right': [25.5, 0.5]}},
'weights': {'uniform': {'min': 0.2, 'max': 0.8}}}
# { end #}
wd_fig(fig, 313, ldict, cdict, 'weight', label='Uniform', clr='red',
rpos=[25., 0.])
fig.gca().legend()
plt.savefig('../user_manual_figures/conn5.png', bbox_inches='tight')
# --------------------------------
def pn_fig(fig, loc, ldict, cdict,
           xlim=[0., .5], ylim=[0, 3.5], xticks=range(0, 51, 5),
           yticks=np.arange(0., 1.1, 0.2), clr='blue',
           label=''):
    """Histogram of source-target distances of all connections in a layer,
    overlaid with an analytic connection-probability curve."""
    nest.ResetKernel()
    l = tp.CreateLayer(ldict)
    tp.ConnectLayers(l, l, cdict)
    ax = fig.add_subplot(loc)
    rn = nest.GetLeaves(l)[0]
    conns = nest.GetConnections(rn)
    cstat = nest.GetStatus(conns)
    srcs = [sd['source'] for sd in cstat]
    tgts = [sd['target'] for sd in cstat]
    dist = np.array(tp.Distance(srcs, tgts))
    ax.hist(dist, bins=50, histtype='stepfilled', normed=True)
    r = np.arange(0., 0.51, 0.01)
    # NOTE(review): analytic reference curve -- presumably the normalised
    # distance density expected for the linear kernel of the conn6 example;
    # confirm the derivation of the constant 12 / pi.
    plt.plot(r, 2 * np.pi * r * (1 - 2 * r) * 12 / np.pi, 'r-', lw=3,
             zorder=-10)
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    """ax.set_xticks(xticks)
    ax.set_yticks(yticks)"""
    # ax.set_aspect(100, 'box')
    ax.set_xlabel('Source-target distance d')
    ax.set_ylabel('Connection probability pconn(d)')
fig = plt.figure()
# { conn6 #}
pos = [[np.random.uniform(-1., 1.), np.random.uniform(-1., 1.)]
for j in range(1000)]
ldict = {'positions': pos, 'extent': [2., 2.],
'elements': 'iaf_neuron', 'edge_wrap': True}
cdict = {'connection_type': 'divergent',
'mask': {'circular': {'radius': 1.0}},
'kernel': {'linear': {'c': 1., 'a': -2., 'cutoff': 0.0}},
'number_of_connections': 50,
'allow_multapses': True, 'allow_autapses': False}
# { end #}
pn_fig(fig, 111, ldict, cdict)
plt.savefig('../user_manual_figures/conn6.png', bbox_inches='tight')
# -----------------------------
# Composite elements: pyramidal cells and interneurons at each grid node,
# connected model-to-model (probabilistic pyr->in, deterministic in->pyr).
# The `# { connN #}` ... `# { end #}` markers delimit manual snippets.
# { conn7 #}
nest.ResetKernel()
nest.CopyModel('iaf_neuron', 'pyr')
nest.CopyModel('iaf_neuron', 'in')
ldict = {'rows': 10, 'columns': 10, 'elements': ['pyr', 'in']}
cdict_p2i = {'connection_type': 'divergent',
             'mask': {'circular': {'radius': 0.5}},
             'kernel': 0.8,
             'sources': {'model': 'pyr'},
             'targets': {'model': 'in'}}
cdict_i2p = {'connection_type': 'divergent',
             'mask': {'rectangular': {'lower_left': [-0.2, -0.2],
                                      'upper_right': [0.2, 0.2]}},
             'sources': {'model': 'in'},
             'targets': {'model': 'pyr'}}
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict_p2i)
tp.ConnectLayers(l, l, cdict_i2p)
# { end #}
# ----------------------------
# Same network as conn7, now with named synapse models carrying the
# excitatory / inhibitory weights.
# { conn8 #}
nest.ResetKernel()
nest.CopyModel('iaf_neuron', 'pyr')
nest.CopyModel('iaf_neuron', 'in')
nest.CopyModel('static_synapse', 'exc', {'weight': 2.0})
nest.CopyModel('static_synapse', 'inh', {'weight': -8.0})
ldict = {'rows': 10, 'columns': 10, 'elements': ['pyr', 'in']}
cdict_p2i = {'connection_type': 'divergent',
             'mask': {'circular': {'radius': 0.5}},
             'kernel': 0.8,
             'sources': {'model': 'pyr'},
             'targets': {'model': 'in'},
             'synapse_model': 'exc'}
cdict_i2p = {'connection_type': 'divergent',
             'mask': {'rectangular': {'lower_left': [-0.2, -0.2],
                                      'upper_right': [0.2, 0.2]}},
             'sources': {'model': 'in'},
             'targets': {'model': 'pyr'},
             'synapse_model': 'inh'}
l = tp.CreateLayer(ldict)
tp.ConnectLayers(l, l, cdict_p2i)
tp.ConnectLayers(l, l, cdict_i2p)
# { end #}
# ----------------------------
# Stimulation: a single poisson generator layer driving a local patch.
# { conn9 #}
nrns = tp.CreateLayer({'rows': 20,
                       'columns': 20,
                       'elements': 'iaf_neuron'})
stim = tp.CreateLayer({'rows': 1,
                       'columns': 1,
                       'elements': 'poisson_generator'})
cdict_stim = {'connection_type': 'divergent',
              'mask': {'circular': {'radius': 0.1},
                       'anchor': [0.2, 0.2]}}
tp.ConnectLayers(stim, nrns, cdict_stim)
# { end #}
# ----------------------------
# Recording: a spike detector collecting from another local patch
# (note the convergent connection direction).
# { conn10 #}
rec = tp.CreateLayer({'rows': 1,
                      'columns': 1,
                      'elements': 'spike_detector'})
cdict_rec = {'connection_type': 'convergent',
             'mask': {'circular': {'radius': 0.1},
                      'anchor': [-0.2, 0.2]}}
tp.ConnectLayers(nrns, rec, cdict_rec)
# { end #}
| gpl-2.0 |
Zsailer/epistasis | epistasis/validate.py | 2 | 1868 | import numpy as np
import pandas as pd
from .stats import split_gpm, pearson
def k_fold(gpm, model, k=10):
    """Cross-validation using K-fold validation on a seer.

    Shuffles the genotype-phenotype map index, splits it into *k* folds,
    fits *model* on k-1 folds and scores the held-out fold with the squared
    Pearson correlation between observed and predicted phenotypes.

    Parameters
    ----------
    gpm : genotype-phenotype map exposing ``index``, ``genotypes`` and
        ``phenotypes``.
    model : epistasis model with ``add_gpm``/``fit``/``predict`` methods.
    k : int
        Number of folds.

    Returns
    -------
    scores : list of float
        One squared Pearson score per fold.
    """
    # Get index.
    idx = np.copy(gpm.index)
    # Shuffle index
    np.random.shuffle(idx)
    # Get subsets.
    subsets = np.array_split(idx, k)
    subsets_idx = np.arange(len(subsets))  # NOTE(review): unused -- leftover?
    # Do k-fold
    scores = []
    for i in range(k):
        # Split index into train/test subsets
        # NOTE(review): np.delete on a list of (possibly unequal-length)
        # arrays relies on object-array coercion; confirm with recent NumPy
        # when k does not divide len(idx).
        train_idx = np.concatenate(np.delete(subsets, i))
        test_idx = subsets[i]  # NOTE(review): unused; split_gpm gets train_idx only.
        # Split genotype-phenotype map
        train, test = split_gpm(gpm, idx=train_idx)
        # Fit model.
        model.add_gpm(train)
        model.fit()
        # Score validation set
        pobs = test.phenotypes
        pred = model.predict(X=test.genotypes)
        score = pearson(pobs, pred)**2
        scores.append(score)
    return scores
def holdout(gpm, model, size=1, repeat=1):
    """Validate a model by holding-out parts of the data.

    Repeats *repeat* times: shuffle the index, train on the first *size*
    samples and test on the remainder, scoring both sets with the squared
    Pearson correlation.

    NOTE(review): despite the name, ``size`` is the size of the *training*
    set (``idx[:size]``), not of the held-out set -- confirm intent.

    Returns
    -------
    (train_scores, test_scores) : tuple of lists of float
    """
    train_scores = []
    test_scores = []
    model.add_gpm(gpm)
    # NOTE(review): reaches into the private model matrix ``_X()``; assumes
    # its rows align with gpm.index -- verify against the model class.
    X = model._X()
    for i in range(repeat):
        # Get index.
        idx = np.copy(gpm.index)
        # Shuffle index
        np.random.shuffle(idx)
        # Split model matriay to cross validate).
        train_idx = idx[:size]
        test_idx = idx[size:]
        train_X = X[train_idx, :]
        test_X = X[test_idx, :]
        # Train the model
        model.fit(X=train_X, y=gpm.phenotypes[train_idx])
        train_p = model.predict(X=train_X)
        train_s = pearson(train_p, gpm.phenotypes[train_idx])**2
        train_scores.append(train_s)
        # Test the model
        test_p = model.predict(X=test_X)
        test_s = pearson(test_p, gpm.phenotypes[test_idx])**2
        test_scores.append(test_s)
    return train_scores, test_scores
| unlicense |
yonglehou/scikit-learn | sklearn/utils/extmath.py | 142 | 21102 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
    """Compute the Euclidean or Frobenius norm of x.

    Returns the Euclidean norm when x is a vector, the Frobenius norm when x
    is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
    """
    arr = np.asarray(x)
    # Delegate to the BLAS ``nrm2`` routine matched to the array's dtype;
    # it is numerically more careful than sqrt(dot(x, x)).
    blas_nrm2 = linalg.get_blas_funcs(['nrm2'], [arr])[0]
    return blas_nrm2(arr)
# Newer NumPy has a ravel that needs less copying.
# ``order='K'`` (NumPy >= 1.7.1) flattens in memory-layout order, avoiding a
# copy for Fortran-ordered input; older versions fall back to plain ravel.
if np_version < (1, 7, 1):
    _ravel = np.ravel
else:
    _ravel = partial(np.ravel, order='K')
def squared_norm(x):
    """Squared Euclidean or Frobenius norm of x.

    Returns the Euclidean norm when x is a vector, the Frobenius norm when x
    is a matrix (2-d array). Faster than norm(x) ** 2.
    """
    flat = _ravel(x)
    # A self dot-product of the flattened data is both faster and more
    # accurate than squaring norm(x).
    return np.dot(flat, flat)
def row_norms(X, squared=False):
    """Row-wise (squared) Euclidean norm of X.

    Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
    matrices and does not create an X.shape-sized temporary.

    Performs no input validation.
    """
    if issparse(X):
        sq_norms = csr_row_norms(X)
    else:
        # einsum computes the per-row dot products without materialising an
        # intermediate array of X's full shape.
        sq_norms = np.einsum('ij,ij->i', X, X)
    return sq_norms if squared else np.sqrt(sq_norms)
def fast_logdet(A):
    """Compute log(det(A)) for A symmetric.

    Equivalent to np.log(np.linalg.det(A)) but more robust: slogdet avoids
    overflow/underflow in the determinant itself. Returns -Inf if det(A) is
    non positive or is not defined.
    """
    det_sign, logdet = np.linalg.slogdet(A)
    return logdet if det_sign > 0 else -np.inf
def _impose_f_order(X):
    """Helper Function"""
    # important to access flags instead of calling np.isfortran,
    # this catches corner cases.
    # Returns (array, transposed): the array is Fortran-ordered, and a
    # C-contiguous input is returned as its (copy-free) transpose with the
    # flag set so the BLAS gemm call can undo the transposition.
    if X.flags.c_contiguous:
        return check_array(X.T, copy=False, order='F'), True
    else:
        return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
    """BLAS-gemm dot of two 2-d float arrays; raises ValueError when the
    inputs do not qualify, signalling the caller to fall back to np.dot."""
    if B.shape[0] != A.shape[A.ndim - 1]:  # check adopted from '_dotblas.c'
        raise ValueError
    if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
                                 for x in [A, B]):
        # Mixed or non-float dtypes cannot go through gemm directly.
        warnings.warn('Data must be of same type. Supported types '
                      'are 32 and 64 bit float. '
                      'Falling back to np.dot.', NonBLASDotWarning)
        raise ValueError
    if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
        raise ValueError
    # scipy 0.9 compliant API
    dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
    A, trans_a = _impose_f_order(A)
    B, trans_b = _impose_f_order(B)
    return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
# (np.dot in NumPy >= 1.7.2 already dispatches to BLAS without extra copies,
# so the hand-rolled gemm path is only worthwhile on older versions.)
if np_version < (1, 7, 2) and _have_blas_gemm():
    def fast_dot(A, B):
        """Compute fast dot products directly calling BLAS.
        This function calls BLAS directly while warranting Fortran contiguity.
        This helps avoiding extra copies `np.dot` would have created.
        For details see section `Linear Algebra on large Arrays`:
        http://wiki.scipy.org/PerformanceTips
        Parameters
        ----------
        A, B: instance of np.ndarray
            Input arrays. Arrays are supposed to be of the same dtype and to
            have exactly 2 dimensions. Currently only floats are supported.
            In case these requirements aren't met np.dot(A, B) is returned
            instead. To activate the related warning issued in this case
            execute the following lines of code:
            >> import warnings
            >> from sklearn.utils.validation import NonBLASDotWarning
            >> warnings.simplefilter('always', NonBLASDotWarning)
        """
        try:
            return _fast_dot(A, B)
        except ValueError:
            # Maltyped or malformed data.
            return np.dot(A, B)
else:
    fast_dot = np.dot
def density(w, **kwargs):
    """Compute density of a sparse vector.

    Return a value between 0 and 1. ``**kwargs`` is accepted and ignored
    (kept for API compatibility).
    """
    if hasattr(w, "toarray"):
        # Sparse input: stored-entry count over the full matrix size.
        return float(w.nnz) / (w.shape[0] * w.shape[1])
    # Dense (or missing) input: fraction of non-zero entries.
    return 0 if w is None else float((w != 0).sum()) / w.size
def safe_sparse_dot(a, b, dense_output=False):
    """Dot product that handles the sparse matrix case correctly.

    Uses BLAS GEMM as replacement for numpy.dot where possible
    to avoid unnecessary copies.
    """
    if not (issparse(a) or issparse(b)):
        # Dense-dense: go through the (possibly BLAS-backed) fast path.
        return fast_dot(a, b)
    product = a * b
    if dense_output and hasattr(product, "toarray"):
        product = product.toarray()
    return product
def randomized_range_finder(A, size, n_iter, random_state=None):
    """Computes an orthonormal matrix whose range approximates the range of A.
    Parameters
    ----------
    A: 2D array
        The input data matrix
    size: integer
        Size of the return array
    n_iter: integer
        Number of power iterations used to stabilize the result
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance
    Returns
    -------
    Q: 2D array
        A (size x size) projection matrix, the range of which
        approximates well the range of the input matrix A.
    Notes
    -----
    Follows Algorithm 4.3 of
    Finding structure with randomness: Stochastic algorithms for constructing
    approximate matrix decompositions
    Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061
    """
    random_state = check_random_state(random_state)
    # generating random gaussian vectors r with shape: (A.shape[1], size)
    R = random_state.normal(size=(A.shape[1], size))
    # sampling the range of A using by linear projection of r
    Y = safe_sparse_dot(A, R)
    del R
    # perform power iterations with Y to further 'imprint' the top
    # singular vectors of A in Y
    for i in xrange(n_iter):
        Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
    # extracting an orthonormal basis of the A range samples
    # (economic QR keeps only the thin orthonormal factor Q).
    Q, R = linalg.qr(Y, mode='economic')
    return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
                   transpose='auto', flip_sign=True, random_state=0):
    """Computes a truncated randomized SVD
    Parameters
    ----------
    M: ndarray or sparse matrix
        Matrix to decompose
    n_components: int
        Number of singular values and vectors to extract.
    n_oversamples: int (default is 10)
        Additional number of random vectors to sample the range of M so as
        to ensure proper conditioning. The total number of random vectors
        used to find the range of M is n_components + n_oversamples.
    n_iter: int (default is 0)
        Number of power iterations (can be used to deal with very noisy
        problems).
    transpose: True, False or 'auto' (default)
        Whether the algorithm should be applied to M.T instead of M. The
        result should approximately be the same. The 'auto' mode will
        trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tend to be a little faster in that
        case).
    flip_sign: boolean, (True by default)
        The output of a singular value decomposition is only unique up to a
        permutation of the signs of the singular vectors. If `flip_sign` is
        set to `True`, the sign ambiguity is resolved by making the largest
        loadings for each component in the left singular vectors positive.
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior
    Notes
    -----
    This algorithm finds a (usually very good) approximate truncated
    singular value decomposition using randomization to speed up the
    computations. It is particularly fast on large matrices on which
    you wish to extract only a small number of components.
    References
    ----------
    * Finding structure with randomness: Stochastic algorithms for constructing
      approximate matrix decompositions
      Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
    * A randomized algorithm for the decomposition of matrices
      Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
    """
    random_state = check_random_state(random_state)
    n_random = n_components + n_oversamples
    n_samples, n_features = M.shape
    if transpose == 'auto' and n_samples > n_features:
        transpose = True
    if transpose:
        # this implementation is a bit faster with smaller shape[1]
        M = M.T
    # Q: orthonormal basis whose span approximates the range of M.
    Q = randomized_range_finder(M, n_random, n_iter, random_state)
    # project M to the (k + p) dimensional space using the basis vectors
    B = safe_sparse_dot(Q.T, M)
    # compute the SVD on the thin matrix: (k + p) wide
    Uhat, s, V = linalg.svd(B, full_matrices=False)
    del B
    # Lift the left singular vectors back into the original space.
    U = np.dot(Q, Uhat)
    if flip_sign:
        U, V = svd_flip(U, V)
    if transpose:
        # transpose back the results according to the input convention
        return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
    else:
        return U[:, :n_components], s[:n_components], V[:n_components, :]
def logsumexp(arr, axis=0):
    """Computes the sum of arr assuming arr is in the log domain.

    Returns log(sum(exp(arr))) while minimizing the possibility of
    over/underflow.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.utils.extmath import logsumexp
    >>> a = np.arange(10)
    >>> np.log(np.sum(np.exp(a)))
    9.4586297444267107
    >>> logsumexp(a)
    9.4586297444267107
    """
    arr = np.rollaxis(arr, axis)
    # Shift by the maximum before exponentiating: exp() then stays in a
    # safe range and the log accumulates the least error.
    offset = arr.max(axis=0)
    return np.log(np.sum(np.exp(arr - offset), axis=0)) + offset
def weighted_mode(a, w, axis=0):
    """Returns an array of the weighted modal (most common) value in a
    If there is more than one such value, only the first is returned.
    The bin-count for the modal bins is also returned.
    This is an extension of the algorithm in scipy.stats.mode.
    Parameters
    ----------
    a : array_like
        n-dimensional array of which to find mode(s).
    w : array_like
        n-dimensional array of weights for each value
    axis : int, optional
        Axis along which to operate. Default is 0, i.e. the first axis.
    Returns
    -------
    vals : ndarray
        Array of modal values.
    score : ndarray
        Array of weighted counts for each mode.
    Examples
    --------
    >>> from sklearn.utils.extmath import weighted_mode
    >>> x = [4, 1, 4, 2, 4, 2]
    >>> weights = [1, 1, 1, 1, 1, 1]
    >>> weighted_mode(x, weights)
    (array([ 4.]), array([ 3.]))
    The value 4 appears three times: with uniform weights, the result is
    simply the mode of the distribution.
    >>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
    >>> weighted_mode(x, weights)
    (array([ 2.]), array([ 3.5]))
    The value 2 has the highest score: it appears twice with weights of
    1.5 and 2: the sum of these is 3.
    See Also
    --------
    scipy.stats.mode
    """
    if axis is None:
        # Flattened mode: operate on 1-D views of both arrays.
        a = np.ravel(a)
        w = np.ravel(w)
        axis = 0
    else:
        a = np.asarray(a)
        w = np.asarray(w)
        axis = axis
    if a.shape != w.shape:
        # Broadcast scalar / lower-dimensional weights up to a's shape.
        w = np.zeros(a.shape, dtype=w.dtype) + w
    scores = np.unique(np.ravel(a))  # get ALL unique values
    testshape = list(a.shape)
    testshape[axis] = 1
    oldmostfreq = np.zeros(testshape)
    oldcounts = np.zeros(testshape)
    # One pass per distinct value: sum its weights along ``axis`` and keep a
    # running argmax of the weighted counts (mirrors scipy.stats.mode).
    for score in scores:
        template = np.zeros(a.shape)
        ind = (a == score)
        template[ind] = w[ind]
        counts = np.expand_dims(np.sum(template, axis), axis)
        mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
        oldcounts = np.maximum(counts, oldcounts)
        oldmostfreq = mostfrequent
    return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
    """Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.

    Calculate a generalized inverse of a symmetric matrix using its
    eigenvalue decomposition and including all 'large' eigenvalues.

    Parameters
    ----------
    a : array, shape (N, N)
        Real symmetric or complex hermetian matrix to be pseudo-inverted
    cond : float or None, default None
        Cutoff for 'small' eigenvalues; eigenvalues smaller in magnitude
        than cond * largest_eigenvalue are treated as zero. If None or -1,
        a suitable machine precision is used.
    rcond : float or None, default None (deprecated)
        Deprecated alias for ``cond``; wins when both are given.
    lower : boolean
        Whether the pertinent array data is taken from the lower or upper
        triangle of a. (Default: lower)

    Returns
    -------
    B : array, shape (N, N)

    Raises
    ------
    LinAlgError
        If eigenvalue does not converge
    """
    a = np.asarray_chkfinite(a)
    eigvals, eigvecs = linalg.eigh(a, lower=lower)
    if rcond is not None:
        cond = rcond
    if cond in [None, -1]:
        t = eigvecs.dtype.char.lower()
        factor = {'f': 1E3, 'd': 1E6}
        cond = factor[t] * np.finfo(t).eps
    # Unlike the SVD-based pinv, eigh may return negative eigenvalues, so
    # threshold on the absolute value.
    large = np.abs(eigvals) > cond * np.max(np.abs(eigvals))
    inv_vals = np.zeros_like(eigvals)
    inv_vals[large] = 1.0 / eigvals[large]
    return np.dot(eigvecs * inv_vals, np.conjugate(eigvecs).T)
def cartesian(arrays, out=None):
    """Generate a cartesian product of input arrays.

    Parameters
    ----------
    arrays : list of array-like
        1-D arrays to form the cartesian product of.
    out : ndarray
        Array to place the cartesian product in.

    Returns
    -------
    out : ndarray
        2-D array of shape (M, len(arrays)) containing cartesian products
        formed of input arrays.

    Examples
    --------
    >>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
    array([[1, 4, 6],
           [1, 4, 7],
           [1, 5, 6],
           [1, 5, 7],
           [2, 4, 6],
           [2, 4, 7],
           [2, 5, 6],
           [2, 5, 7],
           [3, 4, 6],
           [3, 4, 7],
           [3, 5, 6],
           [3, 5, 7]])
    """
    arrays = [np.asarray(arr) for arr in arrays]
    dtype = arrays[0].dtype
    # np.indices enumerates every coordinate combination; flattening it
    # row-wise gives one index tuple per output row.
    grid = np.indices([len(arr) for arr in arrays])
    grid = grid.reshape(len(arrays), -1).T
    if out is None:
        out = np.empty_like(grid, dtype=dtype)
    for col, arr in enumerate(arrays):
        out[:, col] = arr[grid[:, col]]
    return out
def svd_flip(u, v, u_based_decision=True):
    """Sign correction to ensure deterministic output from SVD.

    Adjusts the columns of u and the rows of v such that the loadings in the
    columns in u that are largest in absolute value are always positive.

    Parameters
    ----------
    u, v : ndarray
        u and v are the output of `linalg.svd` or
        `sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
        so one can compute `np.dot(u * s, v)`. Both are modified in place.
    u_based_decision : boolean, (default=True)
        If True, use the columns of u as the basis for sign flipping.
        Otherwise, use the rows of v. The choice of which variable to base
        the decision on is generally algorithm dependent.

    Returns
    -------
    u_adjusted, v_adjusted : arrays with the same dimensions as the input.
    """
    if u_based_decision:
        # columns of u, rows of v
        max_abs_cols = np.argmax(np.abs(u), axis=0)
        # np.arange replaces the Python-2-only ``xrange`` (from the bundled
        # six externals): identical fancy-indexing result, but portable to
        # Python 3 and independent of six.
        signs = np.sign(u[max_abs_cols, np.arange(u.shape[1])])
    else:
        # rows of v, columns of u
        max_abs_rows = np.argmax(np.abs(v), axis=1)
        signs = np.sign(v[np.arange(v.shape[0]), max_abs_rows])
    u *= signs
    v *= signs[:, np.newaxis]
    return u, v
def log_logistic(X, out=None):
    """Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.

    This implementation is numerically stable because it splits positive and
    negative values::
        -log(1 + exp(-x_i))     if x_i > 0
        x_i - log(1 + exp(x_i)) if x_i <= 0

    For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.

    Parameters
    ----------
    X: array-like, shape (M, N)
        Argument to the logistic function
    out: array-like, shape: (M, N), optional:
        Preallocated output array.

    Returns
    -------
    out: array, shape (M, N)
        Log of the logistic function evaluated at every point in x

    Notes
    -----
    See the blog post describing this implementation:
    http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
    """
    is_1d = X.ndim == 1
    # ``np.float`` was a deprecated alias for the builtin float (removed in
    # NumPy 1.24); ``np.float64`` is the explicit, equivalent dtype.
    X = check_array(X, dtype=np.float64)
    n_samples, n_features = X.shape
    if out is None:
        out = np.empty_like(X)
    # C-level element-wise evaluation of the stable piecewise formula.
    _log_logistic_sigmoid(n_samples, n_features, X, out)
    if is_1d:
        return np.squeeze(out)
    return out
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
Adapated from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
    """Ensure `X.min()` >= `min_value`."""
    shift = min_value - safe_min(X)
    if shift <= 0:
        # Already at or above the requested floor: nothing to do.
        return X
    if issparse(X):
        raise ValueError("Cannot make the data matrix"
                         " nonnegative because it is sparse."
                         " Adding a value to every entry would"
                         " make it no longer sparse.")
    return X + shift
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
recommendations, The American Statistician, Vol. 37, No. 3, pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
Silmathoron/PyNeurActiv | doc/examples/analyzing_raters.py | 2 | 1210 | #!/usr/bin/env python
#-*- coding:utf-8 -*-

""" Using a custom recorder """

from pprint import pprint
import numpy as np
import nest
import nngt
from nngt.simulation import monitor_nodes
import PyNeurActiv as pna
import matplotlib.pyplot as plt

# Run NEST over several OpenMP threads and let recorders overwrite
# output files from previous runs.
num_omp = 5
nest.SetKernelStatus({'local_num_threads': num_omp, 'overwrite_files': True})

''' Make a neuronal population '''

# Parameters for the adaptive exponential integrate-and-fire model
# ('aeif_psc_alpha').
# NOTE(review): values presumably use the NEST default units
# (mV, pA, nS, pF, ms) -- confirm against the model documentation.
di_param = {
    'V_reset': -58.,
    'V_peak': 0.0,
    'V_th': -50.,
    'I_e': 300.,
    'g_L': 9.,
    'tau_w': 300.,
    'E_L': -70.,
    'Delta_T': 2.,
    'a': 2.,
    'b': 60.,
    'C_m': 200.,
    'V_m': -60.,
    'w': 0.,
    'tau_syn_ex': 0.2
}

# 1000 identical neurons wired as an Erdos-Renyi graph with average
# degree 100 and uniform synaptic weight 43, then instantiated in NEST.
pop = nngt.NeuralPop.uniform(
    1000, neuron_model='aeif_psc_alpha', neuron_param=di_param)
net = nngt.generation.erdos_renyi(
    avg_deg=100, nodes=1000, population=pop, weights=43.)
gids = net.to_nest()

''' Record from it '''

rec_param = [{'to_file': True}]
recorder, recorded = monitor_nodes(gids, params=rec_param, network=net)
nest.Simulate(2000.)
# Analyze the raster, discarding the initial transient before 300 ms.
activity = pna.analysis.raster_analysis(recorder[0], limits=[300., np.inf])

''' Plot the spikes with sorted neurons '''

pprint(activity.properties())
pna.plot.raster_plot(activity, sort='spikes')
plt.show()
| gpl-3.0 |
fstagni/DIRAC | Core/Utilities/Graphs/GraphUtilities.py | 6 | 14279 | """ GraphUtilities is a collection of utility functions and classes used
in the DIRAC Graphs package.
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
__RCSID__ = "$Id$"
import os
import time
import datetime
import calendar
import math
import pytz
import numpy
from matplotlib.ticker import ScalarFormatter
from matplotlib.dates import AutoDateLocator, AutoDateFormatter, DateFormatter, RRuleLocator, \
rrulewrapper, HOURLY, MINUTELY, SECONDLY, YEARLY, MONTHLY, DAILY
from dateutil.relativedelta import relativedelta
def evalPrefs( *args, **kw ):
  """ Interpret arguments as preferencies dictionaries or key-value pairs.

  The overriding order is right-most / most important: later positional
  dictionaries and finally the keyword arguments win.  A 'metadata' key
  is flattened: its entries are merged at the top level.  Non-dict
  positional arguments are silently ignored.
  """
  prefs = {}
  for source in list( args ) + [kw]:
    if not isinstance( source, dict ):
      continue
    for key, value in source.items():
      if key == "metadata":
        # Hoist metadata entries to the top level.
        prefs.update( value )
      else:
        prefs[key] = value
  return prefs
def pixelToPoint( size, dpi ):
  """ Convert a size expressed in pixels into points for the given dpi.

  NOTE(review): the factor is 100 rather than the typographic 72
  points/inch -- presumably a deliberate DIRAC scaling choice; confirm
  before changing.
  """
  return 100. * float( size ) / float( dpi )
# Date string formats tried, in order, when parsing fails the eval path.
datestrings = ['%x %X', '%x', '%Y-%m-%d %H:%M:%S']

def convert_to_datetime( dstring ):
  """Coerce *dstring* into a datetime.datetime.

  Accepts: a datetime (returned as-is), a numeric expression string
  evaluated with only `time` and `math` available (interpreted as a Unix
  timestamp), or a date string matching one of `datestrings`.  Raises
  ValueError when nothing matches.
  """
  orig_string = str( dstring )
  try:
    if isinstance( dstring, datetime.datetime ):
      results = dstring
    else:
      # Restricted eval: permits timestamp arithmetic such as
      # "time.time()-3600" while blocking builtins.
      results = eval( str( dstring ), {'__builtins__':None, 'time':time, 'math':math}, {} )
    if isinstance(results, (int, float)):
      results = datetime.datetime.fromtimestamp( int( results ) )
    elif isinstance( results, datetime.datetime ):
      pass
    else:
      raise ValueError( "Unknown datetime type!" )
  except Exception as e:
    # Fallback 1: try each known format, interpreting the string as UTC.
    t = None
    for dateformat in datestrings:
      try:
        t = time.strptime(dstring, dateformat)
        timestamp = calendar.timegm( t ) #-time.timezone
        results = datetime.datetime.fromtimestamp( timestamp )
        break
      except:
        pass
    if t is None:
      # Fallback 2: strip a fractional-seconds suffix and retry.
      # NOTE(review): only the *last* format from the loop above is
      # retried here, and mktime uses local time rather than UTC --
      # presumably intentional legacy behaviour; confirm.
      try:
        dstring = dstring.split('.', 1)[0]
        t = time.strptime(dstring, dateformat)
        timestamp = time.mktime( t ) #-time.timezone
        results = datetime.datetime.fromtimestamp( timestamp )
      except:
        raise ValueError( "Unable to create time from string!\nExpecting " \
                          "format of: '12/06/06 12:54:67'\nRecieved:%s" % orig_string )
  return results
def to_timestamp( val ):
  """ Convert *val* to a Unix timestamp (float, seconds).

  Anything that converts to a float in the range (1e9, 1.9e9) -- i.e.
  roughly years 2001-2030 -- is assumed to already be a timestamp and is
  passed through.  Everything else is parsed via convert_to_datetime and
  converted with the local timezone.
  """
  try:
    seconds = float( val )
  except:
    seconds = None
  if seconds is not None and 1000000000 < seconds < 1900000000:
    return seconds
  moment = convert_to_datetime( val )
  #return calendar.timegm( moment.timetuple() )
  return time.mktime( moment.timetuple() )
# Thresholds controlling how time_interval() picks the subtitle
# granularity for a plotted time span:

# If the graph has more than `hour_switch` minutes, we print
# out hours in the subtitle.
hour_switch = 7

# If the graph has more than `day_switch` hours, we print
# out days in the subtitle.
day_switch = 7

# If the graph has more than `week_switch` days, we print
# out the weeks in the subtitle.
week_switch = 7
def add_time_to_title( begin, end, metadata = None ):
  """ Given a title and two times, adds the time info to the title.

  Example results::

      "Number of Attempted Transfers
      (24 Hours from 4:45 12-14-2006 to 5:56 12-15-2006)"

  There are two important pieces to the subtitle we add - the duration
  (i.e., '48 Hours') and the time interval (i.e., 11:00 07-02-2007 to
  11:00 07-04-2007).

  We attempt to make the duration match the size of the span (for a bar
  graph, this would be the width of the individual bar) in order for it
  to make the most sense.  The formatting of the time interval is based
  upon how much real time there is from the beginning to the end.

  :param begin: span start, in epoch seconds
  :param end: span end, in epoch seconds
  :param metadata: optional plot metadata dict; an explicit 'span' entry
                   overrides the automatically derived duration unit
  :return: the subtitle string
  """
  # Fix for the original mutable-default-argument anti-pattern
  # (metadata={}): use a None sentinel and create a fresh dict per call.
  if metadata is None:
    metadata = {}
  if 'span' in metadata:
    interval = metadata['span']
  else:
    interval = time_interval( begin, end )
  # The strftime format always follows the automatically derived
  # interval, even when 'span' overrides the duration unit.
  formatting_interval = time_interval( begin, end )
  if formatting_interval == 600:
    format_str = '%H:%M:%S'
  elif formatting_interval == 3600:
    format_str = '%Y-%m-%d %H:%M'
  elif formatting_interval == 86400:
    format_str = '%Y-%m-%d'
  elif formatting_interval == 86400 * 7:
    format_str = 'Week %U of %Y'
  # Choose the duration unit ('Seconds' .. 'Weeks') from the interval.
  if interval < 600:
    format_name = 'Seconds'
    time_slice = 1
  elif interval < 3600 and interval >= 600:
    format_name = 'Minutes'
    time_slice = 60
  elif interval >= 3600 and interval < 86400:
    format_name = 'Hours'
    time_slice = 3600
  elif interval >= 86400 and interval < 86400 * 7:
    format_name = 'Days'
    time_slice = 86400
  elif interval >= 86400 * 7:
    format_name = 'Weeks'
    time_slice = 86400 * 7
  else:
    format_str = '%x %X'
    format_name = 'Seconds'
    time_slice = 1
  begin_tuple = time.localtime( begin )
  end_tuple = time.localtime( end )
  added_title = '%i %s from ' % ( int( ( end - begin ) / time_slice ), format_name )
  added_title += time.strftime( '%s to' % format_str, begin_tuple )
  # Sub-day formats include a clock time, so label the timezone.
  if time_slice < 86400:
    add_utc = ' UTC'
  else:
    add_utc = ''
  added_title += time.strftime( ' %s%s' % ( format_str, add_utc ), end_tuple )
  return added_title
def time_interval( begin, end ):
  """
  Pick the natural bucket width, in seconds, for the span ``end - begin``
  as indicated by the `starttime` and `endtime` keywords: 600 (10 min),
  3600 (hour), 86400 (day) or 604800 (week), using the module-level
  *_switch thresholds.
  """
  span = end - begin
  if span < 600 * hour_switch:
    return 600
  elif span < 86400 * day_switch:
    return 3600
  elif span < 86400 * 7 * week_switch:
    return 86400
  return 86400 * 7
def comma_format( x_orig ):
  """ Group thousands with commas, e.g. 1234567 -> '1,234,567'.

  Values below 1000 are returned as ``str(x_orig)`` unchanged; fractional
  parts are preserved by the '%g' formatting of the last group.
  """
  x = float( x_orig )
  if x >= 1000:
    after_comma = x % 1000
    # Floor division keeps the recursion on integers: under Python 3 the
    # original '/' produced floats, breaking the '%03g' group formatting.
    before_comma = int( x ) // 1000
    return '%s,%03g' % ( comma_format( before_comma ), after_comma )
  else:
    return str( x_orig )
class PrettyScalarFormatter( ScalarFormatter ):
  """Matplotlib scalar tick formatter with comma-grouped labels.

  Scientific notation (an order-of-magnitude offset) is only engaged for
  extreme tick magnitudes (<= 1e-7 or >= 1e9); everything in between is
  printed plainly, with thousands separated by commas via comma_format.

  NOTE(review): _set_orderOfMagnitude overrides a private matplotlib
  hook whose signature changed in newer matplotlib versions -- confirm
  against the matplotlib version actually pinned by DIRAC.
  """
  def _set_orderOfMagnitude( self, range ):
    # if scientific notation is to be used, find the appropriate exponent
    # if using an numerical offset, find the exponent after applying the offset
    locs = numpy.absolute( self.locs )
    if self.offset: oom = math.floor( math.log10( range ) )
    else:
      # Use the larger of the outermost tick magnitudes.
      if locs[0] > locs[-1]:
        val = locs[0]
      else:
        val = locs[-1]
      if val == 0:
        oom = 0
      else:
        oom = math.floor( math.log10( val ) )
    # Only allow a non-zero order of magnitude (scientific notation) for
    # very small or very large tick values.
    if oom <= -7:
      self.orderOfMagnitude = oom
    elif oom >= 9:
      self.orderOfMagnitude = oom
    else:
      self.orderOfMagnitude = 0
  def pprint_val( self, x ):
    # Let the base class format the number, then add thousands commas.
    pstring = ScalarFormatter.pprint_val( self, x )
    return comma_format( pstring )
class PrettyDateFormatter( AutoDateFormatter ):
  """ This class provides a formatter which conforms to the
  desired date formats for the Phedex system.

  The tick label format is chosen from the locator's current unit so
  that label granularity matches tick spacing (years down to seconds).
  All times are rendered in UTC.
  """
  def __init__( self, locator ):
    tz = pytz.timezone( 'UTC' )
    AutoDateFormatter.__init__( self, locator, tz = tz )
  def __call__( self, x, pos = 0 ):
    # The locator unit is expressed in days (365 = yearly ticks,
    # 30 = monthly, 1/24 = hourly, ...).
    # NOTE(review): _get_unit is a private matplotlib locator API --
    # confirm it exists in the pinned matplotlib version.
    scale = float( self._locator._get_unit() )
    if scale == 365.0:
      self._formatter = DateFormatter( "%Y", self._tz )
    elif scale == 30.0:
      self._formatter = DateFormatter( "%b %Y", self._tz )
    elif ( scale >= 1.0 ) and ( scale <= 7.0 ):
      self._formatter = DateFormatter( "%Y-%m-%d", self._tz )
    elif scale == ( 1.0 / 24.0 ):
      self._formatter = DateFormatter( "%H:%M", self._tz )
    elif scale == ( 1.0 / ( 24 * 60 ) ):
      self._formatter = DateFormatter( "%H:%M", self._tz )
    elif scale == ( 1.0 / ( 24 * 3600 ) ):
      self._formatter = DateFormatter( "%H:%M:%S", self._tz )
    else:
      # Fallback: fully qualified date and time.
      self._formatter = DateFormatter( "%b %d %Y %H:%M:%S", self._tz )
    return self._formatter( x, pos )
class PrettyDateLocator( AutoDateLocator ):
  """Date tick locator that picks a frequency (yearly .. secondly) and an
  interval from the plotted span, aiming for roughly 5 ticks."""
  def get_locator( self, dmin, dmax ):
    'pick the best locator based on a distance'
    delta = relativedelta( dmax, dmin )
    # Express the span in every candidate unit (months approximated as
    # 31 days -- only used for choosing tick granularity).
    numYears = ( delta.years * 1.0 )
    numMonths = ( numYears * 12.0 ) + delta.months
    numDays = ( numMonths * 31.0 ) + delta.days
    numHours = ( numDays * 24.0 ) + delta.hours
    numMinutes = ( numHours * 60.0 ) + delta.minutes
    numSeconds = ( numMinutes * 60.0 ) + delta.seconds
    # Target number of ticks on the axis.
    numticks = 5
    # self._freq = YEARLY
    interval = 1
    bymonth = 1
    bymonthday = 1
    byhour = 0
    byminute = 0
    bysecond = 0
    # Choose the coarsest unit that still yields >= numticks ticks, then
    # widen the interval so the tick count stays manageable.
    if numYears >= numticks:
      self._freq = YEARLY
    elif numMonths >= numticks:
      self._freq = MONTHLY
      bymonth = range( 1, 13 )
      if ( 0 <= numMonths ) and ( numMonths <= 14 ):
        interval = 1      # show every month
      elif ( 15 <= numMonths ) and ( numMonths <= 29 ):
        interval = 3      # show every 3 months
      elif ( 30 <= numMonths ) and ( numMonths <= 44 ):
        interval = 4      # show every 4 months
      else: # 45 <= numMonths <= 59
        interval = 6      # show every 6 months
    elif numDays >= numticks:
      self._freq = DAILY
      bymonth = None
      bymonthday = range( 1, 32 )
      if ( 0 <= numDays ) and ( numDays <= 9 ):
        interval = 1      # show every day
      elif ( 10 <= numDays ) and ( numDays <= 19 ):
        interval = 2      # show every 2 days
      elif ( 20 <= numDays ) and ( numDays <= 35 ):
        interval = 3      # show every 3 days
      elif ( 36 <= numDays ) and ( numDays <= 80 ):
        interval = 7      # show every 1 week
      else: # 100 <= numDays <= ~150
        interval = 14     # show every 2 weeks
    elif numHours >= numticks:
      self._freq = HOURLY
      bymonth = None
      bymonthday = None
      byhour = range( 0, 24 )      # show every hour
      if ( 0 <= numHours ) and ( numHours <= 14 ):
        interval = 1      # show every hour
      elif ( 15 <= numHours ) and ( numHours <= 30 ):
        interval = 2      # show every 2 hours
      elif ( 30 <= numHours ) and ( numHours <= 45 ):
        interval = 3      # show every 3 hours
      elif ( 45 <= numHours ) and ( numHours <= 68 ):
        interval = 4      # show every 4 hours
      elif ( 68 <= numHours ) and ( numHours <= 90 ):
        interval = 6      # show every 6 hours
      else: # 90 <= numHours <= 120
        interval = 12     # show every 12 hours
    elif numMinutes >= numticks:
      self._freq = MINUTELY
      bymonth = None
      bymonthday = None
      byhour = None
      byminute = range( 0, 60 )
      if numMinutes > ( 10.0 * numticks ):
        interval = 10
      # end if
    elif numSeconds >= numticks:
      self._freq = SECONDLY
      bymonth = None
      bymonthday = None
      byhour = None
      byminute = None
      bysecond = range( 0, 60 )
      if numSeconds > ( 10.0 * numticks ):
        interval = 10
      # end if
    else:
      # do what?
      # microseconds as floats, but floats from what reference point?
      pass
    # Build a dateutil rrule-based locator and hand it the current axis
    # and intervals so it can be used directly in place of this one.
    rrule = rrulewrapper( self._freq, interval = interval, \
                          dtstart = dmin, until = dmax, \
                          bymonth = bymonth, bymonthday = bymonthday, \
                          byhour = byhour, byminute = byminute, \
                          bysecond = bysecond )
    locator = RRuleLocator( rrule, self.tz )
    locator.set_axis( self.axis )
    locator.set_view_interval( *self.axis.get_view_interval() )
    locator.set_data_interval( *self.axis.get_data_interval() )
    return locator
def pretty_float( num ):
  """ Format a number for display.

  Values above 1000 are comma-grouped via comma_format; smaller values
  get 0-2 decimal places depending on magnitude.  Tuples fall back to
  formatting their first element (legacy behaviour).
  """
  if num > 1000:
    return comma_format( int( num ) )
  try:
    floats = int( max( 2 - max( numpy.floor( numpy.log( abs( num ) + 1e-3 ) / numpy.log( 10. ) ), 0 ), 0 ) )
  except Exception:
    # e.g. *num* is a tuple: abs() raises TypeError; default to 2 places.
    # (Narrowed from a bare 'except:' that also swallowed SystemExit and
    # KeyboardInterrupt.)
    floats = 2
  fmt = "%." + str( floats ) + "f"
  if isinstance( num, tuple ):
    return fmt % float( num[0] )
  else:
    try:
      retval = fmt % float( num )
    except Exception:
      raise Exception( "Unable to convert %s into a float." % ( str( num ) ) )
    return retval
def statistics( results, span = None, is_timestamp = False ):
  """ Compute basic statistics of a plot-data mapping.

  :param results: mapping key -> numeric value (keys must be numeric when
                  *span* is given)
  :param span: optional bucket width; keys are expected on the regular
               grid min_key, min_key+span, ... and missing buckets are
               filled with 0.0.  Raises if any key falls off the grid.
  :param is_timestamp: when True, also return the value at the largest key
  :return: (min, max, average) or (min, max, average, current)
  """
  results = dict( results )
  if span is not None:  # fixed: identity comparison instead of '!= None'
    parsed_data = {}
    min_key = min( results.keys() )
    max_key = max( results.keys() )
    for i in range( min_key, max_key + span, span ):
      if i in results:
        parsed_data[i] = results[i]
        del results[i]
      else:
        # Missing bucket: treat as zero.
        parsed_data[i] = 0.0
    if len( results ) > 0:
      raise Exception( "Unable to use all the values for the statistics" )
  else:
    parsed_data = results
  # Materialize: Python 3 dict views are not accepted by numpy.average.
  values = list( parsed_data.values() )
  data_min = min( values )
  data_max = max( values )
  data_avg = numpy.average( values )
  if is_timestamp:
    current_time = max( parsed_data.keys() )
    data_current = parsed_data[ current_time ]
    return data_min, data_max, data_avg, data_current
  else:
    return data_min, data_max, data_avg
def makeDataFromCSV( csv ):
  """ Generate plot data dictionary from a csv file or string.

  Three layouts are understood:
    - two columns ("key,value" per line): returns {key: value}
    - header plus exactly one data line: returns {label: value}
    - more than two columns (stacked data, first column is the x value):
      returns {series_label: {x: value}}
  """
  if os.path.exists( csv ):
    with open( csv, 'r' ) as fdata:
      flines = fdata.readlines()
  else:
    flines = csv.split( '\n' )
  graph_data = {}
  labels = flines[0].strip().split( ',' )
  if len( labels ) == 2:
    # Simple plot data: one "key,value" pair per line.
    for line in flines:
      line = line.strip()
      # Skip blank lines and comments.  The blank-line guard fixes an
      # IndexError on line[0] when the input ends with a newline.
      if line and line[0] != '#':
        key, value = line.split( ',' )
        graph_data[key] = value
  elif len( flines ) == 2:
    # Single data row under a header row.
    values = flines[1].strip().split( ',' )
    for key, value in zip( labels, values ):
      graph_data[key] = value
  elif len( labels ) > 2:
    # Stacked graph data: first column is the x value, each remaining
    # column is one series.
    del labels[0]
    del flines[0]
    for label in labels:
      plot_data = {}
      index = labels.index( label ) + 1
      for line in flines:
        if not line.strip():
          # Same blank-line guard as above (trailing newline support).
          continue
        values = line.strip().split( ',' )
        plot_data[values[0]] = values[index]
      graph_data[label] = dict( plot_data )
  return graph_data
def darkenColor( color, factor=2 ):
  """ Return a darker shade of *color*.

  :param color: '#RRGGBB' hex string
  :param factor: integer divisor applied to each channel (2 = half
                 brightness)
  :return: '#RRGGBB' hex string of the darkened color
  """
  # Floor division: under Python 3 the original '/=' produced floats and
  # the subsequent hex() call raised TypeError.
  c1 = int( color[1:3], 16 ) // factor
  c2 = int( color[3:5], 16 ) // factor
  c3 = int( color[5:7], 16 ) // factor
  # '%02x' zero-pads each channel, matching the old hex()+zfill output.
  return '#%02x%02x%02x' % ( c1, c2, c3 )
| gpl-3.0 |
elkingtonmcb/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Show in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit-curve.
"""
print(__doc__)

# Code source: Gael Varoquaux
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn import linear_model

# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
# Binary labels: class 1 for positive samples, class 0 otherwise.
# NOTE(review): np.float is deprecated/removed in modern NumPy; the
# builtin float would be the safe replacement.
y = (X > 0).astype(np.float)
# Stretch the positive class and add noise so the classes overlap.
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
# Reshape to the (n_samples, n_features) layout scikit-learn expects.
X = X[:, np.newaxis]

# run the classifier (large C: effectively unregularized)
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)

# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
# Evaluation grid for the fitted curves.
X_test = np.linspace(-5, 10, 300)
def model(x):
    """Logistic sigmoid of *x*, mapping the reals onto (0, 1)."""
    z = np.exp(-x)
    return 1 / (1 + z)
# Class-1 probability predicted by the logistic model along the grid.
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)

# Ordinary least squares fit, for comparison with the logistic curve.
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
# Horizontal line at the p = 0.5 decision threshold.
plt.axhline(.5, color='.5')

plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
ibmsoe/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_functions.py | 46 | 15782 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import types as tp
import numpy as np
import six
from tensorflow.python.estimator.inputs.queues import feeding_queue_runner as fqr
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import queue_runner
# Optional pandas support: feeding from DataFrames is only enabled when
# the import succeeds.
try:
  # pylint: disable=g-import-not-at-top
  import pandas as pd
  HAS_PANDAS = True
except IOError:
  # Pandas writes a temporary file during import. If it fails, don't use pandas.
  HAS_PANDAS = False
except ImportError:
  # Pandas is simply not installed.
  HAS_PANDAS = False
def _get_integer_indices_for_next_batch(
batch_indices_start, batch_size, epoch_end, array_length,
current_epoch, total_epochs):
"""Returns the integer indices for next batch.
If total epochs is not None and current epoch is the final epoch, the end
index of the next batch should not exceed the `epoch_end` (i.e., the final
batch might not have size `batch_size` to avoid overshooting the last epoch).
Args:
batch_indices_start: Integer, the index to start next batch.
batch_size: Integer, size of batches to return.
epoch_end: Integer, the end index of the epoch. The epoch could start from a
random position, so `epoch_end` provides the end index for that.
array_length: Integer, the length of the array.
current_epoch: Integer, the epoch number has been emitted.
total_epochs: Integer or `None`, the total number of epochs to emit. If
`None` will run forever.
Returns:
A tuple of a list with integer indices for next batch and `current_epoch`
value after the next batch.
Raises:
OutOfRangeError if `current_epoch` is not less than `total_epochs`.
"""
if total_epochs is not None and current_epoch >= total_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % current_epoch)
batch_indices_end = batch_indices_start + batch_size
batch_indices = [j % array_length for j in
range(batch_indices_start, batch_indices_end)]
epoch_end_indices = [i for i, x in enumerate(batch_indices) if x == epoch_end]
current_epoch += len(epoch_end_indices)
if total_epochs is None or current_epoch < total_epochs:
return (batch_indices, current_epoch)
# Now we might have emitted more data for expected epochs. Need to trim.
final_epoch_end_inclusive = epoch_end_indices[
-(current_epoch - total_epochs + 1)]
batch_indices = batch_indices[:final_epoch_end_inclusive + 1]
return (batch_indices, total_epochs)
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
return {
self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]
}
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(ordered_dict_of_arrays), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
self._max = len(next(iter(ordered_dict_of_arrays.values())))
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [
column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()
]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
integer_indexes, self._epoch = _get_integer_indices_for_next_batch(
batch_indices_start=self._trav,
batch_size=self._batch_size,
epoch_end=self._epoch_end,
array_length=self._max,
current_epoch=self._epoch,
total_epochs=self._num_epochs)
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
class _GeneratorFeedFn(object):
"""Creates feed dictionaries from `Generator` of `dicts` of numpy arrays."""
def __init__(self,
placeholders,
generator,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
first_sample = next(generator())
if len(placeholders) != len(first_sample):
raise ValueError("Expected {} placeholders; got {}.".format(
len(first_sample), len(placeholders)))
self._keys = sorted(list(first_sample.keys()))
self._col_placeholders = placeholders
self._generator_function = generator
self._iterator = generator()
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
list_dict = {}
list_dict_size = 0
while list_dict_size < self._batch_size:
try:
data_row = next(self._iterator)
except StopIteration:
self._epoch += 1
self._iterator = self._generator_function()
data_row = next(self._iterator)
for index, key in enumerate(self._keys):
if key not in data_row.keys():
raise KeyError("key mismatch between dicts emitted by GenFun"
"Expected {} keys; got {}".format(
self._keys, data_row.keys()))
list_dict.setdefault(self._col_placeholders[index],
list()).append(data_row[key])
list_dict_size += 1
feed_dict = {key: np.asarray(item) for key, item in list(list_dict.items())}
return feed_dict
def _enqueue_data(data,
                  capacity,
                  shuffle=False,
                  min_after_dequeue=None,
                  num_threads=1,
                  seed=None,
                  name="enqueue_input",
                  enqueue_size=1,
                  num_epochs=None):
  """Creates a queue filled from a numpy array or pandas `DataFrame`.

  Returns a queue filled with the rows of the given (`OrderedDict` of) array
  or `DataFrame`. In the case of a pandas `DataFrame`, the first enqueued
  `Tensor` corresponds to the index of the `DataFrame`. For (`OrderedDict` of)
  numpy arrays, the first enqueued `Tensor` contains the row number.

  Args:
    data: a numpy `ndarray`, `OrderedDict` of numpy arrays, or a generator
      yielding `dict`s of numpy arrays or pandas `DataFrame` that will be read
      into the queue.
    capacity: the capacity of the queue.
    shuffle: whether or not to shuffle the rows of the array.
    min_after_dequeue: minimum number of elements that can remain in the queue
      after a dequeue operation. Only used when `shuffle` is true. If not set,
      defaults to `capacity` / 4.
    num_threads: number of threads used for reading and enqueueing.
    seed: used to seed shuffling and reader starting points.
    name: a scope name identifying the data.
    enqueue_size: the number of rows to enqueue per step.
    num_epochs: limit enqueuing to a specified number of epochs, if provided.

  Returns:
    A queue filled with the rows of the given (`OrderedDict` of) array or
    `DataFrame`.

  Raises:
    TypeError: `data` is not a Pandas `DataFrame`, an `OrderedDict` of numpy
      arrays, a numpy `ndarray`, or a generator producing these.
  """
  with ops.name_scope(name):
    # Derive queue dtypes/shapes and pick the matching feed-fn class from
    # the container type of `data`.
    if isinstance(data, np.ndarray):
      types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
      queue_shapes = [(), data.shape[1:]]
      get_feed_fn = _ArrayFeedFn
    elif isinstance(data, collections.OrderedDict):
      types = [dtypes.int64] + [
          dtypes.as_dtype(col.dtype) for col in data.values()
      ]
      queue_shapes = [()] + [col.shape[1:] for col in data.values()]
      get_feed_fn = _OrderedDictNumpyFeedFn
    elif isinstance(data, tp.FunctionType):
      # Generator function: pull one sample to discover keys and dtypes.
      x_first_el = six.next(data())
      x_first_keys = sorted(x_first_el.keys())
      x_first_values = [x_first_el[key] for key in x_first_keys]
      types = [dtypes.as_dtype(col.dtype) for col in x_first_values]
      queue_shapes = [col.shape for col in x_first_values]
      get_feed_fn = _GeneratorFeedFn
    elif HAS_PANDAS and isinstance(data, pd.DataFrame):
      types = [
          dtypes.as_dtype(dt) for dt in [data.index.dtype] + list(data.dtypes)
      ]
      queue_shapes = [() for _ in types]
      get_feed_fn = _PandasFeedFn
    else:
      raise TypeError(
          "data must be either a numpy array or pandas DataFrame if pandas is "
          "installed; got {}".format(type(data).__name__))

    # TODO(jamieas): TensorBoard warnings for all warnings below once available.

    # Warn about thread/epoch/shuffle combinations with surprising
    # semantics; behaviour is unchanged, only logged.
    if num_threads > 1 and num_epochs is not None:
      logging.warning(
          "enqueue_data was called with num_epochs and num_threads > 1. "
          "num_epochs is applied per thread, so this will produce more "
          "epochs than you probably intend. "
          "If you want to limit epochs, use one thread.")
    if shuffle and num_threads > 1 and num_epochs is not None:
      logging.warning(
          "enqueue_data was called with shuffle=True, num_threads > 1, and "
          "num_epochs. This will create multiple threads, all reading the "
          "array/dataframe in order adding to the same shuffling queue; the "
          "results will likely not be sufficiently shuffled.")
    if not shuffle and num_threads > 1:
      logging.warning(
          "enqueue_data was called with shuffle=False and num_threads > 1. "
          "This will create multiple threads, all reading the "
          "array/dataframe in order. If you want examples read in order, use"
          " one thread; if you want multiple threads, enable shuffling.")

    if shuffle:
      min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
                              min_after_dequeue)
      queue = data_flow_ops.RandomShuffleQueue(
          capacity,
          min_after_dequeue,
          dtypes=types,
          shapes=queue_shapes,
          seed=seed)
    else:
      min_after_dequeue = 0  # just for the summary text
      queue = data_flow_ops.FIFOQueue(
          capacity, dtypes=types, shapes=queue_shapes)

    # One enqueue op and one feed fn per reader thread, each seeded
    # differently so threads start at different positions.
    enqueue_ops = []
    feed_fns = []
    for i in range(num_threads):
      # Note the placeholders have no shapes, so they will accept any
      # enqueue_size. enqueue_many below will break them up.
      placeholders = [array_ops.placeholder(t) for t in types]
      enqueue_ops.append(queue.enqueue_many(placeholders))
      seed_i = None if seed is None else (i + 1) * seed
      feed_fns.append(
          get_feed_fn(
              placeholders,
              data,
              enqueue_size,
              random_start=shuffle,
              seed=seed_i,
              num_epochs=num_epochs))

    runner = fqr._FeedingQueueRunner(  # pylint: disable=protected-access
        queue=queue, enqueue_ops=enqueue_ops, feed_fns=feed_fns)
    queue_runner.add_queue_runner(runner)

    # Export a 0..1 "fullness" summary of the usable queue capacity.
    full = (math_ops.cast(
        math_ops.maximum(0, queue.size() - min_after_dequeue),
        dtypes.float32) * (1. / (capacity - min_after_dequeue)))
    # Note that name contains a '/' at the end so we intentionally do not place
    # a '/' after %s below.
    summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
                    (queue.name, min_after_dequeue,
                     capacity - min_after_dequeue))
    summary.scalar(summary_name, full)
    return queue
| apache-2.0 |
Vimos/scikit-learn | sklearn/utils/tests/test_testing.py | 29 | 7316 | import warnings
import unittest
import sys
from sklearn.utils.testing import (
assert_raises,
assert_less,
assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message,
ignore_warnings)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
def test_assert_less():
    # assert_less passes for an increasing pair and raises otherwise.
    assert_less(0, 1)
    assert_raises(AssertionError, assert_less, 1, 0)
def test_assert_greater():
    # assert_greater passes for a decreasing pair and raises otherwise.
    assert_greater(1, 0)
    assert_raises(AssertionError, assert_greater, 0, 1)
def test_assert_less_equal():
    # assert_less_equal accepts both strict inequality and equality.
    assert_less_equal(0, 1)
    assert_less_equal(1, 1)
    assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
    # assert_greater_equal accepts both strict inequality and equality.
    assert_greater_equal(1, 0)
    assert_greater_equal(1, 1)
    assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
    """set_random_state is a no-op for estimators without a random_state
    parameter and sets the attribute when one exists."""
    lda = LinearDiscriminantAnalysis()
    tree = DecisionTreeClassifier()
    # Linear Discriminant Analysis doesn't have random state: smoke test
    # (the call must simply not raise).
    set_random_state(lda, 3)
    # DecisionTreeClassifier does: the attribute must be set.
    set_random_state(tree, 3)
    assert_equal(tree.random_state, 3)
def test_assert_raise_message():
    def _raise_ValueError(message):
        raise ValueError(message)

    def _no_raise():
        pass

    # Matching exception type and message: passes silently.
    assert_raise_message(ValueError, "test",
                         _raise_ValueError, "test")

    # Right type but wrong message -> AssertionError.
    assert_raises(AssertionError,
                  assert_raise_message, ValueError, "something else",
                  _raise_ValueError, "test")

    # Expected type does not match the raised one -> the original
    # ValueError propagates unchanged.
    assert_raises(ValueError,
                  assert_raise_message, TypeError, "something else",
                  _raise_ValueError, "test")

    # Nothing raised at all -> AssertionError.
    assert_raises(AssertionError,
                  assert_raise_message, ValueError, "test",
                  _no_raise)

    # multiple exceptions in a tuple
    assert_raises(AssertionError,
                  assert_raise_message, (ValueError, AttributeError),
                  "test", _no_raise)
def test_ignore_warning():
    # Check that the ignore_warnings decorator and context manager work
    # as expected: warnings of the silenced category are suppressed while
    # other categories still escape.
    def _warning_function():
        warnings.warn("deprecation warning", DeprecationWarning)

    def _multiple_warning_function():
        warnings.warn("deprecation warning", DeprecationWarning)
        warnings.warn("deprecation warning")  # default category: UserWarning

    # Check the function directly
    assert_no_warnings(ignore_warnings(_warning_function))
    assert_no_warnings(ignore_warnings(_warning_function,
                                       category=DeprecationWarning))
    assert_warns(DeprecationWarning, ignore_warnings(_warning_function,
                                                     category=UserWarning))
    assert_warns(UserWarning,
                 ignore_warnings(_multiple_warning_function,
                                 category=DeprecationWarning))
    assert_warns(DeprecationWarning,
                 ignore_warnings(_multiple_warning_function,
                                 category=UserWarning))
    assert_no_warnings(ignore_warnings(_warning_function,
                                       category=(DeprecationWarning,
                                                 UserWarning)))

    # Check the decorator
    @ignore_warnings
    def decorator_no_warning():
        _warning_function()
        _multiple_warning_function()

    @ignore_warnings(category=(DeprecationWarning, UserWarning))
    def decorator_no_warning_multiple():
        _multiple_warning_function()

    @ignore_warnings(category=DeprecationWarning)
    def decorator_no_deprecation_warning():
        _warning_function()

    @ignore_warnings(category=UserWarning)
    def decorator_no_user_warning():
        _warning_function()

    @ignore_warnings(category=DeprecationWarning)
    def decorator_no_deprecation_multiple_warning():
        _multiple_warning_function()

    @ignore_warnings(category=UserWarning)
    def decorator_no_user_multiple_warning():
        _multiple_warning_function()

    assert_no_warnings(decorator_no_warning)
    assert_no_warnings(decorator_no_warning_multiple)
    assert_no_warnings(decorator_no_deprecation_warning)
    assert_warns(DeprecationWarning, decorator_no_user_warning)
    assert_warns(UserWarning, decorator_no_deprecation_multiple_warning)
    assert_warns(DeprecationWarning, decorator_no_user_multiple_warning)

    # Check the context manager
    def context_manager_no_warning():
        with ignore_warnings():
            _warning_function()

    def context_manager_no_warning_multiple():
        with ignore_warnings(category=(DeprecationWarning, UserWarning)):
            _multiple_warning_function()

    def context_manager_no_deprecation_warning():
        with ignore_warnings(category=DeprecationWarning):
            _warning_function()

    def context_manager_no_user_warning():
        with ignore_warnings(category=UserWarning):
            _warning_function()

    def context_manager_no_deprecation_multiple_warning():
        with ignore_warnings(category=DeprecationWarning):
            _multiple_warning_function()

    def context_manager_no_user_multiple_warning():
        with ignore_warnings(category=UserWarning):
            _multiple_warning_function()

    assert_no_warnings(context_manager_no_warning)
    assert_no_warnings(context_manager_no_warning_multiple)
    assert_no_warnings(context_manager_no_deprecation_warning)
    assert_warns(DeprecationWarning, context_manager_no_user_warning)
    assert_warns(UserWarning, context_manager_no_deprecation_multiple_warning)
    assert_warns(DeprecationWarning, context_manager_no_user_multiple_warning)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset of warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
# `clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
    def test_warn(self):
        def f():
            warnings.warn("yo")
            return 3

        # Test that assert_warns is not impacted by externally set
        # filters and is reset internally.
        # This is because `clean_warning_registry()` is called internally by
        # assert_warns and clears all previous filters.
        warnings.simplefilter("ignore", UserWarning)
        assert_equal(assert_warns(UserWarning, f), 3)

        # Test that the warning registry is empty after assert_warns
        assert_equal(sys.modules['warnings'].filters, [])

        assert_raises(AssertionError, assert_no_warnings, f)
        assert_equal(assert_no_warnings(lambda x: x, 1), 1)

    def test_warn_wrong_warning(self):
        def f():
            warnings.warn("yo", DeprecationWarning)

        failed = False
        # Save the global filters so we can restore them even on failure.
        filters = sys.modules['warnings'].filters[:]
        try:
            try:
                # Should raise an AssertionError
                assert_warns(UserWarning, f)
                failed = True
            except AssertionError:
                pass
        finally:
            sys.modules['warnings'].filters = filters
        if failed:
            raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
bzero/statsmodels | tools/code_maintenance.py | 37 | 2307 | """
Code maintenance script modified from PyMC
"""
#!/usr/bin/env python
import sys
import os
# This is a function, not a test case, because it has to be run from inside
# the source tree to work well.
# Optional dependencies to scan for; 'Pdb' is included so leftover
# debugger imports can be detected (see the dead-code check at the
# bottom of this script).
mod_strs = ['IPython', 'pylab', 'matplotlib', 'scipy', 'Pdb']
# Map each module name to the list of files that reference it.
dep_files = {}
for mod_str in mod_strs:
    dep_files[mod_str] = []
def remove_whitespace(fname):
    """Strip trailing whitespace from every line of ``fname``, in place.

    The file is rewritten with a single ``'\\n'`` terminator per line.
    """
    # The original opened with mode='U' (universal newlines); that flag was
    # removed in Python 3.11, and text mode now translates newlines by
    # default, so a plain open() preserves the behavior.  ``with`` ensures
    # the handles are closed even if reading or writing fails.
    with open(fname) as fd:
        lines = [line.rstrip() for line in fd]
    with open(fname, mode='w') as fd:
        # (The original also called fd.seek(0), which is a no-op right
        # after opening in 'w' mode and has been dropped.)
        for line in lines:
            fd.write(line + '\n')
    # print('Removed whitespace from %s' % fname)
def find_whitespace(fname):
    """Print ``fname`` (once) if any of its lines ends with a space.

    Note: like the original, this only detects a literal space before the
    newline, not a trailing tab.
    """
    # mode='U' was removed in Python 3.11; default text mode already does
    # universal newlines.  ``with`` closes the handle the original leaked.
    with open(fname) as fd:
        for line in fd:
            # print(repr(line))
            if ' \n' in line:
                print(fname)
                break
    # print
# When True, only report files with trailing whitespace (find_whitespace);
# when False, rewrite them in place (remove_whitespace).
# NOTE(review): the indentation of this script body was lost during
# extraction; as written the loop below cannot run and needs its
# structure restored before use.
print_only = True
# ====================
# = Strip whitespace =
# ====================
# Walk the tree rooted at the current directory.
for dirname, dirs, files in os.walk('.'):
# Skip hidden directories ('.' anywhere past the leading './').
if dirname[1:].find('.')==-1:
# print dirname
for fname in files:
# Process C/Fortran sources, .py/.pyx, text-like files, and
# extension-less files.
if fname[-2:] in ['c', 'f'] or fname[-3:]=='.py' or fname[-4:] in ['.pyx', '.txt', '.tex', '.sty', '.cls'] or fname.find('.')==-1:
# print fname
if print_only:
find_whitespace(dirname + '/' + fname)
else:
remove_whitespace(dirname + '/' + fname)
"""
# ==========================
# = Check for dependencies =
# ==========================
for dirname, dirs, files in os.walk('pymc'):
for fname in files:
if fname[-3:]=='.py' or fname[-4:]=='.pyx':
if dirname.find('sandbox')==-1 and fname != 'test_dependencies.py'\
and dirname.find('examples')==-1:
for mod_str in mod_strs:
if file(dirname+'/'+fname).read().find(mod_str)>=0:
dep_files[mod_str].append(dirname+'/'+fname)
print 'Instances of optional dependencies found are:'
for mod_str in mod_strs:
print '\t'+mod_str+':'
for fname in dep_files[mod_str]:
print '\t\t'+fname
if len(dep_files['Pdb'])>0:
raise ValueError, 'Looks like Pdb was not commented out in '+', '.join(dep_files[mod_str])
"""
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.