repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
cg31/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/io_test.py | 20 | 5003 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.learn IO operation tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import tensorflow as tf
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn.learn_io import *
# pylint: enable=wildcard-import
class IOTest(tf.test.TestCase):
# pylint: disable=undefined-variable
"""tf.learn IO operation tests."""
def test_pandas_dataframe(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.DataFrame(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels[0], list(classifier.predict(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
else:
print("No pandas installed. pandas-related tests are skipped.")
def test_pandas_series(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.Series(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels, list(classifier.predict(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
def test_string_data_formats(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
with self.assertRaises(ValueError):
learn.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
with self.assertRaises(ValueError):
learn.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))
def test_dask_io(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# test dask.dataframe
df = pd.DataFrame(
dict(a=list("aabbcc"), b=list(range(6))),
index=pd.date_range(start="20100101", periods=6))
ddf = dd.from_pandas(df, npartitions=3)
extracted_ddf = extract_dask_data(ddf)
self.assertEqual(
extracted_ddf.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_ddf.divisions))
self.assertEqual(
extracted_ddf.columns.tolist(), ["a", "b"],
"Failed with columns = {0}".format(extracted_ddf.columns))
# test dask.series
labels = ddf["a"]
extracted_labels = extract_dask_labels(labels)
self.assertEqual(
extracted_labels.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_labels.divisions))
# labels should only have one column
with self.assertRaises(ValueError):
extract_dask_labels(ddf)
else:
print("No dask installed. dask-related tests are skipped.")
def test_dask_iris_classification(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
data = dd.from_pandas(data, npartitions=2)
labels = pd.DataFrame(iris.target)
labels = dd.from_pandas(labels, npartitions=2)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
predictions = data.map_partitions(classifier.predict).compute()
score = accuracy_score(labels.compute(), predictions)
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
mayblue9/scikit-learn | sklearn/svm/tests/test_svm.py | 70 | 31674 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
# we should get deteriministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
@ignore_warnings
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
# check shape of ovo_decition_function=True
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf.decision_function_shape = None
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
# used to work only when the labels where a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
# error for precomputed kernelsx
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modification are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
| bsd-3-clause |
hadrianmontes/Plotter2 | template_gepmetry.py | 1 | 2793 | from ui.template_dialog import Ui_Dialog
from PyQt4 import QtGui
import matplotlib.pyplot as plt
from axes_template import axes_template
from time import sleep
class TemplateDialog(Ui_Dialog):
    """Dialog for editing a grid layout of subplot slots.

    The grid is edited through a table widget: each cell holds an integer
    index, and the cell matrix is handed to ``axes_template`` to build a
    preview figure. Cells painted with the same index presumably form one
    axes region in the template — TODO confirm against axes_template.
    """
    def __init__(self,*args,**kwargs):
        # Value picked from the last-pressed cell; False until a cell
        # has been pressed (used as a "nothing selected yet" sentinel).
        self.selectedIndex=False
        super(TemplateDialog,self).__init__(*args,**kwargs)
    def setupUi(self,MainWindow):
        """Build the generated UI and wire up the widget signals."""
        self.parent=MainWindow
        super(TemplateDialog,self).setupUi(MainWindow)
        self.set_but.clicked.connect(self.setColRow)
        self.table_template.itemSelectionChanged.connect(self.increase_selection)
        self.table_template.cellPressed.connect(self.selected_item)
        self.preview_but.clicked.connect(self.preview)
        self.dialog_buttons.accepted.connect(self.accept)
        self.dialog_buttons.rejected.connect(self.reject)
        # Initialize the table from the current spinbox values.
        self.setColRow()
    def setColRow(self):
        """Resize the table and reset every cell to a unique 1-based index."""
        ncol=self.ncol.value()
        self.table_template.setColumnCount(ncol)
        nrow=self.nrow.value()
        self.table_template.setRowCount(nrow)
        index=0
        self.matrix=[]
        for i in range(nrow):
            self.matrix.append([])
            for j in range(ncol):
                index+=1
                self.matrix[-1].append(index)
                item=QtGui.QTableWidgetItem(str(index))
                self.table_template.setItem(i,j,item)
        # Snapshot of the matrix before any selection-driven edits.
        self.prematrix=[]
        for i in range(len(self.matrix)):
            self.prematrix.append([])
            for j in range(len(self.matrix[0])):
                self.prematrix[-1].append(self.matrix[i][j])
    def selected_item(self):
        """Record the value under the pressed cell and re-snapshot the grid."""
        # print(self.table_template.selectedIndexes())
        index=self.table_template.currentIndex()
        # Read the picked value from the pre-edit snapshot, not the live matrix.
        self.selectedIndex=self.prematrix[index.row()][index.column()]
        self.prematrix=[]
        for i in range(len(self.matrix)):
            self.prematrix.append([])
            for j in range(len(self.matrix[0])):
                self.prematrix[-1].append(self.matrix[i][j])
        self.startingIn=(index.row(),index.column())
        # print(index.row(),index.column(),self.selectedIndex)
    def increase_selection(self):
        """Paint the picked index over every currently selected cell."""
        if not self.selectedIndex:
            return
        indexes=self.table_template.selectedIndexes()
        for index in indexes:
            row=index.row()
            column=index.column()
            self.matrix[row][column]=self.selectedIndex
            item=QtGui.QTableWidgetItem(str(self.selectedIndex))
            self.table_template.setItem(row,column,item)
    def preview(self):
        """Show a preview figure of the current template for a few seconds."""
        template=axes_template(matrix=self.matrix)
        fig=template.generate_preview()
        fig.show()
        # NOTE(review): sleep blocks the Qt event loop while the preview is
        # up — a QTimer would keep the UI responsive; confirm intent.
        sleep(5)
        plt.close(fig)
    def accept(self):
        # Delegate to the owning window's accept slot.
        self.parent.accept()
    def reject(self):
        # Rejecting simply closes the owning window.
        self.parent.close()
| mit |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/matplotlib/backends/backend_wxagg.py | 10 | 5840 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib
from matplotlib.figure import Figure
from .backend_agg import FigureCanvasAgg
from . import wx_compat as wxc
from . import backend_wx
from .backend_wx import (FigureManagerWx, FigureCanvasWx,
FigureFrameWx, DEBUG_MSG, NavigationToolbar2Wx, Toolbar)
import wx
# Module-level `show` callable looked up by matplotlib's backend machinery.
show = backend_wx.Show()
class FigureFrameWxAgg(FigureFrameWx):
    """Frame specialization that hosts an agg-rendered wx canvas."""
    def get_canvas(self, fig):
        return FigureCanvasWxAgg(self, -1, fig)
    def _get_toolbar(self, statbar):
        # Only the 'toolbar2' rcParam setting produces a toolbar.
        if matplotlib.rcParams['toolbar'] != 'toolbar2':
            return None
        toolbar = NavigationToolbar2WxAgg(self.canvas)
        toolbar.set_status_bar(statbar)
        return toolbar
class FigureCanvasWxAgg(FigureCanvasAgg, FigureCanvasWx):
    """
    The FigureCanvas contains the figure and does event handling.
    In the wxPython backend, it is derived from wxPanel, and (usually)
    lives inside a frame instantiated by a FigureManagerWx. The parent
    window probably implements a wxSizer to control the displayed
    control size - but we give a hint as to our preferred minimum
    size.
    """
    def draw(self, drawDC=None):
        """
        Render the figure using agg.
        """
        DEBUG_MSG("draw()", 1, self)
        FigureCanvasAgg.draw(self)
        # Convert the freshly rendered agg buffer to a wx bitmap, then
        # repaint the whole canvas.
        self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
        self._isDrawn = True
        self.gui_repaint(drawDC=drawDC, origin='WXAgg')
    def blit(self, bbox=None):
        """
        Transfer the region of the agg buffer defined by bbox to the display.
        If bbox is None, the entire buffer is transferred.
        """
        if bbox is None:
            # No region given: replace the whole displayed bitmap.
            self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
            self.gui_repaint()
            return
        l, b, w, h = bbox.bounds
        r = l + w
        t = b + h
        # Flip y: bbox is in figure coordinates (origin bottom-left) while
        # the wx bitmap's origin is at the top-left.
        x = int(l)
        y = int(self.bitmap.GetHeight() - t)
        srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
        srcDC = wx.MemoryDC()
        srcDC.SelectObject(srcBmp)
        destDC = wx.MemoryDC()
        destDC.SelectObject(self.bitmap)
        # Copy just the damaged rectangle into the displayed bitmap.
        destDC.Blit(x, y, int(w), int(h), srcDC, x, y)
        # Deselect the bitmaps from the DCs before they go out of scope.
        destDC.SelectObject(wx.NullBitmap)
        srcDC.SelectObject(wx.NullBitmap)
        self.gui_repaint()
    # Reuse the agg canvas's supported output file types.
    filetypes = FigureCanvasAgg.filetypes
    def print_figure(self, filename, *args, **kwargs):
        # Use pure Agg renderer to draw
        FigureCanvasAgg.print_figure(self, filename, *args, **kwargs)
        # Restore the current view; this is needed because the
        # artist contains methods rely on particular attributes
        # of the rendered figure for determining things like
        # bounding boxes.
        if self._isDrawn:
            self.draw()
class NavigationToolbar2WxAgg(NavigationToolbar2Wx):
    # Toolbar specialization whose canvases render through agg.
    def get_canvas(self, frame, fig):
        return FigureCanvasWxAgg(frame, -1, fig)
def new_figure_manager(num, *args, **kwargs):
    """
    Create a new figure manager instance
    """
    # The figure is constructed here so the pylab interface can reach the
    # Figure constructor through this entry point.
    DEBUG_MSG("new_figure_manager()", 3, None)
    backend_wx._create_wx_app()
    figure_class = kwargs.pop('FigureClass', Figure)
    figure = figure_class(*args, **kwargs)
    return new_figure_manager_given_figure(num, figure)
def new_figure_manager_given_figure(num, figure):
    """
    Create a new figure manager instance for the given figure.
    """
    frame = FigureFrameWxAgg(num, figure)
    manager = frame.get_figure_manager()
    # In interactive mode the window is shown immediately and a redraw
    # is scheduled.
    if matplotlib.is_interactive():
        manager.frame.Show()
        figure.canvas.draw_idle()
    return manager
#
# agg/wxPython image conversion functions (wxPython >= 2.8)
#
def _convert_agg_to_wx_image(agg, bbox):
    """
    Convert the region of the agg buffer bounded by bbox to a wx.Image. If
    bbox is None, the entire buffer is converted.
    Note: agg must be a backend_agg.RendererAgg instance.
    """
    if bbox is not None:
        # agg => rgba buffer -> bitmap => clipped bitmap => image
        return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))
    # agg => rgb -> image
    image = wxc.EmptyImage(int(agg.width), int(agg.height))
    image.SetData(agg.tostring_rgb())
    return image
def _convert_agg_to_wx_bitmap(agg, bbox):
    """
    Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
    bbox is None, the entire buffer is converted.
    Note: agg must be a backend_agg.RendererAgg instance.
    """
    if bbox is not None:
        # agg => rgba buffer -> bitmap => clipped bitmap
        return _WX28_clipped_agg_as_bitmap(agg, bbox)
    # agg => rgba buffer -> bitmap
    return wxc.BitmapFromBuffer(int(agg.width), int(agg.height),
                                agg.buffer_rgba())
def _WX28_clipped_agg_as_bitmap(agg, bbox):
    """
    Convert the region of the agg buffer bounded by bbox to a wx.Bitmap.
    Note: agg must be a backend_agg.RendererAgg instance.
    """
    l, b, width, height = bbox.bounds
    r = l + width  # NOTE(review): `r` is computed but never used.
    t = b + height
    # Wrap the full agg RGBA buffer in a bitmap so a sub-region can be
    # blitted out of it.
    srcBmp = wxc.BitmapFromBuffer(int(agg.width), int(agg.height),
                                  agg.buffer_rgba())
    srcDC = wx.MemoryDC()
    srcDC.SelectObject(srcBmp)
    destBmp = wxc.EmptyBitmap(int(width), int(height))
    destDC = wx.MemoryDC()
    destDC.SelectObject(destBmp)
    # Flip y: bbox is in agg coordinates (origin bottom-left) while wx
    # bitmaps have their origin at the top-left.
    x = int(l)
    y = int(int(agg.height) - t)
    destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
    # Deselect the bitmaps before handing destBmp back to the caller.
    srcDC.SelectObject(wx.NullBitmap)
    destDC.SelectObject(wx.NullBitmap)
    return destBmp
# Aliases looked up by matplotlib's backend-loading machinery.
FigureCanvas = FigureCanvasWxAgg
FigureManager = FigureManagerWx
| apache-2.0 |
aashish24/seaborn | seaborn/linearmodels.py | 1 | 67085 | """Plotting functions for linear models (broadly construed)."""
from __future__ import division
import copy
import itertools
import numpy as np
import pandas as pd
from scipy.spatial import distance
import matplotlib as mpl
import matplotlib.pyplot as plt
# Statsmodels is an optional dependency; record its availability so that
# regression options that require it can be gated at call time.
try:
    import statsmodels
    assert statsmodels
    _has_statsmodels = True
except ImportError:
    _has_statsmodels = False
from .external.six import string_types
from .external.six.moves import range
from . import utils
from . import algorithms as algo
from .palettes import color_palette
from .axisgrid import FacetGrid, PairGrid
from .distributions import kdeplot
class _LinearPlotter(object):
    """Base class for plotting relational data in tidy format.
    To get anything useful done you'll have to inherit from this, but setup
    code that can be abstracted out should be put here.
    """
    def establish_variables(self, data, **kws):
        """Extract variables from data or use directly."""
        self.data = data
        # Named (string) variables require a dataframe to look them up in.
        has_names = any([isinstance(v, string_types) for v in kws.values()])
        if has_names and data is None:
            raise ValueError("Must pass `data` if using named variables.")
        # Assign each variable, resolving string names through the dataframe.
        for name, value in kws.items():
            if isinstance(value, string_types):
                setattr(self, name, data[value])
            else:
                setattr(self, name, value)
    def dropna(self, *vars):
        """Remove observations with missing data."""
        arrays = [getattr(self, name) for name in vars]
        arrays = [a for a in arrays if a is not None]
        # An observation is kept only if it is non-null in every variable.
        keep = np.all(np.column_stack([pd.notnull(a) for a in arrays]), axis=1)
        for name in vars:
            value = getattr(self, name)
            if value is not None:
                setattr(self, name, value[keep])
    def plot(self, ax):
        # Subclasses implement the actual drawing.
        raise NotImplementedError
class _DiscretePlotter(_LinearPlotter):
    """Plotter for data with discrete independent variable(s).
    This will be used by the `barplot` and `pointplot` functions, and
    thus indirectly by the `factorplot` function. It can produce plots
    where some statistic for a dependent measure is estimated within
    subsets of the data, which can be hierarchically structured at up to two
    levels (`x` and `hue`). The plots can be drawn with a few different
    visual representations of the same underlying data (`bar`, and `point`,
    with `box` doing something similar but skipping the estimation).
    """
    def __init__(self, x, y=None, hue=None, data=None, units=None,
                 x_order=None, hue_order=None, color=None, palette=None,
                 kind="auto", markers=None, linestyles=None, dodge=0,
                 join=True, hline=None, estimator=np.mean, ci=95,
                 n_boot=1000, dropna=True):
        # This implies we have a single bar/point for each level of `x`
        # but that the different levels should be mapped with a palette
        self.x_palette = hue is None and palette is not None
        # Set class attributes based on inputs.
        # With no `y`, the plot shows counts: the "estimator" is `len`
        # and confidence intervals are meaningless.
        self.estimator = len if y is None else estimator
        self.ci = None if y is None else ci
        self.join = join
        self.n_boot = n_boot
        self.hline = hline
        # Other attributes that are hardcoded for now
        self.bar_widths = .8
        self.err_color = "#444444"
        self.lw = mpl.rcParams["lines.linewidth"] * 1.8
        # Once we've set the above values, if `y` is None we want the actual
        # y values to be the x values so we can count them
        self.y_count = y is None
        if y is None:
            y = x
        # Ascertain which values will be associated with what values
        self.establish_variables(data, x=x, y=y, hue=hue, units=units)
        # Figure out the order of the variables on the x axis
        x_sorted = np.sort(pd.unique(self.x))
        self.x_order = x_sorted if x_order is None else x_order
        if self.hue is not None:
            hue_sorted = np.sort(pd.unique(self.hue))
            self.hue_order = hue_sorted if hue_order is None else hue_order
        else:
            # A single [None] level lets the drawing loops run unchanged.
            self.hue_order = [None]
        # Handle the other hue-mapped attributes
        if markers is None:
            self.markers = ["o"] * len(self.hue_order)
        else:
            if len(markers) != len(self.hue_order):
                raise ValueError("Length of marker list must equal "
                                 "number of hue levels")
            self.markers = markers
        if linestyles is None:
            self.linestyles = ["-"] * len(self.hue_order)
        else:
            if len(linestyles) != len(self.hue_order):
                raise ValueError("Length of linestyle list must equal "
                                 "number of hue levels")
            self.linestyles = linestyles
        # Drop null observations
        if dropna:
            self.dropna("x", "y", "hue", "units")
        # Settle which kind of plot this is going to be
        self.establish_plot_kind(kind)
        # Determine the color palette
        self.establish_palette(color, palette)
        # Figure out where the data should be drawn
        self.establish_positions(dodge)
    def establish_palette(self, color, palette):
        """Set a list of colors for each plot element."""
        # With no hue variable a palette may still map colors over `x` levels.
        n_hues = len(self.x_order) if self.x_palette else len(self.hue_order)
        hue_names = self.x_order if self.x_palette else self.hue_order
        if self.hue is None and not self.x_palette:
            # Single color for every element.
            if color is None:
                color = color_palette()[0]
            palette = [color for _ in self.x_order]
        elif palette is None:
            # Fall back to the active color cycle, extending via husl
            # when there are more hues than cycle entries.
            current_palette = mpl.rcParams["axes.color_cycle"]
            if len(current_palette) <= n_hues:
                palette = color_palette("husl", n_hues)
            else:
                palette = color_palette(n_colors=n_hues)
        elif isinstance(palette, dict):
            # A dict maps hue level -> color.
            palette = [palette[k] for k in hue_names]
            palette = color_palette(palette, n_hues)
        else:
            palette = color_palette(palette, n_hues)
        self.palette = palette
        if self.kind == "point":
            self.err_palette = palette
        else:
            # TODO make this smarter
            self.err_palette = [self.err_color] * len(palette)
    def establish_positions(self, dodge):
        """Make list of center values for each x and offset for each hue."""
        self.positions = np.arange(len(self.x_order))
        # If there's no hue variable kind is irrelevant
        if self.hue is None:
            n_hues = 1
            width = self.bar_widths
            offset = np.zeros(n_hues)
        else:
            n_hues = len(self.hue_order)
            # Bar offset is set by hardcoded bar width
            if self.kind in ["bar", "box"]:
                width = self.bar_widths / n_hues
                offset = np.linspace(0, self.bar_widths - width, n_hues)
                if self.kind == "box":
                    width *= .95
                self.bar_widths = width
            # Point offset is set by `dodge` parameter
            elif self.kind == "point":
                offset = np.linspace(0, dodge, n_hues)
            # Center the hue offsets around each x position.
            offset -= offset.mean()
        self.offset = offset
    def establish_plot_kind(self, kind):
        """Use specified kind or apply heuristics to decide automatically."""
        if kind == "auto":
            y = self.y
            # Walk through some heuristics to automatically assign a kind
            if self.y_count:
                kind = "bar"
            elif y.max() <= 1:
                kind = "point"
            elif (y.mean() / y.std()) < 2.5:
                kind = "bar"
            else:
                kind = "point"
            self.kind = kind
        elif kind in ["bar", "point"]:
            self.kind = kind
        else:
            # NOTE(review): "box" has a drawing method below but is not an
            # accepted kind here — confirm whether that is intentional.
            raise ValueError("%s is not a valid kind of plot" % kind)
    @property
    def estimate_data(self):
        """Generator to yield x, y, and ci data for each hue subset."""
        # First iterate through the hues, as plots are drawn for all
        # positions of a given hue at the same time
        for i, hue in enumerate(self.hue_order):
            # Build intermediate lists of the values for each drawing
            pos = []
            height = []
            ci = []
            for j, x in enumerate(self.x_order):
                pos.append(self.positions[j] + self.offset[i])
                # Focus on the data for this specific bar/point
                current_data = (self.x == x) & (self.hue == hue)
                y_data = self.y[current_data]
                if self.units is None:
                    unit_data = None
                else:
                    unit_data = self.units[current_data]
                # This is where the main computation happens
                height.append(self.estimator(y_data))
                # Only bootstrap with multiple values
                if current_data.sum() < 2:
                    ci.append((None, None))
                    continue
                # Get the confidence intervals
                if self.ci is not None:
                    boots = algo.bootstrap(y_data, func=self.estimator,
                                           n_boot=self.n_boot,
                                           units=unit_data)
                    ci.append(utils.ci(boots, self.ci))
            yield pos, height, ci
    @property
    def binned_data(self):
        """Generator to yield entire subsets of data for each bin."""
        # First iterate through the hues, as plots are drawn for all
        # positions of a given hue at the same time
        for i, hue in enumerate(self.hue_order):
            # Build intermediate lists of the values for each drawing
            pos = []
            data = []
            for j, x in enumerate(self.x_order):
                pos.append(self.positions[j] + self.offset[i])
                current_data = (self.x == x) & (self.hue == hue)
                data.append(self.y[current_data])
            yield pos, data
    def plot(self, ax):
        """Plot based on the stored value for kind of plot."""
        # Dispatch to barplot / boxplot / pointplot by name.
        plotter = getattr(self, self.kind + "plot")
        plotter(ax)
        # Set the plot attributes (these are shared across plot kinds)
        if self.hue is not None:
            leg = ax.legend(loc="best", scatterpoints=1)
            if hasattr(self.hue, "name"):
                leg.set_title(self.hue.name),  # (stray trailing comma is harmless)
                # Set the title size a roundabout way to maintain
                # compatibility with matplotlib 1.1
                titlesize = mpl.rcParams["axes.labelsize"]
                prop = mpl.font_manager.FontProperties(size=titlesize)
                leg._legend_title_box._text.set_font_properties(prop)
        ax.xaxis.grid(False)
        ax.set_xticks(self.positions)
        ax.set_xticklabels(self.x_order)
        if hasattr(self.x, "name"):
            ax.set_xlabel(self.x.name)
        if self.y_count:
            ax.set_ylabel("count")
        else:
            if hasattr(self.y, "name"):
                ax.set_ylabel(self.y.name)
        if self.hline is not None:
            # Only draw the reference line if it falls inside the y limits.
            ymin, ymax = ax.get_ylim()
            if self.hline > ymin and self.hline < ymax:
                ax.axhline(self.hline, c="#666666")
    def barplot(self, ax):
        """Draw the plot with a bar representation."""
        for i, (pos, height, ci) in enumerate(self.estimate_data):
            color = self.palette if self.x_palette else self.palette[i]
            ecolor = self.err_palette[i]
            label = self.hue_order[i]
            # The main plot
            ax.bar(pos, height, self.bar_widths, color=color,
                   label=label, align="center")
            # The error bars
            for x, (low, high) in zip(pos, ci):
                if low is None:
                    continue
                ax.plot([x, x], [low, high], linewidth=self.lw, color=ecolor)
        # Set the x limits
        offset = .5
        xlim = self.positions.min() - offset, self.positions.max() + offset
        ax.set_xlim(xlim)
    def boxplot(self, ax):
        """Draw the plot with a box representation."""
        from .distributions import boxplot
        for i, (pos, data) in enumerate(self.binned_data):
            color = self.palette if self.x_palette else self.palette[i]
            label = self.hue_order[i]
            # The main plot
            boxplot(data, widths=self.bar_widths, color=color,
                    positions=pos, label=label, ax=ax)
        # Set the x limits
        offset = .5
        xlim = self.positions.min() - offset, self.positions.max() + offset
        ax.set_xlim(xlim)
    def pointplot(self, ax):
        """Draw the plot with a point representation."""
        for i, (pos, height, ci) in enumerate(self.estimate_data):
            color = self.palette if self.x_palette else self.palette[i]
            err_palette = self.err_palette
            label = self.hue_order[i]
            marker = self.markers[i]
            markersize = np.pi * np.square(self.lw) * 2
            linestyle = self.linestyles[i]
            # zorder increases per hue so later hues draw on top.
            z = i + 1
            # The error bars
            for j, (x, (low, high)) in enumerate(zip(pos, ci)):
                if low is None:
                    continue
                ecolor = err_palette[j] if self.x_palette else err_palette[i]
                ax.plot([x, x], [low, high], linewidth=self.lw,
                        color=ecolor, zorder=z)
            # The main plot
            ax.scatter(pos, height, s=markersize, color=color, label=label,
                       marker=marker, zorder=z)
            # The join line
            if self.join:
                ax.plot(pos, height, color=color,
                        linewidth=self.lw, linestyle=linestyle, zorder=z)
        # Set the x limits
        xlim = (self.positions.min() + self.offset.min() - .3,
                self.positions.max() + self.offset.max() + .3)
        ax.set_xlim(xlim)
class _RegressionPlotter(_LinearPlotter):
    """Plotter for numeric independent variables with regression model.
    This does the computations and drawing for the `regplot` function, and
    is thus also used indirectly by `lmplot`. It is generally similar to
    the `_DiscretePlotter`, but it's intended for use when the independent
    variable is numeric (continuous or discrete), and its primary advantage
    is that a regression model can be fit to the data and visualized, allowing
    extrapolations beyond the observed datapoints.
    """
    def __init__(self, x, y, data=None, x_estimator=None, x_bins=None,
                 x_ci="ci", scatter=True, fit_reg=True, ci=95, n_boot=1000,
                 units=None, order=1, logistic=False, lowess=False,
                 robust=False, logx=False, x_partial=None, y_partial=None,
                 truncate=False, dropna=True, x_jitter=None, y_jitter=None,
                 color=None, label=None):
        # Set member attributes
        self.x_estimator = x_estimator
        self.ci = ci
        # x_ci="ci" means "use the same size as the regression CI".
        self.x_ci = ci if x_ci == "ci" else x_ci
        self.n_boot = n_boot
        self.scatter = scatter
        self.fit_reg = fit_reg
        self.order = order
        self.logistic = logistic
        self.lowess = lowess
        self.robust = robust
        self.logx = logx
        self.truncate = truncate
        self.x_jitter = x_jitter
        self.y_jitter = y_jitter
        self.color = color
        self.label = label
        # Validate the regression options:
        if sum((order > 1, logistic, robust, lowess, logx)) > 1:
            raise ValueError("Mutually exclusive regression options.")
        # Extract the data vals from the arguments or passed dataframe
        self.establish_variables(data, x=x, y=y, units=units,
                                 x_partial=x_partial, y_partial=y_partial)
        # Drop null observations
        if dropna:
            self.dropna("x", "y", "units", "x_partial", "y_partial")
        # Regress nuisance variables out of the data
        if self.x_partial is not None:
            self.x = self.regress_out(self.x, self.x_partial)
        if self.y_partial is not None:
            self.y = self.regress_out(self.y, self.y_partial)
        # Possibly bin the predictor variable, which implies a point estimate
        if x_bins is not None:
            self.x_estimator = np.mean if x_estimator is None else x_estimator
            x_discrete, x_bins = self.bin_predictor(x_bins)
            self.x_discrete = x_discrete
        else:
            self.x_discrete = self.x
        # Save the range of the x variable for the grid later
        self.x_range = self.x.min(), self.x.max()
    @property
    def scatter_data(self):
        """Data where each observation is a point."""
        x_j = self.x_jitter
        if x_j is None:
            x = self.x
        else:
            # Uniform jitter reduces overplotting of repeated values.
            x = self.x + np.random.uniform(-x_j, x_j, len(self.x))
        y_j = self.y_jitter
        if y_j is None:
            y = self.y
        else:
            y = self.y + np.random.uniform(-y_j, y_j, len(self.y))
        return x, y
    @property
    def estimate_data(self):
        """Data with a point estimate and CI for each discrete x value."""
        x, y = self.x_discrete, self.y
        vals = sorted(np.unique(x))
        points, cis = [], []
        for val in vals:
            # Get the point estimate of the y variable
            _y = y[x == val]
            est = self.x_estimator(_y)
            points.append(est)
            # Compute the confidence interval for this estimate
            if self.x_ci is None:
                cis.append(None)
            else:
                units = None
                if self.units is not None:
                    units = self.units[x == val]
                boots = algo.bootstrap(_y, func=self.x_estimator,
                                       n_boot=self.n_boot, units=units)
                _ci = utils.ci(boots, self.x_ci)
                cis.append(_ci)
        return vals, points, cis
    def fit_regression(self, ax=None, x_range=None, grid=None):
        """Fit the regression model."""
        # Create the grid for the regression
        if grid is None:
            if self.truncate:
                # Limit the fit to the observed data range.
                x_min, x_max = self.x_range
            else:
                if ax is None:
                    x_min, x_max = x_range
                else:
                    x_min, x_max = ax.get_xlim()
            grid = np.linspace(x_min, x_max, 100)
        ci = self.ci
        # Fit the regression; the options were validated as mutually
        # exclusive in __init__, so at most one branch applies.
        if self.order > 1:
            yhat, yhat_boots = self.fit_poly(grid, self.order)
        elif self.logistic:
            from statsmodels.genmod.generalized_linear_model import GLM
            from statsmodels.genmod.families import Binomial
            yhat, yhat_boots = self.fit_statsmodels(grid, GLM,
                                                    family=Binomial())
        elif self.lowess:
            # Lowess returns its own grid and provides no bootstrap CI.
            ci = None
            grid, yhat = self.fit_lowess()
        elif self.robust:
            from statsmodels.robust.robust_linear_model import RLM
            yhat, yhat_boots = self.fit_statsmodels(grid, RLM)
        elif self.logx:
            yhat, yhat_boots = self.fit_logx(grid)
        else:
            yhat, yhat_boots = self.fit_fast(grid)
        # Compute the confidence interval at each grid point
        if ci is None:
            err_bands = None
        else:
            err_bands = utils.ci(yhat_boots, ci, axis=0)
        return grid, yhat, err_bands
    def fit_fast(self, grid):
        """Low-level regression and prediction using linear algebra."""
        # Design matrices with an explicit intercept column.
        X, y = np.c_[np.ones(len(self.x)), self.x], self.y
        grid = np.c_[np.ones(len(grid)), grid]
        reg_func = lambda _x, _y: np.linalg.pinv(_x).dot(_y)
        yhat = grid.dot(reg_func(X, y))
        if self.ci is None:
            return yhat, None
        beta_boots = algo.bootstrap(X, y, func=reg_func,
                                    n_boot=self.n_boot, units=self.units).T
        yhat_boots = grid.dot(beta_boots).T
        return yhat, yhat_boots
    def fit_poly(self, grid, order):
        """Regression using numpy polyfit for higher-order trends."""
        x, y = self.x, self.y
        reg_func = lambda _x, _y: np.polyval(np.polyfit(_x, _y, order), grid)
        yhat = reg_func(x, y)
        if self.ci is None:
            return yhat, None
        yhat_boots = algo.bootstrap(x, y, func=reg_func,
                                    n_boot=self.n_boot, units=self.units)
        return yhat, yhat_boots
    def fit_statsmodels(self, grid, model, **kwargs):
        """More general regression function using statsmodels objects."""
        X, y = np.c_[np.ones(len(self.x)), self.x], self.y
        grid = np.c_[np.ones(len(grid)), grid]
        reg_func = lambda _x, _y: model(_y, _x, **kwargs).fit().predict(grid)
        yhat = reg_func(X, y)
        if self.ci is None:
            return yhat, None
        yhat_boots = algo.bootstrap(X, y, func=reg_func,
                                    n_boot=self.n_boot, units=self.units)
        return yhat, yhat_boots
    def fit_lowess(self):
        """Fit a locally-weighted regression, which returns its own grid."""
        from statsmodels.nonparametric.smoothers_lowess import lowess
        grid, yhat = lowess(self.y, self.x).T
        return grid, yhat
    def fit_logx(self, grid):
        """Fit the model in log-space."""
        X, y = np.c_[np.ones(len(self.x)), self.x], self.y
        grid = np.c_[np.ones(len(grid)), np.log(grid)]
        def reg_func(_x, _y):
            # Log-transform the predictor column before the least squares fit.
            _x = np.c_[_x[:, 0], np.log(_x[:, 1])]
            return np.linalg.pinv(_x).dot(_y)
        yhat = grid.dot(reg_func(X, y))
        if self.ci is None:
            return yhat, None
        beta_boots = algo.bootstrap(X, y, func=reg_func,
                                    n_boot=self.n_boot, units=self.units).T
        yhat_boots = grid.dot(beta_boots).T
        return yhat, yhat_boots
    def bin_predictor(self, bins):
        """Discretize a predictor by assigning value to closest bin."""
        x = self.x
        if np.isscalar(bins):
            # A scalar gives the number of bins; place them at evenly
            # spaced percentiles of the data.
            percentiles = np.linspace(0, 100, bins + 2)[1:-1]
            bins = np.c_[utils.percentiles(x, percentiles)]
        else:
            bins = np.c_[np.ravel(bins)]
        # Snap each observation to its nearest bin center.
        dist = distance.cdist(np.c_[x], bins)
        x_binned = bins[np.argmin(dist, axis=1)].ravel()
        return x_binned, bins.ravel()
    def regress_out(self, a, b):
        """Regress b from a keeping a's original mean."""
        a_mean = a.mean()
        a = a - a_mean
        b = b - b.mean()
        b = np.c_[b]
        a_prime = a - b.dot(np.linalg.pinv(b).dot(a))
        return (a_prime + a_mean).reshape(a.shape)
    def plot(self, ax, scatter_kws, line_kws):
        """Draw the full plot."""
        # Insert the plot label into the correct set of keyword arguments
        if self.scatter:
            scatter_kws["label"] = self.label
        else:
            line_kws["label"] = self.label
        # Use the current color cycle state as a default
        if self.color is None:
            # Draw and immediately remove a dummy line to advance the cycle.
            lines, = plt.plot(self.x.mean(), self.y.mean())
            color = lines.get_color()
            lines.remove()
        else:
            color = self.color
        # Let color in keyword arguments override overall plot color
        scatter_kws.setdefault("color", color)
        line_kws.setdefault("color", color)
        # Draw the constituent plots
        if self.scatter:
            self.scatterplot(ax, scatter_kws)
        if self.fit_reg:
            self.lineplot(ax, line_kws)
        # Label the axes
        if hasattr(self.x, "name"):
            ax.set_xlabel(self.x.name)
        if hasattr(self.y, "name"):
            ax.set_ylabel(self.y.name)
    def scatterplot(self, ax, kws):
        """Draw the data."""
        # Treat the line-based markers specially, explicitly setting larger
        # linewidth than is provided by the seaborn style defaults.
        # This would ideally be handled better in matplotlib (i.e., distinguish
        # between edgewidth for solid glyphs and linewidth for line glyphs
        # but this should do for now.
        line_markers = ["1", "2", "3", "4", "+", "x", "|", "_"]
        if self.x_estimator is None:
            if "marker" in kws and kws["marker"] in line_markers:
                lw = mpl.rcParams["lines.linewidth"]
            else:
                lw = mpl.rcParams["lines.markeredgewidth"]
            kws.setdefault("linewidths", lw)
            # Only default to translucency when the color spec carries
            # no alpha channel of its own.
            if not hasattr(kws['color'], 'shape') or kws['color'].shape[1] < 4:
                kws.setdefault("alpha", .8)
            x, y = self.scatter_data
            ax.scatter(x, y, **kws)
        else:
            # TODO abstraction
            ci_kws = {"color": kws["color"]}
            ci_kws["linewidth"] = mpl.rcParams["lines.linewidth"] * 1.75
            kws.setdefault("s", 50)
            xs, ys, cis = self.estimate_data
            if [ci for ci in cis if ci is not None]:
                for x, ci in zip(xs, cis):
                    ax.plot([x, x], ci, **ci_kws)
            ax.scatter(xs, ys, **kws)
    def lineplot(self, ax, kws):
        """Draw the model."""
        xlim = ax.get_xlim()
        # Fit the regression model
        grid, yhat, err_bands = self.fit_regression(ax)
        # Set default aesthetics
        fill_color = kws["color"]
        lw = kws.pop("lw", mpl.rcParams["lines.linewidth"] * 1.5)
        kws.setdefault("linewidth", lw)
        # Draw the regression line and confidence interval
        ax.plot(grid, yhat, **kws)
        if err_bands is not None:
            ax.fill_between(grid, *err_bands, color=fill_color, alpha=.15)
        # Restore the original x limits (plotting may have extended them).
        ax.set_xlim(*xlim)
def lmplot(x, y, data, hue=None, col=None, row=None, palette=None,
           col_wrap=None, size=5, aspect=1, markers="o", sharex=True,
           sharey=True, hue_order=None, col_order=None, row_order=None,
           dropna=True, legend=True, legend_out=True, **kwargs):
    """Plot data and a regression model fit onto a FacetGrid.
    Parameters
    ----------
    x, y : strings
        Column names in ``data``.
    data : DataFrame
        Long-form (tidy) dataframe with variables in columns and observations
        in rows.
    hue, col, row : strings, optional
        Variable names to facet on the hue, col, or row dimensions (see
        :class:`FacetGrid` docs for more information).
    palette : seaborn palette or dict, optional
        Color palette if using a `hue` facet. Should be something that
        seaborn.color_palette can read, or a dictionary mapping values of the
        hue variable to matplotlib colors.
    col_wrap : int, optional
        Wrap the column variable at this width. Incompatible with `row`.
    size : scalar, optional
        Height (in inches) of each facet.
    aspect : scalar, optional
        Aspect * size gives the width (in inches) of each facet.
    markers : single matplotlib marker code or list, optional
        Either the marker to use for all datapoints or a list of markers with
        a length the same as the number of levels in the hue variable so that
        differently colored points will also have different scatterplot
        markers.
    share{x, y}: booleans, optional
        Lock the limits of the vertical and horizontal axes across the
        facets.
    {hue, col, row}_order: sequence of strings, optional
        Order to plot the values in the faceting variables in, otherwise
        sorts the unique values.
    dropna : boolean, optional
        Drop missing values from the data before plotting.
    legend : boolean, optional
        Draw a legend for the data when using a `hue` variable.
    legend_out: boolean, optional
        Draw the legend outside the grid of plots.
    kwargs : key, value pairs
        Other keyword arguments are passed to :func:`regplot`
    Returns
    -------
    facets : FacetGrid
        Returns the :class:`FacetGrid` instance with the plot on it
        for further tweaking.
    See Also
    --------
    regplot : Axes-level function for plotting linear regressions.
    """
    # Reduce the dataframe to only needed columns
    # Otherwise when dropna is True we could lose data because it is missing
    # in a column that isn't relevant to this plot
    units = kwargs.get("units", None)
    x_partial = kwargs.get("x_partial", None)
    y_partial = kwargs.get("y_partial", None)
    need_cols = [x, y, hue, col, row, units, x_partial, y_partial]
    cols = np.unique([a for a in need_cols if a is not None]).tolist()
    data = data[cols]
    # Initialize the grid
    facets = FacetGrid(data, row, col, hue, palette=palette,
                       row_order=row_order, col_order=col_order,
                       hue_order=hue_order, dropna=dropna,
                       size=size, aspect=aspect, col_wrap=col_wrap,
                       sharex=sharex, sharey=sharey, legend_out=legend_out)
    # Add the markers here as FacetGrid has figured out how many levels of the
    # hue variable are needed and we don't want to duplicate that process
    if facets.hue_names is None:
        n_markers = 1
    else:
        n_markers = len(facets.hue_names)
    if not isinstance(markers, list):
        markers = [markers] * n_markers
    if len(markers) != n_markers:
        # Fixed typo in the user-facing message ("singeton" -> "singleton").
        raise ValueError(("markers must be a singleton or a list of markers "
                          "for each level of the hue variable"))
    facets.hue_kws = {"marker": markers}
    # Hack to set the x limits properly, which needs to happen here
    # because the extent of the regression estimate is determined
    # by the limits of the plot
    if sharex:
        for ax in facets.axes.flat:
            # Scatter (then remove) dummy points so the autoscaler sees
            # the full x range before any regression grid is built.
            scatter = ax.scatter(data[x], np.ones(len(data)) * data[y].mean())
            scatter.remove()
    # Draw the regression plot on each facet
    facets.map_dataframe(regplot, x, y, **kwargs)
    # Add a legend
    if legend and (hue is not None) and (hue not in [col, row]):
        facets.add_legend()
    return facets
def factorplot(x, y=None, hue=None, data=None, row=None, col=None,
               col_wrap=None, estimator=np.mean, ci=95, n_boot=1000,
               units=None, x_order=None, hue_order=None, col_order=None,
               row_order=None, kind="auto", markers=None, linestyles=None,
               dodge=0, join=True, hline=None, size=5, aspect=1, palette=None,
               legend=True, legend_out=True, dropna=True, sharex=True,
               sharey=True, margin_titles=False):
    """Plot a variable estimate and error sorted by categorical factors.

    Parameters
    ----------
    x : string
        Variable name in ``data`` for splitting the plot on the x axis.
    y : string, optional
        Variable name in ``data`` for the dependent variable. If omitted, the
        counts within each bin are plotted (without confidence intervals).
    hue : string, optional
        Variable name in ``data`` for splitting the plot by color. In the case
        of ``kind="bar"``, this also influences the placement on the x axis.
    data : DataFrame
        Long-form (tidy) dataframe with variables in columns and observations
        in rows.
    row, col : strings, optional
        Variable name(s) in ``data`` for splitting the plot into a facet grid
        along row and columns.
    col_wrap : int or None, optional
        Wrap the column variable at this width (incompatible with ``row``).
    estimator : vector -> scalar function, optional
        Function to aggregate ``y`` values at each level of the factors.
    ci : int in {0, 100}, optional
        Size of confidence interval to draw around the aggregated value.
    n_boot : int, optional
        Number of bootstrap resamples used to compute confidence interval.
    units : vector, optional
        Vector with ids for sampling units; bootstrap will be performed over
        these units and then within them.
    {x, hue, col, row}_order : list-like, optional
        Order of levels plotted on various dimensions of the figure. Default
        is to use sorted level values.
    kind : {"auto", "point", "bar"}, optional
        Visual representation of the plot. "auto" uses a few heuristics to
        guess whether "bar" or "point" is more appropriate.
    markers : list of strings, optional
        Marker codes to map the ``hue`` variable with. Only relevant when kind
        is "point".
    linestyles : list of strings, optional
        Linestyle codes to map the ``hue`` variable with. Only relevant when
        kind is "point".
    dodge : positive scalar, optional
        Horizontal offset applies to different ``hue`` levels. Only relevant
        when kind is "point".
    join : boolean, optional
        Whether points from the same level of ``hue`` should be joined. Only
        relevant when kind is "point".
    size : positive scalar, optional
        Height (in inches) of each facet.
    aspect : positive scalar, optional
        Ratio of facet width to facet height.
    palette : seaborn color palette, optional
        Palette to map ``hue`` variable with (or ``x`` variable when ``hue``
        is None).
    legend : boolean, optional
        Draw a legend, only if ``hue`` is used and does not overlap with other
        variables.
    legend_out : boolean, optional
        Draw the legend outside the grid; otherwise it is placed within the
        first facet.
    dropna : boolean, optional
        Remove observations that are NA within any variables used to make
        the plot.
    share{x, y} : booleans, optional
        Lock the limits of the vertical and/or horizontal axes across the
        facets.
    margin_titles : bool, optional
        If True and there is a ``row`` variable, draw the titles on the right
        margin of the grid (experimental).

    Returns
    -------
    facet : FacetGrid
        Returns the :class:`FacetGrid` instance with the plot on it
        for further tweaking.

    See Also
    --------
    pointplot : Axes-level function for drawing a point plot
    barplot : Axes-level function for drawing a bar plot
    boxplot : Axes-level function for drawing a box plot
    """
    # Reduce the dataframe to only the columns used in this plot so that
    # dropna cannot discard rows because of NAs in irrelevant columns.
    cols = [a for a in [x, y, hue, col, row, units] if a is not None]
    cols = pd.unique(cols).tolist()
    data = data[cols]

    # Hand the hue variable to the FacetGrid only when it coincides with a
    # faceting variable; otherwise the axes-level plotters map hue below.
    facet_hue = hue if hue in [row, col] else None
    facet_palette = palette if hue in [row, col] else None

    # Initialize the grid
    facets = FacetGrid(data, row, col, facet_hue, palette=facet_palette,
                       row_order=row_order, col_order=col_order, dropna=dropna,
                       size=size, aspect=aspect, col_wrap=col_wrap,
                       legend_out=legend_out, sharex=sharex, sharey=sharey,
                       margin_titles=margin_titles)

    # Heuristic choice of representation: counts get bars; data that look
    # normalized (all <= 1) get points; otherwise the mean/std ratio decides.
    if kind == "auto":
        if y is None:
            kind = "bar"
        elif (data[y] <= 1).all():
            kind = "point"
        elif (data[y].mean() / data[y].std()) < 2.5:
            kind = "bar"
        else:
            kind = "point"

    # Always use an x_order so that the plot is drawn properly when
    # not all of the x variables are represented in each facet
    if x_order is None:
        x_order = np.sort(pd.unique(data[x]))

    # Draw the plot on each facet
    kwargs = dict(estimator=estimator, ci=ci, n_boot=n_boot, units=units,
                  x_order=x_order, hue_order=hue_order, hline=hline)

    # Delegate the hue variable to the plotter not the FacetGrid
    if hue is not None and hue in [row, col]:
        hue = None
    else:
        kwargs["palette"] = palette

    # Plot by mapping a plot function across the facets
    if kind == "bar":
        facets.map_dataframe(barplot, x, y, hue, **kwargs)
    elif kind == "box":
        def _boxplot(x, y, hue, data=None, **kwargs):
            # Small adapter: _DiscretePlotter draws onto the current axes.
            p = _DiscretePlotter(x, y, hue, data, kind="box", **kwargs)
            ax = plt.gca()
            p.plot(ax)
        facets.map_dataframe(_boxplot, x, y, hue, **kwargs)
    elif kind == "point":
        kwargs.update(dict(dodge=dodge, join=join,
                           markers=markers, linestyles=linestyles))
        facets.map_dataframe(pointplot, x, y, hue, **kwargs)

    # Draw legends and labels
    if y is None:
        facets.set_axis_labels(x, "count")
    # NOTE(review): the source's indentation was lost; tight_layout is applied
    # unconditionally here — confirm against upstream history.
    facets.fig.tight_layout()
    if legend and (hue is not None) and (hue not in [x, row, col]):
        facets.add_legend(title=hue, label_order=hue_order)

    return facets
def barplot(x, y=None, hue=None, data=None, estimator=np.mean, hline=None,
            ci=95, n_boot=1000, units=None, x_order=None, hue_order=None,
            dropna=True, color=None, palette=None, label=None, ax=None):
    """Estimate data within categorical bins and draw the estimates as bars.

    Parameters
    ----------
    x : vector or string
        Data or variable name in ``data`` for splitting the plot on the
        x axis.
    y : vector or string, optional
        Data or variable name in ``data`` for the dependent variable. If
        omitted, the counts within each bin are plotted (without confidence
        intervals).
    hue : vector or string, optional
        Data or variable name in ``data`` for splitting the plot by color.
    data : DataFrame, optional
        Long-form (tidy) dataframe with variables in columns and
        observations in rows.
    estimator : vector -> scalar function, optional
        Function to aggregate ``y`` values at each level of the factors.
    hline : scalar, optional
        Height at which to draw a horizontal reference line.
    ci : int in {0, 100}, optional
        Size of the confidence interval to draw around the aggregated value.
    n_boot : int, optional
        Number of bootstrap resamples used to compute the confidence
        interval.
    units : vector, optional
        Ids for sampling units; the bootstrap resamples units and then
        observations within units.
    {x, hue}_order : list-like, optional
        Order of the levels on the corresponding dimension.
    dropna : boolean, optional
        Remove observations that are NA in any plotted variable.
    color, palette : matplotlib color / seaborn palette, optional
        Single color, or palette to map the ``hue`` variable with (the
        ``x`` variable when ``hue`` is None).
    label : string, optional
        Ignored by this function (kept for interface compatibility).
    ax : matplotlib Axes, optional
        Axes to draw into; defaults to the current axes.

    Returns
    -------
    ax : Axes
        The matplotlib Axes containing the plot, for further tweaking.

    See Also
    --------
    factorplot : Combine barplot and FacetGrid
    pointplot : Axes-level function for drawing a point plot
    """
    # _DiscretePlotter does the real work; the fixed positional arguments
    # ("bar", no markers/linestyles, dodge=0, join=False) select bars.
    bar_plotter = _DiscretePlotter(x, y, hue, data, units, x_order, hue_order,
                                   color, palette, "bar", None, None, 0, False,
                                   hline, estimator, ci, n_boot, dropna)

    # Fall back to the current axes when none was supplied.
    target = plt.gca() if ax is None else ax
    bar_plotter.plot(target)
    return target
def pointplot(x, y, hue=None, data=None, estimator=np.mean, hline=None,
              ci=95, n_boot=1000, units=None, x_order=None, hue_order=None,
              markers=None, linestyles=None, dodge=0, dropna=True, color=None,
              palette=None, join=True, label=None, ax=None):
    """Estimate data within categorical bins and draw the estimates as points.

    Parameters
    ----------
    x : vector or string
        Data or variable name in ``data`` for splitting the plot on the
        x axis.
    y : vector or string
        Data or variable name in ``data`` for the dependent variable.
    hue : vector or string, optional
        Data or variable name in ``data`` for splitting the plot by color.
    data : DataFrame, optional
        Long-form (tidy) dataframe with variables in columns and
        observations in rows.
    estimator : vector -> scalar function, optional
        Function to aggregate ``y`` values at each level of the factors.
    hline : scalar, optional
        Height at which to draw a horizontal reference line.
    ci : int in {0, 100}, optional
        Size of the confidence interval to draw around the aggregated value.
    n_boot : int, optional
        Number of bootstrap resamples used to compute the confidence
        interval.
    units : vector, optional
        Ids for sampling units; the bootstrap resamples units and then
        observations within units.
    {x, hue}_order : list-like, optional
        Order of the levels on the corresponding dimension.
    markers, linestyles : lists of strings, optional
        Marker and linestyle codes to map the ``hue`` variable with.
    dodge : positive scalar, optional
        Horizontal offset applied to different ``hue`` levels.
    dropna : boolean, optional
        Remove observations that are NA in any plotted variable.
    color, palette : matplotlib color / seaborn palette, optional
        Single color, or palette to map the ``hue`` variable with (the
        ``x`` variable when ``hue`` is None).
    join : boolean, optional
        Whether points from the same ``hue`` level should be joined.
    label : string, optional
        Ignored by this function (kept for interface compatibility).
    ax : matplotlib Axes, optional
        Axes to draw into; defaults to the current axes.

    Returns
    -------
    ax : Axes
        The matplotlib Axes containing the plot, for further tweaking.

    See Also
    --------
    factorplot : Combine pointplot and FacetGrid
    barplot : Axes-level function for drawing a bar plot
    """
    # _DiscretePlotter does the real work; "point" selects the point
    # representation, with marker/linestyle/dodge/join options forwarded.
    point_plotter = _DiscretePlotter(x, y, hue, data, units, x_order,
                                     hue_order, color, palette, "point",
                                     markers, linestyles, dodge, join, hline,
                                     estimator, ci, n_boot, dropna)

    # Fall back to the current axes when none was supplied.
    target = plt.gca() if ax is None else ax
    point_plotter.plot(target)
    return target
def regplot(x, y, data=None, x_estimator=None, x_bins=None, x_ci=95,
            scatter=True, fit_reg=True, ci=95, n_boot=1000, units=None,
            order=1, logistic=False, lowess=False, robust=False,
            logx=False, x_partial=None, y_partial=None,
            truncate=False, dropna=True, x_jitter=None, y_jitter=None,
            xlabel=None, ylabel=None, label=None,
            color=None, marker="o", scatter_kws=None, line_kws=None,
            ax=None):
    """Draw a scatterplot of x against y with a fitted regression line.

    Parameters
    ----------
    x, y : vectors or strings
        Data, or column names in ``data``, for the predictor and response.
    data : DataFrame, optional
        DataFrame to use when ``x`` and ``y`` are column names.
    x_estimator : callable, optional
        When ``x`` is discrete, aggregate ``y`` at each ``x`` value and plot
        point estimates with confidence intervals instead of a raw scatter.
    x_bins : int or vector, optional
        Discretize a continuous ``x`` into these bins for the scatter only
        (the regression is still fit to the original data); implies
        ``x_estimator=numpy.mean`` when one is not given.
    x_ci : int in [0, 100], optional
        Confidence interval for the point estimates when ``x`` is discrete.
    scatter, fit_reg : booleans, optional
        Whether to draw the observed data and the regression estimate.
    ci : int in [0, 100] or None, optional
        Confidence interval for the regression estimate, drawn as a
        translucent band around the line.
    n_boot : int, optional
        Bootstrap resamples used to compute the confidence intervals.
    units : vector or string, optional
        Sampling-unit ids for a multilevel bootstrap with repeated measures.
    order : int, optional
        Polynomial order of the fit; use order > 1 for higher-order trends.
    logistic, lowess, robust, logx : booleans, optional
        Alternative regression models: logistic (``y`` in {0, 1}), lowess
        smoothing, robust linear regression, or regression in log(x) space.
    {x, y}_partial : matrix or string(s), optional
        Confounding variables regressed out of ``x`` or ``y`` before
        plotting.
    truncate : boolean, optional
        Limit the regression estimate to the observed range of ``x``.
    dropna : boolean, optional
        Drop observations that are NA in any used variable.
    {x, y}_jitter : floats, optional
        Uniform random noise added to the scatter positions; helpful when
        plotting discrete values.
    xlabel, ylabel : strings, optional
        Ignored by this function (kept for interface compatibility).
    label : string, optional
        Label for the regression line (or the scatter when not fitting).
    color : matplotlib color, optional
        Base color for all plot elements; defaults to the next color in the
        axis cycle.
    marker : matplotlib marker code, optional
        Marker for the scatterplot points.
    {scatter, line}_kws : dictionaries, optional
        Extra keyword arguments for ``scatter()`` and ``plot()``.
    ax : matplotlib axis, optional
        Axis to draw into; defaults to the current axis.

    Returns
    -------
    ax : matplotlib axes
        Axes with the regression plot.

    See Also
    --------
    lmplot : Combine regplot and a FacetGrid.
    residplot : Calculate and plot the residuals of a linear model.
    jointplot (with kind="reg") : Draw a regplot with univariate marginal
        distributions.
    """
    # _RegressionPlotter implements the data munging, fitting, and drawing;
    # this function only assembles its (positional) configuration.
    reg_plotter = _RegressionPlotter(x, y, data, x_estimator, x_bins, x_ci,
                                     scatter, fit_reg, ci, n_boot, units,
                                     order, logistic, lowess, robust, logx,
                                     x_partial, y_partial, truncate, dropna,
                                     x_jitter, y_jitter, color, label)

    if ax is None:
        ax = plt.gca()

    # Shallow-copy the keyword dicts so the caller's dicts are not mutated
    # when the marker is injected.
    scatter_options = {} if scatter_kws is None else copy.copy(scatter_kws)
    scatter_options["marker"] = marker
    line_options = {} if line_kws is None else copy.copy(line_kws)

    reg_plotter.plot(ax, scatter_options, line_options)
    return ax
def residplot(x, y, data=None, lowess=False, x_partial=None, y_partial=None,
              order=1, robust=False, dropna=True, label=None, color=None,
              scatter_kws=None, line_kws=None, ax=None):
    """Regress y on x and draw a scatterplot of the residuals.

    The regression (possibly robust or polynomial) is fit first, the fitted
    values are subtracted from ``y``, and the remaining residuals are
    plotted. Optionally a lowess smoother is fit to the residuals, which can
    help in judging whether structure remains.

    Parameters
    ----------
    x, y : vectors or strings
        Data, or column names in ``data``, for the predictor and response.
    data : DataFrame, optional
        DataFrame to use when ``x`` and ``y`` are column names.
    lowess : boolean, optional
        Fit a lowess smoother to the residual scatterplot.
    {x, y}_partial : matrix or string(s), optional
        Confounding variables regressed out of ``x`` or ``y`` before
        plotting.
    order : int, optional
        Polynomial order used when calculating the residuals.
    robust : boolean, optional
        Use a robust linear regression when calculating the residuals.
    dropna : boolean, optional
        Ignore observations with missing data when fitting and plotting.
    label : string, optional
        Label used in any plot legends.
    color : matplotlib color, optional
        Color for all elements of the plot.
    {scatter, line}_kws : dictionaries, optional
        Extra keyword arguments for ``scatter()`` and ``plot()``.
    ax : matplotlib axis, optional
        Axis to draw into; defaults to the current axis.

    Returns
    -------
    ax : matplotlib axes
        Axes with the residual plot.

    See Also
    --------
    regplot : Plot a simple linear regression model.
    jointplot (with kind="resid") : Draw a residplot with univariate
        marginal distributions.
    """
    # ci=None: only point estimates are needed to form the residuals.
    plotter = _RegressionPlotter(x, y, data, ci=None,
                                 order=order, robust=robust,
                                 x_partial=x_partial, y_partial=y_partial,
                                 dropna=dropna, color=color, label=label)
    if ax is None:
        ax = plt.gca()

    # Evaluate the regression at the observed x values and subtract the
    # fitted values, turning plotter.y into the residuals.
    _, fitted, _ = plotter.fit_regression(grid=plotter.x)
    plotter.y = plotter.y - fitted

    # Either smooth the residuals with lowess or draw no fit line at all.
    if lowess:
        plotter.lowess = True
    else:
        plotter.fit_reg = False

    # Reference line at zero residual.
    ax.axhline(0, ls=":", c=".2")

    # Draw the residual scatterplot.
    if scatter_kws is None:
        scatter_kws = {}
    if line_kws is None:
        line_kws = {}
    plotter.plot(ax, scatter_kws, line_kws)
    return ax
def coefplot(formula, data, groupby=None, intercept=False, ci=95,
             palette="husl"):
    """Plot the coefficients from a linear model.

    Parameters
    ----------
    formula : string
        patsy formula for the ols model.
    data : dataframe
        Data for the plot; formula terms must appear in columns.
    groupby : grouping object, optional
        Object to group data with to fit conditional models.
    intercept : bool, optional
        If False, strips the intercept term before plotting.
    ci : float, optional
        Size of confidence intervals (in percent).
    palette : seaborn color palette, optional
        Palette for the horizontal plots.

    Raises
    ------
    ImportError
        If statsmodels is not installed.
    """
    if not _has_statsmodels:
        raise ImportError("The `coefplot` function requires statsmodels")
    import statsmodels.formula.api as sf

    alpha = 1 - ci / 100
    if groupby is None:
        # Fit the model once and reuse the result for both the point
        # estimates and the confidence intervals (previously fit twice).
        fit = sf.ols(formula, data).fit()
        coefs = fit.params
        cis = fit.conf_int(alpha)
    else:
        grouped = data.groupby(groupby)
        coefs = grouped.apply(lambda d: sf.ols(formula, d).fit().params).T
        cis = grouped.apply(lambda d: sf.ols(formula, d).fit().conf_int(alpha))

    # Possibly ignore the intercept (positionally the first term).
    # .iloc replaces the removed DataFrame.ix accessor.
    if not intercept:
        coefs = coefs.iloc[1:]
    n_terms = len(coefs)

    # Plot separately depending on groupby
    w, h = mpl.rcParams["figure.figsize"]

    def hsize(n):
        # Figure height scales with the number of stacked axes.
        return n * (h / 2)

    def wsize(n):
        # Figure width scales with the number of plotted terms/groups.
        return n * (w / (4 * (n / 5)))

    if groupby is None:
        colors = itertools.cycle(color_palette(palette, n_terms))
        f, ax = plt.subplots(1, 1, figsize=(wsize(n_terms), hsize(1)))
        for i, term in enumerate(coefs.index):
            color = next(colors)
            low, high = cis.loc[term]
            ax.plot([i, i], [low, high], c=color,
                    solid_capstyle="round", lw=2.5)
            ax.plot(i, coefs.loc[term], "o", c=color, ms=8)
        ax.set_xlim(-.5, n_terms - .5)
        ax.axhline(0, ls="--", c="dimgray")
        ax.set_xticks(range(n_terms))
        ax.set_xticklabels(coefs.index)
    else:
        n_groups = len(coefs.columns)
        f, axes = plt.subplots(n_terms, 1, sharex=True,
                               figsize=(wsize(n_groups), hsize(n_terms)))
        if n_terms == 1:
            axes = [axes]
        colors = itertools.cycle(color_palette(palette, n_groups))
        for ax, term in zip(axes, coefs.index):
            for i, group in enumerate(coefs.columns):
                color = next(colors)
                low, high = cis.loc[(group, term)]
                ax.plot([i, i], [low, high], c=color,
                        solid_capstyle="round", lw=2.5)
                ax.plot(i, coefs.loc[term, group], "o", c=color, ms=8)
            ax.set_xlim(-.5, n_groups - .5)
            ax.axhline(0, ls="--", c="dimgray")
            ax.set_title(term)
        # With sharex=True, labeling only the bottom (last) axes suffices.
        ax.set_xlabel(groupby)
        ax.set_xticks(range(n_groups))
        ax.set_xticklabels(coefs.columns)
def interactplot(x1, x2, y, data=None, filled=False, cmap="RdBu_r",
                 colorbar=True, levels=30, logistic=False,
                 contour_kws=None, scatter_kws=None, ax=None, **kwargs):
    """Visualize a continuous two-way interaction with a contour plot.

    Parameters
    ----------
    x1, x2, y, strings or array-like
        Either the two independent variables and the dependent variable,
        or keys to extract them from `data`
    data : DataFrame
        Pandas DataFrame with the data in the columns.
    filled : bool
        Whether to plot with filled or unfilled contours
    cmap : matplotlib colormap
        Colormap to represent yhat in the contour plot.
    colorbar : bool
        Whether to draw the colorbar for interpreting the color values.
    levels : int or sequence
        Number or position of contour plot levels.
    logistic : bool
        Fit a logistic regression model instead of linear regression.
    contour_kws : dictionary
        Keyword arguments for contour[f]().
    scatter_kws : dictionary
        Keyword arguments for plot().
    ax : matplotlib axis
        Axis to draw plot in.
    kwargs : other keyword arguments
        Ignored by this function (kept for interface compatibility).

    Returns
    -------
    ax : Matplotlib axis
        Axis with the contour plot.
    """
    if not _has_statsmodels:
        raise ImportError("The `interactplot` function requires statsmodels")
    from statsmodels.regression.linear_model import OLS
    from statsmodels.genmod.generalized_linear_model import GLM
    from statsmodels.genmod.families import Binomial

    # Handle the form of the data
    if data is not None:
        x1 = data[x1]
        x2 = data[x2]
        y = data[y]

    # Use Series/array names (when present) to label the axes later.
    xlabel = x1.name if hasattr(x1, "name") else None
    ylabel = x2.name if hasattr(x2, "name") else None
    clabel = y.name if hasattr(y, "name") else None

    x1 = np.asarray(x1)
    x2 = np.asarray(x2)
    y = np.asarray(y)

    # Initialize the scatter keyword dictionary
    if scatter_kws is None:
        scatter_kws = {}
    if not ("color" in scatter_kws or "c" in scatter_kws):
        scatter_kws["color"] = "#222222"
    if "alpha" not in scatter_kws:
        scatter_kws["alpha"] = 0.75

    # Initialize the contour keyword dictionary
    if contour_kws is None:
        contour_kws = {}

    # Initialize the axis
    if ax is None:
        ax = plt.gca()

    # Plot once to let matplotlib sort out the axis limits
    ax.plot(x1, x2, "o", **scatter_kws)

    # Find the plot limits
    x1min, x1max = ax.get_xlim()
    x2min, x2max = ax.get_ylim()

    # Make the grid for the contour plot
    x1_points = np.linspace(x1min, x1max, 100)
    x2_points = np.linspace(x2min, x2max, 100)
    xx1, xx2 = np.meshgrid(x1_points, x2_points)

    # Fit the model with an interaction term
    X = np.c_[np.ones(x1.size), x1, x2, x1 * x2]
    if logistic:
        lm = GLM(y, X, family=Binomial()).fit()
    else:
        lm = OLS(y, X).fit()

    # Evaluate the model on the grid.
    # (Renamed from `eval`, which shadowed the builtin.)
    predict = np.vectorize(lambda a, b: lm.predict([1, a, b, a * b]))
    yhat = predict(xx1, xx2)

    # Default color limits put the midpoint at mean(y)
    y_bar = y.mean()
    c_min = min(np.percentile(y, 2), yhat.min())
    c_max = max(np.percentile(y, 98), yhat.max())
    delta = max(c_max - y_bar, y_bar - c_min)
    # Bug fix: the original assigned the upper limit to `cmax` (a typo),
    # leaving `c_max` stale and the limits asymmetric around the mean.
    c_min, c_max = y_bar - delta, y_bar + delta
    contour_kws.setdefault("vmin", c_min)
    contour_kws.setdefault("vmax", c_max)

    # Draw the contour plot
    func_name = "contourf" if filled else "contour"
    contour = getattr(ax, func_name)
    c = contour(xx1, xx2, yhat, levels, cmap=cmap, **contour_kws)

    # Draw the scatter again so it's visible above the contours
    ax.plot(x1, x2, "o", **scatter_kws)

    # Draw a colorbar, maybe
    if colorbar:
        bar = plt.colorbar(c)

    # Label the axes
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    if clabel is not None and colorbar:
        clabel = "P(%s)" % clabel if logistic else clabel
        bar.set_label(clabel, labelpad=15, rotation=270)

    return ax
def corrplot(data, names=None, annot=True, sig_stars=True, sig_tail="both",
             sig_corr=True, cmap=None, cmap_range=None, cbar=True,
             diag_names=True, method=None, ax=None, **kwargs):
    """Plot a correlation matrix with colormap and r values.

    Parameters
    ----------
    data : Dataframe or nobs x nvars array
        Rectangular input data with variables in the columns.
    names : sequence of strings
        Names to associate with variables if `data` is not a DataFrame.
    annot : bool
        Whether to annotate the upper triangle with correlation coefficients.
    sig_stars : bool
        If True, get significance with permutation test and denote with stars.
    sig_tail : both | upper | lower
        Direction for significance test. Also controls the default colorbar.
    sig_corr : bool
        If True, use FWE-corrected p values for the sig stars.
    cmap : colormap
        Colormap name as string or colormap object.
    cmap_range : None, "full", (low, high)
        Either truncate colormap at (-max(abs(r)), max(abs(r))), use the
        full range (-1, 1), or specify (min, max) values for the colormap.
    cbar : bool
        If true, plot the colorbar legend.
    method: None (pearson) | kendall | spearman
        Correlation method to compute pairwise correlations. Methods other
        than the default pearson correlation will not have a significance
        computed.
    ax : matplotlib axis
        Axis to draw plot in.
    kwargs : other keyword arguments
        Passed to ax.matshow()

    Returns
    -------
    ax : matplotlib axis
        Axis object with plot.
    """
    if not isinstance(data, pd.DataFrame):
        if names is None:
            names = ["var_%d" % i for i in range(data.shape[1])]
        # Builtin `float` replaces np.float, which modern NumPy removed.
        data = pd.DataFrame(data, columns=names, dtype=float)

    # Calculate the correlation matrix of the dataframe
    if method is None:
        corrmat = data.corr()
    else:
        corrmat = data.corr(method=method)

    # Pandas will drop non-numeric columns; let's keep track of that operation
    names = corrmat.columns
    data = data[names]

    # Get p values with a permutation test
    if annot and sig_stars and method is None:
        p_mat = algo.randomize_corrmat(data.values.T, sig_tail, sig_corr)
    else:
        p_mat = None

    # Sort out the color range
    if cmap_range is None:
        triu = np.triu_indices(len(corrmat), 1)
        vmax = min(1, np.max(np.abs(corrmat.values[triu])) * 1.15)
        vmin = -vmax
        if sig_tail == "both":
            cmap_range = vmin, vmax
        elif sig_tail == "upper":
            cmap_range = 0, vmax
        elif sig_tail == "lower":
            cmap_range = vmin, 0
    elif cmap_range == "full":
        cmap_range = (-1, 1)

    # Find a colormapping, somewhat intelligently
    if cmap is None:
        if min(cmap_range) >= 0:
            cmap = "OrRd"
        elif max(cmap_range) <= 0:
            cmap = "PuBu_r"
        else:
            cmap = "coolwarm"
    if cmap == "jet":
        # Paternalism
        raise ValueError("Never use the 'jet' colormap!")

    # Plot using the more general symmatplot function
    ax = symmatplot(corrmat, p_mat, names, cmap, cmap_range,
                    cbar, annot, diag_names, ax, **kwargs)
    return ax
def symmatplot(mat, p_mat=None, names=None, cmap="Greys", cmap_range=None,
               cbar=True, annot=True, diag_names=True, ax=None, **kwargs):
    """Plot a symmetric matrix with colormap and statistic values.

    The lower triangle is drawn as a heatmap; the upper triangle is either
    annotated with the statistic values (and significance stars from
    ``p_mat``) or masked out.

    Parameters
    ----------
    mat : DataFrame or square array
        Symmetric matrix of statistics to plot.
    p_mat : square array, optional
        p values used for significance stars; defaults to all ones
        (no stars).
    names : sequence of strings, optional
        Variable names; defaults to ``var0`` ... ``varN``.
    cmap : colormap
        Colormap for the lower-triangle heatmap.
    cmap_range : (vmin, vmax), optional
        Explicit color limits; defaults to +/-15% beyond the data range.
    cbar : bool
        Whether to draw a colorbar.
    annot : bool
        Whether to write the statistic values in the upper triangle.
    diag_names : bool
        Whether to write the variable names on the diagonal.
    ax : matplotlib axis, optional
        Axis to draw into; defaults to the current axis.
    kwargs : other keyword arguments
        Passed to ``ax.matshow``.

    Returns
    -------
    ax : matplotlib axis
        Axis object with the plot.
    """
    if ax is None:
        ax = plt.gca()
    nvars = len(mat)
    if isinstance(mat, pd.DataFrame):
        plotmat = mat.values.copy()
        mat = mat.values
    else:
        plotmat = mat.copy()
    # Hide the upper triangle (including the diagonal) in the heatmap;
    # it is used for text annotations instead.
    plotmat[np.triu_indices(nvars)] = np.nan
    if cmap_range is None:
        # Pad the color limits by 15% so extremes are not clipped.
        vmax = np.nanmax(plotmat) * 1.15
        vmin = np.nanmin(plotmat) * 1.15
    elif len(cmap_range) == 2:
        vmin, vmax = cmap_range
    else:
        raise ValueError("cmap_range argument not understood")
    mat_img = ax.matshow(plotmat, cmap=cmap, vmin=vmin, vmax=vmax, **kwargs)
    if cbar:
        plt.colorbar(mat_img, shrink=.75)
    # Default p values of 1 produce no significance stars.
    if p_mat is None:
        p_mat = np.ones((nvars, nvars))
    if annot:
        # Write each upper-triangle statistic with its significance stars.
        for i, j in zip(*np.triu_indices(nvars, 1)):
            val = mat[i, j]
            stars = utils.sig_stars(p_mat[i, j])
            ax.text(j, i, "\n%.2g\n%s" % (val, stars),
                    fontdict=dict(ha="center", va="center"))
    else:
        # Mask the unused triangle with a flat overlay instead of text.
        fill = np.ones_like(plotmat)
        fill[np.tril_indices_from(fill, -1)] = np.nan
        ax.matshow(fill, cmap="Greys", vmin=0, vmax=0, zorder=2)
    if names is None:
        names = ["var%d" % i for i in range(nvars)]
    if diag_names:
        # Names go on the diagonal cells, so the tick labels are cleared.
        for i, name in enumerate(names):
            ax.text(i, i, name, fontdict=dict(ha="center", va="center",
                                              weight="bold", rotation=45))
        ax.set_xticklabels(())
        ax.set_yticklabels(())
    else:
        ax.xaxis.set_ticks_position("bottom")
        xnames = names if annot else names[:-1]
        ax.set_xticklabels(xnames, rotation=90)
        ynames = names if annot else names[1:]
        ax.set_yticklabels(ynames)
    # Minor ticks sit on cell boundaries so the minor grid outlines cells.
    minor_ticks = np.linspace(-.5, nvars - 1.5, nvars)
    ax.set_xticks(minor_ticks, True)
    ax.set_yticks(minor_ticks, True)
    major_ticks = np.linspace(0, nvars - 1, nvars)
    xticks = major_ticks if annot else major_ticks[:-1]
    ax.set_xticks(xticks)
    yticks = major_ticks if annot else major_ticks[1:]
    ax.set_yticks(yticks)
    ax.grid(False, which="major")
    ax.grid(True, which="minor", linestyle="-")
    return ax
def pairplot(data, hue=None, hue_order=None, palette=None,
             vars=None, x_vars=None, y_vars=None,
             kind="scatter", diag_kind="hist", markers=None,
             size=3, aspect=1, dropna=True,
             plot_kws=None, diag_kws=None, grid_kws=None):
    """Plot pairwise relationships in a dataset.

    Parameters
    ----------
    data : DataFrame
        Tidy (long-form) dataframe where each column is a variable and
        each row is an observation.
    hue : string (variable name), optional
        Variable in ``data`` to map plot aspects to different colors.
    hue_order : list of strings
        Order for the levels of the hue variable in the palette
    palette : dict or seaborn color palette
        Set of colors for mapping the ``hue`` variable. If a dict, keys
        should be values in the ``hue`` variable.
    vars : list of variable names, optional
        Variables within ``data`` to use, otherwise use every column with
        a numeric datatype.
    {x, y}_vars : lists of variable names, optional
        Variables within ``data`` to use separately for the rows and
        columns of the figure; i.e. to make a non-square plot.
    kind : {'scatter', 'reg'}, optional
        Kind of plot for the non-identity relationships.
    diag_kind : {'hist', 'kde'}, optional
        Kind of plot for the diagonal subplots.
    markers : single matplotlib marker code or list, optional
        Either the marker to use for all datapoints or a list of markers with
        a length the same as the number of levels in the hue variable so that
        differently colored points will also have different scatterplot
        markers.
    size : scalar, optional
        Height (in inches) of each facet.
    aspect : scalar, optional
        Aspect * size gives the width (in inches) of each facet.
    dropna : boolean, optional
        Drop missing values from the data before plotting.
    {plot, diag, grid}_kws : dicts, optional
        Dictionaries of keyword arguments.

    Returns
    -------
    grid : PairGrid
        Returns the underlying ``PairGrid`` instance for further tweaking.

    See Also
    --------
    PairGrid : Subplot grid for more flexible plotting of pairwise
        relationships.
    """
    if plot_kws is None:
        plot_kws = {}
    if diag_kws is None:
        diag_kws = {}
    if grid_kws is None:
        grid_kws = {}

    # Set up the PairGrid
    diag_sharey = diag_kind == "hist"
    grid = PairGrid(data, vars=vars, x_vars=x_vars, y_vars=y_vars, hue=hue,
                    hue_order=hue_order, palette=palette,
                    diag_sharey=diag_sharey,
                    size=size, aspect=aspect, dropna=dropna, **grid_kws)

    # Add the markers here as PairGrid has figured out how many levels of the
    # hue variable are needed and we don't want to duplicate that process
    if markers is not None:
        if grid.hue_names is None:
            n_markers = 1
        else:
            n_markers = len(grid.hue_names)
        if not isinstance(markers, list):
            markers = [markers] * n_markers
        if len(markers) != n_markers:
            # Message typo fixed: "singeton" -> "singleton".
            raise ValueError(("markers must be a singleton or a list of "
                              "markers for each level of the hue variable"))
        grid.hue_kws = {"marker": markers}

    # Maybe plot on the diagonal
    if grid.square_grid:
        if diag_kind == "hist":
            grid.map_diag(plt.hist, **diag_kws)
        elif diag_kind == "kde":
            diag_kws["legend"] = False
            grid.map_diag(kdeplot, **diag_kws)

    # Maybe plot on the off-diagonals
    if grid.square_grid and diag_kind is not None:
        plotter = grid.map_offdiag
    else:
        plotter = grid.map

    if kind == "scatter":
        plot_kws.setdefault("edgecolor", "white")
        plotter(plt.scatter, **plot_kws)
    elif kind == "reg":
        plotter(regplot, **plot_kws)

    # Add a legend
    if hue is not None:
        grid.add_legend()

    return grid
| bsd-3-clause |
vsanca/TSDC | TensorDetector/code/to_json.py | 1 | 3315 | # -*- coding: utf-8 -*-
"""
Tool for converting existing GTSDB annotations to JSON accepted by TensorBox,
in accordance to brainwash dataset example JSON file.
[
{
"image_path": $path (string),
"rects": [
{
"x1":$x1 (float),
"x2":$x2 (float),
"y1":$y1 (float),
"y2":$y2 (float)
},...
]
},...
]
@author: viktor
"""
JSON_FILE = '/home/student/Desktop/projekat/Data/FullIJCNN2013/train_boxes.json'
GTSDB_FILE = '/home/student/Desktop/projekat/Data/FullIJCNN2013/gt.txt'
GTSDB_FILE_RESIZED = '/home/student/Desktop/projekat/Data/FullIJCNN2013/gt_resized.txt'
IMG_TEST = '/home/student/Desktop/tensorbox/data/brainwash/gtsdb/00000.png'
import matplotlib.pyplot as plt
import matplotlib.image as img
def parse_gtsdb_to_json(gtsdb_path, json_out):
    """Convert GTSDB ground-truth annotations to TensorBox-style JSON.

    Each line of the input file has the form
    ``<image>.ppm;x1;y1;x2;y2;class_id``.  Consecutive lines that refer to
    the same image are merged into a single entry whose ``rects`` list holds
    one box per line, matching the brainwash example schema::

        [{"image_path": str, "rects": [{"x1", "x2", "y1", "y2"}, ...]}, ...]

    Parameters
    ----------
    gtsdb_path : str
        Path to the GTSDB ``gt.txt`` annotation file.
    json_out : str
        Path of the JSON file to write.
    """
    import json

    # Both file handles are now closed deterministically (the original
    # leaked them), and the JSON is produced by the stdlib serializer
    # instead of hand-built strings, which also fixes the dangling-bracket
    # output the original emitted for an empty annotation file.
    entries = []
    with open(gtsdb_path, "r") as gtsdb:
        for line in gtsdb:
            line = line.strip()
            if not line:
                continue
            image, x1, y1, x2, y2 = line.split(";")[:5]
            rect = {"x1": float(x1), "x2": float(x2),
                    "y1": float(y1), "y2": float(y2)}
            image_path = "gtsdb_train/" + image.replace(".ppm", ".png")
            # GTSDB annotations are grouped by image, so comparing against
            # the previous entry reproduces the original merging behavior.
            if entries and entries[-1]["image_path"] == image_path:
                entries[-1]["rects"].append(rect)
            else:
                entries.append({"image_path": image_path, "rects": [rect]})

    with open(json_out, "w") as out:
        json.dump(entries, out, indent=2)
def resize_boxes(gtsdb_path, gtsdb_path_new, current_size, new_size):
    """Rescale GTSDB ground-truth boxes from one image resolution to another.

    Reads ``gtsdb_path`` (lines of ``name;x1;y1;x2;y2;class_id``), scales
    each coordinate from ``current_size`` to ``new_size`` and writes the
    result in the same format to ``gtsdb_path_new``.

    Parameters
    ----------
    gtsdb_path : str
        Input annotation file.
    gtsdb_path_new : str
        Output annotation file (overwritten).
    current_size : (width, height) pair
        Resolution the boxes are currently expressed in.
    new_size : (width, height) pair
        Target resolution.
    """
    # ``with`` guarantees the output file is flushed and closed; the
    # previous implementation left both handles open until GC.
    with open(gtsdb_path, 'r') as gtsdb, open(gtsdb_path_new, 'w') as gtsdb_new:
        for line in gtsdb:
            parts = line.split()
            if not parts:
                continue  # tolerate blank/trailing lines (previously crashed)
            fields = parts[0].split(';')
            # Column order: name;x1;y1;x2;y2;class_id
            x1, y1, x2, y2 = fields[1], fields[2], fields[3], fields[4]
            # Scale per axis; int() truncates, matching the original behaviour.
            x1_n = int(float(x1) / current_size[0] * float(new_size[0]))
            x2_n = int(float(x2) / current_size[0] * float(new_size[0]))
            y1_n = int(float(y1) / current_size[1] * float(new_size[1]))
            y2_n = int(float(y2) / current_size[1] * float(new_size[1]))
            gtsdb_new.write(fields[0] + ';' + str(x1_n) + ';' + str(y1_n) +
                            ';' + str(x2_n) + ';' + str(y2_n) + ';' +
                            fields[5] + '\n')
# Testing the results on images
# Manual visual sanity check (script scaffolding, not part of the API):
# draws one hard-coded ground-truth box on a sample frame.
import skimage.transform as transform
import cv2
img = cv2.imread(IMG_TEST)
# Resize kept for reference when checking boxes against the resized input.
#img = transform.resize(img, (480,640))
# NOTE(review): the rectangle coordinates are hard-coded for one specific
# image -- confirm they match gt.txt before relying on this check.
cv2.rectangle(img,(364,246),(383,267),(0,255,0),3)
# NOTE(review): cv2.imshow without a following cv2.waitKey() may close the
# window immediately when run as a script -- confirm intended usage.
cv2.imshow('hey',img) | gpl-3.0 |
decvalts/cartopy | lib/cartopy/tests/mpl/test_gridliner.py | 1 | 6239 | # (C) British Crown Copyright 2011 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from matplotlib.backends.backend_agg import FigureCanvasAgg
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
try:
from unittest import mock
except ImportError:
import mock
import pytest
import cartopy.crs as ccrs
from cartopy.mpl.geoaxes import GeoAxes
from cartopy.mpl.gridliner import LATITUDE_FORMATTER, LONGITUDE_FORMATTER
from cartopy.tests.mpl import MPL_VERSION, ImageTesting
@pytest.mark.natural_earth
@ImageTesting(['gridliner1'])
def test_gridliner():
    """Render a 4x2 grid of projections exercising gridlines (image test)."""
    nrows, ncols = 4, 2
    plt.figure(figsize=(10, 10))

    def prepared_axes(position, projection):
        # Common per-panel setup: global extent plus coastlines.
        axes = plt.subplot(nrows, ncols, position, projection=projection)
        axes.set_global()
        axes.coastlines()
        return axes

    axes = prepared_axes(1, ccrs.PlateCarree())
    axes.gridlines()

    axes = prepared_axes(2, ccrs.OSGB())
    axes.gridlines()

    axes = prepared_axes(3, ccrs.OSGB())
    axes.gridlines(ccrs.PlateCarree(), color='blue', linestyle='-')
    axes.gridlines(ccrs.OSGB())

    axes = prepared_axes(4, ccrs.PlateCarree())
    axes.gridlines(ccrs.NorthPolarStereo(), alpha=0.5,
                   linewidth=1.5, linestyle='-')

    axes = prepared_axes(5, ccrs.PlateCarree())
    osgb = ccrs.OSGB()
    axes.set_extent(tuple(osgb.x_limits) + tuple(osgb.y_limits), crs=osgb)
    axes.gridlines(osgb)

    axes = prepared_axes(6, ccrs.NorthPolarStereo())
    axes.gridlines(alpha=0.5, linewidth=1.5, linestyle='-')

    axes = prepared_axes(7, ccrs.NorthPolarStereo())
    osgb = ccrs.OSGB()
    axes.set_extent(tuple(osgb.x_limits) + tuple(osgb.y_limits), crs=osgb)
    axes.gridlines(osgb)

    axes = prepared_axes(8, ccrs.Robinson(central_longitude=135))
    axes.gridlines(ccrs.PlateCarree(), alpha=0.5, linewidth=1.5, linestyle='-')

    # Shrink the outer margins so the panels nearly fill the figure.
    margin = 1.5e-2
    plt.subplots_adjust(left=margin, right=1 - margin,
                        top=1 - margin, bottom=margin)
def test_gridliner_specified_lines():
    """gridlines(xlocs=..., ylocs=...) should install FixedLocators."""
    lon_ticks = [0, 60, 120, 180, 240, 360]
    lat_ticks = [-90, -60, -30, 0, 30, 60, 90]
    # A mocked GeoAxes is enough: gridlines() only registers the gridliner.
    fake_axes = mock.Mock(_gridliners=[], spec=GeoAxes)
    gridliner = GeoAxes.gridlines(fake_axes, xlocs=lon_ticks, ylocs=lat_ticks)
    for locator in (gridliner.xlocator, gridliner.ylocator):
        assert isinstance(locator, mticker.FixedLocator)
    assert gridliner.xlocator.tick_values(None, None).tolist() == lon_ticks
    assert gridliner.ylocator.tick_values(None, None).tolist() == lat_ticks
# The tolerance on this test is particularly high because of the high number
# of text objects. A new testing strategy is needed for this kind of test.
# Select the reference image matching the installed matplotlib, since label
# layout changed between releases.
# NOTE(review): this relies on ordered comparison of MPL_VERSION against
# strings; that works for these particular values but plain string comparison
# would misorder e.g. '10.0' -- confirm MPL_VERSION is a LooseVersion-like
# object rather than a plain str.
if MPL_VERSION >= '2.0':
    grid_label_image = 'gridliner_labels'
elif MPL_VERSION >= '1.5':
    grid_label_image = 'gridliner_labels_1.5'
else:
    grid_label_image = 'gridliner_labels_pre_mpl_1.5'
@pytest.mark.natural_earth
@ImageTesting([grid_label_image])
def test_grid_labels():
    """Exercise gridline label drawing across projections (image test)."""
    plt.figure(figsize=(8, 10))

    crs_pc = ccrs.PlateCarree()
    crs_merc = ccrs.Mercator()
    crs_osgb = ccrs.OSGB()

    ax = plt.subplot(3, 2, 1, projection=crs_pc)
    ax.coastlines()
    ax.gridlines(draw_labels=True)

    # Check that adding labels to Mercator gridlines gives an error.
    # (Currently can only label PlateCarree gridlines.)
    ax = plt.subplot(3, 2, 2,
                     projection=ccrs.PlateCarree(central_longitude=180))
    ax.coastlines()
    with pytest.raises(TypeError):
        ax.gridlines(crs=crs_merc, draw_labels=True)

    ax.set_title('Known bug')
    gl = ax.gridlines(crs=crs_pc, draw_labels=True)
    gl.xlabels_top = False
    gl.ylabels_left = False
    gl.xlines = False

    ax = plt.subplot(3, 2, 3, projection=crs_merc)
    ax.coastlines()
    ax.gridlines(draw_labels=True)

    # Check that labelling the gridlines on an OSGB plot gives an error.
    # (Currently can only draw these on PlateCarree or Mercator plots.)
    ax = plt.subplot(3, 2, 4, projection=crs_osgb)
    ax.coastlines()
    with pytest.raises(TypeError):
        ax.gridlines(draw_labels=True)

    # Position 4 is deliberately reused: the OSGB axes above only checked
    # the error path, so replace it with a labelled PlateCarree plot.
    ax = plt.subplot(3, 2, 4, projection=crs_pc)
    ax.coastlines()
    gl = ax.gridlines(
        crs=crs_pc, linewidth=2, color='gray', alpha=0.5, linestyle='--')
    gl.xlabels_bottom = True
    gl.ylabels_right = True
    gl.xlines = False
    gl.xlocator = mticker.FixedLocator([-180, -45, 45, 180])
    gl.xformatter = LONGITUDE_FORMATTER
    gl.yformatter = LATITUDE_FORMATTER
    # NOTE(review): xlabel_style is assigned twice; the second assignment
    # overwrites the first (dropping size/color 'gray') -- possibly the
    # second was meant to be ylabel_style. Left as-is to preserve the
    # reference image.
    gl.xlabel_style = {'size': 15, 'color': 'gray'}
    gl.xlabel_style = {'color': 'red'}
    gl.xpadding = 10
    gl.ypadding = 15

    # trigger a draw at this point and check the appropriate artists are
    # populated on the gridliner instance
    FigureCanvasAgg(plt.gcf()).draw()

    assert len(gl.xlabel_artists) == 4
    # (a duplicated copy of the following assertion was removed)
    assert len(gl.ylabel_artists) == 5
    assert len(gl.xline_artists) == 0

    ax = plt.subplot(3, 2, 5, projection=crs_pc)
    ax.set_extent([-20, 10.0, 45.0, 70.0])
    ax.coastlines()
    ax.gridlines(draw_labels=True)

    ax = plt.subplot(3, 2, 6, projection=crs_merc)
    ax.set_extent([-20, 10.0, 45.0, 70.0], crs=crs_pc)
    ax.coastlines()
    ax.gridlines(draw_labels=True)

    # Increase margins between plots to stop them bumping into one another.
    plt.subplots_adjust(wspace=0.25, hspace=0.25)
| gpl-3.0 |
arjoly/scikit-learn | sklearn/metrics/__init__.py | 214 | 3440 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
'pairwise_distances_argmin_min',
'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
| bsd-3-clause |
stylianos-kampakis/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 233 | 7819 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]

page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]

resources = [
    (redirects_url, redirects_filename),
    (page_links_url, page_links_filename),
]

for url, filename in resources:
    if not os.path.exists(filename):
        print("Downloading data from '%s', please wait..." % url)
        opener = urlopen(url)
        # Context manager ensures the (multi-GB) download is flushed and the
        # file handle closed even if the write fails; the previous
        # ``open(filename, 'wb').write(...)`` relied on GC for both.
        with open(filename, 'wb') as dump_file:
            dump_file.write(opener.read())
        opener.close()  # release the HTTP connection explicitly
        print()
###############################################################################
# Loading the redirect files

# joblib cache for expensive parsing steps.  Currently unused: the
# ``@memory.cache`` decorator below is disabled because pickling the large
# dicts proved much too slow.
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
    """Find the index of an article name after redirect resolution"""
    # Resolve via the (transitively closed) redirect map first, then hand out
    # a fresh dense integer id for names seen for the first time.
    resolved = redirects.get(k, k)
    return index_map.setdefault(resolved, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)


def short_name(nt_uri):
    """Remove the < and > URI markers and the common URI prefix"""
    # Drop the leading '<' plus the shared resource prefix, and the
    # trailing '>'.  Works on both str and bytes.
    return nt_uri[DBPEDIA_RESOURCE_PREFIX_LEN + 1:-1]


def get_redirects(redirects_filename):
    """Parse the redirections and build a transitively closed map out of it"""
    redirects = {}
    print("Parsing the NT redirect file")
    for line_no, line in enumerate(BZ2File(redirects_filename)):
        tokens = line.split()
        # A well-formed NT line has exactly 4 tokens: subj pred obj '.'
        if len(tokens) != 4:
            print("ignoring malformed line: " + line)
            continue
        redirects[short_name(tokens[0])] = short_name(tokens[2])
        if line_no % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), line_no))

    # compute the transitive closure
    print("Computing the transitive closure of the redirect relation")
    for line_no, source in enumerate(redirects.keys()):
        final_target = None
        target = redirects[source]
        visited = set([source])
        # Walk the redirect chain to its end, guarding against cycles.
        while True:
            final_target = target
            target = redirects.get(target)
            if target is None or target in visited:
                break
            visited.add(target)
        redirects[source] = final_target
        if line_no % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), line_no))

    return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
    """Extract the adjacency graph as a scipy sparse matrix

    Redirects are resolved first.

    Returns X, the scipy sparse adjacency matrix, redirects as python
    dict from article names to article names and index_map a python dict
    from article names to python int (article indexes).
    """
    print("Computing the redirect map")
    redirects = get_redirects(redirects_filename)

    print("Computing the integer index map")
    index_map = dict()
    links = list()
    for line_no, line in enumerate(BZ2File(page_links_filename)):
        tokens = line.split()
        if len(tokens) != 4:
            print("ignoring malformed line: " + line)
            continue
        # Map both endpoints through the redirect map onto dense integer ids.
        source_idx = index(redirects, index_map, short_name(tokens[0]))
        target_idx = index(redirects, index_map, short_name(tokens[2]))
        links.append((source_idx, target_idx))
        if line_no % 1000000 == 0:
            print("[%s] line: %08d" % (datetime.now().isoformat(), line_no))

        if limit is not None and line_no >= limit - 1:
            break

    print("Computing the adjacency matrix")
    # Build in LIL format (efficient incremental assignment) ...
    X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
    for source_idx, target_idx in links:
        X[source_idx, target_idx] = 1.0
    del links
    print("Converting to CSR representation")
    # ... then convert to CSR for fast arithmetic downstream.
    X = X.tocsr()
    print("CSR conversion done")
    return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
    redirects_filename, page_links_filename, limit=5000000)

# Invert the index: article integer id -> article name, for reporting.
names = dict((i, name) for name, i in iteritems(index_map))

print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))

# Print the names of the strongest components of the principal singular
# vectors; these should be similar to the pages with the highest
# eigenvector centrality.
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
    """Power iteration computation of the principal eigenvector

    This method is also known as Google PageRank and the implementation
    is based on the one from the NetworkX project (BSD licensed too)
    with copyrights by:

      Aric Hagberg <hagberg@lanl.gov>
      Dan Schult <dschult@colgate.edu>
      Pieter Swart <swart@lanl.gov>
    """
    n = X.shape[0]
    X = X.copy()
    # Row sums = out-degree of every node; rows with at least one link are
    # normalised so that each becomes a probability distribution.
    row_sums = np.asarray(X.sum(axis=1)).ravel()

    print("Normalizing the graph")
    for row in row_sums.nonzero()[0]:
        X.data[X.indptr[row]:X.indptr[row + 1]] *= 1.0 / row_sums[row]
    # Dangling nodes (no outgoing links) redistribute uniformly.
    dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()

    scores = np.ones(n, dtype=np.float32) / n  # initial guess
    for step in range(max_iter):
        print("power iteration #%d" % step)
        prev_scores = scores
        scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
                  + (1 - alpha) * prev_scores.sum() / n)

        # check convergence: normalized l_inf norm
        # (``or 1.0`` guards against an all-zero score vector)
        err = np.abs(scores - prev_scores).max() / (np.abs(scores).max() or 1.0)
        print("error: %0.6f" % err)
        if err < n * tol:
            return scores

    return scores
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
# Show the ten highest-scoring pages; should broadly agree with the SVD
# ranking printed above.
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
hainm/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 251 | 2022 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt

from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
# NOTE(review): sklearn.grid_search is the pre-0.18 location of
# GridSearchCV (later sklearn.model_selection) -- confirm the pinned
# scikit-learn version before running.
from sklearn.grid_search import GridSearchCV

# load the data
digits = load_digits()
data = digits.data

# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)

# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)

print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))

# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_

# sample 44 new points from the data and map them back to 64-D pixel space
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)

# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))

# plot real digits and resampled digits (row 4 is left blank as a separator)
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
    ax[4, j].set_visible(False)
    for i in range(4):
        im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
                             cmap=plt.cm.binary, interpolation='nearest')
        im.set_clim(0, 16)
        im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
                                 cmap=plt.cm.binary, interpolation='nearest')
        im.set_clim(0, 16)

ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')

plt.show()
| bsd-3-clause |
aabadie/scikit-learn | examples/decomposition/plot_pca_iris.py | 65 | 1485 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
PCA example with Iris Data-set
=========================================================
Principal Component Analysis applied to the Iris dataset.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)


# Code source: Gaël Varoquaux
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D


from sklearn import decomposition
from sklearn import datasets

np.random.seed(5)

# NOTE(review): ``centers`` is unused in this example; kept for reference.
centers = [[1, 1], [-1, -1], [1, -1]]

iris = datasets.load_iris()
X = iris.data
y = iris.target

fig = plt.figure(1, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)

plt.cla()
# Project the 4-D iris measurements onto their first three principal axes.
pca = decomposition.PCA(n_components=3)
pca.fit(X)
X = pca.transform(X)

# Label each species cluster at its centroid in the projected space.
for name, label in [('Setosa', 0), ('Versicolour', 1), ('Virginica', 2)]:
    ax.text3D(X[y == label, 0].mean(),
              X[y == label, 1].mean() + 1.5,
              X[y == label, 2].mean(), name,
              horizontalalignment='center',
              bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results.
# Builtin ``float`` replaces the deprecated ``np.float`` alias (which was a
# plain alias of ``float`` and was removed in NumPy 1.24); behaviour is
# unchanged.
y = np.choose(y, [1, 2, 0]).astype(float)
# NOTE(review): ``plt.cm.spectral`` was removed in Matplotlib 2.2 (renamed
# ``plt.cm.Spectral``) -- confirm against the pinned Matplotlib version.
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y, cmap=plt.cm.spectral)

ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])

plt.show()
| bsd-3-clause |
pnedunuri/scikit-learn | sklearn/utils/testing.py | 71 | 26178 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
# Assertion helpers re-exported for use across the scikit-learn test suite.
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
           "assert_raises_regexp", "raises", "with_setup", "assert_true",
           "assert_false", "assert_almost_equal", "assert_array_equal",
           "assert_array_almost_equal", "assert_array_less",
           "assert_less", "assert_less_equal",
           "assert_greater", "assert_greater_equal"]


# Fallback definitions for assertions missing from old nose releases.
try:
    from nose.tools import assert_in, assert_not_in
except ImportError:
    # Nose < 1.0.0

    def assert_in(x, container):
        assert_true(x in container, msg="%r in %r" % (x, container))

    def assert_not_in(x, container):
        assert_false(x in container, msg="%r in %r" % (x, container))

try:
    from nose.tools import assert_raises_regex
except ImportError:
    # for Python 2

    def assert_raises_regex(expected_exception, expected_regexp,
                            callable_obj=None, *args, **kwargs):
        """Helper function to check for message patterns in exceptions"""
        # Flag instead of raising inside ``try`` so the AssertionError below
        # is not swallowed by the ``except expected_exception`` clause.
        not_raised = False
        try:
            callable_obj(*args, **kwargs)
            not_raised = True
        except expected_exception as e:
            error_message = str(e)
            if not re.compile(expected_regexp).search(error_message):
                raise AssertionError("Error message should match pattern "
                                     "%r. %r does not." %
                                     (expected_regexp, error_message))
        if not_raised:
            raise AssertionError("%s not raised by %s" %
                                 (expected_exception.__name__,
                                  callable_obj.__name__))

# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but lets keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
    """Test that a certain warning occurs.

    Parameters
    ----------
    warning_class : the warning class
        The class to test for, e.g. UserWarning.

    func : callable
        Callable object to trigger warnings.

    *args : the positional arguments to `func`.

    **kw : the keyword arguments to `func`

    Returns
    -------
    result : the return value of `func`

    Raises
    ------
    AssertionError
        If no warning was raised, or none of the raised warnings has
        exactly the class ``warning_class`` (subclasses do not count here).
    """
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        # Trigger a warning.
        result = func(*args, **kw)
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Filter out numpy-specific warnings in numpy >= 1.9
            w = [e for e in w
                 if e.category is not np.VisibleDeprecationWarning]
        # Verify some things
        if not len(w) > 0:
            raise AssertionError("No warning raised when calling %s"
                                 % func.__name__)
        # Identity comparison: a subclass of ``warning_class`` is NOT
        # accepted here (contrast with assert_warns_message, which uses
        # issubclass).
        found = any(warning.category is warning_class for warning in w)
        if not found:
            raise AssertionError("%s did not give warning: %s( is %s)"
                                 % (func.__name__, warning_class, w))
    return result
def assert_warns_message(warning_class, message, func, *args, **kw):
    # very important to avoid uncontrolled state propagation
    """Test that a certain warning occurs and with a certain message.

    Parameters
    ----------
    warning_class : the warning class
        The class to test for, e.g. UserWarning.

    message : str | callable
        The entire message or a substring to test for. If callable,
        it takes a string as argument and will trigger an assertion error
        if it returns `False`.

    func : callable
        Callable object to trigger warnings.

    *args : the positional arguments to `func`.

    **kw : the keyword arguments to `func`.

    Returns
    -------
    result : the return value of `func`
    """
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Let's not catch the numpy internal DeprecationWarnings
            warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
        # Trigger a warning.
        result = func(*args, **kw)
        # Verify some things
        if not len(w) > 0:
            raise AssertionError("No warning raised when calling %s"
                                 % func.__name__)
        # Unlike assert_warns, subclasses of ``warning_class`` are accepted.
        found = [issubclass(warning.category, warning_class) for warning in w]
        if not any(found):
            raise AssertionError("No warning raised for %s with class "
                                 "%s"
                                 % (func.__name__, warning_class))
        message_found = False
        # Checks the message of all warnings belong to warning_class
        for index in [i for i, x in enumerate(found) if x]:
            # substring will match, the entire message with typo won't
            msg = w[index].message  # For Python 3 compatibility
            msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
            if callable(message):  # add support for certain tests
                check_in_message = message
            else:
                check_in_message = lambda msg: message in msg

            if check_in_message(msg):
                message_found = True
                break

        if not message_found:
            raise AssertionError("Did not receive the message you expected "
                                 "('%s') for <%s>, got: '%s'"
                                 % (message, func.__name__, msg))

    return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
    """Call ``func(*args, **kw)`` and fail if any warning is raised.

    numpy's VisibleDeprecationWarning (numpy >= 1.9) is ignored.
    Returns the return value of ``func``.
    """
    # XXX: once we may depend on python >= 2.6, this can be replaced by the
    # warnings module context manager.
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')

        result = func(*args, **kw)
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Filter out numpy-specific warnings in numpy >= 1.9
            w = [e for e in w
                 if e.category is not np.VisibleDeprecationWarning]

        if len(w) > 0:
            raise AssertionError("Got warnings when calling %s: %s"
                                 % (func.__name__, w))
    return result
def ignore_warnings(obj=None):
    """ Context manager and decorator to ignore warnings

    Note. Using this (in both variants) will clear all warnings
    from all python modules loaded. In case you need to test
    cross-module-warning-logging this is not your tool of choice.

    Examples
    --------
    >>> with ignore_warnings():
    ...     warnings.warn('buhuhuhu')

    >>> def nasty_warn():
    ...    warnings.warn('buhuhuhu')
    ...    print(42)

    >>> ignore_warnings(nasty_warn)()
    42
    """
    # Used as a decorator (``@ignore_warnings`` on a callable) -> wrap the
    # function; otherwise behave as a context manager.
    return _ignore_warnings(obj) if callable(obj) else _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
    """Improved and simplified Python warnings context manager

    Copied from Python 2.7.5 and modified as required.

    On ``__enter__`` the current warning filters and ``showwarning`` hook
    are saved and replaced; captured warnings are appended to ``self.log``.
    On ``__exit__`` the saved state is restored and the log cleared.
    """
    def __init__(self):
        """
        Parameters
        ==========
        category : warning class
            The category to filter. Defaults to Warning. If None,
            all categories will be muted.
        """
        # _record is always True in this simplified version; kept as an
        # attribute to mirror the stdlib implementation it was copied from.
        self._record = True
        self._module = sys.modules['warnings']
        self._entered = False
        self.log = []

    def __repr__(self):
        args = []
        if self._record:
            args.append("record=True")
        if self._module is not sys.modules['warnings']:
            args.append("module=%r" % self._module)
        name = type(self).__name__
        return "%s(%s)" % (name, ", ".join(args))

    def __enter__(self):
        clean_warning_registry()  # be safe and not propagate state + chaos
        warnings.simplefilter('always')
        # Not re-entrant: a second __enter__ without __exit__ is an error.
        if self._entered:
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        # Save filters (and work on a copy) plus the showwarning hook so
        # __exit__ can restore both exactly.
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._showwarning = self._module.showwarning
        if self._record:
            self.log = []

            def showwarning(*args, **kwargs):
                self.log.append(warnings.WarningMessage(*args, **kwargs))
            self._module.showwarning = showwarning
            return self.log
        else:
            return None

    def __exit__(self, *exc_info):
        if not self._entered:
            raise RuntimeError("Cannot exit %r without entering first" % self)
        self._module.filters = self._filters
        self._module.showwarning = self._showwarning
        # Clear in place so references handed out by __enter__ see the reset.
        self.log[:] = []
        clean_warning_registry()  # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
    """Helper function to test error messages in exceptions

    Parameters
    ----------
    exceptions : exception or tuple of exception
        The expected exception class(es).

    function : callable
        Callable object expected to raise the error.

    *args : the positional arguments to `function`.

    **kwargs : the keyword arguments to `function`.
    """
    try:
        function(*args, **kwargs)
    except exceptions as exc:
        observed = str(exc)
        if message not in observed:
            raise AssertionError("Error message does not include the expected"
                                 " string: %r. Observed error message: %r" %
                                 (message, observed))
    else:
        # Nothing was raised: report every acceptable exception name.
        if isinstance(exceptions, tuple):
            names = " or ".join(err.__name__ for err in exceptions)
        else:
            names = exceptions.__name__
        raise AssertionError("%s not raised by %s" %
                             (names, function.__name__))
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
    """Create a fake mldata data set.

    Parameters
    ----------
    columns_dict : dict, keys=str, values=ndarray
        Contains data as columns_dict[column_name] = array of data.
    dataname : string
        Name of data set (currently unused; kept for API compatibility).
    matfile : string or file object
        The file name string or the file-like object of the output file.
    ordering : list, default None
        List of column_names, determines the ordering in the data set.

    Notes
    -----
    This function transposes all arrays, while fetch_mldata only transposes
    'data', keep that into account in the tests.
    """
    datasets = dict(columns_dict)
    # transpose all variables
    for name in datasets:
        datasets[name] = datasets[name].T
    if ordering is None:
        ordering = sorted(list(datasets.keys()))
    # NOTE: setting up this array is tricky, because of the way Matlab
    # re-packages 1D arrays.
    # Use np.empty: sp.empty was only scipy's alias of numpy.empty, which
    # scipy has deprecated and later removed.
    datasets['mldata_descr_ordering'] = np.empty((1, len(ordering)),
                                                 dtype='object')
    for i, name in enumerate(ordering):
        datasets['mldata_descr_ordering'][0, i] = name
    scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
    """Callable that mocks the urlopen function for mldata requests.

    ``mock_datasets`` maps dataset_name either to a data_dict
    ({column_name: data_array}) or to a (data_dict, ordering) tuple, where
    ``ordering`` is a list of column names fixing their order in the data
    set (see ``fake_mldata`` for details).

    Requesting a dataset whose name is in ``mock_datasets`` returns a fake
    .mat data set built in an in-memory buffer; any other name raises an
    HTTPError (404).
    """

    def __init__(self, mock_datasets):
        self.mock_datasets = mock_datasets

    def __call__(self, urlname):
        dataset_name = urlname.split('/')[-1]
        if dataset_name not in self.mock_datasets:
            raise HTTPError(urlname, 404, dataset_name + " is not available",
                            [], None)
        from io import BytesIO
        dataset = self.mock_datasets[dataset_name]
        ordering = None
        if isinstance(dataset, tuple):
            dataset, ordering = dataset
        buf = BytesIO()
        # The leading underscore mirrors mldata's internal resource naming.
        fake_mldata(dataset, '_' + dataset_name, buf, ordering)
        buf.seek(0)
        return buf
def install_mldata_mock(mock_datasets):
    """Replace sklearn.datasets.mldata.urlopen with a mock.

    ``mock_datasets`` has the format expected by ``mock_mldata_urlopen``.
    """
    # Lazy import to avoid mutually recursive imports
    from sklearn import datasets
    datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
    """Restore the original urlopen function in sklearn.datasets.mldata."""
    # Lazy import to avoid mutually recursive imports
    from sklearn import datasets
    datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
                   "OutputCodeClassifier", "OneVsRestClassifier", "RFE",
                   "RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
         "RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
             'LabelBinarizer', 'LabelEncoder',
             'MultiLabelBinarizer', 'TfidfTransformer',
             'TfidfVectorizer', 'IsotonicRegression',
             'OneHotEncoder', 'RandomTreesEmbedding',
             'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
             'TruncatedSVD', 'PolynomialFeatures',
             'GaussianRandomProjectionHash', 'HashingVectorizer',
             'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
             # GradientBoosting base estimators, maybe should
             # exclude them in another way
             'ZeroEstimator', 'ScaledLogOddsEstimator',
             'QuantileEstimator', 'MeanEstimator',
             'LogOddsEstimator', 'PriorProbabilityEstimator',
             '_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
                   include_other=False, type_filter=None,
                   include_dont_test=False):
    """Get a list of all estimators from sklearn.

    This function crawls the module and gets all classes that inherit
    from BaseEstimator. Classes that are defined in test-modules are not
    included.
    By default meta_estimators such as GridSearchCV are also not included.

    Parameters
    ----------
    include_meta_estimators : boolean, default=False
        Whether to include meta-estimators that can be constructed using
        an estimator as their first argument. These are currently
        BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
        OneVsRestClassifier, RFE, RFECV.
    include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
        not be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion and GridSearchCV
    include_dont_test : boolean, default=False
        Whether to include "special" label estimator or test processors.
    type_filter : string, list of string, or None, default=None
        Which kind of estimators should be returned. If None, no filter is
        applied and all estimators are returned. Possible values are
        'classifier', 'regressor', 'cluster' and 'transformer' to get
        estimators only of these specific types, or a list of these to
        get the estimators that fit at least one of the types.

    Returns
    -------
    estimators : list of tuples
        List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
    """
    def is_abstract(c):
        # Abstract iff the class still declares unimplemented abstract methods.
        if not(hasattr(c, '__abstractmethods__')):
            return False
        if not len(c.__abstractmethods__):
            return False
        return True

    all_classes = []
    # get parent folder
    path = sklearn.__path__
    for importer, modname, ispkg in pkgutil.walk_packages(
            path=path, prefix='sklearn.', onerror=lambda x: None):
        if ".tests." in modname:
            continue
        module = __import__(modname, fromlist="dummy")
        classes = inspect.getmembers(module, inspect.isclass)
        all_classes.extend(classes)
    # de-duplicate (name, class) pairs picked up from multiple modules
    all_classes = set(all_classes)
    estimators = [c for c in all_classes
                  if (issubclass(c[1], BaseEstimator)
                      and c[0] != 'BaseEstimator')]
    # get rid of abstract base classes
    estimators = [c for c in estimators if not is_abstract(c[1])]
    if not include_dont_test:
        estimators = [c for c in estimators if not c[0] in DONT_TEST]
    if not include_other:
        estimators = [c for c in estimators if not c[0] in OTHER]
    # possibly get rid of meta estimators
    if not include_meta_estimators:
        estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
    if type_filter is not None:
        if not isinstance(type_filter, list):
            type_filter = [type_filter]
        else:
            type_filter = list(type_filter)  # copy
        filtered_estimators = []
        filters = {'classifier': ClassifierMixin,
                   'regressor': RegressorMixin,
                   'transformer': TransformerMixin,
                   'cluster': ClusterMixin}
        for name, mixin in filters.items():
            if name in type_filter:
                type_filter.remove(name)
                filtered_estimators.extend([est for est in estimators
                                            if issubclass(est[1], mixin)])
        estimators = filtered_estimators
        if type_filter:
            # Anything left over was not a recognized category name.
            raise ValueError("Parameter type_filter must be 'classifier', "
                             "'regressor', 'transformer', 'cluster' or None, got"
                             " %s." % repr(type_filter))
    # drop duplicates, sort for reproducibility
    return sorted(set(estimators))
def set_random_state(estimator, random_state=0):
    """Set the ``random_state`` parameter of ``estimator`` if it has one.

    Estimators without a ``random_state`` parameter are left untouched.
    """
    # Membership test directly on the dict returned by get_params();
    # materializing .keys() first was redundant.
    if "random_state" in estimator.get_params():
        estimator.set_params(random_state=random_state)
def if_matplotlib(func):
    """Test decorator that skips test if matplotlib not installed. """
    @wraps(func)
    def wrapped(*args, **kwargs):
        try:
            import matplotlib
            matplotlib.use('Agg', warn=False)
            # this fails if no $DISPLAY specified
            import matplotlib.pyplot as plt
            plt.figure()
        except ImportError:
            raise SkipTest('Matplotlib not available.')
        return func(*args, **kwargs)
    return wrapped
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
                  message='Multi-process bug in Mac OS X >= 10.7 '
                          '(see issue #636)'):
    """Test decorator skipping a test on Mac OS X when the major OS
    version is one of ``versions``.

    Deprecated; emits a DeprecationWarning on every call.
    """
    warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed"
                  " in 0.19: use the safer and more generic"
                  " if_safe_multiprocessing_with_blas instead",
                  DeprecationWarning)
    major_version = '.'.join(platform.mac_ver()[0].split('.')[:2])
    skip = major_version in versions

    def decorator(func):
        if not skip:
            return func
        @wraps(func)
        def skipper(*args, **kwargs):
            raise SkipTest(message)
        return skipper
    return decorator
def if_safe_multiprocessing_with_blas(func):
    """Decorator for tests involving both BLAS calls and multiprocessing.

    Under Python < 3.4 on POSIX systems (e.g. Linux or OSX), mixing
    multiprocessing with some BLAS implementations (or other libraries
    managing an internal posix thread pool) can crash or freeze the Python
    process.  From Python 3.4 on, joblib uses multiprocessing's forkserver
    mode, which does not trigger the problem.  In practice all known
    packaged BLAS distributions on Linux appear safe, so this mostly
    affects OSX.  The wrapper skips the decorated test under the risky
    configuration.
    """
    @wraps(func)
    def guarded(*args, **kwargs):
        unsafe = sys.platform == 'darwin' and sys.version_info[:2] < (3, 4)
        if unsafe:
            raise SkipTest(
                "Possible multi-process bug with some BLAS under Python < 3.4")
        return func(*args, **kwargs)
    return guarded
def clean_warning_registry():
    """Safe way to reset warnings """
    warnings.resetwarnings()
    registry_attr = "__warningregistry__"
    for module_name, module in list(sys.modules.items()):
        # six.moves modules are deliberately left alone (presumably their
        # lazy attribute access has import side effects — confirm).
        if 'six.moves' in module_name:
            continue
        registry = getattr(module, registry_attr, None)
        if registry is not None:
            registry.clear()
def check_skip_network():
    """Skip network-dependent tests when requested via the environment.

    Raises SkipTest when the SKLEARN_SKIP_NETWORK_TESTS environment
    variable is set to a non-zero integer.
    """
    flag = os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)
    if int(flag):
        raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
    """Skip test if being run on Travis."""
    on_travis = os.environ.get('TRAVIS') == "true"
    if on_travis:
        raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copy from joblib.pool (for independance)"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
    """Context manager dumping ``data`` to disk and yielding a memmapped copy.

    On entry the data is pickled with joblib into a temporary folder and
    loaded back with the requested ``mmap_mode``; the folder is removed on
    exit (and again at interpreter shutdown as a fallback).
    """

    def __init__(self, data, mmap_mode='r'):
        self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
        self.mmap_mode = mmap_mode
        self.data = data

    def __enter__(self):
        data_path = op.join(self.temp_folder, 'data.pkl')
        joblib.dump(self.data, data_path)
        memmapped = joblib.load(data_path, mmap_mode=self.mmap_mode)
        # Fallback cleanup in case __exit__ never runs.
        atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
        return memmapped

    def __exit__(self, exc_type, exc_val, exc_tb):
        _delete_folder(self.temp_folder)
# nose-style setup decorators: run the corresponding skip-check before each test.
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
kcompher/pyLDAvis | pyLDAvis/graphlab.py | 6 | 2460 | """
pyLDAvis GraphLab
===============
Helper functions to visualize GraphLab Create's TopicModel (an implementation of LDA)
"""
from __future__ import absolute_import
import funcy as fp
import numpy as np
import pandas as pd
import graphlab as gl
import pyLDAvis
def _topics_as_df(topic_model):
tdf = topic_model['topics'].to_dataframe()
return pd.DataFrame(np.vstack(tdf['topic_probabilities'].values), index=tdf['vocabulary'])
def _sum_sarray_dicts(sarray):
    """Sum an SArray of {token: count} dicts into a single dict.

    Stacks every dict into (key, value) rows, groups by key summing the
    counts, then unstacks back into one dict.
    NOTE(review): relies on GraphLab SFrame stack/groupby/unstack
    semantics; assumes ``sarray`` is an SArray of dict — confirm against
    callers.
    """
    counts_sf = gl.SFrame({'count_dicts': sarray}).stack('count_dicts').groupby(key_columns='X1',
                                                                               operations={'count': gl.aggregate.SUM('X2')})
    return counts_sf.unstack(column=['X1', 'count'])[0].values()[0]
def _extract_doc_data(docs):
    """Compute document lengths, vocabulary and corpus-wide term frequencies.

    Parameters
    ----------
    docs : SArray of dicts
        The corpus in bag-of-words form ({token: count} per document).

    Returns
    -------
    dict
        With keys 'doc_lengths', 'vocab' and 'term_frequency'.
    """
    # list(...) around the dict views: under Python 3, dict.values()/keys()
    # return views, which np.array() wraps as a 0-d object array and which
    # downstream consumers cannot index; materializing them preserves the
    # original Python 2 list behavior.
    doc_lengths = list(docs.apply(lambda d: np.array(list(d.values())).sum()))
    term_freqs_dict = _sum_sarray_dicts(docs)
    vocab = list(term_freqs_dict.keys())
    term_freqs = list(term_freqs_dict.values())
    return {'doc_lengths': doc_lengths, 'vocab': vocab, 'term_frequency': term_freqs}
def _extract_model_data(topic_model, docs, vocab):
    """Collect topic-term and document-topic distributions for pyLDAvis."""
    doc_topic_dists = np.vstack(topic_model.predict(docs, output_type='probabilities'))
    topic_term_dists = _topics_as_df(topic_model).T[vocab].values
    return {'topic_term_dists': topic_term_dists, 'doc_topic_dists': doc_topic_dists}
def _extract_data(topic_model, docs):
    """Merge document-level and model-level data into one pyLDAvis input dict."""
    doc_data = _extract_doc_data(docs)
    model_data = _extract_model_data(topic_model, docs, doc_data['vocab'])
    merged = fp.merge(doc_data, model_data)
    return merged
def prepare(topic_model, docs, **kargs):
    """Transforms the GraphLab TopicModel and related corpus data into
    the data structures needed for the visualization.

    Parameters
    ----------
    topic_model : graphlab.toolkits.topic_model.topic_model.TopicModel
        An already trained GraphLab topic model.
    docs : SArray of dicts
        The corpus in bag of word form, the same docs used to train the model.
    **kargs :
        additional keyword arguments are passed through to :func:`pyldavis.prepare`.

    Returns
    -------
    prepared_data : PreparedData
        the data structures used in the visualization

    Example
    --------
    For example usage please see this notebook:
    http://nbviewer.ipython.org/github/bmabey/pyLDAvis/blob/master/notebooks/GraphLab.ipynb
    """
    # Caller-supplied keyword arguments take precedence over extracted data.
    opts = fp.merge(_extract_data(topic_model, docs), kargs)
    return pyLDAvis.prepare(**opts)
| bsd-3-clause |
nhuntwalker/astroML | book_figures/chapter8/fig_cross_val_A.py | 3 | 2169 | """
Cross Validation Examples: Part 1
---------------------------------
Figure 8.12
Our toy data set described by eq. 8.75. Shown is the line of best fit, which
quite clearly underfits the data. In other words, a linear model in this case
has high bias.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import ticker
from matplotlib.patches import FancyArrow
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Define our functional form
def func(x, dy=0.1):
    """Noisy target function: samples from Normal(x * sin(x), dy)."""
    mean = np.sin(x) * x
    return np.random.normal(mean, dy)
#------------------------------------------------------------
# select the (noisy) data
np.random.seed(0)
x = np.linspace(0, 3, 22)[1:-1]  # drop the two endpoints 0 and 3
dy = 0.1
y = func(x, dy)
#------------------------------------------------------------
# Select the cross-validation points
np.random.seed(1)
x_cv = 3 * np.random.random(20)
y_cv = func(x_cv)
x_fit = np.linspace(0, 3, 1000)  # dense grid used to draw the fitted curve
#------------------------------------------------------------
# First figure: plot points with a linear fit
fig = plt.figure(figsize=(5, 3.75))
ax = fig.add_subplot(111)
ax.scatter(x, y, marker='x', c='k', s=30)
# degree-1 (straight line) least-squares fit, evaluated on the dense grid
p = np.polyfit(x, y, 1)
y_fit = np.polyval(p, x_fit)
ax.text(0.03, 0.96, "d = 1", transform=plt.gca().transAxes,
        ha='left', va='top',
        bbox=dict(ec='k', fc='w', pad=10))
ax.plot(x_fit, y_fit, '-b')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
plt.show()
| bsd-2-clause |
kentwait/bioseq | bioseq/arrays.py | 1 | 50873 | from .basetypes import *
import os
import numpy as np
import re
from collections.abc import MutableMapping
from collections import OrderedDict, Counter
import collections
from copy import deepcopy
from scipy.sparse import lil_matrix
import pandas as pd
# Public API of this module.
__all__ = ['SEQTYPES', 'validate_sequence_chars',
           'SequenceArray', 'NucleotideArray', 'ProteinArray', 'CodonArray',
           'SequenceAlignment', 'NucleotideAlignment', 'ProteinAlignment', 'CodonAlignment']

# Recognized sequence types: nucleotide, protein, codon.
SEQTYPES = ('nucl', 'prot', 'cod')
def validate_sequence_chars(seq, seqtype='nucl'):
    """Check that ``seq`` contains only characters valid for ``seqtype``.

    Parameters
    ----------
    seq : str
        Sequence string to validate.
    seqtype : str
        'nucl' (nucleotide), 'prot' (protein), or 'cod' (codon).

    Returns
    -------
    str
        The validated sequence, unchanged.

    Raises
    ------
    TypeError
        If ``seq`` is not a string.
    ValueError
        If ``seqtype`` is unknown or ``seq`` contains invalid characters.
    """
    # Raise real exceptions instead of `assert`: asserts disappear under
    # `python -O`, and the original passed exception *instances* as the
    # assert message, which were never actually raised.
    if not isinstance(seq, str):
        raise TypeError('seq should be str or string-like.')
    if seqtype not in SEQTYPES:
        raise ValueError('seqtype must be "nucl" for nucleotide, "prot" for protein, '
                         'or "cod" for codon.')
    # Character class built from the alphabet constants; '-' (gap) is valid
    # only if present in those constants.
    pattern = '[^{}]'.format(AMINO_ACIDS if seqtype == 'prot' else BASES)
    invalid_chars = set(re.findall(pattern, seq, re.IGNORECASE))
    if len(invalid_chars) > 0:
        raise ValueError('Sequence contains invalid characters: {}. Check sequence type or individual sequences.'
                         .format(repr(invalid_chars)))
    return seq
class SequenceArray(MutableMapping):
    """
    Multiple sequence array object constructor

    Stores one or more biological sequences as a set of id (key) - sequence string (value) pair based on its original
    input order by using an OrderedDict container.
    SequenceArray can be instantiated by passing a dictionary-like object whose keys are names or descriptions of their
    corresponding sequence string value. The string value can be a nucleotide sequence (nucl), codon sequence (cod),
    or protein sequence (prot). Note that the string value type, whether 'nucl', 'prot', or 'cod', must be the same
    for all items in the SequenceArray.
    """
    def __init__(self, input_obj, seqtype='nucl', name='', description='', validate=False):
        """Create a new SequenceArray object from a dictionary, file or FASTA-formatted string.

        Parameters
        ----------
        input_obj : dict or str
            Object used to populate a SequenceArray object. This may be one of the following:
            - Dictionary-like object whose id are sequence record names and values are the corresponding sequences
            - Path to a FASTA file
            - FASTA-formatted string
        seqtype : str
            'nucl' (Nucleotide), 'prot' (Protein), 'cod' (Codon-based)
        name : str
            Name of the set of sequence records
        description : str
            Short description
        validate : bool
            If True, check sequence characters against the seqtype alphabet
            (no check is performed for 'cod').
        """
        # NOTE(review): assert used for validation — stripped under -O.
        assert seqtype in SEQTYPES, ValueError('seqtype must be "nucl" for nucleotide, "prot" for protein, '
                                               'or "cod" for codon.')
        self.seqtype = seqtype
        self.name = name
        self.description = description
        # Key-value pairs where id is the sequence record name and the value is the sequence
        if isinstance(input_obj, dict):
            records = input_obj
            self._ids = list(records.keys())
            self._sequences = list(records.values())
        # String, assumed to be path to FASTA file or contents of FASTA file as a long string
        elif isinstance(input_obj, str):
            # Test if file path
            if os.path.exists(input_obj):
                records = SequenceArray.parse_fasta(input_obj, seqtype=seqtype)
                # self._ids = [key.split(' ')[0] for key in records.keys()]
                self._ids = list(records.keys())  # preprocessing already done by parse_fasta method
                self._sequences = list(records.values())
            else:
                raise NotImplementedError('Passing FASTA-formatted strings are not yet supported. '
                                          'Instantiate using an OrderedDict or passing a valid filepath instead.')
        if validate:
            # Check if sequences contain invalid characters
            if seqtype in ('nucl', 'prot'):
                self._sequences = [validate_sequence_chars(_, seqtype=seqtype) for _ in self._sequences]
            else:  # codon seqtype: no character validation performed
                pass

    # ids and sequences are read-only properties; mutation goes through the
    # mapping interface (__setitem__ / __delitem__).
    @property
    def ids(self):
        return self._ids

    @ids.setter
    def ids(self, value):
        raise AttributeError('Setting ids using this method is not permitted.')

    @ids.deleter
    def ids(self):
        raise AttributeError('Deleting ids using this method is not permitted.')

    @property
    def sequences(self):
        return self._sequences

    @sequences.setter
    def sequences(self, value):
        raise AttributeError('Setting sequences using this method is not permitted.')

    @sequences.deleter
    def sequences(self):
        raise AttributeError('Deleting sequences using this method is not permitted.')

    def __setitem__(self, key, sequence):
        # NOTE(review): appends unconditionally — assigning to an existing
        # key creates a duplicate id instead of replacing the value; confirm
        # this is intended for a MutableMapping.
        self.ids.append(key)
        self.sequences.append(sequence)

    def __getitem__(self, keys):
        # A non-string iterable of keys returns a list of sequences; a
        # single key returns one sequence.
        # NOTE(review): collections.Iterable is deprecated since Python 3.3
        # and removed in 3.10; collections.abc.Iterable is the modern name.
        if isinstance(keys, collections.Iterable) and not isinstance(keys, str):
            return_list = []
            for key in keys:
                if key in self.ids:
                    return_list.append(self.sequences[self.ids.index(key)])
                else:
                    raise KeyError('Key "{0}" does not exist'.format(key))
            return return_list
        else:
            key = keys
            if key in self.ids:
                return self.sequences[self.ids.index(key)]
            else:
                raise KeyError('Key "{0}" does not exist'.format(key))

    def __delitem__(self, key):
        if key in self.ids:
            index = self.ids.index(key)
            self.ids.remove(key)
            self.sequences.pop(index)
        else:
            raise KeyError('Key "{0}" does not exist'.format(key))

    def __iter__(self):
        # NOTE(review): yields (key, sequence) pairs, unlike a standard
        # Mapping which iterates keys only — items()/values() below restore
        # the conventional behavior.
        for key, sequence in zip(self.ids, self.sequences):
            yield (key, sequence)

    def __len__(self):
        return len(self.ids)

    def __repr__(self):
        return 'SequenceArray({0})'.format([(k, v) for k, v in zip(self.ids, self.sequences)])

    def __contains__(self, key):
        return True if key in self.ids else False

    def keys(self):
        # Generator over record ids, preserving insertion order.
        for _ in self.ids:
            yield _

    def values(self):
        # Generator over sequences, preserving insertion order.
        for _ in self.sequences:
            yield _

    def items(self):
        # Generator over (id, sequence) pairs, preserving insertion order.
        for x in range(len(self.ids)):
            yield (self.ids[x], self.sequences[x])

    def to_fasta(self, path, linewidth=60):
        """Save sequence array as a FASTA file

        Parameters
        ----------
        path : str
            Filename/path of FASTA file
        linewidth : int
            Line width of FASTA file
        """
        with open(path, 'w') as f:
            print(SequenceArray.array_to_fasta(self.ids, self.sequences, linewidth=linewidth), file=f)

    def align(self, aln_file='out.aln', program='muscle', program_args=None):
        """Calls an external alignment program to align the sequences in the sequence array.

        Parameters
        ----------
        aln_file : str
            Filename/path of resulting multiple sequence alignment
        program : str
            External program to be called for multiple sequence alignment. Currently supported programs are
            'muscle' (Muscle), 'mafft' (MAFFT), 'clustalw' (ClustalW), 'clustalo' (Clustal Omega), 'prank' (PRANK).
            To ensure that this method works properly, make sure that these programs are installed and
            accessible from the system's PATH.
        program_args : str
            Additional user-specified arguments

        Returns
        -------
        SequenceAlignment
        """
        # check if program is in choices or not. if not return an error
        choices = ['muscle', 'mafft', 'clustalo']
        assert program in choices, Exception('Program not supported. Choose from the following: \
        "muscle", "mafft", "clustalw", "clustalo", "prank"')
        # Write the SequenceArray object to file
        seqfile = '{0}.{1}'.format(self.name, 'fna' if self.seqtype == 'nucl' else 'faa')
        self.to_fasta(seqfile)
        # TODO : extract program paths into variables so that users can alter at runtime
        # Default to MUSCLE
        if program == 'mafft':
            cmd_str = 'mafft {args} {i} > {o}'.format(args='--auto' if not program_args else program_args,
                                                      i=seqfile, o=aln_file)
        elif program == 'clustalw':  # TODO : ClustalW hook
            raise Exception('ClustalW support is in development.')
        elif program == 'clustalo':
            cmd_str = 'clustalo -i {i} -o {o} '.format(i=seqfile, o=aln_file)
        elif program == 'prank':  # TODO : PRANK hook
            raise Exception('PRANK support is in development.')
        else:
            # Default to MUSCLE
            cmd_str = 'muscle {args}-in {i} -out {o}'.format(args='' if not program_args else program_args+' ',
                                                             i=seqfile, o=aln_file)
        # TODO : change to subprocess
        os.system(cmd_str)
        if self.seqtype == 'nucl':
            return NucleotideAlignment(aln_file)
        elif self.seqtype == 'prot':
            return ProteinAlignment(aln_file)

    @staticmethod
    def parse_fasta(path, seqtype='nucl', upper=True):
        """Read FASTA format entirely using only built-ins.

        Parameters
        ----------
        path : str
            File path (absolute or relative) where the FASTA file is located.
        seqtype : str
            'nucl' (Nucleotide), 'prot' (Protein), 'cod' (Codon-based)
        upper : bool
            If True, sequences are upper-cased.

        Returns
        -------
        OrderedDict
            FASTA headers are stored as dictionary keys and its corresponding sequence is stored as its value.
        """
        keys = []
        sequence = ''
        sequences = []
        # Captures the description up to the first whitespace character.
        line_id_re = re.compile('^>(.+)[\s\n]')
        with open(path, 'r') as f:
            for i, line in enumerate(f.readlines()):
                if line.startswith('>'):
                    # TODO : remove when validated to work
                    # line_id = line[1:-1] # uses the entire description line
                    _match = line_id_re.search(line)  # uses only the string before space
                    # NOTE(review): bare except hides any error, not just a
                    # failed match; except AttributeError would be narrower.
                    try:
                        line_id = _match.group(1)
                    except:
                        raise ValueError('Malformed description line <line {} of {}>'.format(i, path))
                    keys.append(line_id)
                    # Flush the sequence accumulated for the previous record.
                    if sequence:
                        if upper:
                            sequence = sequence.upper()
                        sequences.append(sequence)
                        sequence = ''
                else:
                    sequence += re.sub('\s', '', line.upper())
        # Flush the final record.
        if sequence:
            if upper:
                sequence = sequence.upper()
            sequences.append(sequence)
        return SequenceArray(OrderedDict(zip(keys, sequences)), seqtype=seqtype)

    @staticmethod
    def array_to_fasta(keys, sequences, linewidth=60):
        """Converts a sequence array to a FASTA-formatted string

        Parameters
        ----------
        keys : list
            List of record names
        sequences : list
            List of sequences (list of lists or 2d ndarray)
        linewidth : int
            Number of characters per line

        Returns
        -------
        str
            FASTA-formatted string
        """
        fasta_str = ''
        for key, sequence in zip(keys, sequences):
            sequence = ''.join(sequence)
            header = '>{0}'.format(key)
            fasta_str += header + '\n'
            for i in range(0, len(sequence), linewidth):
                curr_line = sequence[i:i+linewidth]
                fasta_str += curr_line + '\n'
        return fasta_str

    @staticmethod
    def composition(sequence_obj, seqtype='nucl'):
        """Return the per sequence composition of a sequence array

        Parameters
        ----------
        sequence_obj : SequenceArray
        seqtype : str

        Returns
        -------
        OrderedDict
            Keys are sequence ids and values are OrderedDict of the corresponding percent makeup for each character
            except gaps

        Notes
        -----
        The return value is an OrderedDict of OrderedDicts. First level is indexed by sequence while second level is
        indexed by character.

        >>> seq_array = OrderedDict([('key1', 'AAA'), ('key2', 'TTT')])
        >>> composition_of_seq_array = OrderedDict([ \
        ('key1', OrderedDict([('T', 0), ('C', 0), ('A', 3/float(3)), ('G', 0)])), \
        ('key2', OrderedDict([('T', 3/float(3)), ('C', 0), ('A', 0), ('G', 0)])), ])
        >>> seq_array['key1']
        'AAA'
        >>> composition_of_seq_array['key1']
        OrderedDict([('T', 0), ('C', 0), ('A', 1.0), ('G', 0)])
        >>> composition_of_seq_array['key1']['A']
        1.0
        """
        # assert re.search('^[ATCG\-]+$', sequence), 'Input sequence contains characters other than A,T,C,G,-'
        composition_of = OrderedDict()
        characters = BASES if seqtype in ['nucl', 'cod'] else AMINO_ACIDS
        for seqid, sequence in zip(sequence_obj.ids, sequence_obj.sequences):
            char_counts = Counter(sequence.upper())
            # Gaps ('-') are excluded from the denominator.
            total = sum([v for k, v in char_counts.items() if k != '-'])
            composition_of[seqid] = OrderedDict([(k, char_counts[k]/float(total)) for k in characters])
        # return pd.DataFrame(composition_of)
        return composition_of
class NucleotideArray(SequenceArray):
    """
    Nucleotide sequence array object constructor

    This is a special type of SequenceArray for nucleotide sequences containing additional methods specific for
    handling nucleotide sequence data. On instantiation, it constructs a SequenceArray object whose seqtype is set to
    'nucl'.
    NucleotideArray is suitable for both protein-coding and non-protein coding nucleotide sequences. However,
    if sequences are protein-coding, it is better to use the CodonArray object as this contains methods useful
    for protein-coding sequences such as the ability count by nucleotide triplets and to translate to amino acid
    sequences.
    If the array contains in-frame protein-coding sequence, NucleotideArray can construct a CodonArray using the method
    `to_codonarray`. However, NucleotideArray cannot differentiate by itself whether a sequence is coding or
    non-coding, and if coding, whether it is in-frame or not, therefore it is up to the user to judge
    whether it is appropriate to represent the sequences as plain nucleotides through NucleotideArray or as
    protein-coding sequences through CodonArray.
    """
    def __init__(self, input_obj, name='', description='', validate=False):
        """Create a new NucleotideArray from a dictionary, file or FASTA-formatted string.

        Parameters
        ----------
        input_obj : dict, str
            Object used to populate a SequenceArray object. This may be one of the following:
            - Dictionary-like object whose id are sequence record names and values are the corresponding sequences
            - Path to a FASTA file
            - FASTA-formatted string
        name : str
            Name of the set of sequence records
        description : str
            Short description
        """
        super().__init__(input_obj, name=name, seqtype='nucl', description=description, validate=validate)

    def to_codonarray(self):
        """Create a CodonArray from the current NucleotideArray

        Returns
        -------
        CodonArray
        """
        # deepcopy so the CodonArray does not share mutable state with this array.
        return CodonArray(deepcopy(self), name=self.name, description=self.description)

    def basecomp(self):
        """Return base composition of each sequence

        Returns
        -------
        OrderedDict
            Keys are sequence ids and values are OrderedDict of the corresponding percent makeup for each character
            except gaps. For example, 'T', 'C', 'A', 'G' for nucleotides, plus combined 'AT' and 'GC' fractions.

        See also
        --------
        SequenceArray.composition : Character composition of a sequence
        """
        basecomp_of = super().composition(self, seqtype=self.seqtype)
        for key, comp in basecomp_of.items():
            basecomp_of[key]['AT'] = comp['A'] + comp['T']
            basecomp_of[key]['GC'] = comp['G'] + comp['C']
        return basecomp_of

    @staticmethod
    def nucleotide_to_codon(nucleotide_str):
        """Converts a nucleotide triplet into its corresponding codon

        Parameters
        ----------
        nucleotide_str : str or sequence
            Nucleotide sequence (str or list); length must be a multiple of three.

        Yields
        ------
        str
            3-character string codon

        Raises
        ------
        ValueError
            If the sequence length is not a multiple of three.
        """
        if len(nucleotide_str) % 3 != 0:
            raise ValueError('SequenceArray length is not a multiple of three ({0}).'.format(len(nucleotide_str)))
        for j in range(0, len(nucleotide_str), 3):
            if j+3 <= len(nucleotide_str):
                yield nucleotide_str[j:j+3]
class CodonArray(SequenceArray):
    """
    Protein-coding nucleotide sequence array object constructor

    This is a special type of SequenceArray for protein-coding sequences. If the array contains
    in-frame protein-coding sequence, NucleotideArray contains methods to represent data as codons (nucleotide triplets)
    and translate to protein sequence. Note that NucleotideArray cannot differentiate by itself whether a sequence
    is coding or non-coding, therefore it is up to the user to judge whether it is appropriate to use these methods on
    the data.
    """
    def __init__(self, input_obj, name='', description='', validate=False):
        """Create a new CodonArray from a dictionary, file or FASTA-formatted string.

        Parameters
        ----------
        input_obj : dict or str
            Object used to populate a SequenceArray object. This may be one of the following:
            - Dictionary-like object whose id are sequence record names and values are the corresponding sequences
            - Path to a FASTA file
            - FASTA-formatted string
        name : str
            Name of the set of sequence records
        description : str
            Short description
        """
        super().__init__(input_obj, name=name, seqtype='cod', description=description, validate=validate)
        # self.pos[k] holds the k-th codon position (1-, 2-, 3-) of every
        # sequence as a NucleotideArray, built by slicing with step 3.
        self.pos = OrderedDict()
        self.pos[1] = NucleotideArray(
            OrderedDict([(seqid, ''.join(sequence_as_list))
                         for seqid, sequence_as_list in zip(self.ids,
                                                            map(lambda x: x[0::3], self.sequences))]))
        self.pos[2] = NucleotideArray(
            OrderedDict([(seqid, ''.join(sequence_as_list))
                         for seqid, sequence_as_list in zip(self.ids,
                                                            map(lambda x: x[1::3], self.sequences))]))
        self.pos[3] = NucleotideArray(
            OrderedDict([(seqid, ''.join(sequence_as_list))
                         for seqid, sequence_as_list in zip(self.ids,
                                                            map(lambda x: x[2::3], self.sequences))]))

    @property
    def translated(self):
        """Translates nucleotide sequences into amino acid sequences

        Assumes that the nucleotide sequence is protein-coding, in-frame, and the start of the ORF corresponds
        to the beginning of the nucleotide sequence.

        Returns
        -------
        ProteinArray
        """
        translated_seqarray = OrderedDict()
        for key, nt_seq in zip(self.ids, self.sequences):
            translated_seqarray[key] = ''.join([GENETIC_CODE[cod.upper()]
                                                for cod in NucleotideArray.nucleotide_to_codon(nt_seq)])
        return ProteinArray(translated_seqarray)

    def codonalign(self, codon_aln_file='out.ffn.aln', program='muscle'):
        """Aligns by codons using a protein alignment generated by an external program

        Assumes the sequence is in-frame and is a coding sequences. First, the sequences are translated into proteins,
        which are aligned by an external program. The resulting protein alignment is used as an anchor to align
        nucleotide sequences.

        Parameters
        ----------
        codon_aln_file : str
            File path of resulting codon-aligned multiple sequence alignment
        program : str or path
            External program to be called to align translated protein sequences. Currently supported
            programs are 'muscle' (Muscle), 'mafft' (MAFFT), 'clustalw' (ClustalW), 'clustalo' (Clustal Omega),
            'prank' (PRANK)

        Returns
        -------
        CodonAlignment
            codon-based multiple sequence alignment.
        """
        if self.seqtype != 'cod':
            raise Exception('Seqtype must be "cod" (codon) to perform codon alignment.')
        for i, sequence in enumerate(self.sequences):
            if len(sequence) % 3 != 0:
                raise ValueError('"{0}" sequence length is not a multiple of three ({1}).'
                                 .format(self.ids[i], len(sequence)))
        # check if program is in choices or not. if not return an error
        choices = ['muscle', 'mafft', 'clustalo']
        assert program in choices, 'Program not supported. Choose from the following: \
        "muscle", "mafft", "clustalw", "clustalo", "prank"'
        # Write translated SequenceArray object to file
        transl_seqfile = '{0}.transl.{1}'.format(self.name, 'faa')
        self.translated.to_fasta(transl_seqfile)
        # Align protein sequences
        aa_aln = self.translated.align(aln_file=codon_aln_file, program=program)
        # Adjust codons based on amino acid alignment: each aligned residue
        # consumes one codon from the generator; gaps become '---'.
        codon_aln = OrderedDict()
        i = 0
        for nt_seq, aa_aln_seq in zip(self.sequences, aa_aln.sequences):
            codon = NucleotideArray.nucleotide_to_codon(nt_seq)
            codon_aln[self.ids[i]] = ''.join([next(codon) if aa != '-' else '---' for aa in list(aa_aln_seq)])
            i += 1
        codon_aln = CodonAlignment(codon_aln)
        codon_aln.to_fasta(codon_aln_file)
        return codon_aln

    def basecomp(self):
        """Return base composition of each sequence

        Returns
        -------
        OrderedDict
            Keys are sequence ids and values are OrderedDict of the corresponding percent makeup for each character
            except gaps. Besides the whole-sequence 'T','C','A','G','AT','GC' fractions, per-codon-position entries
            are added with a 1/2/3 suffix (e.g. 'A1', 'GC3').

        See also
        --------
        SequenceArray.composition : Character composition of a sequence
        """
        all_basecomp_of = super().composition(self, seqtype=self.seqtype)
        for key, comp in all_basecomp_of.items():
            all_basecomp_of[key]['AT'] = comp['A'] + comp['T']
            all_basecomp_of[key]['GC'] = comp['G'] + comp['C']
        # Composition computed separately for each codon position array.
        pos_basecomp_of = {1: super().composition(self.pos[1], seqtype=self.seqtype),
                           2: super().composition(self.pos[2], seqtype=self.seqtype),
                           3: super().composition(self.pos[3], seqtype=self.seqtype),
                           }
        for pos, basecomp_of in pos_basecomp_of.items():
            for key, comp in basecomp_of.items():
                for base in BASES:
                    all_basecomp_of[key][base + str(pos)] = basecomp_of[key][base]
                all_basecomp_of[key]['AT' + str(pos)] = comp['A'] + comp['T']
                all_basecomp_of[key]['GC' + str(pos)] = comp['G'] + comp['C']
        return all_basecomp_of

    # TODO : Subclass string and tuple to create nucl, prot, cod datatypes
    @staticmethod
    def icod(nucl_seq, pos):
        """Retrieves the codon at the specified codon position from a nucleotide sequence

        Parameters
        ----------
        nucl_seq : str
            In-frame coding sequence; length must be a multiple of three.
        pos : int
            Zero-based codon index.

        Returns
        -------
        str
            codon string of length 3
        """
        # NOTE(review): assert used for validation — stripped under -O; the
        # exception instances serve only as assertion messages.
        assert len(nucl_seq) % 3 == 0, ValueError('Sequence is not in multiples of three.')
        assert isinstance(pos, int), ValueError('Position should be an integer value.')
        return nucl_seq[3*pos:(3*pos)+3]
class ProteinArray(SequenceArray):
    """Container for a set of protein (amino acid) sequence records."""
    def __init__(self, input_obj, name='', description='', validate=False):
        """Create a new ProteinArray from a dictionary, file or FASTA-formatted string.
        Parameters
        ----------
        input_obj : tuple or dict or str
            Object used to populate a SequenceArray object. This may be one of the following:
            - Dictionary-like object whose id are sequence record names and values are the corresponding sequences
            - Path to a FASTA file
            - FASTA-formatted string
        name : str
            Name of the set of sequence records
        description : str
            Short description
        validate : bool
            Forwarded to SequenceArray.__init__; presumably enables validation
            of the input sequences on load — TODO confirm against SequenceArray.
        """
        # Delegate to SequenceArray with the seqtype fixed to 'prot'.
        super().__init__(input_obj, name=name, seqtype='prot', description=description, validate=validate)
    def aacomp(self):
        # Amino acid composition of each record; delegates to the
        # SequenceArray.composition static method using this array's seqtype.
        return super().composition(self, seqtype=self.seqtype)
class SequenceAlignment(MutableMapping):
    """
    Multiple sequence alignment base class
    The object specified by this class is a combination of a list of sequence names and a 2d numpy ndarray that
    represents the alignment. Thus, each sequence record is a key-value pairing of a sequence name and its corresponding
    sequence in the ndarray.
    Records can be accessed by its key like a dictionary. In addition, multiple records can be accessed simultaneously
    by passing a list. Record values are returned as a numpy ndarray based on the order of keys passed.
    Methods that permutate the alignment other than adding or deleting records will return a new instance of the
    alignment. No in-place changes take place when using these methods.
    This is the base class for NucleotideAlignment, ProteinAlignment and CodonAlignment.
    """
    def __init__(self, input_obj, seqtype, charsize=1, name='', description=''):
        # TODO : accept FASTA-formatted string
        """Create a new SequenceAlignment from a tuple, dictionary, or file.
        Parameters
        ----------
        input_obj : tuple or dict or str
            Alignment objects can be instantiated by passing one of the following:
            - Tuple of list of record names and ndarray of sequences
            - Dictionary-like object
            - File path (absolute or relative)
        seqtype : str
            'nucl' (Nucleotide), 'prot' (Protein), 'cod' (Codon-based)
        charsize : int
            Number of characters that define a column of the alignment.
        name : str
            Name of the set of sequence records
        description : str
            Short description
        Raises
        ------
        TypeError
            If tuple items or dictionary values are of the wrong type.
        ValueError
            If sequence lengths are unequal, no sequences are found, or the
            alignment length is not divisible by `charsize`.
        """
        # Description
        self.seqtype = seqtype
        self.name = name
        self.description = description
        self.charsize = charsize
        # Tuple containing an ordered list of ids and a numpy array of the sequence
        if isinstance(input_obj, tuple):
            if len(input_obj) == 2:
                if isinstance(input_obj[0], list):
                    self._ids = input_obj[0]
                else:
                    raise TypeError('First item of tuple is not a list.')
                # TODO : obviate the need for importing numpy, possibly by forking tinynumpy to enable character arrays
                if isinstance(input_obj[1], np.ndarray):
                    self._sequences = input_obj[1]
                else:
                    raise TypeError('Second item in tuple is not a numpy array.')
            else:
                # Previously a wrong-sized tuple fell through silently and the
                # instance was left without _ids/_sequences.
                raise ValueError('Tuple input must contain exactly two items: '
                                 'a list of ids and a numpy array.')
        else:
            list_of_sequences = []
            # dictionary of id as index and sequence stored as str as the value
            if isinstance(input_obj, dict) or isinstance(input_obj, MutableMapping):
                self._ids = []
                for k, v in input_obj.items():
                    if not isinstance(v, str):
                        raise TypeError('SequenceArray "{0}" is not a string.'.format(k))
                    self._ids.append(k)
                    list_of_sequences.append(v)
                # Check if sequences are of equal length
                if len(set(len(s) for s in list_of_sequences)) > 1:
                    raise ValueError('Unequal sequence lengths.')
            # String path to FASTA file
            elif isinstance(input_obj, str):
                # Check if it is a valid path, and the file exists
                if os.path.exists(input_obj):
                    # Parse FASTA file
                    fasta_dct = SequenceArray.parse_fasta(input_obj, seqtype=seqtype)
                    # Store record ID as list
                    self._ids = list(fasta_dct.keys())
                    list_of_sequences = list(fasta_dct.values())
                # TODO : Test if the string is in FASTA format
                else:
                    raise Exception('Passing FASTA-formatted strings are not yet supported. '
                                    'Instantiate using an OrderedDict or passing a valid filepath instead.')
            if not list_of_sequences:
                raise ValueError('No sequences were found in the input.')
            # Check if length is divisible by charsize. Raise instead of assert
            # so the check survives `python -O`; the old message hard-coded "3"
            # even when charsize was 1.
            if len(list_of_sequences[0]) % self.charsize != 0:
                raise ValueError('Alignment length is not divisible by {0} ({1} characters)'
                                 .format(self.charsize, len(list_of_sequences[0])))
            # Store sequences as a numpy array. Order of array rows corresponds
            # to the order of IDs in the record list. The dtype is derived from
            # charsize instead of hard-coding 'U1'/'U3'.
            self._sequences = np.array(
                [[seq[j:j+self.charsize] for j in range(0, len(seq), self.charsize) if j+self.charsize <= len(seq)]
                 for seq in list_of_sequences], dtype='U{0}'.format(self.charsize))
        # Size of alignment, snapshotted at construction (kept in sync by
        # __setitem__/__delitem__ below).
        self.count = len(self)
        self.shape = self.sequences.shape  # number of samples, number of units (bases/aa/codons), chars per unit
        self.length = self.sequences.shape[1]
    # TODO : Make a "restricted" descriptor for any type of attribute that should not be changed outside of __init__
    # Restrict setting "ids" attribute outside of __init__
    @property
    def ids(self):
        """list: record names in alignment row order (read-only)."""
        return self._ids
    @ids.setter
    def ids(self, value):
        raise AttributeError('Setting ids using this method is not permitted.')
    @ids.deleter
    def ids(self):
        raise AttributeError('Deleting ids using this method is not permitted.')
    # Restrict setting "sequences" attribute outside of __init__
    @property
    def sequences(self):
        """np.ndarray: 2-D array of alignment units (read-only)."""
        return self._sequences
    @sequences.setter
    def sequences(self, value):
        raise AttributeError('Setting sequences using this method is not permitted.')
    @sequences.deleter
    def sequences(self):
        raise AttributeError('Deleting sequences using this method is not permitted.')
    def __setitem__(self, key, value):
        """Add a new sequence record.
        Parameters
        ----------
        key: str
            Record name; must not already exist.
        value: str or sequence of characters
            Sequence to add. Its length must match the alignment length.
        Raises
        ------
        KeyError
            If `key` is already present.
        ValueError
            If the new sequence's length does not match the alignment length.
        """
        if key in self.ids:
            raise KeyError('Key name {0} already in use.'.format(key))
        else:
            # Check if it has the same length as the number of cols of the current array
            if len(value) == self.length:
                # Mutate the private attributes directly: assigning through the
                # `ids`/`sequences` properties raises AttributeError by design,
                # which previously made this method unusable.
                self._ids.append(key)
                self._sequences = np.vstack(
                    [self._sequences, [value[j:j+self.charsize] for j in range(0, len(value), self.charsize)
                                       if j+self.charsize <= len(value)]])
                # Keep the size snapshots consistent with the new contents.
                self.count = len(self)
                self.shape = self._sequences.shape
            else:
                raise ValueError('New sequence length {0} does not match alignment length {1}.'
                                 .format(len(value), self.length))
    def __getitem__(self, keys):
        """Given a key or list of keys, retrieve a record or multiple records
        Parameters
        ----------
        keys : str or list
            Record name or list of record names
        Returns
        -------
        np.ndarray
            Sequence ndarray, or multiple sequence alignment ndarray if a list of keys was passed
        """
        # `collections.Iterable` was removed in Python 3.10; use collections.abc.
        if isinstance(keys, collections.abc.Iterable) and not isinstance(keys, str):
            index_list = []
            for key in keys:
                if key in self.ids:
                    index_list.append(self.ids.index(key))
                else:
                    # KeyError for consistency with the single-key branch below.
                    raise KeyError('Key "{0}" does not exist'.format(key))
            return self.sequences[index_list]
        else:
            key = keys
            if key in self.ids:
                index = self.ids.index(key)
            else:
                raise KeyError('Key "{0}" does not exist'.format(key))
            return self.sequences[index]
    def __delitem__(self, key):
        """Remove a record by name, raising KeyError if absent."""
        if key in self.ids:
            index = self.ids.index(key)
            self._ids.remove(key)
            # Assign the private attribute: the `sequences` property setter
            # raises AttributeError, which previously made deletion impossible.
            self._sequences = np.delete(self._sequences, index, axis=0)  # axis=0 drops the row
            self.count = len(self)
            self.shape = self._sequences.shape
        else:
            raise KeyError('Key "{0}" does not exist'.format(key))
    def __iter__(self):
        # NOTE: unlike a dict, iteration yields (key, sequence) pairs rather
        # than keys alone; keys()/values()/items() below provide the usual
        # mapping views directly.
        for key, sequence in zip(self.ids, self.sequences):
            yield key, sequence
    def __len__(self):
        # Return the number of samples in the alignment
        return len(self.ids)
    def __repr__(self):
        return 'keys({0})\n{1}'.format(repr(self.ids), repr(self.sequences))
    def __add__(self, other):
        """Concatenate two alignments column-wise; keys and seqtypes must match."""
        # Check if self.ids and other.ids match
        if set(self.ids) != set(other.ids):
            raise KeyError('Keys do not match.')
        if self.seqtype != other.seqtype:
            raise ValueError('Seqtypes do not match.')
        # Reorder `other` rows to this alignment's key order before joining.
        other_order = [other.ids.index(key) for key in self.ids]
        return type(self)(
            MSA(ids=self.ids, alignment=np.concatenate((self.sequences, other.sequences[other_order]), axis=1)),
            self.seqtype)
    def __iadd__(self, other):
        # `+=` rebinds to a new object; no in-place mutation happens.
        return self + other
    def keys(self):
        for _ in self.ids:
            yield _
    def values(self):
        for _ in self.sequences:
            yield _
    def items(self):
        for x in range(len(self.ids)):
            yield (self.ids[x], self.sequences[x])
    def head(self):
        """Retrieves the first 5 entries of the sequence alignment
        Returns
        -------
        SequenceAlignment
            Creates a subset of the current SequenceAlignment containing only the first five entries.
        """
        return type(self)(MSA(ids=self.ids[:5], alignment=self.sequences[:5]), self.seqtype)
    def tail(self):
        """Retrieves the last 5 entries of the sequence alignment
        Returns
        -------
        SequenceAlignment
            Creates a subset of the current SequenceAlignment containing only the last five entries.
        """
        return type(self)(MSA(ids=self.ids[-5:], alignment=self.sequences[-5:]), self.seqtype)
    # noinspection PyTypeChecker
    def colx(self, *args):
        """Returns a length-wise (column range) subset of the alignment
        Parameters
        ----------
        args: int
            Inclusive start and exclusive end position of the subset. Follows Python slice conventions.
        Returns
        -------
        SequenceAlignment
            Create a subset of the current SequenceAlignment containing only the specified column range.
        """
        if len(args) == 1:
            return type(self)(MSA(ids=self.ids, alignment=self.sequences[:, args[0]]), self.seqtype)
        elif len(args) == 2:
            return type(self)(MSA(ids=self.ids, alignment=self.sequences[:, args[0]:args[1]]), self.seqtype)
        elif len(args) == 3:
            return type(self)(MSA(ids=self.ids, alignment=self.sequences[:, args[0]:args[1]:args[2]]),
                              self.seqtype)
        else:
            raise Exception('Method uses 3 integer arguments at most.')
    def subset(self, keys):
        """Returns a row-wise (by sample) subset of the current alignment
        Parameters
        ----------
        keys : str or list
            Record name or list of record names
        Returns
        -------
        SequenceAlignment
            Create a subset of the current SequenceAlignment containing only the specified records.
        """
        return type(self)(MSA(ids=keys, alignment=self[keys]), self.seqtype)
    def labelpartition(self, label, start, end, coding=True):
        # `raise` was missing: the original built a NotImplementedError
        # instance without raising it, so calls silently did nothing.
        raise NotImplementedError()
    def xgap(self, all=False):
        """Remove columns containing gap character from the current alignment
        Parameters
        ----------
        all : bool
            If True, removes a column only when whole column is gapped.
            If False, removes a column even when only one gap character is present
        Returns
        -------
        SequenceAlignment
            Creates a new SequenceAlignment free of gap characters.
        """
        gapchar = '-'*self.charsize
        xgap_cols = []
        for i in range(self.length):
            if all:
                # noinspection PyTypeChecker
                if not np.all(gapchar == self.sequences[:, i]):
                    xgap_cols.append(i)
            else:
                # noinspection PyTypeChecker
                if not np.any(gapchar == self.sequences[:, i]):
                    xgap_cols.append(i)
        return type(self)(MSA(ids=self.ids, alignment=self.sequences[:, xgap_cols]), self.seqtype)
    def resample_cols(self):
        """Creates a new SequenceAlignment using resampled alignment columns with replacement from the current data
        Returns
        -------
        SequenceAlignment
        """
        randlist = np.random.choice(self.length, self.length, replace=True)
        return type(self)(MSA(ids=self.ids, alignment=self.sequences[:, randlist]), self.seqtype)
    def reorder(self, ordered_key_list):
        """Reorder alignment based on the order of a given list of keys
        Parameters
        ----------
        ordered_key_list : list
            List of record names arranged based on how samples should be reordered
        Returns
        -------
        SequenceAlignment
            Reordered alignment
        """
        sequences = []
        for key in ordered_key_list:
            index = self.ids.index(key)
            sequences.append(self.sequences[index])
        return type(self)(MSA(ids=ordered_key_list, alignment=np.array(sequences)), self.seqtype)
    def to_fasta(self, path, linewidth=60):
        """Save the alignment as a FASTA-formatted file
        Parameters
        ----------
        path : str
            Filename/path of FASTA file
        linewidth : int
            Number of characters per line
        """
        # TODO : Check if basedir of path exists
        with open(path, 'w') as f:
            print(self.__class__.alignment_to_fasta(self, linewidth=linewidth), file=f)
    def to_phylip(self, path):  # TODO
        # `raise` was missing here too (see labelpartition).
        raise NotImplementedError()
    def pssm(self):
        """
        Position-specific scoring matrix of the alignment
        Returns
        -------
        pd.DataFrame
            One row per alignment column; one column per character; values are
            (possibly fractional) character counts, with degenerate bases
            distributed evenly among the bases they represent.
        """
        # TODO : update for codon
        if self.seqtype == 'nucl':
            characters = list(BASES)
            characters.append('-')
        elif self.seqtype == 'prot':
            characters = list(AMINO_ACIDS)
            characters.append('X')
            characters.append('-')
        elif self.seqtype == 'cod':
            characters = list(CODONS)
            characters.append('---')
        else:
            raise ValueError()
        pssm_sparse = lil_matrix((self.length, len(characters)))
        for i in range(self.length):
            seq = np.array(list(map(str.upper, self.sequences[:, i])))
            unique_cnts = np.unique(seq, return_counts=True)
            for j, char in enumerate(unique_cnts[0]):
                char_cnt = unique_cnts[1][j]
                if char in characters:
                    pssm_sparse[i, characters.index(char)] = char_cnt
                else:
                    # Degenerate character: spread its count over the concrete
                    # characters it can stand for.
                    if self.seqtype == 'nucl':
                        for part_base in DEGENERATE_BASES[char]:
                            if pssm_sparse[i, characters.index(part_base)]:
                                pssm_sparse[i, characters.index(part_base)] += char_cnt / \
                                                                               float(len(DEGENERATE_BASES[char]))
                            else:
                                pssm_sparse[i, characters.index(part_base)] = char_cnt / \
                                                                              float(len(DEGENERATE_BASES[char]))
                    elif self.seqtype == 'cod':
                        # Expand each degenerate base in the codon to weighted
                        # concrete bases, then distribute the count over every
                        # concrete codon combination.
                        # (A leftover debug print of each codon base was removed.)
                        char_val = [dict(), dict(), dict()]
                        for k, cod_base in enumerate(char):
                            if cod_base not in BASES:
                                for part_base in DEGENERATE_BASES[cod_base]:
                                    char_val[k][part_base] = 1 / float(len(DEGENERATE_BASES[cod_base]))
                            else:
                                char_val[k][cod_base] = 1
                        for a, a_val in char_val[0].items():
                            for b, b_val in char_val[1].items():
                                for c, c_val in char_val[2].items():
                                    if pssm_sparse[i, characters.index(a+b+c)]:
                                        pssm_sparse[i, characters.index(a+b+c)] += char_cnt * a_val * b_val * c_val
                                    else:
                                        pssm_sparse[i, characters.index(a + b + c)] = char_cnt * a_val * b_val * c_val
                    else:
                        raise ValueError(char)
        return pd.DataFrame(pssm_sparse.toarray(), columns=list(characters))
    def consensus_matrix(self):
        """Return a DataFrame of the most frequent character and its count per column."""
        pssm_df = self.pssm()
        consensus_idx = pssm_df.idxmax(axis=1)
        consensus_cnt = pssm_df.max(axis=1)
        consensus_df = pd.concat([consensus_idx, consensus_cnt], axis=1)
        consensus_df.columns = ['char', 'count']
        return consensus_df
    def consensus_sequence(self):
        """Return the per-column majority character as a list."""
        pssm_df = self.pssm()
        consensus_idx = pssm_df.idxmax(axis=1)
        return list(consensus_idx.to_dict().values())
    @staticmethod
    def parse_fasta(path, seqtype='nucl', upper=True, output_type='array'):
        """Read FASTA format entirely using only built-ins.
        Parameters
        ----------
        path : str
            File path (absolute or relative) where the FASTA file is located.
        seqtype : str
            'nucl' (Nucleotide), 'prot' (Protein), 'cod' (Codon-based)
        upper : bool
            Forwarded to SequenceArray.parse_fasta.
        output_type : {'array', 'ndarray', 'dict'}, optional (default = 'array')
            'array' (alias 'ndarray') returns a SequenceAlignment;
            'dict' returns the raw mapping of id -> sequence.
        Returns
        -------
        SequenceAlignment or OrderedDict
        """
        lengths = set()
        seq_array = SequenceArray.parse_fasta(path, seqtype=seqtype, upper=upper)
        for key, seq in seq_array.items():
            lengths.add(len(seq))
        # The documented default 'array' previously matched no branch and the
        # function silently returned None; accept both spellings, and raise on
        # unequal lengths instead of returning None.
        if output_type in ('array', 'ndarray'):
            if len(lengths) != 1:
                raise ValueError('Unequal sequence lengths; cannot build an alignment.')
            return SequenceAlignment(seq_array, seqtype)
        elif output_type == 'dict':
            return seq_array
        raise ValueError('Unknown output_type "{0}".'.format(output_type))
    @staticmethod
    def concat(*alignments):
        """Concatenate multiple sequence alignments together
        Parameters
        ----------
        alignments : SequenceAlignment
        Returns
        -------
        SequenceAlignment
            New concatenated SequenceAlignment
        """
        concaternated_alignment = alignments[0]
        for alignment in alignments[1:]:
            concaternated_alignment += alignment
        return concaternated_alignment
    @staticmethod
    def alignment_to_fasta(alignment, linewidth=60):
        """Save the alignment as a FASTA-formatted string
        Parameters
        ----------
        alignment : SequenceAlignment
        linewidth : int
            Number of characters per line
        Returns
        -------
        str
            FASTA-formatted string
        """
        return SequenceArray.array_to_fasta(alignment.ids, alignment.sequences, linewidth=linewidth)
    @staticmethod
    def composition(alignment_obj, seqtype='nucl'):
        """Returns the character composition of the sequence alignment
        Parameters
        ----------
        alignment_obj : SequenceAlignment
        seqtype : str
            'nucl' (Nucleotide), 'prot' (Protein), 'cod' (Codon-based)
        Returns
        -------
        OrderedDict
        """
        sequence_obj = SequenceArray(
            OrderedDict([(seqid, ''.join(sequence_as_list))
                         for seqid, sequence_as_list in zip(alignment_obj.ids, alignment_obj.sequences)]))
        return SequenceArray.composition(sequence_obj, seqtype=seqtype)
class NucleotideAlignment(SequenceAlignment):
    """Multiple sequence alignment of nucleotide sequences (one base per column)."""
    def __init__(self, input_obj, name='', description=''):
        """Create a new NucleotideAlignment from a tuple, dictionary, or file.
        Parameters
        ----------
        input_obj : tuple or dict or str
            One of:
            - Tuple of a list of record names and an ndarray of sequences
            - Dictionary-like object mapping record names to sequences
            - File path (absolute or relative) to a FASTA file
        name : str
            Name of the set of sequence records
        description : str
            Short description
        """
        super().__init__(input_obj, 'nucl', charsize=1, name=name, description=description)
    def basecomp(self):
        """Return the base composition of the current nucleotide alignment.
        Returns
        -------
        OrderedDict
            Per-record composition, augmented with combined 'AT' and 'GC' totals.
        """
        composition = super().composition(self, seqtype=self.seqtype)
        for counts in composition.values():
            counts['AT'] = counts['A'] + counts['T']
            counts['GC'] = counts['G'] + counts['C']
        return composition
class ProteinAlignment(SequenceAlignment):
    """Multiple sequence alignment of protein sequences (one residue per column)."""
    def __init__(self, input_obj, name='', description=''):
        """Create a new ProteinAlignment from a tuple, dictionary or file.
        Parameters
        ----------
        input_obj : tuple or dict or str
            Alignment objects can be instantiated by passing one of the following:
            - Tuple of list of record names and ndarray of sequences
            - Dictionary-like object
            - File path (absolute or relative)
        name : str
            Name of the set of sequence records
        description : str
            Short description
        """
        # Delegate to SequenceAlignment with seqtype fixed to 'prot'.
        super().__init__(input_obj, 'prot', charsize=1, name=name, description=description)
    def aacomp(self):
        # Amino acid composition of each record; delegates to the
        # SequenceAlignment.composition static method.
        return super().composition(self, seqtype=self.seqtype)
class CodonAlignment(NucleotideAlignment):
    """Codon-based multiple sequence alignment (three nucleotides per column)."""
    def __init__(self, input_obj, name='', description=''):
        """Create a new CodonAlignment from a tuple, dictionary or file.
        Parameters
        ----------
        input_obj : tuple or dict or str
            Alignment objects can be instantiated by passing one of the following:
            - Tuple of list of record names and ndarray of sequences
            - Dictionary-like object
            - File path (absolute or relative)
        name : str
            Name of the set of sequence records
        description : str
            Short description
        """
        # Bypass NucleotideAlignment.__init__ (which hard-codes charsize=1) and
        # initialize directly as a charsize-3 (codon) alignment.
        SequenceAlignment.__init__(self, input_obj, seqtype='cod', charsize=3, name=name, description=description)
        # Create nucleotide alignment by flattening each row of codons back
        # into a plain per-base alignment.
        ntaln_lst = list()
        for seq in self.sequences:
            ntaln_concat = list()
            for seq_seq in seq:
                ntaln_concat.append(''.join(seq_seq))
            ntaln_lst.append(''.join(ntaln_concat))
        self.nucl_aln = NucleotideAlignment(
            MSA(ids=self.ids, alignment=np.array(np.array([list(seq) for seq in ntaln_lst], dtype='U1'), dtype='U1')))
        # Per-codon-position views of the nucleotide alignment (1-indexed):
        # pos[k] holds every k-th base of each codon.
        self.pos = OrderedDict()
        self.pos[1] = self.nucl_aln.colx(0, None, 3)
        self.pos[2] = self.nucl_aln.colx(1, None, 3)
        self.pos[3] = self.nucl_aln.colx(2, None, 3)
    def make_raxml_codon_partition_file(self, save_path):
        """Make RAxML partition file for a codon alignment
        Parameters
        ----------
        save_path: str
            Partition file save path
        """
        # TODO : check if basedir of save_path exists
        # Maps the position number to its English ordinal suffix (1st/2nd/3rd).
        ordinal_suffix = {1: 'st', 2: 'nd', 3: 'rd', 4: 'th', 5: 'th', 6: 'th', 7: 'th', 8: 'th', 9: 'th', 0: 'th'}
        # TODO : Warn if file already exists
        with open(save_path, 'w') as f:
            # One 'DNA' partition per codon position, e.g. "DNA, 1stpos=1-900\3".
            # self.length is measured in codons, so the base span is length*3.
            for i in range(1, 4):
                print('DNA, {0}{1}pos={0}-{2}\\3'.format(i, ordinal_suffix[i], self.length*3), file=f)
    @staticmethod
    def composition(codon_aln_obj, fold_counts=(), pos=3):
        """Return the character composition of a CodonAlignment depending on codon position and fold count
        Parameters
        ----------
        codon_aln_obj : CodonAlignment
        fold_counts : tuple of int
            1,2,3,4,6 fold codon degeneracy classes to keep
        pos : int
            1 (1st position), 2 (2nd position), 3 (3rd position);
            0 keeps whole codons instead of a single base
        Returns
        -------
        OrderedDict
        """
        pos -= 1  # input is 1-indexed but Python is 0-indexed
        # Keep only codons whose degeneracy (fold) is one of the requested counts.
        codon_filter_set = set([codon for codon, fold in CODON_FOLD.items() if fold in fold_counts])
        filtered_sequences = OrderedDict()
        for seqid, seq_ndarray in codon_aln_obj.items():
            if pos == -1:
                # pos=0 was passed: keep the whole codon rather than one base.
                filtered_sequences[seqid] = ''.join(
                    [codon for codon in seq_ndarray if codon in codon_filter_set]
                )
            else:
                filtered_sequences[seqid] = ''.join(
                    [codon[pos] for codon in seq_ndarray if codon in codon_filter_set]
                )
        sequence_obj = SequenceArray(filtered_sequences)
        return SequenceArray.composition(sequence_obj, seqtype='cod')
| apache-2.0 |
uzgit/ardupilot | Tools/mavproxy_modules/lib/magcal_graph_ui.py | 108 | 8248 | # Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from matplotlib.backends.backend_wxagg import FigureCanvas
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from pymavlink.mavutil import mavlink
from MAVProxy.modules.lib import wx_processguard
from MAVProxy.modules.lib.wx_loader import wx
import geodesic_grid as grid
class MagcalPanel(wx.Panel):
    """wx panel for one compass's calibration: status/progress text plus a 3D
    geodesic sphere whose sections fill in as they are hit during calibration."""
    # Human-readable (Pango markup) labels for each MAV_CAL status code.
    _status_markup_strings = {
        mavlink.MAG_CAL_NOT_STARTED: 'Not started',
        mavlink.MAG_CAL_WAITING_TO_START: 'Waiting to start',
        mavlink.MAG_CAL_RUNNING_STEP_ONE: 'Step one',
        mavlink.MAG_CAL_RUNNING_STEP_TWO: 'Step two',
        mavlink.MAG_CAL_SUCCESS: '<span color="blue">Success</span>',
        mavlink.MAG_CAL_FAILED: '<span color="red">Failed</span>',
    }
    # Face colors for sphere sections that have not / have been hit.
    _empty_color = '#7ea6ce'
    _filled_color = '#4680b9'
    def __init__(self, *k, **kw):
        """Build the text labels, the matplotlib canvas and the 3D plot."""
        super(MagcalPanel, self).__init__(*k, **kw)
        # Match the figure background to the panel background.
        facecolor = self.GetBackgroundColour().GetAsString(wx.C2S_HTML_SYNTAX)
        fig = plt.figure(facecolor=facecolor, figsize=(1,1))
        self._canvas = FigureCanvas(self, wx.ID_ANY, fig)
        self._canvas.SetMinSize((300,300))
        self._id_text = wx.StaticText(self, wx.ID_ANY)
        self._status_text = wx.StaticText(self, wx.ID_ANY)
        self._completion_pct_text = wx.StaticText(self, wx.ID_ANY)
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(self._id_text)
        sizer.Add(self._status_text)
        sizer.Add(self._completion_pct_text)
        sizer.Add(self._canvas, proportion=1, flag=wx.EXPAND)
        self.SetSizer(sizer)
        # NOTE(review): `axis_bgcolor` was removed in matplotlib 2.2
        # (renamed `facecolor`); this assumes an older matplotlib — confirm.
        ax = fig.add_subplot(111, axis_bgcolor=facecolor, projection='3d')
        self.configure_plot(ax)
    def configure_plot(self, ax):
        """Configure the 3D axes and add the geodesic-grid polygon collection."""
        extra = .5
        lim = grid.radius + extra
        ax.set_xlim3d(-lim, lim)
        ax.set_ylim3d(-lim, lim)
        ax.set_zlim3d(-lim, lim)
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('z')
        # Flip z and x so the plot matches the vehicle's body frame orientation
        # convention used elsewhere — presumably NED; TODO confirm.
        ax.invert_zaxis()
        ax.invert_xaxis()
        ax.set_aspect('equal')
        self._polygons_collection = Poly3DCollection(
            grid.sections_triangles,
            edgecolors='#386694',
        )
        ax.add_collection3d(self._polygons_collection)
    def update_status_from_mavlink(self, m):
        """Update the status label from a MAVLink message's `cal_status` field."""
        status_string = self._status_markup_strings.get(m.cal_status, '???')
        self._status_text.SetLabelMarkup(
            '<b>Status:</b> %s' % status_string,
        )
    def mavlink_magcal_report(self, m):
        """Handle a MAG_CAL_REPORT message: final status, clear the percent label."""
        self.update_status_from_mavlink(m)
        self._completion_pct_text.SetLabel('')
    def mavlink_magcal_progress(self, m):
        """Handle a MAG_CAL_PROGRESS message: recolor sphere sections and labels."""
        facecolors = []
        # completion_mask is a byte array; each byte covers 8 consecutive
        # sphere sections, one bit per section (set bit = section hit).
        for i, mask in enumerate(m.completion_mask):
            for j in range(8):
                section = i * 8 + j
                if mask & 1 << j:
                    facecolor = self._filled_color
                else:
                    facecolor = self._empty_color
                facecolors.append(facecolor)
        self._polygons_collection.set_facecolors(facecolors)
        self._canvas.draw()
        self._id_text.SetLabelMarkup(
            '<b>Compass id:</b> %d' % m.compass_id
        )
        self._completion_pct_text.SetLabelMarkup(
            '<b>Completion:</b> %d%%' % m.completion_pct
        )
        self.update_status_from_mavlink(m)
    # Singleton legend panel shared by all MagcalPanel instances.
    _legend_panel = None
    @staticmethod
    def legend_panel(*k, **kw):
        """Create (once) and return the shared color-legend panel."""
        if MagcalPanel._legend_panel:
            return MagcalPanel._legend_panel
        p = MagcalPanel._legend_panel = wx.Panel(*k, **kw)
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        p.SetSizer(sizer)
        marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
        marker.SetBackgroundColour(MagcalPanel._empty_color)
        sizer.Add(marker, flag=wx.ALIGN_CENTER)
        text = wx.StaticText(p, wx.ID_ANY)
        text.SetLabel('Sections not hit')
        sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
        marker = wx.Panel(p, wx.ID_ANY, size=(10, 10))
        marker.SetBackgroundColour(MagcalPanel._filled_color)
        sizer.Add(marker, border=10, flag=wx.ALIGN_CENTER | wx.LEFT)
        text = wx.StaticText(p, wx.ID_ANY)
        text.SetLabel('Sections hit')
        sizer.Add(text, border=4, flag=wx.ALIGN_CENTER | wx.LEFT)
        return p
class MagcalFrame(wx.Frame):
    """Top-level frame that polls a pipe-like connection for MAVLink magnetometer
    calibration messages and shows one MagcalPanel per compass id."""
    def __init__(self, conn):
        """Create the frame with an idle placeholder and start the poll timer.
        Parameters
        ----------
        conn
            Connection object with poll()/recv(); presumably a
            multiprocessing.Connection end — TODO confirm against caller.
        """
        super(MagcalFrame, self).__init__(
            None,
            wx.ID_ANY,
            title='Magcal Graph',
        )
        self.SetMinSize((300, 300))
        self._conn = conn
        self._main_panel = wx.ScrolledWindow(self, wx.ID_ANY)
        self._main_panel.SetScrollbars(1, 1, 1, 1)
        # Maps compass id -> MagcalPanel; empty until the first message arrives.
        self._magcal_panels = {}
        self._sizer = wx.BoxSizer(wx.VERTICAL)
        self._main_panel.SetSizer(self._sizer)
        # Placeholder shown until the first calibration message is received.
        idle_text = wx.StaticText(self._main_panel, wx.ID_ANY)
        idle_text.SetLabelMarkup('<i>No calibration messages received yet...</i>')
        idle_text.SetForegroundColour('#444444')
        self._sizer.AddStretchSpacer()
        self._sizer.Add(
            idle_text,
            proportion=0,
            flag=wx.ALIGN_CENTER | wx.ALL,
            border=10,
        )
        self._sizer.AddStretchSpacer()
        # Poll the connection 5 times per second.
        self._timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.timer_callback, self._timer)
        self._timer.Start(200)
    def add_compass(self, id):
        """Add a MagcalPanel for compass `id`, replacing the idle placeholder
        (and adding the shared legend) on the first call."""
        if not self._magcal_panels:
            self._sizer.Clear(deleteWindows=True)
            self._magcal_panels_sizer = wx.BoxSizer(wx.HORIZONTAL)
            self._sizer.Add(
                self._magcal_panels_sizer,
                proportion=1,
                flag=wx.EXPAND,
            )
            legend = MagcalPanel.legend_panel(self._main_panel, wx.ID_ANY)
            self._sizer.Add(
                legend,
                proportion=0,
                flag=wx.ALIGN_CENTER,
            )
        self._magcal_panels[id] = MagcalPanel(self._main_panel, wx.ID_ANY)
        self._magcal_panels_sizer.Add(
            self._magcal_panels[id],
            proportion=1,
            border=10,
            flag=wx.EXPAND | wx.ALL,
        )
    def timer_callback(self, evt):
        """Drain the connection, dispatching messages to per-compass panels.
        A 'close' string tears the frame down."""
        close_requested = False
        mavlink_msgs = {}
        while self._conn.poll():
            m = self._conn.recv()
            if isinstance(m, str) and m == 'close':
                close_requested = True
                continue
            if m.compass_id not in mavlink_msgs:
                # Keep the last two messages so that we get the last progress
                # if the last message is the calibration report.
                mavlink_msgs[m.compass_id] = [None, m]
            else:
                l = mavlink_msgs[m.compass_id]
                l[0] = l[1]
                l[1] = m
        if close_requested:
            self._timer.Stop()
            self.Destroy()
            return
        if not mavlink_msgs:
            return
        # Create panels for any compass ids seen for the first time.
        needs_fit = False
        for k in mavlink_msgs:
            if k not in self._magcal_panels:
                self.add_compass(k)
                needs_fit = True
        if needs_fit:
            self._sizer.Fit(self)
        # Dispatch the retained messages (progress first, then report if any).
        for k, l in mavlink_msgs.items():
            for m in l:
                if not m:
                    continue
                panel = self._magcal_panels[k]
                if m.get_type() == 'MAG_CAL_PROGRESS':
                    panel.mavlink_magcal_progress(m)
                elif m.get_type() == 'MAG_CAL_REPORT':
                    panel.mavlink_magcal_report(m)
| gpl-3.0 |
ssamot/ce888 | labs/lab2/permutations.py | 1 | 2483 | import matplotlib
matplotlib.use('Agg')
import pandas as pd
import random
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import numpy.random as npr
def permutation_resampling(num_samples, case, control):
    """Estimate a two-sided permutation-test p-value for a difference in means.

    Pools `case` and `control`, repeatedly permutes the pooled values and
    re-splits them into groups of the original sizes. The p-value is the
    fraction of permuted absolute mean differences exceeding the observed one.

    Parameters
    ----------
    num_samples : int
        Number of random permutations to draw.
    case, control : array-like of float
        The two samples whose means are compared.

    Returns
    -------
    float
        Estimated p-value in [0, 1].
    """
    observed_diff = abs(np.mean(case) - np.mean(control))
    num_case = len(case)
    combined = np.concatenate([case, control])
    diffs = []
    for _ in range(num_samples):
        xs = npr.permutation(combined)
        # Use the absolute difference so the test is two-sided, matching the
        # absolute value applied to the observed statistic. (Previously the
        # signed permuted difference was compared against |observed|, which
        # counted only one tail.)
        diff = abs(np.mean(xs[:num_case]) - np.mean(xs[num_case:]))
        diffs.append(diff)
    pval = np.sum(np.array(diffs) > observed_diff) / float(num_samples)
    return pval
# def permutation_resampling(iterations, new, old):
# #data = pd.concat([old,new], ignore_index = True).values
# data = np.concatenate([new,old])
# print data.shape
# #samples = np.random.choice(data,replace = True, size = [iterations, len(data)])
# samples = []
# for i in range(iterations):
# s = np.random.permutation(data)
# samples.append(s)
# data_mean = (new.mean() - old.mean())
# vals = []
# for sample in samples:
# sample_new, sample_old = sample[:len(new)], sample[len(new):]
# sta = (sample_new.mean() - sample_old.mean() )
# if (sta > data_mean):
# higher = 1.0
# else:
# higher = 0.0
# vals.append(higher)
# b = np.array(vals)
# #print b.mean()
# return b.mean()
if __name__ == "__main__":
    # Load the example dataset. Note: the values below are immediately
    # overwritten by hard-coded arrays, so the CSV only feeds the
    # commented-out variants above/below.
    df = pd.read_csv('./vehicles.csv')
    #print df
    new = df[df.columns[0]].dropna().values
    old = df[df.columns[1]].dropna().values
    #new = np.array([100.0,102]*5)
    #old = np.array([100.0,99]*20)
    # old = np.array([0,0,0,0,0,0,1,0,0,1,0])
    # new = np.array([1,0,0,1,1,1,0,0,0,1,0])
    old = np.array([0,1,1,1,0,1,1,0,0,1,0])
    new = np.array([0,1,1,0,1,1,0,1,1,1,0,0,1,1,1,1,1,1,1])
    # Summary of the two samples and their mean difference.
    print((old.mean(), new.mean(), len(old), len(new), new.mean() - old.mean()))
    #exit()
    boots = []
    # Estimate the permutation-test p-value at an increasing number of
    # resampling iterations to visualize how the estimate stabilizes.
    for i in range(500,20000,100):
        boot = permutation_resampling(i, new, old)
        print((i,boot))
        boots.append([i,boot])
    df_boot = pd.DataFrame(boots, columns=['Boostrap iterations','p-value'])
    # NOTE(review): positional (x, y) string arguments to lmplot were removed
    # in seaborn >= 0.12; this call assumes a legacy seaborn version — confirm.
    sns_plot = sns.lmplot(df_boot.columns[0],df_boot.columns[1], data=df_boot, fit_reg=False)
    sns_plot.axes[0,0].set_xlim(0,)
    sns_plot.savefig("permutations.png",bbox_inches='tight')
    sns_plot.savefig("permutations.pdf",bbox_inches='tight')
    #print ("Mean: %f")%(np.mean(data))
    #print ("Var: %f")%(np.var(data))
| gpl-3.0 |
nelson-liu/paraphrase-id-tensorflow | scripts/run_model/run_siamese.py | 1 | 13787 | import argparse
import sys
import logging
import math
import numpy as np
import os
import pandas as pd
import pickle
import json
sys.path.append(os.path.join(os.path.dirname(__file__), "../../"))
from duplicate_questions.data.data_manager import DataManager
from duplicate_questions.data.embedding_manager import EmbeddingManager
from duplicate_questions.data.instances.sts_instance import STSInstance
from duplicate_questions.models.siamese_bilstm.siamese_bilstm import SiameseBiLSTM
logger = logging.getLogger(__name__)
def main():
    """Train or predict with a baseline Siamese BiLSTM paraphrase model.

    Parses command-line flags, builds the data pipeline and embeddings,
    constructs the model graph, and then either runs the training loop
    ("train" mode) or loads a checkpoint and writes a predictions CSV
    ("predict" mode).
    """
    project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
    # Parse config arguments
    argparser = argparse.ArgumentParser(
        description=("Run a baseline Siamese BiLSTM model "
                     "for paraphrase identification."))
    argparser.add_argument("mode", type=str,
                           choices=["train", "predict"],
                           help=("One of {train|predict}, to "
                                 "indicate what you want the model to do. "
                                 "If you pick \"predict\", then you must also "
                                 "supply the path to a pretrained model and "
                                 "DataIndexer to load."))
    argparser.add_argument("--model_load_dir", type=str,
                           help=("The path to a directory with checkpoints to "
                                 "load for evaluation or prediction. The "
                                 "latest checkpoint will be loaded."))
    argparser.add_argument("--dataindexer_load_path", type=str,
                           help=("The path to the dataindexer fit on the "
                                 "train data, so we can properly index the "
                                 "test data for evaluation or prediction."))
    argparser.add_argument("--train_file", type=str,
                           default=os.path.join(project_dir,
                                                "data/processed/quora/"
                                                "train_cleaned_train_split.csv"),
                           help="Path to a file to train on.")
    argparser.add_argument("--val_file", type=str,
                           default=os.path.join(project_dir,
                                                "data/processed/quora/"
                                                "train_cleaned_val_split.csv"),
                           help="Path to a file to monitor validation acc. on.")
    argparser.add_argument("--test_file", type=str,
                           default=os.path.join(project_dir,
                                                "data/processed/quora/"
                                                "test_final.csv"))
    argparser.add_argument("--batch_size", type=int, default=128,
                           help="Number of instances per batch.")
    argparser.add_argument("--num_epochs", type=int, default=10,
                           help=("Number of epochs to perform in "
                                 "training."))
    argparser.add_argument("--early_stopping_patience", type=int, default=0,
                           help=("number of epochs with no validation "
                                 "accuracy improvement after which training "
                                 "will be stopped"))
    argparser.add_argument("--num_sentence_words", type=int, default=30,
                           help=("The maximum length of a sentence. Longer "
                                 "sentences will be truncated, and shorter "
                                 "ones will be padded."))
    argparser.add_argument("--word_embedding_dim", type=int, default=300,
                           help="Dimensionality of the word embedding layer")
    argparser.add_argument("--pretrained_embeddings_file_path", type=str,
                           help="Path to a file with pretrained embeddings.",
                           default=os.path.join(project_dir,
                                                "data/external/",
                                                "glove.6B.300d.txt"))
    argparser.add_argument("--fine_tune_embeddings", action="store_true",
                           help=("Whether to train the embedding layer "
                                 "(if True), or keep it fixed (False)."))
    argparser.add_argument("--rnn_hidden_size", type=int, default=256,
                           help=("The output dimension of the RNN."))
    argparser.add_argument("--share_encoder_weights", action="store_true",
                           help=("Whether to use the same encoder on both "
                                 "input sentences (thus sharing weights), "
                                 "or a different one for each sentence"))
    argparser.add_argument("--rnn_output_mode", type=str, default="last",
                           choices=["mean_pool", "last"],
                           help=("How to calculate the final sentence "
                                 "representation from the RNN outputs. "
                                 "\"mean_pool\" indicates that the outputs "
                                 "will be averaged (with respect to padding), "
                                 "and \"last\" indicates that the last "
                                 "relevant output will be used as the "
                                 "sentence representation."))
    argparser.add_argument("--output_keep_prob", type=float, default=1.0,
                           help=("The proportion of RNN outputs to keep, "
                                 "where the rest are dropped out."))
    argparser.add_argument("--log_period", type=int, default=10,
                           help=("Number of steps between each summary "
                                 "op evaluation."))
    argparser.add_argument("--val_period", type=int, default=250,
                           help=("Number of steps between each evaluation of "
                                 "validation performance."))
    argparser.add_argument("--log_dir", type=str,
                           default=os.path.join(project_dir,
                                                "logs/"),
                           help=("Directory to save logs to."))
    argparser.add_argument("--save_period", type=int, default=250,
                           help=("Number of steps between each "
                                 "model checkpoint"))
    argparser.add_argument("--save_dir", type=str,
                           default=os.path.join(project_dir,
                                                "models/"),
                           help=("Directory to save model checkpoints to."))
    argparser.add_argument("--run_id", type=str, required=True,
                           help=("Identifying run ID for this run. If "
                                 "predicting, you probably want this "
                                 "to be the same as the train run_id"))
    argparser.add_argument("--model_name", type=str, required=True,
                           help=("Identifying model name for this run. If"
                                 "predicting, you probably want this "
                                 "to be the same as the train run_id"))
    argparser.add_argument("--reweight_predictions_for_kaggle", action="store_true",
                           help=("Only relevant when predicting. Whether to "
                                 "reweight the prediction probabilities to "
                                 "account for class proportion discrepancy "
                                 "between train and test."))
    config = argparser.parse_args()
    model_name = config.model_name
    run_id = config.run_id
    mode = config.mode
    # Get the data.
    batch_size = config.batch_size
    if mode == "train":
        # Read the train data from a file, and use it to index the validation data
        data_manager = DataManager(STSInstance)
        num_sentence_words = config.num_sentence_words
        get_train_data_gen, train_data_size = data_manager.get_train_data_from_file(
            [config.train_file],
            max_lengths={"num_sentence_words": num_sentence_words})
        get_val_data_gen, val_data_size = data_manager.get_validation_data_from_file(
            [config.val_file], max_lengths={"num_sentence_words": num_sentence_words})
    else:
        # Load the fitted DataManager, and use it to index the test data
        logger.info("Loading pickled DataManager "
                    "from {}".format(config.dataindexer_load_path))
        data_manager = pickle.load(open(config.dataindexer_load_path, "rb"))
        test_data_gen, test_data_size = data_manager.get_test_data_from_file(
            [config.test_file])
    vars(config)["word_vocab_size"] = data_manager.data_indexer.get_vocab_size()
    # Log the run parameters.
    log_dir = config.log_dir
    log_path = os.path.join(log_dir, model_name, run_id.zfill(2))
    logger.info("Writing logs to {}".format(log_path))
    if not os.path.exists(log_path):
        logger.info("log path {} does not exist, "
                    "creating it".format(log_path))
        os.makedirs(log_path)
    # NOTE(review): this produces e.g. "trainparams.json" / "predictparams.json";
    # a separator between mode and "params" was probably intended — kept as-is.
    params_path = os.path.join(log_path, mode + "params.json")
    logger.info("Writing params to {}".format(params_path))
    with open(params_path, 'w') as params_file:
        json.dump(vars(config), params_file, indent=4)
    # Get the embeddings.
    embedding_manager = EmbeddingManager(data_manager.data_indexer)
    embedding_matrix = embedding_manager.get_embedding_matrix(
        config.word_embedding_dim,
        config.pretrained_embeddings_file_path)
    vars(config)["word_embedding_matrix"] = embedding_matrix
    # Initialize the model.
    model = SiameseBiLSTM(vars(config))
    model.build_graph()
    if mode == "train":
        # Train the model.
        num_epochs = config.num_epochs
        num_train_steps_per_epoch = int(math.ceil(train_data_size / batch_size))
        num_val_steps = int(math.ceil(val_data_size / batch_size))
        log_period = config.log_period
        val_period = config.val_period
        save_period = config.save_period
        save_dir = os.path.join(config.save_dir, model_name, run_id.zfill(2) + "/")
        save_path = os.path.join(save_dir, model_name + "-" + run_id.zfill(2))
        logger.info("Checkpoints will be written to {}".format(save_dir))
        if not os.path.exists(save_dir):
            logger.info("save path {} does not exist, "
                        "creating it".format(save_dir))
            os.makedirs(save_dir)
        # Persist the fitted DataManager so predict mode can re-index data
        # with the same vocabulary.
        logger.info("Saving fitted DataManager to {}".format(save_dir))
        data_manager_pickle_name = "{}-{}-DataManager.pkl".format(model_name,
                                                                  run_id.zfill(2))
        pickle.dump(data_manager,
                    open(os.path.join(save_dir, data_manager_pickle_name), "wb"))
        patience = config.early_stopping_patience
        model.train(get_train_instance_generator=get_train_data_gen,
                    get_val_instance_generator=get_val_data_gen,
                    batch_size=batch_size,
                    num_train_steps_per_epoch=num_train_steps_per_epoch,
                    num_epochs=num_epochs,
                    num_val_steps=num_val_steps,
                    save_path=save_path,
                    log_path=log_path,
                    log_period=log_period,
                    val_period=val_period,
                    save_period=save_period,
                    patience=patience)
    else:
        # Predict with the model
        model_load_dir = config.model_load_dir
        num_test_steps = int(math.ceil(test_data_size / batch_size))
        # Numpy array of shape (num_test_examples, 2)
        raw_predictions = model.predict(get_test_instance_generator=test_data_gen,
                                        model_load_dir=model_load_dir,
                                        batch_size=batch_size,
                                        num_test_steps=num_test_steps)
        # Remove the first column, so we're left with just the probabilities
        # that a question is a duplicate.
        is_duplicate_probabilities = np.delete(raw_predictions, 0, 1)
        # The class balance between kaggle train and test seems different.
        # This edits prediction probability to account for the discrepancy.
        # See: https://www.kaggle.com/c/quora-question-pairs/discussion/31179
        if config.reweight_predictions_for_kaggle:
            positive_weight = 0.165 / 0.37
            negative_weight = (1 - 0.165) / (1 - 0.37)
            is_duplicate_probabilities = ((positive_weight * is_duplicate_probabilities) /
                                          (positive_weight * is_duplicate_probabilities +
                                           negative_weight *
                                           (1 - is_duplicate_probabilities)))
        # Write the predictions to an output submission file
        output_predictions_path = os.path.join(log_path, model_name + "-" +
                                               run_id.zfill(2) +
                                               "-output_predictions.csv")
        logger.info("Writing predictions to {}".format(output_predictions_path))
        is_duplicate_df = pd.DataFrame(is_duplicate_probabilities)
        is_duplicate_df.to_csv(output_predictions_path, index_label="test_id",
                               header=["is_duplicate"])
if __name__ == "__main__":
    # Configure root logging before handing control to main().
    logging.basicConfig(format="%(asctime)s - %(levelname)s "
                        "- %(name)s - %(message)s",
                        level=logging.INFO)
    main()
| mit |
hrjn/scikit-learn | sklearn/utils/tests/test_murmurhash.py | 79 | 2849 | # Author: Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_equal, assert_true
def test_mmhash3_int():
    """Scalar int hashing: seed=0 is the default and positive=False is a no-op
    for non-negative results; positive=True maps negatives to unsigned."""
    # All of these spellings are equivalent to the default call.
    for kwargs in ({}, {'seed': 0}, {'positive': False},
                   {'seed': 0, 'positive': False},
                   {'positive': True}, {'seed': 0, 'positive': True}):
        assert_equal(murmurhash3_32(3, **kwargs), 847579505)
    # A non-default seed produces a different (here negative) signed hash...
    assert_equal(murmurhash3_32(3, seed=42), -1823081949)
    assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
    # ...and positive=True reinterprets it as an unsigned 32-bit value.
    assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
    """Vectorized hashing of an int array must agree element-wise with
    hashing each scalar individually, for both signed and positive modes."""
    rng = np.random.RandomState(42)
    keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
    keys = keys.reshape((3, 2, 1))
    for seed in [0, 42]:
        scalar_hashes = [murmurhash3_32(int(key), seed) for key in keys.flat]
        expected = np.array(scalar_hashes).reshape(keys.shape)
        assert_array_equal(murmurhash3_32(keys, seed), expected)
    for seed in [0, 42]:
        scalar_hashes = [murmurhash3_32(key, seed, positive=True)
                         for key in keys.flat]
        expected = np.array(scalar_hashes).reshape(keys.shape)
        assert_array_equal(murmurhash3_32(keys, seed, positive=True),
                           expected)
def test_mmhash3_bytes():
    """Known-good reference hashes for the bytes code path."""
    # (seed, positive) -> expected hash of b'foo'
    reference = [(0, False, -156908512),
                 (42, False, -1322301282),
                 (0, True, 4138058784),
                 (42, True, 2972666014)]
    for seed, positive, expected in reference:
        assert_equal(murmurhash3_32(b('foo'), seed, positive=positive),
                     expected)
def test_mmhash3_unicode():
    """Unicode input must hash identically to the equivalent bytes input."""
    # (seed, positive) -> expected hash of u'foo'; values match the bytes test.
    reference = [(0, False, -156908512),
                 (42, False, -1322301282),
                 (0, True, 4138058784),
                 (42, True, 2972666014)]
    for seed, positive, expected in reference:
        assert_equal(murmurhash3_32(u('foo'), seed, positive=positive),
                     expected)
def test_no_collision_on_byte_range():
    """Hashes of ' ' * 0 through ' ' * 99 must be pairwise distinct."""
    previous_hashes = set()
    for i in range(100):
        h = murmurhash3_32(' ' * i, 0)
        assert_true(h not in previous_hashes,
                    "Found collision on growing empty string")
        # BUG FIX: the hash was never recorded in previous_hashes, so the
        # assertion above could never fail and the test was vacuous.
        previous_hashes.add(h)
def test_uniform_distribution():
    """Bucketing hashes of 0..n_samples-1 into n_bins should be near-uniform."""
    n_bins, n_samples = 10, 100000
    bins = np.zeros(n_bins, dtype=np.float64)
    for sample in range(n_samples):
        bins[murmurhash3_32(sample, positive=True) % n_bins] += 1
    # Each bin's observed frequency should match the uniform expectation
    # to two decimal places.
    observed = bins / n_samples
    expected = np.full(n_bins, 1.0 / n_bins)
    assert_array_almost_equal(observed / expected, np.ones(n_bins), 2)
| bsd-3-clause |
louispotok/pandas | pandas/io/sas/sas_xport.py | 2 | 14809 | """
Read a SAS XPort format file into a Pandas DataFrame.
Based on code from Jack Cushman (github.com/jcushman/xport).
The file format is defined here:
https://support.sas.com/techsup/technote/ts140.pdf
"""
from datetime import datetime
import pandas as pd
from pandas.io.common import get_filepath_or_buffer, BaseIterator
from pandas import compat
import struct
import numpy as np
from pandas.util._decorators import Appender
import warnings
_correct_line1 = ("HEADER RECORD*******LIBRARY HEADER RECORD!!!!!!!"
"000000000000000000000000000000 ")
_correct_header1 = ("HEADER RECORD*******MEMBER HEADER RECORD!!!!!!!"
"000000000000000001600000000")
_correct_header2 = ("HEADER RECORD*******DSCRPTR HEADER RECORD!!!!!!!"
"000000000000000000000000000000 ")
_correct_obs_header = ("HEADER RECORD*******OBS HEADER RECORD!!!!!!!"
"000000000000000000000000000000 ")
_fieldkeys = ['ntype', 'nhfun', 'field_length', 'nvar0', 'name', 'label',
'nform', 'nfl', 'num_decimals', 'nfj', 'nfill', 'niform',
'nifl', 'nifd', 'npos', '_']
_base_params_doc = """\
Parameters
----------
filepath_or_buffer : string or file-like object
Path to SAS file or object implementing binary read method."""
_params2_doc = """\
index : identifier of index column
Identifier of column that should be used as index of the DataFrame.
encoding : string
Encoding for text data.
chunksize : int
Read file `chunksize` lines at a time, returns iterator."""
_format_params_doc = """\
format : string
File format, only `xport` is currently supported."""
_iterator_doc = """\
iterator : boolean, default False
Return XportReader object for reading file incrementally."""
_read_sas_doc = """Read a SAS file into a DataFrame.
%(_base_params_doc)s
%(_format_params_doc)s
%(_params2_doc)s
%(_iterator_doc)s
Returns
-------
DataFrame or XportReader
Examples
--------
Read a SAS Xport file:
>>> df = pandas.read_sas('filename.XPT')
Read a Xport file in 10,000 line chunks:
>>> itr = pandas.read_sas('filename.XPT', chunksize=10000)
>>> for chunk in itr:
>>> do_something(chunk)
""" % {"_base_params_doc": _base_params_doc,
"_format_params_doc": _format_params_doc,
"_params2_doc": _params2_doc,
"_iterator_doc": _iterator_doc}
_xport_reader_doc = """\
Class for reading SAS Xport files.
%(_base_params_doc)s
%(_params2_doc)s
Attributes
----------
member_info : list
Contains information about the file
fields : list
Contains information about the variables in the file
""" % {"_base_params_doc": _base_params_doc,
"_params2_doc": _params2_doc}
_read_method_doc = """\
Read observations from SAS Xport file, returning as data frame.
Parameters
----------
nrows : int
Number of rows to read from data file; if None, read whole
file.
Returns
-------
A DataFrame.
"""
def _parse_date(datestr):
""" Given a date in xport format, return Python date. """
try:
# e.g. "16FEB11:10:07:55"
return datetime.strptime(datestr, "%d%b%y:%H:%M:%S")
except ValueError:
return pd.NaT
def _split_line(s, parts):
"""
Parameters
----------
s: string
Fixed-length string to split
parts: list of (name, length) pairs
Used to break up string, name '_' will be filtered from output.
Returns
-------
Dict of name:contents of string at given location.
"""
out = {}
start = 0
for name, length in parts:
out[name] = s[start:start + length].strip()
start += length
del out['_']
return out
def _handle_truncated_float_vec(vec, nbytes):
# This feature is not well documented, but some SAS XPORT files
# have 2-7 byte "truncated" floats. To read these truncated
# floats, pad them with zeros on the right to make 8 byte floats.
#
# References:
# https://github.com/jcushman/xport/pull/3
# The R "foreign" library
if nbytes != 8:
vec1 = np.zeros(len(vec), np.dtype('S8'))
dtype = np.dtype('S%d,S%d' % (nbytes, 8 - nbytes))
vec2 = vec1.view(dtype=dtype)
vec2['f0'] = vec
return vec2
return vec
def _parse_float_vec(vec):
    """
    Parse a vector of float values representing IBM 8 byte floats into
    native 8 byte floats.

    `vec` is viewed as pairs of big-endian uint32 halves; the result is a
    float64 ndarray of the same length.
    """
    dtype = np.dtype('>u4,>u4')
    vec1 = vec.view(dtype=dtype)
    xport1 = vec1['f0']
    xport2 = vec1['f1']
    # Start by setting first half of ieee number to first half of IBM
    # number sans exponent
    ieee1 = xport1 & 0x00ffffff
    # Get the second half of the ibm number into the second half of
    # the ieee number
    ieee2 = xport2
    # The fraction bit to the left of the binary point in the ieee
    # format was set and the number was shifted 0, 1, 2, or 3
    # places. This will tell us how to adjust the ibm exponent to be a
    # power of 2 ieee exponent and how to shift the fraction bits to
    # restore the correct magnitude.
    shift = np.zeros(len(vec), dtype=np.uint8)
    shift[np.where(xport1 & 0x00200000)] = 1
    shift[np.where(xport1 & 0x00400000)] = 2
    shift[np.where(xport1 & 0x00800000)] = 3
    # shift the ieee number down the correct number of places then
    # set the second half of the ieee number to be the second half
    # of the ibm number shifted appropriately, ored with the bits
    # from the first half that would have been shifted in if we
    # could shift a double. All we are worried about are the low
    # order 3 bits of the first half since we're only shifting by
    # 1, 2, or 3.
    ieee1 >>= shift
    ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift)))
    # clear the 1 bit to the left of the binary point
    ieee1 &= 0xffefffff
    # set the exponent of the ieee number to be the actual exponent
    # plus the shift count + 1023. Or this into the first half of the
    # ieee number. The ibm exponent is excess 64 but is adjusted by 65
    # since during conversion to ibm format the exponent is
    # incremented by 1 and the fraction bits left 4 positions to the
    # right of the radix point. (had to add >> 24 because C treats &
    # 0x7f as 0x7f000000 and Python doesn't)
    ieee1 |= ((((((xport1 >> 24) & 0x7f) - 65) << 2) +
               shift + 1023) << 20) | (xport1 & 0x80000000)
    # Reassemble the two 32-bit halves into big-endian float64s, then
    # convert to native-endian float64 for the caller.
    ieee = np.empty((len(ieee1),), dtype='>u4,>u4')
    ieee['f0'] = ieee1
    ieee['f1'] = ieee2
    ieee = ieee.view(dtype='>f8')
    ieee = ieee.astype('f8')
    return ieee
class XportReader(BaseIterator):
    __doc__ = _xport_reader_doc

    def __init__(self, filepath_or_buffer, index=None, encoding='ISO-8859-1',
                 chunksize=None):
        self._encoding = encoding
        self._lines_read = 0
        self._index = index
        self._chunksize = chunksize
        if isinstance(filepath_or_buffer, str):
            (filepath_or_buffer, encoding,
             compression, should_close) = get_filepath_or_buffer(
                filepath_or_buffer, encoding=encoding)
        if isinstance(filepath_or_buffer, (str, compat.text_type, bytes)):
            self.filepath_or_buffer = open(filepath_or_buffer, 'rb')
        else:
            # Copy to BytesIO, and ensure no encoding
            contents = filepath_or_buffer.read()
            try:
                contents = contents.encode(self._encoding)
            except (AttributeError, UnicodeError):
                # BUG FIX: this was a bare ``except:``, which also swallowed
                # KeyboardInterrupt/SystemExit and unrelated errors. Only the
                # expected failures are ignored: ``contents`` is already
                # bytes (no .encode -> AttributeError) or the text cannot be
                # converted in the requested encoding (UnicodeError).
                pass
            self.filepath_or_buffer = compat.BytesIO(contents)
        self._read_header()

    def close(self):
        """Close the underlying file/buffer."""
        self.filepath_or_buffer.close()

    def _get_row(self):
        # Every XPORT record is a fixed 80-byte "card".
        return self.filepath_or_buffer.read(80).decode()

    def _read_header(self):
        """Parse the file, member and field headers, leaving the stream at
        the first observation; populates file_info, member_info, fields,
        record geometry and the numpy dtype used by read()."""
        self.filepath_or_buffer.seek(0)
        # read file header
        line1 = self._get_row()
        if line1 != _correct_line1:
            self.close()
            raise ValueError("Header record is not an XPORT file.")
        line2 = self._get_row()
        fif = [['prefix', 24], ['version', 8], ['OS', 8],
               ['_', 24], ['created', 16]]
        file_info = _split_line(line2, fif)
        if file_info['prefix'] != "SAS SAS SASLIB":
            self.close()
            raise ValueError("Header record has invalid prefix.")
        file_info['created'] = _parse_date(file_info['created'])
        self.file_info = file_info
        line3 = self._get_row()
        file_info['modified'] = _parse_date(line3[:16])
        # read member header
        header1 = self._get_row()
        header2 = self._get_row()
        headflag1 = header1.startswith(_correct_header1)
        headflag2 = (header2 == _correct_header2)
        if not (headflag1 and headflag2):
            self.close()
            raise ValueError("Member header not found")
        # usually 140, could be 135
        fieldnamelength = int(header1[-5:-2])
        # member info
        mem = [['prefix', 8], ['set_name', 8], ['sasdata', 8],
               ['version', 8], ['OS', 8], ['_', 24], ['created', 16]]
        member_info = _split_line(self._get_row(), mem)
        mem = [['modified', 16], ['_', 16], ['label', 40], ['type', 8]]
        member_info.update(_split_line(self._get_row(), mem))
        member_info['modified'] = _parse_date(member_info['modified'])
        member_info['created'] = _parse_date(member_info['created'])
        self.member_info = member_info
        # read field names
        types = {1: 'numeric', 2: 'char'}
        fieldcount = int(self._get_row()[54:58])
        datalength = fieldnamelength * fieldcount
        # round up to nearest 80
        if datalength % 80:
            datalength += 80 - datalength % 80
        fielddata = self.filepath_or_buffer.read(datalength)
        fields = []
        obs_length = 0
        while len(fielddata) >= fieldnamelength:
            # pull data for one field
            field, fielddata = (fielddata[:fieldnamelength],
                                fielddata[fieldnamelength:])
            # rest at end gets ignored, so if field is short, pad out
            # to match struct pattern below
            field = field.ljust(140)
            fieldstruct = struct.unpack('>hhhh8s40s8shhh2s8shhl52s', field)
            field = dict(zip(_fieldkeys, fieldstruct))
            del field['_']
            field['ntype'] = types[field['ntype']]
            fl = field['field_length']
            if field['ntype'] == 'numeric' and ((fl < 2) or (fl > 8)):
                self.close()
                msg = "Floating field width {0} is not between 2 and 8."
                raise TypeError(msg.format(fl))
            # Strip padding from the string-valued entries; ints have no
            # .strip and are left as-is.
            for k, v in field.items():
                try:
                    field[k] = v.strip()
                except AttributeError:
                    pass
            obs_length += field['field_length']
            fields += [field]
        header = self._get_row()
        if not header == _correct_obs_header:
            self.close()
            raise ValueError("Observation header not found.")
        self.fields = fields
        self.record_length = obs_length
        self.record_start = self.filepath_or_buffer.tell()
        self.nobs = self._record_count()
        self.columns = [x['name'].decode() for x in self.fields]
        # Setup the dtype: one fixed-width bytes field per column.
        dtypel = []
        for i, field in enumerate(self.fields):
            dtypel.append(('s' + str(i), "S" + str(field['field_length'])))
        dtype = np.dtype(dtypel)
        self._dtype = dtype

    def __next__(self):
        # Iterator protocol: yield one chunk (or a single row by default).
        return self.read(nrows=self._chunksize or 1)

    def _record_count(self):
        """
        Get number of records in file.

        This is maybe suboptimal because we have to seek to the end of
        the file.

        Side effect: returns file position to record_start.
        """
        self.filepath_or_buffer.seek(0, 2)
        total_records_length = (self.filepath_or_buffer.tell() -
                                self.record_start)
        if total_records_length % 80 != 0:
            warnings.warn("xport file may be corrupted")
        if self.record_length > 80:
            self.filepath_or_buffer.seek(self.record_start)
            return total_records_length // self.record_length
        # Records fit within a card, so the last card may carry blank
        # padding; subtract any trailing 8-byte blanks before dividing.
        self.filepath_or_buffer.seek(-80, 2)
        last_card = self.filepath_or_buffer.read(80)
        last_card = np.frombuffer(last_card, dtype=np.uint64)
        # 8 byte blank
        ix = np.flatnonzero(last_card == 2314885530818453536)
        if len(ix) == 0:
            tail_pad = 0
        else:
            tail_pad = 8 * len(ix)
        self.filepath_or_buffer.seek(self.record_start)
        return (total_records_length - tail_pad) // self.record_length

    def get_chunk(self, size=None):
        """
        Reads lines from Xport file and returns as dataframe

        Parameters
        ----------
        size : int, defaults to None
            Number of lines to read. If None, reads whole file.

        Returns
        -------
        DataFrame
        """
        if size is None:
            size = self._chunksize
        return self.read(nrows=size)

    def _missing_double(self, vec):
        # Boolean mask of SAS missing-value sentinels: first byte in A-Z,
        # '_' or '.' and the remaining seven bytes all zero.
        v = vec.view(dtype='u1,u1,u2,u4')
        miss = (v['f1'] == 0) & (v['f2'] == 0) & (v['f3'] == 0)
        miss1 = (((v['f0'] >= 0x41) & (v['f0'] <= 0x5a)) |
                 (v['f0'] == 0x5f) | (v['f0'] == 0x2e))
        miss &= miss1
        return miss

    @Appender(_read_method_doc)
    def read(self, nrows=None):
        if nrows is None:
            nrows = self.nobs
        read_lines = min(nrows, self.nobs - self._lines_read)
        read_len = read_lines * self.record_length
        if read_len <= 0:
            self.close()
            raise StopIteration
        raw = self.filepath_or_buffer.read(read_len)
        data = np.frombuffer(raw, dtype=self._dtype, count=read_lines)
        df = pd.DataFrame(index=range(read_lines))
        for j, x in enumerate(self.columns):
            vec = data['s%d' % j]
            ntype = self.fields[j]['ntype']
            if ntype == "numeric":
                # Pad truncated floats, decode IBM floats, and blank out
                # missing-value sentinels.
                vec = _handle_truncated_float_vec(
                    vec, self.fields[j]['field_length'])
                miss = self._missing_double(vec)
                v = _parse_float_vec(vec)
                v[miss] = np.nan
            elif self.fields[j]['ntype'] == 'char':
                v = [y.rstrip() for y in vec]
                if compat.PY3:
                    if self._encoding is not None:
                        v = [y.decode(self._encoding) for y in v]
            df[x] = v
        if self._index is None:
            df.index = range(self._lines_read, self._lines_read + read_lines)
        else:
            df = df.set_index(self._index)
        self._lines_read += read_lines
        return df
| bsd-3-clause |
Ziqi-Li/bknqgis | pandas/pandas/tests/indexing/test_iloc.py | 2 | 22256 | """ test positional based indexing with iloc """
import pytest
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas.compat import lrange, lmap
from pandas import Series, DataFrame, date_range, concat, isna
from pandas.util import testing as tm
from pandas.tests.indexing.common import Base
class TestiLoc(Base):
    def test_iloc_exceeds_bounds(self):
        # GH6296
        # iloc should allow indexers that exceed the bounds
        df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
        expected = df
        # lists of positions should raise IndexError!
        with tm.assert_raises_regex(IndexError,
                                    'positional indexers '
                                    'are out-of-bounds'):
            df.iloc[:, [0, 1, 2, 3, 4, 5]]
        pytest.raises(IndexError, lambda: df.iloc[[1, 30]])
        pytest.raises(IndexError, lambda: df.iloc[[1, -30]])
        pytest.raises(IndexError, lambda: df.iloc[[100]])
        # same for a Series column
        s = df['A']
        pytest.raises(IndexError, lambda: s.iloc[[100]])
        pytest.raises(IndexError, lambda: s.iloc[[-100]])
        # still raise on a single indexer
        msg = 'single positional indexer is out-of-bounds'
        with tm.assert_raises_regex(IndexError, msg):
            df.iloc[30]
        pytest.raises(IndexError, lambda: df.iloc[-30])
        # GH10779
        # single positive/negative indexer exceeding Series bounds should raise
        # an IndexError
        with tm.assert_raises_regex(IndexError, msg):
            s.iloc[30]
        pytest.raises(IndexError, lambda: s.iloc[-30])
        # slices are ok: out-of-bounds endpoints are clipped, not errors.
        # Each comment names the relation of start/stop to 0 and len.
        result = df.iloc[:, 4:10]  # 0 < start < len < stop
        expected = df.iloc[:, 4:]
        tm.assert_frame_equal(result, expected)
        result = df.iloc[:, -4:-10]  # stop < 0 < start < len
        expected = df.iloc[:, :0]
        tm.assert_frame_equal(result, expected)
        result = df.iloc[:, 10:4:-1]  # 0 < stop < len < start (down)
        expected = df.iloc[:, :4:-1]
        tm.assert_frame_equal(result, expected)
        result = df.iloc[:, 4:-10:-1]  # stop < 0 < start < len (down)
        expected = df.iloc[:, 4::-1]
        tm.assert_frame_equal(result, expected)
        result = df.iloc[:, -10:4]  # start < 0 < stop < len
        expected = df.iloc[:, :4]
        tm.assert_frame_equal(result, expected)
        result = df.iloc[:, 10:4]  # 0 < stop < len < start
        expected = df.iloc[:, :0]
        tm.assert_frame_equal(result, expected)
        result = df.iloc[:, -10:-11:-1]  # stop < start < 0 < len (down)
        expected = df.iloc[:, :0]
        tm.assert_frame_equal(result, expected)
        result = df.iloc[:, 10:11]  # 0 < len < start < stop
        expected = df.iloc[:, :0]
        tm.assert_frame_equal(result, expected)
        # slice bounds exceeding is ok
        result = s.iloc[18:30]
        expected = s.iloc[18:]
        tm.assert_series_equal(result, expected)
        result = s.iloc[30:]
        expected = s.iloc[:0]
        tm.assert_series_equal(result, expected)
        result = s.iloc[30::-1]
        expected = s.iloc[::-1]
        tm.assert_series_equal(result, expected)
        # doc example
        def check(result, expected):
            # smoke-test repr/dtypes before comparing
            str(result)
            result.dtypes
            tm.assert_frame_equal(result, expected)
        dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
        check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
        check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
        check(dfl.iloc[4:6], dfl.iloc[[4]])
        pytest.raises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
        pytest.raises(IndexError, lambda: dfl.iloc[:, 4])
def test_iloc_getitem_int(self):
# integer
self.check_result('integer', 'iloc', 2, 'ix',
{0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
self.check_result('integer', 'iloc', 2, 'indexer', 2,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int(self):
# neg integer
self.check_result('neg int', 'iloc', -1, 'ix',
{0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
self.check_result('neg int', 'iloc', -1, 'indexer', -1,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_list_int(self):
# list of ints
self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
{0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [2], 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
# array of ints (GH5006), make sure that a single indexer is returning
# the correct type
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
{0: [0, 2, 4],
1: [0, 3, 6],
2: [0, 4, 8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([2]), 'ix',
{0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
[0, 1, 2],
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_neg_int_can_reach_first_index(self):
# GH10547 and GH10779
# negative integers should be able to reach index 0
df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
s = df['A']
expected = df.iloc[0]
result = df.iloc[-3]
tm.assert_series_equal(result, expected)
expected = df.iloc[[0]]
result = df.iloc[[-3]]
tm.assert_frame_equal(result, expected)
expected = s.iloc[0]
result = s.iloc[-3]
assert result == expected
expected = s.iloc[[0]]
result = s.iloc[[-3]]
tm.assert_series_equal(result, expected)
# check the length 1 Series case highlighted in GH10547
expected = pd.Series(['a'], index=['A'])
result = expected.iloc[[-1]]
tm.assert_series_equal(result, expected)
def test_iloc_getitem_dups(self):
# no dups in panel (bug?)
self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
{0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
objs=['series', 'frame'], typs=['ints', 'uints'])
# GH 6766
df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
df = concat([df1, df2], axis=1)
# cross-sectional indexing
result = df.iloc[0, 0]
assert isna(result)
result = df.iloc[0, :]
expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
name=0)
tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
def test_iloc_getitem_bool(self):
# boolean indexers
b = [True, False, True, False, ]
self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
self.check_result('bool', 'iloc', b, 'ix', b,
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice(self):
# slices
self.check_result('slice', 'iloc', slice(1, 3), 'ix',
{0: [2, 4], 1: [3, 6], 2: [4, 8]},
typs=['ints', 'uints'])
self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
slice(1, 3),
typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
fails=IndexError)
def test_iloc_getitem_slice_dups(self):
df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
columns=['A', 'C'])
# axis=1
df = concat([df1, df2], axis=1)
tm.assert_frame_equal(df.iloc[:, :4], df1)
tm.assert_frame_equal(df.iloc[:, 4:], df2)
df = concat([df2, df1], axis=1)
tm.assert_frame_equal(df.iloc[:, :2], df2)
tm.assert_frame_equal(df.iloc[:, 2:], df1)
exp = concat([df2, df1.iloc[:, [0]]], axis=1)
tm.assert_frame_equal(df.iloc[:, 0:3], exp)
# axis=0
df = concat([df, df], axis=0)
tm.assert_frame_equal(df.iloc[0:10, :2], df2)
tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
tm.assert_frame_equal(df.iloc[10:, :2], df2)
tm.assert_frame_equal(df.iloc[10:, 2:], df1)
def test_iloc_setitem(self):
df = self.frame_ints
df.iloc[1, 1] = 1
result = df.iloc[1, 1]
assert result == 1
df.iloc[:, 2:3] = 0
expected = df.iloc[:, 2:3]
result = df.iloc[:, 2:3]
tm.assert_frame_equal(result, expected)
# GH5771
s = Series(0, index=[4, 5, 6])
s.iloc[1:2] += 1
expected = Series([0, 1, 0], index=[4, 5, 6])
tm.assert_series_equal(s, expected)
def test_iloc_setitem_list(self):
    """Augmented assignment with list-of-position indexers on both axes."""
    labels = ["A", "B", "C"]
    df = DataFrame(np.arange(9).reshape((3, 3)), index=labels,
                   columns=labels)
    df.iloc[[0, 1], [1, 2]]          # plain lookup: must be side-effect free
    df.iloc[[0, 1], [1, 2]] += 100   # bump the positional 2x2 sub-block
    expected = DataFrame(
        np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
        index=labels, columns=labels)
    tm.assert_frame_equal(df, expected)
def test_iloc_setitem_pandas_object(self):
    """GH 17193: Series/Index indexers in iloc.__setitem__ (old-numpy issue)."""
    base = Series([0, 1, 2, 3])
    expected = Series([0, -1, -2, 3])

    # both a Series and an Index of positions must be accepted
    for indexer in (Series([1, 2]), pd.Index([1, 2])):
        ser = base.copy()
        ser.iloc[indexer] = [-1, -2]
        tm.assert_series_equal(ser, expected)
def test_iloc_setitem_dups(self):
    # GH 6766
    # iloc with a mask aligning from another iloc.
    # Concatenating two A/B frames on axis=1 yields duplicate column
    # labels: A, B, A, B -- only positional indexing is unambiguous.
    df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
    df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
    df = concat([df1, df2], axis=1)

    # Filling the NaN in the first 'A' column (position 0) from the
    # second 'A' column (position 2) should match a plain fillna(3).
    expected = df.fillna(3)
    expected['A'] = expected['A'].astype('float64')
    inds = np.isnan(df.iloc[:, 0])
    mask = inds[inds].index
    df.iloc[mask, 0] = df.iloc[mask, 2]
    tm.assert_frame_equal(df, expected)

    # del a dup column across blocks: removes BOTH 'A' columns
    expected = DataFrame({0: [1, 2], 1: [3, 4]})
    expected.columns = ['B', 'B']
    del df['A']
    tm.assert_frame_equal(df, expected)

    # assign back to self -- a no-op even with duplicate column labels
    df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
    tm.assert_frame_equal(df, expected)

    # reversed x 2 -- swapping the rows twice restores the original
    # (reset_index defeats label alignment so the swap is positional)
    df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
        drop=True)
    df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
        drop=True)
    tm.assert_frame_equal(df, expected)
def test_iloc_getitem_frame(self):
    # Frame with even integer row labels (0..18) and even integer column
    # labels (0..6): position i corresponds to label 2*i, so every .iloc
    # result can be cross-checked against the (deprecated) label-based .ix.
    df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
                   columns=lrange(0, 8, 2))

    # scalar row
    result = df.iloc[2]
    with catch_warnings(record=True):  # silence the .ix deprecation warning
        exp = df.ix[4]
    tm.assert_series_equal(result, exp)

    # scalar cell
    result = df.iloc[2, 2]
    with catch_warnings(record=True):
        exp = df.ix[4, 4]
    assert result == exp

    # slice
    result = df.iloc[4:8]
    with catch_warnings(record=True):
        expected = df.ix[8:14]
    tm.assert_frame_equal(result, expected)
    result = df.iloc[:, 2:3]
    with catch_warnings(record=True):
        expected = df.ix[:, 4:5]
    tm.assert_frame_equal(result, expected)

    # list of integers
    result = df.iloc[[0, 1, 3]]
    with catch_warnings(record=True):
        expected = df.ix[[0, 2, 6]]
    tm.assert_frame_equal(result, expected)
    result = df.iloc[[0, 1, 3], [0, 1]]
    with catch_warnings(record=True):
        expected = df.ix[[0, 2, 6], [0, 2]]
    tm.assert_frame_equal(result, expected)

    # neg indicies (position -1 maps to labels 18 / 6)
    result = df.iloc[[-1, 1, 3], [-1, 1]]
    with catch_warnings(record=True):
        expected = df.ix[[18, 2, 6], [6, 2]]
    tm.assert_frame_equal(result, expected)

    # dups indicies (repeated positions repeat rows)
    result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
    with catch_warnings(record=True):
        expected = df.ix[[18, 18, 2, 6], [6, 2]]
    tm.assert_frame_equal(result, expected)

    # with index-like (an integer Index is still treated positionally)
    s = Series(index=lrange(1, 5))
    result = df.iloc[s.index]
    with catch_warnings(record=True):
        expected = df.ix[[2, 4, 6, 8]]
    tm.assert_frame_equal(result, expected)
def test_iloc_getitem_labelled_frame(self):
    """iloc stays purely positional on a label-indexed frame."""
    df = DataFrame(np.random.randn(10, 4),
                   index=list('abcdefghij'), columns=list('ABCD'))

    # positional scalar lookup equals the corresponding label lookup
    assert df.iloc[1, 1] == df.loc['b', 'B']

    # positional column slice equals label selection of that column
    tm.assert_frame_equal(df.iloc[:, 2:3], df.loc[:, ['C']])

    # negative positions count from the end
    assert df.iloc[-1, -1] == df.loc['j', 'D']

    # out-of-bounds positions raise IndexError
    pytest.raises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
    # labels are rejected outright by iloc
    pytest.raises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
def test_iloc_getitem_doc_issue(self):
    # multi axis slicing issue with single block
    # surfaced in GH 6059
    arr = np.random.randn(6, 4)
    index = date_range('20130101', periods=6)
    columns = list('ABCD')
    df = DataFrame(arr, index=index, columns=columns)

    # defines ref_locs (forces internal block reference locations to exist)
    df.describe()

    result = df.iloc[3:5, 0:2]
    str(result)        # repr must not raise
    result.dtypes      # dtype access must not raise
    expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
                         columns=columns[0:2])
    tm.assert_frame_equal(result, expected)

    # for dups: every column carries the same label 'a'
    df.columns = list('aaaa')
    result = df.iloc[3:5, 0:2]
    str(result)
    result.dtypes
    expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
                         columns=list('aa'))
    tm.assert_frame_equal(result, expected)

    # related: non-default integer axis labels
    arr = np.random.randn(6, 4)
    index = list(range(0, 12, 2))
    columns = list(range(0, 8, 2))
    df = DataFrame(arr, index=index, columns=columns)

    # touch the internal block manager locations (regression exercise
    # against pandas internals; _data is the pre-1.0 BlockManager attr)
    df._data.blocks[0].mgr_locs
    result = df.iloc[1:5, 2:4]
    str(result)
    result.dtypes
    expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
                         columns=columns[2:4])
    tm.assert_frame_equal(result, expected)
def test_iloc_setitem_series(self):
    """iloc assignment on a labelled frame and on integer-indexed Series."""
    df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
                   columns=list('ABCD'))
    df.iloc[1, 1] = 1
    assert df.iloc[1, 1] == 1
    df.iloc[:, 2:3] = 0
    tm.assert_frame_equal(df.iloc[:, 2:3], df.iloc[:, 2:3])

    # Series with even integer labels: iloc ignores them
    ser = Series(np.random.randn(10), index=lrange(0, 20, 2))
    ser.iloc[1] = 1
    assert ser.iloc[1] == 1
    ser.iloc[:4] = 0
    tm.assert_series_equal(ser.iloc[:4], ser.iloc[:4])

    # stepped positional assignment interleaves the two value lists
    ser = Series([-1] * 6)
    ser.iloc[0::2] = [0, 2, 4]
    ser.iloc[1::2] = [1, 3, 5]
    tm.assert_series_equal(ser, Series([0, 1, 2, 3, 4, 5]))
def test_iloc_setitem_list_of_lists(self):
    """GH 7551: list-of-list assignment in mixed vs. single-dtype frames."""
    # single dtype (both columns int64)
    df = DataFrame(dict(A=np.arange(5, dtype='int64'),
                        B=np.arange(5, 10, dtype='int64')))
    df.iloc[2:4] = [[10, 11], [12, 13]]
    tm.assert_frame_equal(
        df, DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9])))

    # mixed dtype (object strings + int64)
    df = DataFrame(
        dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
    df.iloc[2:4] = [['x', 11], ['y', 13]]
    tm.assert_frame_equal(
        df, DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
                           B=[5, 6, 11, 13, 9])))
def test_iloc_mask(self):
    """GH 3631: boolean-Series masks must be rejected (or aligned) by iloc.

    A Series mask whose index cannot be interpreted positionally raises;
    a plain boolean ndarray is fine.  The ``expected`` table enumerates
    (mask-index-source, accessor) combinations mapping to either the
    expected sum (as a binary string) or the expected error message.
    """
    df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
    mask = (df.a % 2 == 0)
    # string-labelled boolean Series: not positional -> ValueError
    pytest.raises(ValueError, df.iloc.__getitem__, tuple([mask]))
    mask.index = lrange(len(mask))
    # integer-labelled boolean Series still not allowed by iloc
    pytest.raises(NotImplementedError, df.iloc.__getitem__,
                  tuple([mask]))

    # ndarray ok
    result = df.iloc[np.array([True] * len(mask), dtype=bool)]
    tm.assert_frame_equal(result, df)

    # the possibilities: expected outcome per (index-source, accessor)
    locs = np.arange(4)
    nums = 2 ** locs
    reps = lmap(bin, nums)
    df = DataFrame({'locs': locs, 'nums': nums}, reps)
    expected = {
        (None, ''): '0b1100',
        (None, '.loc'): '0b1100',
        (None, '.iloc'): '0b1100',
        ('index', ''): '0b11',
        ('index', '.loc'): '0b11',
        ('index', '.iloc'): ('iLocation based boolean indexing '
                             'cannot use an indexable as a mask'),
        ('locs', ''): 'Unalignable boolean Series provided as indexer '
                      '(index of the boolean Series and of the indexed '
                      'object do not match',
        ('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
                          '(index of the boolean Series and of the '
                          'indexed object do not match',
        ('locs', '.iloc'): ('iLocation based boolean indexing on an '
                            'integer type is not available'),
    }

    # UserWarnings from reindex of a boolean mask
    with catch_warnings(record=True):
        for idx in [None, 'index', 'locs']:
            mask = (df.nums > 2).values
            if idx:
                # wrap the ndarray in a Series with a REVERSED index so
                # that alignment (or the failure to align) is exercised
                mask = Series(mask, list(reversed(getattr(df, idx))))
            for method in ['', '.loc', '.iloc']:
                try:
                    if method:
                        accessor = getattr(df, method[1:])
                    else:
                        accessor = df
                    ans = str(bin(accessor[mask]['nums'].sum()))
                except Exception as e:
                    # failure path: compare the error text instead
                    ans = str(e)
                key = tuple([idx, method])
                r = expected.get(key)
                if r != ans:
                    # BUG FIX: the original interpolated (key, ans, r),
                    # putting the ACTUAL value in the "does not match"
                    # slot and the EXPECTED value in the "received" slot.
                    # Report expected first, then what was received.
                    raise AssertionError(
                        "[%s] does not match [%s], received [%s]"
                        % (key, r, ans))
def test_iloc_non_unique_indexing(self):
    # GH 4017, non-unique indexing (on the axis)
    df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
    idx = np.array(lrange(30)) * 99
    expected = df.iloc[idx]

    # stacking df three times duplicates every row label; iloc must
    # still select purely by position
    df3 = pd.concat([df, 2 * df, 3 * df])
    result = df3.iloc[idx]
    tm.assert_frame_equal(result, expected)

    # label-based .loc on the duplicated index: each in-range label
    # pulls all three duplicated rows (x1, x2, x3); out-of-range labels
    # yield all-NaN rows
    df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
    df2 = pd.concat([df2, 2 * df2, 3 * df2])

    sidx = df2.index.to_series()
    expected = df2.iloc[idx[idx <= sidx.max()]]

    new_list = []
    for r, s in expected.iterrows():
        # each selected row appears once from every stacked copy
        new_list.append(s)
        new_list.append(s * 2)
        new_list.append(s * 3)

    expected = DataFrame(new_list)
    expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
                          ])
    # NOTE(review): .loc with missing labels is deprecated/raises in
    # modern pandas -- this path relies on the old reindex behavior.
    result = df2.loc[idx]
    tm.assert_frame_equal(result, expected, check_index_type=False)
def test_iloc_empty_list_indexer_is_ok(self):
    """Empty-list indexers behave like zero-length slices on either axis."""
    from pandas.util.testing import makeCustomDataframe as mkdf
    df = mkdf(5, 2)
    checks = dict(check_index_type=True, check_column_type=True)
    # vertical empty
    tm.assert_frame_equal(df.iloc[:, []], df.iloc[:, :0], **checks)
    # horizontal empty (explicit row axis)
    tm.assert_frame_equal(df.iloc[[], :], df.iloc[:0, :], **checks)
    # horizontal empty (implicit row axis)
    tm.assert_frame_equal(df.iloc[[]], df.iloc[:0, :], **checks)
def test_identity_slice_returns_new_object(self):
    """GH13873: ``.iloc[:]`` returns a new object that shares its data."""
    frame = DataFrame({'a': [1, 2, 3]})
    frame_view = frame.iloc[:]
    assert frame_view is not frame
    # shallow copy: writes through the original are visible in the slice
    frame['a'] = [4, 4, 4]
    assert (frame_view['a'] == 4).all()

    ser = Series([1, 2, 3, 4, 5, 6])
    ser_view = ser.iloc[:]
    assert ser_view is not ser
    # shallow copy here as well
    ser[:3] = [7, 8, 9]
    assert all(ser_view[:3] == [7, 8, 9])
| gpl-2.0 |
mblondel/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. "cityblock" distance) is much smaller than its
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate at all waveform 1 and 2,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
# Fix the RNG seed so the generated waveforms are reproducible
np.random.seed(0)

# Generate waveform data: each sample is observed at `n_features`
# time points spanning [0, pi]
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
    """Square wave: the sign of cos(x), elementwise (+1, 0, or -1)."""
    cosine = np.cos(x)
    return np.sign(cosine)
X = list()
y = list()
# Three waveform classes as (phase, amplitude) pairs. Classes 0 and 1
# share the same phase (.5) and differ only in amplitude, i.e. they are
# proportional -- the property the cosine metric cannot distinguish.
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
    for _ in range(30):  # 30 noisy observations per class
        phase_noise = .01 * np.random.normal()
        amplitude_noise = .04 * np.random.normal()
        additional_noise = 1 - 2 * np.random.rand(n_features)
        # Make the noise sparse: only entries with |value| >= .997 survive.
        # NOTE(review): the module docstring claims ~6% of time points are
        # noisy; this threshold keeps ~0.3% -- confirm which is intended.
        additional_noise[np.abs(additional_noise) < .997] = 0

        X.append(12 * ((a + amplitude_noise)
                 * (sqr(6 * (t + phi + phase_noise)))
                 + additional_noise))
        y.append(i)

X = np.array(X)
y = np.array(y)
n_clusters = 3

labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')

# Plot the ground-truth labelling: one color per true class; only the
# first line of each class carries the legend label
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
                   labels):
    lines = plt.plot(X[y == l].T, c=c, alpha=.5)
    lines[0].set_label(n)

plt.legend(loc='best')

plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the inter-class average-distance matrix for each metric
# NOTE(review): the enumerate index is unused in this loop
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
    avg_dist = np.zeros((n_clusters, n_clusters))
    plt.figure(figsize=(5, 4.5))
    for i in range(n_clusters):
        for j in range(n_clusters):
            # mean pairwise distance between members of class i and class j
            avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
                                                metric=metric).mean()
    # normalize so the matrices for different metrics are comparable
    avg_dist /= avg_dist.max()
    for i in range(n_clusters):
        for j in range(n_clusters):
            # annotate each cell with its normalized distance
            plt.text(i, j, '%5.3f' % avg_dist[i, j],
                     verticalalignment='center',
                     horizontalalignment='center')

    plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
               vmin=0)
    plt.xticks(range(n_clusters), labels, rotation=45)
    plt.yticks(range(n_clusters), labels)
    plt.colorbar()
    plt.suptitle("Interclass %s distances" % metric, size=18)
    plt.tight_layout()
# Plot clustering results: fit average-linkage agglomerative clustering
# with each metric and color the curves by the learned cluster labels
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
    model = AgglomerativeClustering(n_clusters=n_clusters,
                                    linkage="average", affinity=metric)
    model.fit(X)
    plt.figure()
    plt.axes([0, 0, 1, 1])
    for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
        plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
    plt.axis('tight')
    plt.axis('off')
    plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)


plt.show()
| bsd-3-clause |
ScottFreeLLC/AlphaPy | alphapy/sport_flow.py | 1 | 29317 | ################################################################################
#
# Package : AlphaPy
# Module : sport_flow
# Created : July 11, 2013
#
# Copyright 2017 ScottFree Analytics LLC
# Mark Conway & Robert D. Scott II
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# Imports
#
print(__doc__)
from alphapy.__main__ import main_pipeline
from alphapy.frame import read_frame
from alphapy.frame import write_frame
from alphapy.globals import ModelType
from alphapy.globals import Partition, datasets
from alphapy.globals import PSEP, SSEP, USEP
from alphapy.globals import WILDCARD
from alphapy.model import get_model_config
from alphapy.model import Model
from alphapy.space import Space
from alphapy.utilities import valid_date
import argparse
import datetime
from itertools import groupby
import logging
import math
import numpy as np
import os
import pandas as pd
import sys
import warnings
warnings.simplefilter(action='ignore', category=DeprecationWarning)
warnings.simplefilter(action='ignore', category=FutureWarning)
import yaml
#
# Initialize logger
#

# Module-level logger; handlers/level are configured in main()
logger = logging.getLogger(__name__)


#
# Sports Fields
#
# The following fields are repeated for:
#   1. 'home'
#   2. 'away'
#   3. 'delta'
#
# Note that [Target]s will not be merged into the Game table;
# these targets will be predictors in the Game table that are
# generated after each game result. All of the fields below
# are predictors and are generated a priori, i.e., we calculate
# deltas from the last previously played game for each team and
# these data go into the row for the next game to be played.
#

# Per-team feature names mapped to their column types; columns are
# zero-initialized by add_features and filled by generate_team_frame.
sports_dict = {'wins' : int,
               'losses' : int,
               'ties' : int,
               'days_since_first_game' : int,
               'days_since_previous_game' : int,
               'won_on_points' : bool,
               'lost_on_points' : bool,
               'won_on_spread' : bool,
               'lost_on_spread' : bool,
               'point_win_streak' : int,
               'point_loss_streak' : int,
               'point_margin_game' : int,
               'point_margin_season' : int,
               'point_margin_season_avg' : float,
               'point_margin_streak' : int,
               'point_margin_streak_avg' : float,
               'point_margin_ngames' : int,
               'point_margin_ngames_avg' : float,
               'cover_win_streak' : int,
               'cover_loss_streak' : int,
               'cover_margin_game' : float,
               'cover_margin_season' : float,
               'cover_margin_season_avg' : float,
               'cover_margin_streak' : float,
               'cover_margin_streak_avg' : float,
               'cover_margin_ngames' : float,
               'cover_margin_ngames_avg' : float,
               'total_points' : int,
               'overunder_margin' : float,
               'over' : bool,
               'under' : bool,
               'over_streak' : int,
               'under_streak' : int,
               'overunder_season' : float,
               'overunder_season_avg' : float,
               'overunder_streak' : float,
               'overunder_streak_avg' : float,
               'overunder_ngames' : float,
               'overunder_ngames_avg' : float}


#
# These are the leaders. Generally, we try to predict one of these
# variables as the target and lag the remaining ones.
#

# Game-outcome fields (known only after a game is played); candidates
# for the prediction target, with the rest used as lagged predictors.
game_dict = {'point_margin_game' : int,
             'won_on_points' : bool,
             'lost_on_points' : bool,
             'cover_margin_game' : float,
             'won_on_spread' : bool,
             'lost_on_spread' : bool,
             'overunder_margin' : float,
             'over' : bool,
             'under' : bool}
#
# Function get_sport_config
#

def get_sport_config():
    r"""Read the configuration file for SportFlow.

    Reads ``./config/sport.yml`` relative to the working directory.

    Returns
    -------
    specs : dict
        The parameters for controlling SportFlow.
    """
    # Read the configuration file
    full_path = SSEP.join(['.', 'config', 'sport.yml'])
    with open(full_path, 'r') as ymlfile:
        # safe_load: yaml.load without an explicit Loader is deprecated
        # and can construct arbitrary Python objects from the stream.
        cfg = yaml.safe_load(ymlfile)

    # Store configuration parameters in dictionary
    specs = {}

    # Section: sport
    specs['league'] = cfg['sport']['league']
    specs['points_max'] = cfg['sport']['points_max']
    specs['points_min'] = cfg['sport']['points_min']
    specs['random_scoring'] = cfg['sport']['random_scoring']
    specs['rolling_window'] = cfg['sport']['rolling_window']
    specs['seasons'] = cfg['sport']['seasons']

    # Log the sports parameters
    logger.info('SPORT PARAMETERS:')
    logger.info('league = %s', specs['league'])
    logger.info('points_max = %d', specs['points_max'])
    logger.info('points_min = %d', specs['points_min'])
    logger.info('random_scoring = %r', specs['random_scoring'])
    logger.info('rolling_window = %d', specs['rolling_window'])
    logger.info('seasons = %s', specs['seasons'])

    # Game Specifications
    return specs
#
# Function get_point_margin
#

def get_point_margin(row, score, opponent_score):
    r"""Return the score differential for one game row.

    Parameters
    ----------
    row : mapping (e.g., pandas.Series)
        A single game record holding both score fields.
    score : str
        Key of this team's score.
    opponent_score : str
        Key of the opposing team's score.

    Returns
    -------
    point_margin : int or float
        ``row[score] - row[opponent_score]``, or 0 when either score
        is NaN (e.g., the game has not been played yet).
    """
    own = row[score]
    other = row[opponent_score]
    if math.isnan(own) or math.isnan(other):
        return 0
    return own - other
#
# Function get_wins
#

def get_wins(point_margin):
    r"""Return 1 when the point margin is positive (a win), else 0.

    Parameters
    ----------
    point_margin : int
        The point margin; may be positive, zero, or negative.

    Returns
    -------
    won : int
        1 for a win, 0 otherwise.
    """
    return int(point_margin > 0)
#
# Function get_losses
#

def get_losses(point_margin):
    r"""Return 1 when the point margin is negative (a loss), else 0.

    Parameters
    ----------
    point_margin : int
        The point margin; may be positive, zero, or negative.

    Returns
    -------
    lost : int
        1 for a loss, 0 otherwise.
    """
    return int(point_margin < 0)
#
# Function get_ties
#

def get_ties(point_margin):
    r"""Return 1 when the point margin is zero (a tie), else 0.

    Parameters
    ----------
    point_margin : int
        The point margin; may be positive, zero, or negative.

    Returns
    -------
    tied : int
        1 for a tie, 0 otherwise.
    """
    return int(point_margin == 0)
#
# Function get_day_offset
#

def get_day_offset(date_vector):
    r"""Compute, for every date, the whole-day offset from the first date.

    Parameters
    ----------
    date_vector : pandas.Series
        The date column (anything ``pd.to_datetime`` accepts).

    Returns
    -------
    day_offset : pandas.Series
        Integer day counts relative to the first entry.
    """
    # Convert once; the original converted the already-converted series
    # a second time, which was redundant.
    dv = pd.to_datetime(date_vector)
    # .dt.days extracts whole days portably across pandas versions
    # (astype('timedelta64[D]') is rejected by pandas >= 2.0).
    # iloc[0] anchors on the first row by position, not by label.
    day_offset = (dv - dv.iloc[0]).dt.days
    return day_offset
#
# Function get_series_diff
#

def get_series_diff(series):
    r"""First-difference a series, with the leading element set to 0.

    Parameters
    ----------
    series : pandas.Series
        The series for the ``diff`` operation.

    Returns
    -------
    new_series : pandas.Series
        ``series.diff()`` with the leading NaN replaced by 0.
    """
    # The original pre-allocated ``pd.Series(len(series))`` and then
    # immediately overwrote it -- a dead assignment, removed here.
    new_series = series.diff()
    if len(new_series) > 0:
        # Positional access: the original label-based ``new_series[0] = 0``
        # would APPEND a label-0 row when the index does not start at 0.
        new_series.iloc[0] = 0
    return new_series
#
# Function get_streak
#

def get_streak(series, start_index, window):
    r"""Count the run of consecutive True values ending at ``start_index``.

    Parameters
    ----------
    series : sequence of bool
        Boolean values to scan backwards from ``start_index``.
    start_index : int
        Position at which the streak ends (scanned toward position 0).
    window : int
        Maximum lookback span; values <= 0 mean the whole series length.

    Returns
    -------
    streak : int
        Length of the run of True values within the window.
        NOTE(review): the window test excludes the position where
        ``start_index - pos + 1 == window``, so a full-length run is
        capped at ``window - 1`` -- confirm this boundary is intended.
    """
    if window <= 0:
        window = len(series)
    streak = 0
    # walk backwards; `offset` is 1 at start_index, 2 one step back, ...
    for offset, pos in enumerate(range(start_index, -1, -1), start=1):
        if offset >= window or not series[pos]:
            break
        streak += 1
    return streak
#
# Function add_features
#

def add_features(frame, fdict, flen, prefix=''):
    r"""Append zero-initialized feature columns described by ``fdict``.

    Parameters
    ----------
    frame : pandas.DataFrame
        The dataframe to extend with the new features.
    fdict : dict
        Maps column name (key) to column type (``int``, ``float``, ``bool``).
    flen : int
        Number of rows to generate for each new column.
    prefix : str, optional
        When given, each column is named ``'<prefix>.<key>'``.

    Returns
    -------
    frame : pandas.DataFrame
        The dataframe with the added features.

    Raises
    ------
    ValueError
        If a value in ``fdict`` is not ``int``, ``float``, or ``bool``.
    """
    # zero value for each supported column type
    fill_values = {int: 0, float: 0.0, bool: False}
    for key, ftype in fdict.items():
        colname = PSEP.join([prefix, key]) if prefix else key
        if ftype not in fill_values:
            raise ValueError("Type to generate feature series not found")
        frame[colname] = pd.Series([fill_values[ftype]] * flen)
    return frame
#
# Function generate_team_frame
#

def generate_team_frame(team, tf, home_team, away_team, window):
    r"""Calculate statistics for each team.

    Parameters
    ----------
    team : str
        The abbreviation for the team.
    tf : pandas.DataFrame
        The initial team frame (games in chronological order).
    home_team : str
        Label for the home team column.
    away_team : str
        Label for the away team column.
    window : int
        The value for the rolling window to calculate means and sums.

    Returns
    -------
    tf : pandas.DataFrame
        The completed team frame.
    """
    # Initialize new features (zero-filled columns for every sports_dict key)
    tf = add_features(tf, sports_dict, len(tf))
    # Daily Offsets
    tf['days_since_first_game'] = get_day_offset(tf['date'])
    tf['days_since_previous_game'] = get_series_diff(tf['days_since_first_game'])
    # Team Loop: walk the games in order, accumulating per-game results
    for index, row in tf.iterrows():
        # Margin and betting line from this team's perspective; the line
        # is negated when the team is the visitor.
        if team == row[home_team]:
            tf['point_margin_game'].at[index] = get_point_margin(row, 'home.score', 'away.score')
            line = row['line']
        elif team == row[away_team]:
            tf['point_margin_game'].at[index] = get_point_margin(row, 'away.score', 'home.score')
            line = -row['line']
        else:
            raise KeyError("Team not found in Team Frame")
        # Cumulative win/loss/tie record
        if index == 0:
            tf['wins'].at[index] = get_wins(tf['point_margin_game'].at[index])
            tf['losses'].at[index] = get_losses(tf['point_margin_game'].at[index])
            tf['ties'].at[index] = get_ties(tf['point_margin_game'].at[index])
        else:
            tf['wins'].at[index] = tf['wins'].at[index-1] + get_wins(tf['point_margin_game'].at[index])
            tf['losses'].at[index] = tf['losses'].at[index-1] + get_losses(tf['point_margin_game'].at[index])
            tf['ties'].at[index] = tf['ties'].at[index-1] + get_ties(tf['point_margin_game'].at[index])
        # Outright and against-the-spread results for this game.
        # NOTE(review): a push (cover margin == 0) counts as lost_on_spread.
        tf['won_on_points'].at[index] = True if tf['point_margin_game'].at[index] > 0 else False
        tf['lost_on_points'].at[index] = True if tf['point_margin_game'].at[index] < 0 else False
        tf['cover_margin_game'].at[index] = tf['point_margin_game'].at[index] + line
        tf['won_on_spread'].at[index] = True if tf['cover_margin_game'].at[index] > 0 else False
        tf['lost_on_spread'].at[index] = True if tf['cover_margin_game'].at[index] <= 0 else False
        # Totals (over/under); skipped when scores or the total line are
        # NaN, leaving the zero defaults from add_features in place.
        nans = math.isnan(row['home.score']) or math.isnan(row['away.score'])
        if not nans:
            tf['total_points'].at[index] = row['home.score'] + row['away.score']
        nans = math.isnan(row['over_under'])
        if not nans:
            tf['overunder_margin'].at[index] = tf['total_points'].at[index] - row['over_under']
        tf['over'].at[index] = True if tf['overunder_margin'].at[index] > 0 else False
        tf['under'].at[index] = True if tf['overunder_margin'].at[index] < 0 else False
        # Current streak lengths ending at this game (window 0 = no limit)
        tf['point_win_streak'].at[index] = get_streak(tf['won_on_points'], index, 0)
        tf['point_loss_streak'].at[index] = get_streak(tf['lost_on_points'], index, 0)
        tf['cover_win_streak'].at[index] = get_streak(tf['won_on_spread'], index, 0)
        tf['cover_loss_streak'].at[index] = get_streak(tf['lost_on_spread'], index, 0)
        tf['over_streak'].at[index] = get_streak(tf['over'], index, 0)
        tf['under_streak'].at[index] = get_streak(tf['under'], index, 0)
        # Handle the streaks: sum/average each margin over the active
        # streak (falling back to the single current game).
        if tf['point_win_streak'].at[index] > 0:
            streak = tf['point_win_streak'].at[index]
        elif tf['point_loss_streak'].at[index] > 0:
            streak = tf['point_loss_streak'].at[index]
        else:
            streak = 1
        tf['point_margin_streak'].at[index] = tf['point_margin_game'][index-streak+1:index+1].sum()
        tf['point_margin_streak_avg'].at[index] = tf['point_margin_game'][index-streak+1:index+1].mean()
        if tf['cover_win_streak'].at[index] > 0:
            streak = tf['cover_win_streak'].at[index]
        elif tf['cover_loss_streak'].at[index] > 0:
            streak = tf['cover_loss_streak'].at[index]
        else:
            streak = 1
        tf['cover_margin_streak'].at[index] = tf['cover_margin_game'][index-streak+1:index+1].sum()
        tf['cover_margin_streak_avg'].at[index] = tf['cover_margin_game'][index-streak+1:index+1].mean()
        if tf['over_streak'].at[index] > 0:
            streak = tf['over_streak'].at[index]
        elif tf['under_streak'].at[index] > 0:
            streak = tf['under_streak'].at[index]
        else:
            streak = 1
        tf['overunder_streak'].at[index] = tf['overunder_margin'][index-streak+1:index+1].sum()
        tf['overunder_streak_avg'].at[index] = tf['overunder_margin'][index-streak+1:index+1].mean()
    # Rolling and Expanding Variables: season-to-date cumulative values
    # and last-`window`-games sums/averages, computed vectorized.
    tf['point_margin_season'] = tf['point_margin_game'].cumsum()
    tf['point_margin_season_avg'] = tf['point_margin_game'].expanding().mean()
    tf['point_margin_ngames'] = tf['point_margin_game'].rolling(window=window, min_periods=1).sum()
    tf['point_margin_ngames_avg'] = tf['point_margin_game'].rolling(window=window, min_periods=1).mean()
    tf['cover_margin_season'] = tf['cover_margin_game'].cumsum()
    tf['cover_margin_season_avg'] = tf['cover_margin_game'].expanding().mean()
    tf['cover_margin_ngames'] = tf['cover_margin_game'].rolling(window=window, min_periods=1).sum()
    tf['cover_margin_ngames_avg'] = tf['cover_margin_game'].rolling(window=window, min_periods=1).mean()
    tf['overunder_season'] = tf['overunder_margin'].cumsum()
    tf['overunder_season_avg'] = tf['overunder_margin'].expanding().mean()
    tf['overunder_ngames'] = tf['overunder_margin'].rolling(window=window, min_periods=1).sum()
    tf['overunder_ngames_avg'] = tf['overunder_margin'].rolling(window=window, min_periods=1).mean()
    return tf
#
# Function get_team_frame
#

def get_team_frame(game_frame, team, home, away):
    r"""Extract every game in which ``team`` appears, home or away.

    Parameters
    ----------
    game_frame : pandas.DataFrame
        The game frame for a given season.
    team : str
        The team abbreviation.
    home : str
        The label of the home team column.
    away : str
        The label of the away team column.

    Returns
    -------
    team_frame : pandas.DataFrame
        The rows of ``game_frame`` involving ``team``.
    """
    played_home = game_frame[home] == team
    played_away = game_frame[away] == team
    team_frame = game_frame[played_home | played_away]
    return team_frame
#
# Function insert_model_data
#

def insert_model_data(mf, mpos, mdict, tf, tpos, prefix):
    r"""Copy one team-frame row's features into the model frame.

    Parameters
    ----------
    mf : pandas.DataFrame
        The model frame for a single season.
    mpos : int
        Row label in the model frame where the values are written.
    mdict : dict
        Maps feature name (key) to type; only the keys are used here.
    tf : pandas.DataFrame
        The team frame for a season.
    tpos : int
        Positional row index into the team frame.
    prefix : str
        When non-empty, destination columns are ``'<prefix>.<key>'``.

    Returns
    -------
    mf : pandas.DataFrame
        The model frame with the row values filled in.
    """
    source_row = tf.iloc[tpos]
    for key in mdict:
        dest = PSEP.join([prefix, key]) if prefix else key
        mf.at[mpos, dest] = source_row[key]
    return mf
#
# Function generate_delta_data
#

def generate_delta_data(frame, fdict, prefix1, prefix2):
    r"""Create 'delta' columns: prefix1 features minus prefix2 features.

    Parameters
    ----------
    frame : pandas.DataFrame
        The input model frame.
    fdict : dict
        Maps feature name (key) to type; only the keys are used here.
    prefix1 : str
        The prefix of the first team (minuend columns).
    prefix2 : str
        The prefix of the second team (subtrahend columns).

    Returns
    -------
    frame : pandas.DataFrame
        The dataframe extended with one ``delta.<key>`` column per key.
    """
    for key in fdict:
        col1 = PSEP.join([prefix1, key])
        col2 = PSEP.join([prefix2, key])
        frame[PSEP.join(['delta', key])] = frame[col1] - frame[col2]
    return frame
#
# Function main
#
def main(args=None):
    r"""The main program for SportFlow.

    Notes
    -----
    (1) Initialize logging.
    (2) Parse the command line arguments.
    (3) Get the game configuration.
    (4) Get the model configuration.
    (5) Generate game frames for each season.
    (6) Create statistics for each team.
    (7) Merge the team frames into the final model frame.
    (8) Run the AlphaPy pipeline.

    Raises
    ------
    ValueError
        Training date must be before prediction date.
    """

    # Logging: everything goes to the log file, INFO and above to the console.
    logging.basicConfig(format="[%(asctime)s] %(levelname)s\t%(message)s",
                        filename="sport_flow.log", filemode='a', level=logging.DEBUG,
                        datefmt='%m/%d/%y %H:%M:%S')
    formatter = logging.Formatter("[%(asctime)s] %(levelname)s\t%(message)s",
                                  datefmt='%m/%d/%y %H:%M:%S')
    console = logging.StreamHandler()
    console.setFormatter(formatter)
    console.setLevel(logging.INFO)
    logging.getLogger().addHandler(console)
    logger = logging.getLogger(__name__)

    # Start the pipeline
    logger.info('*'*80)
    logger.info("SportFlow Start")
    logger.info('*'*80)

    # Argument Parsing
    parser = argparse.ArgumentParser(description="SportFlow Parser")
    parser.add_argument('--pdate', dest='predict_date',
                        help="prediction date is in the format: YYYY-MM-DD",
                        required=False, type=valid_date)
    parser.add_argument('--tdate', dest='train_date',
                        help="training date is in the format: YYYY-MM-DD",
                        required=False, type=valid_date)
    # BUG FIX: the group returned by add_mutually_exclusive_group was
    # previously discarded and the flags were added to the parser itself,
    # so --predict and --train were never actually mutually exclusive.
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument('--predict', dest='predict_mode', action='store_true')
    group.add_argument('--train', dest='predict_mode', action='store_false')
    parser.set_defaults(predict_mode=False)
    args = parser.parse_args()

    # Set train and predict dates (defaults: beginning of time / today).
    if args.train_date:
        train_date = args.train_date
    else:
        # pd.datetime is deprecated; use the datetime module directly.
        train_date = datetime.date(1900, 1, 1).strftime("%Y-%m-%d")

    if args.predict_date:
        predict_date = args.predict_date
    else:
        predict_date = datetime.date.today().strftime("%Y-%m-%d")

    # Verify that the dates are in sequence.
    if train_date >= predict_date:
        raise ValueError("Training date must be before prediction date")
    else:
        logger.info("Training Date: %s", train_date)
        logger.info("Prediction Date: %s", predict_date)

    # Read game configuration file
    sport_specs = get_sport_config()

    # Section: game
    league = sport_specs['league']
    points_max = sport_specs['points_max']
    points_min = sport_specs['points_min']
    random_scoring = sport_specs['random_scoring']
    seasons = sport_specs['seasons']
    window = sport_specs['rolling_window']

    # Read model configuration file
    specs = get_model_config()

    # Add command line arguments to model specifications
    specs['predict_mode'] = args.predict_mode
    specs['predict_date'] = args.predict_date
    specs['train_date'] = args.train_date

    # Unpack model arguments
    directory = specs['directory']
    target = specs['target']

    # Create directories if necessary
    output_dirs = ['config', 'data', 'input', 'model', 'output', 'plots']
    for od in output_dirs:
        output_dir = SSEP.join([directory, od])
        if not os.path.exists(output_dir):
            logger.info("Creating directory %s", output_dir)
            os.makedirs(output_dir)

    # Create the game scores space
    space = Space('game', 'scores', '1g')

    #
    # Derived Variables
    #

    series = space.schema
    team1_prefix = 'home'
    team2_prefix = 'away'
    home_team = PSEP.join([team1_prefix, 'team'])
    away_team = PSEP.join([team2_prefix, 'team'])

    #
    # Read in the game frame. This is the feature generation phase.
    #

    logger.info("Reading Game Data")

    data_dir = SSEP.join([directory, 'data'])
    file_base = USEP.join([league, space.subject, space.schema, space.fractal])
    df = read_frame(data_dir, file_base, specs['extension'], specs['separator'])
    logger.info("Total Game Records: %d", df.shape[0])

    #
    # Locate any rows with null values
    #

    null_rows = df.isnull().any(axis=1)
    null_indices = [i for i, val in enumerate(null_rows.tolist()) if val]
    for i in null_indices:
        logger.info("Null Record: %d on Date: %s", i, df.date[i])

    #
    # Run the game pipeline on a seasonal loop
    #

    if not seasons:
        # run model on all seasons
        seasons = df['season'].unique().tolist()

    #
    # Initialize the final frame
    #

    ff = pd.DataFrame()

    #
    # Iterate through each season of the game frame
    #

    for season in seasons:

        # Generate a frame for each season
        gf = df[df['season'] == season]
        gf = gf.reset_index()

        # Generate derived variables for the game frame
        total_games = gf.shape[0]
        if random_scoring:
            gf['home.score'] = np.random.randint(points_min, points_max, total_games)
            gf['away.score'] = np.random.randint(points_min, points_max, total_games)
        gf['total_points'] = gf['home.score'] + gf['away.score']
        # gf['line_delta'] = gf['line'] - gf['line_open']
        # gf['over_under_delta'] = gf['over_under'] - gf['over_under_open']
        gf = add_features(gf, game_dict, gf.shape[0])
        # Per-game outcomes: point margin, spread cover, and over/under flags.
        for index, row in gf.iterrows():
            gf['point_margin_game'].at[index] = get_point_margin(row, 'home.score', 'away.score')
            gf['won_on_points'].at[index] = True if gf['point_margin_game'].at[index] > 0 else False
            gf['lost_on_points'].at[index] = True if gf['point_margin_game'].at[index] < 0 else False
            gf['cover_margin_game'].at[index] = gf['point_margin_game'].at[index] + row['line']
            gf['won_on_spread'].at[index] = True if gf['cover_margin_game'].at[index] > 0 else False
            gf['lost_on_spread'].at[index] = True if gf['cover_margin_game'].at[index] <= 0 else False
            gf['overunder_margin'].at[index] = gf['total_points'].at[index] - row['over_under']
            gf['over'].at[index] = True if gf['overunder_margin'].at[index] > 0 else False
            gf['under'].at[index] = True if gf['overunder_margin'].at[index] < 0 else False

        # Generate each team frame
        team_frames = {}
        teams = gf.groupby([home_team])
        for team, data in teams:
            team_frame = USEP.join([league, team.lower(), series, str(season)])
            logger.info("Generating team frame: %s", team_frame)
            tf = get_team_frame(gf, team, home_team, away_team)
            tf = tf.reset_index()
            tf = generate_team_frame(team, tf, home_team, away_team, window)
            team_frames[team_frame] = tf

        # Create the model frame, initializing the home and away frames
        mdict = {k: v for (k, v) in list(sports_dict.items()) if v != bool}
        team1_frame = pd.DataFrame()
        team1_frame = add_features(team1_frame, mdict, gf.shape[0], prefix=team1_prefix)
        team2_frame = pd.DataFrame()
        team2_frame = add_features(team2_frame, mdict, gf.shape[0], prefix=team2_prefix)
        frames = [gf, team1_frame, team2_frame]
        mf = pd.concat(frames, axis=1)

        # Loop through each team frame, inserting data into the model frame row
        #     get index+1 [if valid]
        #     determine if team is home or away to get prefix
        #     try: np.where((gf[home_team] == 'PHI') & (gf['date'] == '09/07/14'))[0][0]
        #     Assign team frame fields to respective model frame fields

        for team, data in teams:
            team_frame = USEP.join([league, team.lower(), series, str(season)])
            logger.info("Merging team frame %s into model frame", team_frame)
            tf = team_frames[team_frame]
            # Use the previous game's statistics (index) as the features of
            # the current game (index + 1), so a game never sees its own result.
            for index in range(0, tf.shape[0]-1):
                gindex = index + 1
                model_row = tf.iloc[gindex]
                key_date = model_row['date']
                at_home = False
                if team == model_row[home_team]:
                    at_home = True
                    key_team = model_row[home_team]
                elif team == model_row[away_team]:
                    key_team = model_row[away_team]
                else:
                    raise KeyError("Team %s not found in Team Frame" % team)
                try:
                    if at_home:
                        mpos = np.where((mf[home_team] == key_team) & (mf['date'] == key_date))[0][0]
                    else:
                        mpos = np.where((mf[away_team] == key_team) & (mf['date'] == key_date))[0][0]
                except IndexError:
                    # np.where found no matching row; catch only IndexError
                    # so unrelated errors are not masked (was a bare except).
                    raise IndexError("Team/Date Key not found in Model Frame")
                # insert team data into model row
                mf = insert_model_data(mf, mpos, mdict, tf, index, team1_prefix if at_home else team2_prefix)

        # Compute delta data 'home' - 'away'
        mf = generate_delta_data(mf, mdict, team1_prefix, team2_prefix)

        # Append this to final frame
        frames = [ff, mf]
        ff = pd.concat(frames)

    # Write out dataframes

    input_dir = SSEP.join([directory, 'input'])
    if args.predict_mode:
        new_predict_frame = ff.loc[ff.date >= predict_date]
        if len(new_predict_frame) <= 1:
            raise ValueError("Prediction frame has length 1 or less")
        # rewrite with all the features to the train and test files
        logger.info("Saving prediction frame")
        write_frame(new_predict_frame, input_dir, datasets[Partition.predict],
                    specs['extension'], specs['separator'])
    else:
        # split data into training and test data
        new_train_frame = ff.loc[(ff.date >= train_date) & (ff.date < predict_date)]
        if len(new_train_frame) <= 1:
            raise ValueError("Training frame has length 1 or less")
        new_test_frame = ff.loc[ff.date >= predict_date]
        if len(new_test_frame) <= 1:
            raise ValueError("Testing frame has length 1 or less")
        # rewrite with all the features to the train and test files
        logger.info("Saving training frame")
        write_frame(new_train_frame, input_dir, datasets[Partition.train],
                    specs['extension'], specs['separator'])
        logger.info("Saving testing frame")
        write_frame(new_test_frame, input_dir, datasets[Partition.test],
                    specs['extension'], specs['separator'])

    # Create the model from specs
    logger.info("Running Model")
    model = Model(specs)

    # Run the pipeline
    model = main_pipeline(model)

    # Complete the pipeline
    logger.info('*'*80)
    logger.info("SportFlow End")
    logger.info('*'*80)
#
# MAIN PROGRAM
#

# Script entry point: run the SportFlow pipeline when invoked directly.
if __name__ == "__main__":
    main()
| apache-2.0 |
pkruskal/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV

# Chain an unsupervised PCA step with a logistic-regression classifier.
logistic = linear_model.LogisticRegression()

pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])

digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target

###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)

plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
# Explained variance per component; the elbow suggests a good cut-off.
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')

###############################################################################
# Prediction

# Candidate grid: number of PCA components and inverse regularization strengths.
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)

# Parameters of pipelines can be set using '__' separated parameter names:
estimator = GridSearchCV(pipe,
                         dict(pca__n_components=n_components,
                              logistic__C=Cs))
estimator.fit(X_digits, y_digits)

# Mark the grid-search-selected number of components on the spectrum plot.
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
            linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
brix4dayz/TRiCAM2.0 | frame_grabber.py | 1 | 2011 | """
frame_grabber.py
>>> python frame_grabber.py [from dir] [to dir] [frame capture rate]
"""
import hpidol as hp
import cv2, sys, shutil
import numpy as np
import scipy.misc, os
import pandas as pd
from collections import Counter
from PIL import Image
def post_img(image):
    # Submit an image file object to the HP IDOL logo-recognition service;
    # returns the service response (a dict holding the job descriptor).
    return hp.recognize_logos(image)
def get_logos(job_id):
    # Fetch the logo-recognition result for a previously submitted job id.
    return hp.get_logos_result(job_id)
def do_videos(from_dir, to_dir, save_time=1):
    """Sample a frame from every video in ``from_dir`` roughly every
    ``save_time`` seconds, submit each frame for logo recognition, and
    write one CSV of (video_time, job_id) pairs per video into ``to_dir``.

    Videos whose output CSV already exists are skipped.
    """
    for video_name in os.listdir(from_dir):
        # os.path.join is portable; the original hard-coded a backslash
        # separator here, which breaks on POSIX systems.
        csv_file = os.path.join(to_dir, video_name[:-4] + ".csv")
        if not os.path.isfile(csv_file):
            f = open(csv_file, 'w')
            f.write("video_time,job_id\n")
            video_file = os.path.join(from_dir, video_name)
            cap = cv2.VideoCapture(video_file)
            seconds_from_start = 0
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break
                video_time = cap.get(cv2.cv.CV_CAP_PROP_POS_MSEC)
                # Capture a frame once the elapsed time exceeds the sampling
                # interval.  BUG FIX: the original compared against a literal
                # 1, silently ignoring the save_time parameter.
                if ((video_time / 1000) - seconds_from_start) > save_time:
                    #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                    #frame = cv2.equalizeHist(frame)
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    scipy.misc.imsave("temp.jpg", frame)
                    # Close the handle before removing the file; deleting a
                    # file with an open handle fails on Windows.
                    with open("temp.jpg", 'rb') as img:
                        job_id = post_img(img)
                    os.remove("temp.jpg")
                    f.write(str(video_time / 1000) + "," + str(job_id['jobID']) + "\n")
                    seconds_from_start += save_time
            f.close()
def get_logos_matrix(from_dir, to_file):
    # Build a (video x logo) count matrix from the per-video job CSVs in
    # ``from_dir``, write it to ``to_file``, and return it as a dict.
    row_names = []
    for csv in os.listdir(from_dir):
        row_names.append(csv[:-4])
    master_frame = pd.DataFrame(index = row_names)
    for csv in os.listdir(from_dir):
        csv_file = from_dir + "/" + csv
        df = pd.read_csv(csv_file)
        found_logos = []
        for item in df["job_id"]:
            # Retrieve the recognition result for each submitted job; only
            # the first detected logo per frame is counted.
            logo = get_logos(item)
            if (logo is not None) and logo != []:
                print logo[0]
                found_logos.append(logo[0])
        for item in found_logos:
            if item not in master_frame:
                master_frame[item] = 0
            # NOTE(review): chained indexing relies on the column lookup
            # returning a view; pandas recommends .at/.loc for assignment --
            # confirm against the pandas version in use.
            master_frame[item][csv[:-4]] = int(master_frame[item][csv[:-4]]) + 1
    master_frame.to_csv(to_file)
    return pd.DataFrame.to_dict(master_frame)
| mit |
sanketloke/scikit-learn | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the amount of
times that the algorithm will be run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

from sklearn.cluster import KMeans
from sklearn import datasets

np.random.seed(5)

centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target

# Three K-means variants: the "right" k, too many clusters, and a bad
# (single, random) initialization.
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
              'k_means_iris_8': KMeans(n_clusters=8),
              'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
                                              init='random')}

fignum = 1
for name, est in estimators.items():
    fig = plt.figure(fignum, figsize=(4, 3))
    plt.clf()
    ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)

    plt.cla()
    est.fit(X)
    labels = est.labels_

    # Color each sample by its cluster assignment, using 3 of the 4 features.
    ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))

    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
    ax.set_xlabel('Petal width')
    ax.set_ylabel('Sepal length')
    ax.set_zlabel('Petal length')
    fignum = fignum + 1

# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)

plt.cla()

# Label each species at the mean position of its samples.
for name, label in [('Setosa', 0),
                    ('Versicolour', 1),
                    ('Virginica', 2)]:
    ax.text3D(X[y == label, 3].mean(),
              X[y == label, 0].mean() + 1.5,
              X[y == label, 2].mean(), name,
              horizontalalignment='center',
              bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)

ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
Brett777/Predict-Churn | DeployChurnEnsembleModel.py | 1 | 2262 | import os
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import h2o
import numpy as np
import pandas as pd
from tabulate import tabulate
# initialize the model scoring server
# (single-threaded, minimal memory; strict version check disabled)
h2o.init(nthreads=1,max_mem_size=1, start_h2o=True, strict_version_check = False)
def churn_predict(State,AccountLength,AreaCode,Phone,IntlPlan,VMailPlan,VMailMessage,DayMins,DayCalls,DayCharge,EveMins,EveCalls,EveCharge,NightMins,NightCalls,NightCharge,IntlMins,IntlCalls,IntlCharge,CustServCalls):
    """Score a single customer record with the saved GBM/RF ensemble.

    Each parameter is one raw feature of the customer record.  Returns a
    human-readable string with the predicted label and the churn/retain
    probabilities.
    """
    # connect to the model scoring service
    h2o.connect()
    # open the downloaded model
    ChurnPredictor = h2o.load_model(path='GBM-RF-ensemble')
    # define a feature vector to evaluate with the model
    # (single-row DataFrame whose column names match the training frame)
    newData = pd.DataFrame({'State' : State,
                            'Account Length' : AccountLength,
                            'Area Code' : AreaCode,
                            'Phone' : Phone,
                            'Int\'l Plan' : IntlPlan,
                            'VMail Plan' : VMailPlan,
                            'VMail Message' : VMailMessage,
                            'Day Mins' : DayMins,
                            'Day Calls' : DayCalls,
                            'Day Charge' : DayCharge,
                            'Eve Mins' : EveMins,
                            'Eve Calls' : EveCalls,
                            'Eve Charge' : EveCharge,
                            'Night Mins' : NightMins,
                            'Night Calls' : NightCalls,
                            'Night Charge' : NightCharge,
                            'Intl Mins' :IntlMins,
                            'Intl Calls' : IntlCalls,
                            'Intl Charge' : IntlCharge,
                            'CustServ Calls' : CustServCalls}, index=[0])
    # evaluate the feature vector using the model
    predictions = ChurnPredictor.predict(h2o.H2OFrame(newData))
    predictionsOut = h2o.as_list(predictions, use_pandas=False)
    # row 0 of the result is the header; row 1 holds
    # [predicted label, P(churn), P(retain)]
    prediction = predictionsOut[1][0]
    probabilityChurn = predictionsOut[1][1]
    probabilityRetain = predictionsOut[1][2]
return "Prediction: " + str(prediction) + " |Probability to Churn: " + str(probabilityChurn) + " |Probability to Retain: " + str(probabilityRetain) | mit |
WuShichao/computational-physics | 3/3_9/3_9.py | 1 | 2377 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 26 13:45:32 2016
Study the effects of damping by starting the pendulum with some initial
angular displacement. Using Euler-Cromer method.
@author: nightwing
"""
from math import sin,pi
import matplotlib.pyplot as plt
g = 9.8 #gravity acceleration (m/s2)
length = 9.8 #length of the rod (m)
k = g / length #g/length
dt = 0.04 #time step (s)
t_end = 200 #end time (s)
situations = [] #this list store [time, angle]
#caculate the physical pendulum
def PHYSICAL_PENDULUM(q,fd,freq,theta):
t = 0 #initial time (s)
angular_vel = 0 #initial angular velocity (rad/s)
angular_velocity = [] #this list store value of angular velocity
angle = [] #this list store value of angle
time = [] #this list store value of time
while t <= t_end:
angular_velocity.append(angular_vel)
angle.append(theta)
time.append(t)
angular_vel += (-k*sin(theta)-q*angular_vel+fd*sin(freq*t)) * dt
theta += angular_vel * dt
if theta > pi:
theta -= 2*pi
elif theta < -pi:
theta += 2*pi
t += dt
return [time,angle,angular_velocity]
#-------------caculate (Euler-Cromer method)------------
for angle in [0.5, 1.0, 1.5]:
situations.append(PHYSICAL_PENDULUM(0.1, 0, 2/3.0, angle))
#caculate time constant
for situation in range(len(situations)):
for index in range(len(situations[situation][1])):
find_time_constant = 'yes'
for k in range(1000):
if abs(situations[situation][1][index + k]) > 0.01*abs(situations[situation][1][0]):
find_time_constant = 'no'
break
if find_time_constant == 'yes':
print "time constant = %.3fs" % situations[situation][0][index]
break
#---------------graph----------------
plt.subplot(311)
plt.title("angle versus time ($\\theta$=0.5, 1.0, 1.5)")
plt.plot(situations[0][0],situations[0][1],"k-",label="$\\theta$=0.5")
plt.legend()
plt.subplot(312)
plt.plot(situations[1][0],situations[1][1],"k-",label="$\\theta$=1.0")
plt.ylabel("angle (radians)")
plt.legend()
plt.subplot(313)
plt.plot(situations[2][0],situations[2][1],"k-",label="$\\theta$=1.5")
plt.xlabel("time (s)")
plt.legend()
plt.show()
| gpl-3.0 |
dlmacedo/SVM-CNN | random_layer.py | 1 | 18874 | #-*- coding: utf8
# Author: David C. Lambert [dcl -at- panix -dot- com]
# Copyright(c) 2013
# License: Simple BSD
"""The :mod:`random_layer` module
implements Random Layer transformers.
Random layers are arrays of hidden unit activations that are
random functions of input activation values (dot products for simple
activation functions, distances from prototypes for radial basis
functions).
They are used in the implementation of Extreme Learning Machines (ELMs),
but can be used as a general input mapping.
"""
from abc import ABCMeta, abstractmethod
from math import sqrt
import numpy as np
import scipy.sparse as sp
from scipy.spatial.distance import cdist, pdist, squareform
from sklearn.metrics import pairwise_distances
from sklearn.utils import check_random_state, check_array #atleast2d_or_csr(X)
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.base import BaseEstimator, TransformerMixin
__all__ = ['RandomLayer',
'MLPRandomLayer',
'RBFRandomLayer',
'GRBFRandomLayer',
]
class BaseRandomLayer(BaseEstimator, TransformerMixin):
    """Abstract Base Class for random layers"""
    __metaclass__ = ABCMeta

    # Subclasses override this with a {name: callable} table; used when
    # activation_func is given as a string.
    _internal_activation_funcs = dict()

    @classmethod
    def activation_func_names(cls):
        """Get list of internal activation function names"""
        return cls._internal_activation_funcs.keys()

    # take n_hidden and random_state, init components_ and
    # input_activations_
    def __init__(self, n_hidden=20, random_state=0, activation_func=None,
                 activation_args=None):

        self.n_hidden = n_hidden
        self.random_state = random_state
        self.activation_func = activation_func
        self.activation_args = activation_args

        # components_ holds the generated layer parameters
        # (weights/biases/centers/radii), filled by _generate_components
        self.components_ = dict()
        # cached input activations from the most recent transform
        self.input_activations_ = None

        # keyword args for internally defined funcs
        self._extra_args = dict()

    @abstractmethod
    def _generate_components(self, X):
        """Generate components of hidden layer given X"""

    @abstractmethod
    def _compute_input_activations(self, X):
        """Compute input activations given X"""

    # compute input activations and pass them
    # through the hidden layer transfer functions
    # to compute the transform
    def _compute_hidden_activations(self, X):
        """Compute hidden activations given X"""

        self._compute_input_activations(X)
        acts = self.input_activations_

        if (callable(self.activation_func)):
            # user-supplied callable: forward any user-supplied kwargs
            args_dict = self.activation_args if (self.activation_args) else {}
            X_new = self.activation_func(acts, **args_dict)
        else:
            # named internal activation: look it up and apply it with any
            # extra keyword arguments prepared by the subclass
            func_name = self.activation_func
            func = self._internal_activation_funcs[func_name]
            X_new = func(acts, **self._extra_args)

        return X_new

    # perform fit by generating random components based
    # on the input array
    def fit(self, X, y=None):
        """Generate a random hidden layer.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape [n_samples, n_features]
            Training set: only the shape is used to generate random component
            values for hidden units

        y : is not used: placeholder to allow for usage in a Pipeline.

        Returns
        -------
        self
        """
        X = check_array(X)  # atleast2d_or_csr(X)

        self._generate_components(X)

        return self

    # perform transformation by calling compute_hidden_activations
    # (which will normally call compute_input_activations first)
    def transform(self, X, y=None):
        """Generate the random hidden layer's activations given X as input.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape [n_samples, n_features]
            Data to transform

        y : is not used: placeholder to allow for usage in a Pipeline.

        Returns
        -------
        X_new : numpy array of shape [n_samples, n_components]
        """
        X = check_array(X)  # atleast2d_or_csr(X)

        if (self.components_ is None):
            raise ValueError('No components initialized')

        return self._compute_hidden_activations(X)
class RandomLayer(BaseRandomLayer):
    """RandomLayer is a transformer that creates a feature mapping of the
    inputs that corresponds to a layer of hidden units with randomly
    generated components.

    The transformed values are a specified function of input activations
    that are a weighted combination of dot product (multilayer perceptron)
    and distance (rbf) activations:

      input_activation = alpha * mlp_activation + (1-alpha) * rbf_activation

      mlp_activation(x) = dot(x, weights) + bias
      rbf_activation(x) = rbf_width * ||x - center||/radius

    alpha and rbf_width are specified by the user; weights and biases are
    taken from a normal distribution of mean 0 and sd of 1; centers are
    taken uniformly from the bounding hyperrectangle of the inputs, and
    radii are max(||x-c||)/sqrt(n_centers*2).

    The input activation is transformed by a transfer function that defaults
    to numpy.tanh if not specified, but can be any callable that returns an
    array of the same shape as its argument (the input activation array, of
    shape [n_samples, n_hidden]).  Functions provided are 'sine', 'tanh',
    'tribas', 'inv_tribas', 'sigmoid', 'hardlim', 'softlim', 'gaussian',
    'multiquadric', or 'inv_multiquadric'.

    Parameters
    ----------
    `n_hidden` : int, optional (default=20)
        Number of units to generate

    `alpha` : float, optional (default=0.5)
        Mixing coefficient for distance and dot product input activations:
        activation = alpha*mlp_activation + (1-alpha)*rbf_width*rbf_activation

    `rbf_width` : float, optional (default=1.0)
        multiplier on rbf_activation

    `user_components` : dictionary, optional (default=None)
        dictionary containing values for components that would otherwise be
        randomly generated.  Valid key/value pairs are as follows:
           'radii'  : array-like of shape [n_hidden]
           'centers': array-like of shape [n_hidden, n_features]
           'biases' : array-like of shape [n_hidden]
           'weights': array-like of shape [n_features, n_hidden]

    `activation_func` : {callable, string} optional (default='tanh')
        Function used to transform input activation.  It must be one of
        'tanh', 'sine', 'tribas', 'inv_tribas', 'sigmoid', 'hardlim',
        'softlim', 'gaussian', 'multiquadric', 'inv_multiquadric' or a
        callable.  If None is given, 'tanh' will be used.  If a callable
        is given, it will be used to compute the activations.

    `activation_args` : dictionary, optional (default=None)
        Supplies keyword arguments for a callable activation_func

    `random_state` : int, RandomState instance or None (default=None)
        Control the pseudo random number generator used to generate the
        hidden unit weights at fit time.

    Attributes
    ----------
    `input_activations_` : numpy array of shape [n_samples, n_hidden]
        Array containing dot(x, hidden_weights) + bias for all samples

    `components_` : dictionary containing two keys:
        `bias_weights_`   : numpy array of shape [n_hidden]
        `hidden_weights_` : numpy array of shape [n_features, n_hidden]
    """
    # --- internal activation functions (elementwise on numpy arrays) ---

    # triangular activation function
    _tribas = (lambda x: np.clip(1.0 - np.fabs(x), 0.0, 1.0))

    # inverse triangular activation function
    _inv_tribas = (lambda x: np.clip(np.fabs(x), 0.0, 1.0))

    # sigmoid activation function
    _sigmoid = (lambda x: 1.0/(1.0 + np.exp(-x)))

    # hard limit activation function
    _hardlim = (lambda x: np.array(x > 0.0, dtype=float))

    _softlim = (lambda x: np.clip(x, 0.0, 1.0))

    # gaussian RBF
    _gaussian = (lambda x: np.exp(-pow(x, 2.0)))

    # multiquadric RBF
    _multiquadric = (lambda x:
                     np.sqrt(1.0 + pow(x, 2.0)))

    # inverse multiquadric RBF
    _inv_multiquadric = (lambda x:
                         1.0/(np.sqrt(1.0 + pow(x, 2.0))))

    # internal activation function table
    _internal_activation_funcs = {'sine': np.sin,
                                  'tanh': np.tanh,
                                  'tribas': _tribas,
                                  'inv_tribas': _inv_tribas,
                                  'sigmoid': _sigmoid,
                                  'softlim': _softlim,
                                  'hardlim': _hardlim,
                                  'gaussian': _gaussian,
                                  'multiquadric': _multiquadric,
                                  'inv_multiquadric': _inv_multiquadric,
                                  }

    def __init__(self, n_hidden=20, alpha=0.5, random_state=None,
                 activation_func='tanh', activation_args=None,
                 user_components=None, rbf_width=1.0):

        super(RandomLayer, self).__init__(n_hidden=n_hidden,
                                          random_state=random_state,
                                          activation_func=activation_func,
                                          activation_args=activation_args)

        if (isinstance(self.activation_func, str)):
            func_names = self._internal_activation_funcs.keys()
            if (self.activation_func not in func_names):
                msg = "unknown activation function '%s'" % self.activation_func
                raise ValueError(msg)

        self.alpha = alpha
        self.rbf_width = rbf_width
        self.user_components = user_components

        # alpha == 0 -> pure RBF layer; alpha == 1 -> pure MLP layer
        self._use_mlp_input = (self.alpha != 0.0)
        self._use_rbf_input = (self.alpha != 1.0)

    def _get_user_components(self, key):
        """Look for given user component"""
        try:
            return self.user_components[key]
        except (TypeError, KeyError):
            # user_components is None or does not supply this key
            return None

    def _compute_radii(self):
        """Generate RBF radii"""
        # use supplied radii if present
        radii = self._get_user_components('radii')

        # compute radii
        if (radii is None):
            centers = self.components_['centers']

            n_centers = centers.shape[0]
            max_dist = np.max(pairwise_distances(centers))
            radii = np.ones(n_centers) * max_dist/sqrt(2.0 * n_centers)

        self.components_['radii'] = radii

    def _compute_centers(self, X, sparse, rs):
        """Generate RBF centers"""
        # use supplied centers if present
        centers = self._get_user_components('centers')

        # use points taken uniformly from the bounding
        # hyperrectangle
        if (centers is None):
            n_features = X.shape[1]

            if (sparse):
                fxr = range(n_features)
                cols = [X.getcol(i) for i in fxr]

                # BUG FIX: np.array(map(...)) builds a 0-d object array under
                # Python 3 (map returns an iterator); materialize with a list
                # comprehension instead.
                # NOTE(review): np.min/np.max on col.data fails for an
                # all-zero sparse column (empty .data) -- confirm inputs.
                min_dtype = X.dtype.type(1.0e10)
                sp_min = lambda col: np.minimum(min_dtype, np.min(col.data))
                min_Xs = np.array([sp_min(col) for col in cols])

                max_dtype = X.dtype.type(-1.0e10)
                sp_max = lambda col: np.maximum(max_dtype, np.max(col.data))
                max_Xs = np.array([sp_max(col) for col in cols])
            else:
                min_Xs = X.min(axis=0)
                max_Xs = X.max(axis=0)

            spans = max_Xs - min_Xs
            ctrs_size = (self.n_hidden, n_features)
            centers = min_Xs + spans * rs.uniform(0.0, 1.0, ctrs_size)

        self.components_['centers'] = centers

    def _compute_biases(self, rs):
        """Generate MLP biases"""
        # use supplied biases if present
        biases = self._get_user_components('biases')
        if (biases is None):
            b_size = self.n_hidden
            biases = rs.normal(size=b_size)

        self.components_['biases'] = biases

    def _compute_weights(self, X, rs):
        """Generate MLP weights"""
        # use supplied weights if present
        weights = self._get_user_components('weights')
        if (weights is None):
            n_features = X.shape[1]
            hw_size = (n_features, self.n_hidden)
            weights = rs.normal(size=hw_size)

        self.components_['weights'] = weights

    def _generate_components(self, X):
        """Generate components of hidden layer given X"""
        rs = check_random_state(self.random_state)
        if (self._use_mlp_input):
            self._compute_biases(rs)
            self._compute_weights(X, rs)

        if (self._use_rbf_input):
            self._compute_centers(X, sp.issparse(X), rs)
            self._compute_radii()

    def _compute_input_activations(self, X):
        """Compute input activations given X"""

        n_samples = X.shape[0]

        mlp_acts = np.zeros((n_samples, self.n_hidden))
        if (self._use_mlp_input):
            b = self.components_['biases']
            w = self.components_['weights']
            mlp_acts = self.alpha * (safe_sparse_dot(X, w) + b)

        rbf_acts = np.zeros((n_samples, self.n_hidden))
        if (self._use_rbf_input):
            radii = self.components_['radii']
            centers = self.components_['centers']
            scale = self.rbf_width * (1.0 - self.alpha)
            rbf_acts = scale * cdist(X, centers)/radii

        self.input_activations_ = mlp_acts + rbf_acts
class MLPRandomLayer(RandomLayer):
    """Convenience subclass of RandomLayer that uses dot-product (MLP)
    activations exclusively by fixing the mixing coefficient alpha at 1.0."""

    def __init__(self, n_hidden=20, random_state=None,
                 activation_func='tanh', activation_args=None,
                 weights=None, biases=None):
        # Package the optionally user-supplied MLP components for the parent.
        components = dict(weights=weights, biases=biases)
        super(MLPRandomLayer, self).__init__(
            n_hidden=n_hidden,
            random_state=random_state,
            activation_func=activation_func,
            activation_args=activation_args,
            user_components=components,
            alpha=1.0)
class RBFRandomLayer(RandomLayer):
    """Convenience subclass of RandomLayer that uses distance-based (RBF)
    activations exclusively by fixing the mixing coefficient alpha at 0.0."""

    def __init__(self, n_hidden=20, random_state=None,
                 activation_func='gaussian', activation_args=None,
                 centers=None, radii=None, rbf_width=1.0):
        # Package the optionally user-supplied RBF components for the parent.
        components = dict(centers=centers, radii=radii)
        super(RBFRandomLayer, self).__init__(
            n_hidden=n_hidden,
            random_state=random_state,
            activation_func=activation_func,
            activation_args=activation_args,
            user_components=components,
            rbf_width=rbf_width,
            alpha=0.0)
class GRBFRandomLayer(RBFRandomLayer):
    """Random Generalized RBF Hidden Layer transformer

    Creates a layer of radial basis function units where:

       f(a), s.t. a = ||x-c||/r

    with c the unit center and f() is exp(-gamma * a^tau), where tau and r
    are computed based on [1]

    Parameters
    ----------
    `n_hidden` : int, optional (default=20)
        Number of units to generate, ignored if centers are provided

    `grbf_lambda` : float, optional (default=0.05)
        GRBF shape parameter

    `gamma` : {int, float} optional (default=1.0)
        Width multiplier for GRBF distance argument

    `centers` : array of shape (n_hidden, n_features), optional (default=None)
        If provided, overrides internal computation of the centers

    `radii` : array of shape (n_hidden), optional (default=None)
        If provided, overrides internal computation of the radii

    `use_exemplars` : bool, optional (default=False)
        If True, uses random examples from the input to determine the RBF
        centers, ignored if centers are provided

    `random_state` : int or RandomState instance, optional (default=None)
        Control the pseudo random number generator used to generate the
        centers at fit time, ignored if centers are provided

    Attributes
    ----------
    `components_` : dictionary containing two keys:
        `radii_`   : numpy array of shape [n_hidden]
        `centers_` : numpy array of shape [n_hidden, n_features]

    `input_activations_` : numpy array of shape [n_samples, n_hidden]
        Array containing ||x-c||/r for all samples

    See Also
    --------
    ELMRegressor, ELMClassifier, SimpleELMRegressor, SimpleELMClassifier,
    SimpleRandomLayer

    References
    ----------
    .. [1] Fernandez-Navarro, et al, "MELM-GRBF: a modified version of the
              extreme learning machine for generalized radial basis function
              neural networks", Neurocomputing 74 (2011), 2502-2510
    """
    # def _grbf(acts, taus):
    #     """GRBF activation function"""
    #     return np.exp(np.exp(-pow(acts, taus)))

    # NOTE(review): the doubled np.exp looks suspicious -- a generalized RBF
    # per ref [1] would be exp(-a^tau); confirm against the paper before
    # relying on this activation.
    _grbf = (lambda acts, taus: np.exp(np.exp(-pow(acts, taus))))

    _internal_activation_funcs = {'grbf': _grbf}

    def __init__(self, n_hidden=20, grbf_lambda=0.001,
                 centers=None, radii=None, random_state=None):

        super(GRBFRandomLayer, self).__init__(n_hidden=n_hidden,
                                              activation_func='grbf',
                                              centers=centers, radii=radii,
                                              random_state=random_state)

        self.grbf_lambda = grbf_lambda
        # Derived quantities computed during fit (see _compute_centers):
        # nearest-neighbor distances, farthest distances, and tau exponents.
        self.dN_vals = None
        self.dF_vals = None
        self.tau_vals = None

    # get centers from superclass, then calculate tau_vals
    # according to ref [1]
    def _compute_centers(self, X, sparse, rs):
        """Generate centers, then compute tau, dF and dN vals"""

        super(GRBFRandomLayer, self)._compute_centers(X, sparse, rs)

        centers = self.components_['centers']
        sorted_distances = np.sort(squareform(pdist(centers)))
        # dF: distance to the farthest center; dN: 1/100 of the distance to
        # the nearest other center (column 0 is the zero self-distance)
        self.dF_vals = sorted_distances[:, -1]
        self.dN_vals = sorted_distances[:, 1]/100.0
        #self.dN_vals = 0.0002 * np.ones(self.dF_vals.shape)

        tauNum = np.log(np.log(self.grbf_lambda) /
                        np.log(1.0 - self.grbf_lambda))

        tauDenom = np.log(self.dF_vals/self.dN_vals)

        self.tau_vals = tauNum/tauDenom

        # pass tau to the 'grbf' activation via the base-class kwargs hook
        self._extra_args['taus'] = self.tau_vals

    # get radii according to ref [1]
    def _compute_radii(self):
        """Generate radii"""
        denom = pow(-np.log(self.grbf_lambda), 1.0/self.tau_vals)
        self.components_['radii'] = self.dF_vals/denom
| apache-2.0 |
henrykironde/scikit-learn | examples/manifold/plot_mds.py | 261 | 2616 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
# Number of noisy 2-D points to embed.
n_samples = 20
seed = np.random.RandomState(seed=3)
# ``np.float`` was a deprecated alias for the builtin ``float`` and was
# removed in NumPy 1.24; the builtin is equivalent here.
X_true = seed.randint(0, 20, 2 * n_samples).astype(float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()

similarities = euclidean_distances(X_true)

# Add symmetric noise (zero diagonal) to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise

# Metric MDS on the precomputed dissimilarity matrix
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
                   dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_

# Non-metric MDS, initialised from the metric solution
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
                    dissimilarity="precomputed", random_state=seed, n_jobs=1,
                    n_init=1)
npos = nmds.fit_transform(similarities, init=pos)

# Rescale the data so the embeddings match the scale of the true points
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())

# Rotate the data into a common orientation via PCA
clf = PCA(n_components=2)

X_true = clf.fit_transform(X_true)

pos = clf.fit_transform(pos)

npos = clf.fit_transform(npos)

fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')

similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0

# Plot the edges.  (The original computed ``np.where(pos)`` into
# start_idx/end_idx here but never used the result; removed.)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
            for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
                    zorder=0, cmap=plt.cm.hot_r,
                    norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)

plt.show()
| bsd-3-clause |
kobejean/tensorflow | tensorflow/contrib/timeseries/examples/multivariate.py | 10 | 5155 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A multivariate TFTS example.
Fits a multivariate model, exports it, and visualizes the learned correlations
by iteratively predicting and sampling from the predictions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import tempfile
import numpy
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_level.csv")
def multivariate_train_and_sample(
    csv_file_name=_DATA_FILE, export_directory=None, training_steps=500):
  """Trains, evaluates, and exports a multivariate model.

  Args:
    csv_file_name: Path to a CSV with a times column followed by five value
      columns (defaults to the bundled example data).
    export_directory: Directory to write the SavedModel to; a temporary
      directory is created when None.
    training_steps: Number of estimator training steps to run.

  Returns:
    A (times, observations) tuple: the observed series followed by 100
    iteratively sampled continuation steps.
  """
  estimator = tf.contrib.timeseries.StructuralEnsembleRegressor(
      periodicities=[], num_features=5)
  reader = tf.contrib.timeseries.CSVReader(
      csv_file_name,
      column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
                    + (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5))
  train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
      # Larger window sizes generally produce a better covariance matrix.
      reader, batch_size=4, window_size=64)
  estimator.train(input_fn=train_input_fn, steps=training_steps)
  evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
  # Evaluating over the whole dataset leaves the filter state at the end of
  # the observed series, which is the starting point for sampling.
  current_state = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
  values = [current_state["observed"]]
  times = [current_state[tf.contrib.timeseries.FilteringResults.TIMES]]
  # Export the model so we can do iterative prediction and filtering without
  # reloading model checkpoints.
  if export_directory is None:
    export_directory = tempfile.mkdtemp()
  input_receiver_fn = estimator.build_raw_serving_input_receiver_fn()
  export_location = estimator.export_savedmodel(
      export_directory, input_receiver_fn)
  with tf.Graph().as_default():
    numpy.random.seed(1)  # Make the example a bit more deterministic
    with tf.Session() as session:
      signatures = tf.saved_model.loader.load(
          session, [tf.saved_model.tag_constants.SERVING], export_location)
      # Alternate predict-one-step / sample / filter to draw a trajectory
      # from the learned multivariate distribution.
      for _ in range(100):
        current_prediction = (
            tf.contrib.timeseries.saved_model_utils.predict_continuation(
                continue_from=current_state, signatures=signatures,
                session=session, steps=1))
        next_sample = numpy.random.multivariate_normal(
            # Squeeze out the batch and series length dimensions (both 1).
            mean=numpy.squeeze(current_prediction["mean"], axis=(0, 1)),
            cov=numpy.squeeze(current_prediction["covariance"], axis=(0, 1)))
        # Update model state so that future predictions are conditional on the
        # value we just sampled.
        filtering_features = {
            tf.contrib.timeseries.TrainEvalFeatures.TIMES: current_prediction[
                tf.contrib.timeseries.FilteringResults.TIMES],
            tf.contrib.timeseries.TrainEvalFeatures.VALUES: next_sample[
                None, None, :]}
        current_state = (
            tf.contrib.timeseries.saved_model_utils.filter_continuation(
                continue_from=current_state,
                session=session,
                signatures=signatures,
                features=filtering_features))
        values.append(next_sample[None, None, :])
        times.append(current_state["times"])
  all_observations = numpy.squeeze(numpy.concatenate(values, axis=1), axis=0)
  all_times = numpy.squeeze(numpy.concatenate(times, axis=1), axis=0)
  return all_times, all_observations
def main(unused_argv):
  """Train the example model, sample continuations, and plot them."""
  if not HAS_MATPLOTLIB:
    raise ImportError(
        "Please install matplotlib to generate a plot from this example.")
  sampled_times, sampled_values = multivariate_train_and_sample()
  # A dotted vertical line marks where observed data ends and the
  # iteratively sampled continuation begins.
  pyplot.axvline(1000, linestyle="dotted")
  pyplot.plot(sampled_times, sampled_values)
  pyplot.show()


if __name__ == "__main__":
  tf.app.run(main=main)
| apache-2.0 |
arabenjamin/scikit-learn | benchmarks/bench_glmnet.py | 297 | 3848 | """
To run this, you'll need to have installed.
* glmnet-python
* scikit-learn (of course)
Does two benchmarks
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
    """Root mean squared error between arrays ``a`` and ``b``."""
    squared_error = (a - b) ** 2
    return np.sqrt(np.mean(squared_error))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
    """Time one fit of ``factory(alpha=alpha)`` on (X, Y) and report quality.

    Prints the fit duration, the test-set RMSE and the mean absolute
    difference between estimated and reference coefficients, then returns
    the wall-clock fit time in seconds.  Reads the module-level ``alpha``.
    """
    gc.collect()

    # start time
    tstart = time()
    clf = factory(alpha=alpha).fit(X, Y)
    delta = (time() - tstart)
    # stop time

    print("duration: %0.3fs" % delta)
    print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
    print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
    return delta
if __name__ == '__main__':
    from glmnet.elastic_net import Lasso as GlmnetLasso
    from sklearn.linear_model import Lasso as ScikitLasso
    # Delayed import of pylab
    import pylab as pl

    # ===== Benchmark 1: fixed feature count, growing sample count =====
    scikit_results = []
    glmnet_results = []
    n = 20
    step = 500
    n_features = 1000
    # BUG FIX: under Python 3 ``/`` yields a float, but make_regression
    # requires an integer n_informative; use floor division.
    n_informative = n_features // 10
    n_test_samples = 1000
    for i in range(1, n + 1):
        print('==================')
        print('Iteration %s of %s' % (i, n))
        print('==================')

        X, Y, coef_ = make_regression(
            n_samples=(i * step) + n_test_samples, n_features=n_features,
            noise=0.1, n_informative=n_informative, coef=True)

        # Hold out the last n_test_samples rows for scoring.
        X_test = X[-n_test_samples:]
        Y_test = Y[-n_test_samples:]
        X = X[:(i * step)]
        Y = Y[:(i * step)]

        print("benchmarking scikit-learn: ")
        scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
        print("benchmarking glmnet: ")
        glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))

    pl.clf()
    xx = range(0, n * step, step)
    pl.title('Lasso regression on sample dataset (%d features)' % n_features)
    pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
    pl.plot(xx, glmnet_results, 'r-', label='glmnet')
    pl.legend()
    pl.xlabel('number of samples to classify')
    pl.ylabel('Time (s)')
    pl.show()

    # ===== Benchmark 2: fixed sample count, growing feature count =====
    scikit_results = []
    glmnet_results = []
    n = 20
    step = 100
    n_samples = 500

    for i in range(1, n + 1):
        print('==================')
        print('Iteration %02d of %02d' % (i, n))
        print('==================')
        n_features = i * step
        # Same integer-division fix as above.
        n_informative = n_features // 10

        X, Y, coef_ = make_regression(
            n_samples=(i * step) + n_test_samples, n_features=n_features,
            noise=0.1, n_informative=n_informative, coef=True)

        X_test = X[-n_test_samples:]
        Y_test = Y[-n_test_samples:]
        X = X[:n_samples]
        Y = Y[:n_samples]

        print("benchmarking scikit-learn: ")
        scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
        print("benchmarking glmnet: ")
        glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))

    xx = np.arange(100, 100 + n * step, step)
    pl.figure('scikit-learn vs. glmnet benchmark results')
    pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
    pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
    pl.plot(xx, glmnet_results, 'r-', label='glmnet')
    pl.legend()
    pl.xlabel('number of features')
    pl.ylabel('Time (s)')
    pl.axis('tight')
    pl.show()
| bsd-3-clause |
kdebrab/pandas | pandas/tests/frame/test_duplicates.py | 1 | 13948 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
import numpy as np
from pandas.compat import lrange, string_types
from pandas import DataFrame, Series
import pandas.util.testing as tm
@pytest.mark.parametrize('subset', ['a', ['a'], ['a', 'B']])
def test_duplicated_with_misspelled_column_name(subset):
    """duplicated/drop_duplicates raise KeyError for unknown labels (GH 19730)."""
    frame = DataFrame({col: [0, 0, 1] for col in 'ABC'})

    for method in (frame.duplicated, frame.drop_duplicates):
        with pytest.raises(KeyError):
            method(subset)
@pytest.mark.slow
def test_duplicated_do_not_fail_on_wide_dataframes():
    # gh-21524
    # Given the wide dataframe with a lot of columns
    # with different (important!) values
    data = {'col_{0:02d}'.format(i): np.random.randint(0, 1000, 30000)
            for i in range(100)}
    df = DataFrame(data).T
    result = df.duplicated()

    # Then duplicates produce the bool Series as a result and don't fail during
    # calculation. Actual values doesn't matter here, though usually it's all
    # False in this case
    assert isinstance(result, Series)
    # ``np.bool`` was a deprecated alias for the builtin ``bool`` and was
    # removed in NumPy 1.24; comparing a dtype against ``bool`` is equivalent.
    assert result.dtype == bool
@pytest.mark.parametrize('keep, expected', [
    ('first', Series([False, False, True, False, True])),
    ('last', Series([True, True, False, False, False])),
    (False, Series([True, True, True, False, True]))
])
def test_duplicated_keep(keep, expected):
    """``keep`` selects which copy of a duplicated row is NOT flagged."""
    # Rows 2 and 4 repeat rows 1 and 0 respectively; row 3 is unique.
    df = DataFrame({'A': [0, 1, 1, 2, 0], 'B': ['a', 'b', 'b', 'c', 'a']})

    result = df.duplicated(keep=keep)
    tm.assert_series_equal(result, expected)
@pytest.mark.xfail(reason="GH21720; nan/None falsely considered equal")
@pytest.mark.parametrize('keep, expected', [
    ('first', Series([False, False, True, False, True])),
    ('last', Series([True, True, False, False, False])),
    (False, Series([True, True, True, False, True]))
])
def test_duplicated_nan_none(keep, expected):
    """np.nan and None in an object column should count as duplicates.

    Marked xfail: duplicated() does not currently treat them as equal
    consistently (GH 21720).
    """
    df = DataFrame({'C': [np.nan, 3, 3, None, np.nan]}, dtype=object)

    result = df.duplicated(keep=keep)
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('keep', ['first', 'last', False])
@pytest.mark.parametrize('subset', [None, ['A', 'B'], 'A'])
def test_duplicated_subset(subset, keep):
    """duplicated(subset=...) must equal duplicated() on the column selection."""
    df = DataFrame({'A': [0, 1, 1, 2, 0],
                    'B': ['a', 'b', 'b', 'c', 'a'],
                    'C': [np.nan, 3, 3, None, np.nan]})

    if subset is None:
        subset = list(df.columns)
    elif isinstance(subset, string_types):
        # need to have a DataFrame, not a Series
        # -> select columns with singleton list, not string
        subset = [subset]

    expected = df[subset].duplicated(keep=keep)
    result = df.duplicated(keep=keep, subset=subset)
    tm.assert_series_equal(result, expected)
def test_drop_duplicates():
    """Exercise drop_duplicates over subsets, keep modes and edge dtypes."""
    df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
                            'foo', 'bar', 'bar', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1, 1, 2, 2, 2, 2, 1, 2],
                    'D': lrange(8)})

    # single column
    result = df.drop_duplicates('AAA')
    expected = df[:2]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('AAA', keep='last')
    expected = df.loc[[6, 7]]
    tm.assert_frame_equal(result, expected)

    # keep=False drops every row that has any duplicate
    result = df.drop_duplicates('AAA', keep=False)
    expected = df.loc[[]]
    tm.assert_frame_equal(result, expected)
    assert len(result) == 0

    # multi column: ndarray, list and tuple subsets are all accepted
    expected = df.loc[[0, 1, 2, 3]]
    result = df.drop_duplicates(np.array(['AAA', 'B']))
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates(['AAA', 'B'])
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates(('AAA', 'B'), keep='last')
    expected = df.loc[[0, 5, 6, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates(('AAA', 'B'), keep=False)
    expected = df.loc[[0]]
    tm.assert_frame_equal(result, expected)

    # consider everything
    df2 = df.loc[:, ['AAA', 'B', 'C']]

    result = df2.drop_duplicates()
    # in this case only
    expected = df2.drop_duplicates(['AAA', 'B'])
    tm.assert_frame_equal(result, expected)

    result = df2.drop_duplicates(keep='last')
    expected = df2.drop_duplicates(['AAA', 'B'], keep='last')
    tm.assert_frame_equal(result, expected)

    result = df2.drop_duplicates(keep=False)
    expected = df2.drop_duplicates(['AAA', 'B'], keep=False)
    tm.assert_frame_equal(result, expected)

    # integers
    result = df.drop_duplicates('C')
    expected = df.iloc[[0, 2]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates('C', keep='last')
    expected = df.iloc[[-2, -1]]
    tm.assert_frame_equal(result, expected)

    # narrow integer dtype must behave the same as int64
    df['E'] = df['C'].astype('int8')
    result = df.drop_duplicates('E')
    expected = df.iloc[[0, 2]]
    tm.assert_frame_equal(result, expected)
    result = df.drop_duplicates('E', keep='last')
    expected = df.iloc[[-2, -1]]
    tm.assert_frame_equal(result, expected)

    # GH 11376
    df = DataFrame({'x': [7, 6, 3, 3, 4, 8, 0],
                    'y': [0, 6, 5, 5, 9, 1, 2]})
    expected = df.loc[df.index != 3]
    tm.assert_frame_equal(df.drop_duplicates(), expected)

    df = DataFrame([[1, 0], [0, 2]])
    tm.assert_frame_equal(df.drop_duplicates(), df)

    df = DataFrame([[-2, 0], [0, -4]])
    tm.assert_frame_equal(df.drop_duplicates(), df)

    # values near the int64 boundary must hash without false collisions
    x = np.iinfo(np.int64).max / 3 * 2
    df = DataFrame([[-x, x], [0, x + 4]])
    tm.assert_frame_equal(df.drop_duplicates(), df)

    df = DataFrame([[-x, x], [x, x + 4]])
    tm.assert_frame_equal(df.drop_duplicates(), df)

    # GH 11864: 17 distinct rows -> no duplicates under any keep mode
    df = DataFrame([i] * 9 for i in range(16))
    df = df.append([[1] + [0] * 8], ignore_index=True)

    for keep in ['first', 'last', False]:
        assert df.duplicated(keep=keep).sum() == 0
def test_drop_duplicates_with_duplicate_column_names():
    """drop_duplicates works when column labels repeat (GH17836)."""
    rows = [[1, 2, 5], [3, 4, 6], [3, 4, 7]]
    df = DataFrame(rows, columns=['a', 'a', 'b'])

    # All full rows are distinct, so nothing is dropped.
    tm.assert_frame_equal(df.drop_duplicates(), df)

    # Selecting the duplicated label 'a' compares both 'a' columns together,
    # making the last two rows duplicates of each other.
    tm.assert_frame_equal(df.drop_duplicates('a'), df[:2])
def test_drop_duplicates_for_take_all():
    """keep modes when some key values ('baz', 'qux') occur exactly once."""
    df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar',
                            'foo', 'bar', 'qux', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1, 1, 2, 2, 2, 2, 1, 2],
                    'D': lrange(8)})

    # single column
    result = df.drop_duplicates('AAA')
    expected = df.iloc[[0, 1, 2, 6]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('AAA', keep='last')
    expected = df.iloc[[2, 5, 6, 7]]
    tm.assert_frame_equal(result, expected)

    # keep=False retains only the values that were never duplicated
    result = df.drop_duplicates('AAA', keep=False)
    expected = df.iloc[[2, 6]]
    tm.assert_frame_equal(result, expected)

    # multiple columns
    result = df.drop_duplicates(['AAA', 'B'])
    expected = df.iloc[[0, 1, 2, 3, 4, 6]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates(['AAA', 'B'], keep='last')
    expected = df.iloc[[0, 1, 2, 5, 6, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates(['AAA', 'B'], keep=False)
    expected = df.iloc[[0, 1, 2, 6]]
    tm.assert_frame_equal(result, expected)
def test_drop_duplicates_tuple():
    """A tuple column label is one key, not a list of keys."""
    df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar',
                                   'foo', 'bar', 'bar', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1, 1, 2, 2, 2, 2, 1, 2],
                    'D': lrange(8)})

    # single column
    result = df.drop_duplicates(('AA', 'AB'))
    expected = df[:2]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates(('AA', 'AB'), keep='last')
    expected = df.loc[[6, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates(('AA', 'AB'), keep=False)
    expected = df.loc[[]]  # empty df
    assert len(result) == 0
    tm.assert_frame_equal(result, expected)

    # multi column: the tuple label combined with a plain label in a list
    expected = df.loc[[0, 1, 2, 3]]
    result = df.drop_duplicates((('AA', 'AB'), 'B'))
    tm.assert_frame_equal(result, expected)
def test_drop_duplicates_NA():
    """None and np.nan values participate in duplicate detection."""
    # none
    df = DataFrame({'A': [None, None, 'foo', 'bar',
                          'foo', 'bar', 'bar', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
                    'D': lrange(8)})

    # single column
    result = df.drop_duplicates('A')
    expected = df.loc[[0, 2, 3]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('A', keep='last')
    expected = df.loc[[1, 6, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('A', keep=False)
    expected = df.loc[[]]  # empty df
    tm.assert_frame_equal(result, expected)
    assert len(result) == 0

    # multi column
    result = df.drop_duplicates(['A', 'B'])
    expected = df.loc[[0, 2, 3, 6]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates(['A', 'B'], keep='last')
    expected = df.loc[[1, 5, 6, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates(['A', 'B'], keep=False)
    expected = df.loc[[6]]
    tm.assert_frame_equal(result, expected)

    # nan
    df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                          'foo', 'bar', 'bar', 'foo'],
                    'B': ['one', 'one', 'two', 'two',
                          'two', 'two', 'one', 'two'],
                    'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
                    'D': lrange(8)})

    # single column
    result = df.drop_duplicates('C')
    expected = df[:2]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('C', keep='last')
    expected = df.loc[[3, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('C', keep=False)
    expected = df.loc[[]]  # empty df
    tm.assert_frame_equal(result, expected)
    assert len(result) == 0

    # multi column
    result = df.drop_duplicates(['C', 'B'])
    expected = df.loc[[0, 1, 2, 4]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates(['C', 'B'], keep='last')
    expected = df.loc[[1, 3, 6, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates(['C', 'B'], keep=False)
    expected = df.loc[[1]]
    tm.assert_frame_equal(result, expected)
def test_drop_duplicates_NA_for_take_all():
    """NA handling when some key values occur exactly once (take-all paths)."""
    # none
    df = DataFrame({'A': [None, None, 'foo', 'bar',
                          'foo', 'baz', 'bar', 'qux'],
                    'C': [1.0, np.nan, np.nan, np.nan, 1., 2., 3, 1.]})

    # single column
    result = df.drop_duplicates('A')
    expected = df.iloc[[0, 2, 3, 5, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('A', keep='last')
    expected = df.iloc[[1, 4, 5, 6, 7]]
    tm.assert_frame_equal(result, expected)

    # keep=False keeps only the values that were never duplicated
    result = df.drop_duplicates('A', keep=False)
    expected = df.iloc[[5, 7]]
    tm.assert_frame_equal(result, expected)

    # nan

    # single column
    result = df.drop_duplicates('C')
    expected = df.iloc[[0, 1, 5, 6]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('C', keep='last')
    expected = df.iloc[[3, 5, 6, 7]]
    tm.assert_frame_equal(result, expected)

    result = df.drop_duplicates('C', keep=False)
    expected = df.iloc[[5, 6]]
    tm.assert_frame_equal(result, expected)
def test_drop_duplicates_inplace():
    """inplace=True must mutate the frame to match the non-inplace result."""
    orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
                            'foo', 'bar', 'bar', 'foo'],
                      'B': ['one', 'one', 'two', 'two',
                            'two', 'two', 'one', 'two'],
                      'C': [1, 1, 2, 2, 2, 2, 1, 2],
                      'D': lrange(8)})

    # single column
    df = orig.copy()
    df.drop_duplicates('A', inplace=True)
    expected = orig[:2]
    result = df
    tm.assert_frame_equal(result, expected)

    df = orig.copy()
    df.drop_duplicates('A', keep='last', inplace=True)
    expected = orig.loc[[6, 7]]
    result = df
    tm.assert_frame_equal(result, expected)

    df = orig.copy()
    df.drop_duplicates('A', keep=False, inplace=True)
    expected = orig.loc[[]]
    result = df
    tm.assert_frame_equal(result, expected)
    assert len(df) == 0

    # multi column
    df = orig.copy()
    df.drop_duplicates(['A', 'B'], inplace=True)
    expected = orig.loc[[0, 1, 2, 3]]
    result = df
    tm.assert_frame_equal(result, expected)

    df = orig.copy()
    df.drop_duplicates(['A', 'B'], keep='last', inplace=True)
    expected = orig.loc[[0, 5, 6, 7]]
    result = df
    tm.assert_frame_equal(result, expected)

    df = orig.copy()
    df.drop_duplicates(['A', 'B'], keep=False, inplace=True)
    expected = orig.loc[[0]]
    result = df
    tm.assert_frame_equal(result, expected)

    # consider everything
    orig2 = orig.loc[:, ['A', 'B', 'C']].copy()

    df2 = orig2.copy()
    df2.drop_duplicates(inplace=True)
    # in this case only
    expected = orig2.drop_duplicates(['A', 'B'])
    result = df2
    tm.assert_frame_equal(result, expected)

    df2 = orig2.copy()
    df2.drop_duplicates(keep='last', inplace=True)
    expected = orig2.drop_duplicates(['A', 'B'], keep='last')
    result = df2
    tm.assert_frame_equal(result, expected)

    df2 = orig2.copy()
    df2.drop_duplicates(keep=False, inplace=True)
    expected = orig2.drop_duplicates(['A', 'B'], keep=False)
    result = df2
    tm.assert_frame_equal(result, expected)
| bsd-3-clause |
ArdenB/TSSRESTREND | NCdemo/S03_MappingResults.py | 1 | 16577 | # ==============================================================================
__title__ = "csv to netcdf and maps"
__author__ = "Arden Burrell"
__version__ = "v1.0(23.06.2020)"
__email__ = "aburrell@whrc.org"
# ==============================================================================
import os
import sys
# ===== CHange the dir to the script location =====
if not os.path.dirname(sys.argv[0]) == "":
os.chdir(os.path.dirname(sys.argv[0]))
# ===== append that to the system path =====
sys.path.append(os.getcwd())
# ========== Import packages ==========
import numpy as np
import pandas as pd
import argparse
# import datetime as dt
import warnings as warn
import xarray as xr
import dask
import bottleneck as bn
from numba import jit
from collections import OrderedDict
import json
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import palettable
import statsmodels.stats.multitest as smsM
import scipy.stats as sps
# ========== Load custom functions ==========
import CustomFunctions as cf
# ==============================================================================
def main(args):
    """Convert the TSS-RESTREND attribution CSV to netCDF and map each driver.

    args:
        args: argparse namespace with ``use_archived`` (int or None),
            ``plots`` (bool), ``sig`` (bool) and ``method`` (str) attributes.
    """
    # =========== Read the metadata file in ==========
    if args.use_archived is None:
        infofile = './results/infomation.json'
    else:
        # BUG FIX: this branch previously built the path string but never
        # assigned it, so ``infofile`` was undefined whenever an archived
        # run was requested.
        infofile = './results/archive/infomation_%02d.json' % args.use_archived
    with open(infofile, "r+", errors='ignore') as f:
        info = json.load(f)

    # ========== fix the timedelta ==========
    if type(info["ComputeTime"]) == float:
        info["ComputeTime"] = pd.Timedelta(info["ComputeTime"], unit="sec")

    # ========== Open the csv results file ==========
    fn = "./results/AttributionResults.csv"
    df = pd.read_csv(fn, index_col=0)

    # ========== Fix the indexing and the error messaging ==========
    lonlat = np.array([_unpackLatLon(index) for index in df.index])
    df["longitude"] = lonlat[:, 0]
    df["latitude"] = lonlat[:, 1]
    df = df.reset_index(drop=True).set_index(["longitude", "latitude"])
    df["errors"] = df["errors"].apply(_df_error)
    df["AnthropogenicClimateChange"] = df.ClimateChange + df.CO2

    # ========== calculate adjusted p values for ACC ==========
    # Combine the ClimateChange and CO2 p values pixel by pixel.
    pstack = np.stack((df["ClimateChange.Pvalue"].values, df["CO2.Pvalue"].values))
    df["AnthropogenicClimateChange.Pvalue"] = np.apply_along_axis(_combine_pvalue2d, 0, pstack)

    # ========== Open a reference dataset ==========
    # This avoids hard coding the grid information; it is read from the
    # original NDVI file instead.
    fnNDVI = "./data/AUSdemo_GIMMS_ndvi.nc"
    ds_ref = xr.open_dataset(fnNDVI)

    # ========== Convert back to xr dataset ==========
    ds = xr.Dataset.from_dataframe(df)
    # ++++++++++ Fix the time ++++++++++
    # Add a time dim (the end of the original timeseries); netcdf files
    # need a time dimension.
    ds = ds.assign_coords(time=pd.Timestamp("2015-12-31")).expand_dims("time")
    ds = ds.transpose('time', 'latitude', 'longitude').sortby("latitude", ascending=False)

    # ++++++++++ Fix the Global Attributes ++++++++++
    ds = GlobalAttributes(ds, info, ds_ref=ds_ref)

    # ++++++++++ Setup the netcdf file encoding ++++++++++
    encoding = OrderedDict()
    for va in df.columns:
        # ========== Drop variables that are entirely nan ==========
        if bn.allnan(ds[va]):
            ds = ds.drop_vars(va)
        else:
            encoding[va] = ({'shuffle': True,
                             'zlib': True,
                             'complevel': 5.
                             # 'chunksizes':[1, ensinfo.lats.shape[0], 100],
                             })

    # ========== Write the dataset to a netcdf file ==========
    print("Starting write of data at:", pd.Timestamp.now())
    ds.to_netcdf(
        "./results/TSSRattribution_Results_%s_%dkm.nc" % (info["photo"], (25*(info["coarsen"]-1)+25)),
        format='NETCDF4',
        encoding=encoding,
        unlimited_dims=["time"])

    # ========== Make a series of maps ==========
    if args.plots:
        # ========== set the plot path ==========
        plotpath = "./results/plots/"
        for va in ds.variables:
            # +++++ skip coordinates, p values and bookkeeping variables +++++
            if (va in ['longitude', 'latitude', "OtherFactorsValid", "errors", "time"]
                    or va.endswith(".Pvalue")):
                continue
            # ========== Perform significance correction ==========
            if args.sig:
                signif, s_method = FDRSignificanceCorrection(ds, va, args.method)
            else:
                s_method = ""
                signif = None
            # ========== Create the metadata for the plots ==========
            maininfo = "Plot from %s (%s):%s by %s, %s" % (__title__, __file__,
                __version__, __author__, str(pd.Timestamp.now()))
            maininfo += s_method
            gitinfo = cf.gitmetadata()
            print(va)
            # ========== Build, save and show the maps ==========
            MapSetup(ds, va, signif, maininfo, gitinfo, info, plotpath)
# ==============================================================================
# BUILD THE MAPS
# ==============================================================================
def MapSetup(ds, va, signif, maininfo, gitinfo, info, plotpath, mask=None):
    """
    Builds the maps of the change attribution
    args:
        ds       xrds: TSSRESTREND RESULTS
        va       str : the name of the change variable
        signif   xrda or None: float mask (1.0 = significant after FDR)
        maininfo str : plot string
        gitinfo  str : summary of the git header
        info     dict: run metadata (keys used: photo, coarsen, annual, Nyears)
        plotpath str : directory the map is written to
        mask     xrda: Any additional boolean masks
    """
    # ========== Create the mapdet object ==========
    mapdet = cf.mapclass("Australia", plotpath)
    mapdet.dpi = 130
    mapdet.var = va
    # Description encodes the variable, sensor and pixel size in km.
    mapdet.desc = "%s_%s_%dkm" % (va, info["photo"], (25*(info["coarsen"]-1)+25))

    # ========== Create the data array ==========
    da = ds[va].copy()
    # NOTE(review): the squeezed result is never used below; this may have
    # been intended as ``da = da.squeeze()`` -- confirm before changing.
    ad = da.squeeze()

    # ========== Remove the non dryland areas ==========
    if not (mask is None):
        da *= mask

    # ========== Mask for FDR significnace ==========
    if not signif is None:
        da *= signif
        mapdet.desc += "_withNonSigMasked"

    # ========== calculate the tick position ==========
    if info['annual']:
        mapdet.cblabel = r"$\times$10$^{-2}$ $\Delta$ NDVI$_{max}$ yr$^{-1}$"
        # scale the NDVI to make it fit
        da *= 1e2
    else:
        mapdet.cblabel = r'$\Delta$ NDVI$_{max}$'
    if info['Nyears'] != 34.:
        warn.warn("The plot tick values were chosen for a time series of 34 years. Other lengths may require differen colorbars")
    tks = np.array([-0.30, -0.24, -0.12, -0.06, -0.02, 0, 0.02, 0.06, 0.12, 0.24, 0.30])
    mapdet.cmin = -0.3 #-0.1 # the min of the colormap
    mapdet.cmax = 0.3#0.1 # the max of the colormap

    # ========== Set the colormap (one palette per driver) ==========
    if va == "ObservedChange":
        cmapHex = palettable.colorbrewer.diverging.PRGn_10.hex_colors
    elif va in ["ObservedClimate", "ClimateChange"]:
        cmapHex = palettable.colorbrewer.diverging.BrBG_10.hex_colors
    elif va == "ClimateVariability":
        cmapHex = palettable.colorbrewer.diverging.RdBu_10.hex_colors
    elif va == "CO2":
        # CO2 gets a sequential palette and a non-negative tick range
        tks = np.array([0, 0.001, 0.02, 0.03, 0.04, 0.06, 0.24, 0.30])
        cmapHex = palettable.colorbrewer.sequential.Purples_7.hex_colors
        mapdet.cmin = 0.0#-0.1 # the min of the colormap
    elif va == "LandUse":
        cmapHex = palettable.colorbrewer.diverging.PiYG_10.hex_colors
    elif va == "OtherFactors":
        # cmapHex = palettable.colorbrewer.diverging.RdBu_10.hex_colors
        cmapHex = palettable.cmocean.diverging.Curl_10_r.hex_colors
    elif va == "AnthropogenicClimateChange":
        cmapHex = palettable.colorbrewer.diverging.PuOr_10.hex_colors

    # ========== Set the variables ==========
    mapdet.cZero = 0.5 # the zero point of the colormap

    # ========== Add the Font info ==========
    mapdet.gridalp = 0.5

    # ========== Setup the cmap ==========
    if va == "CO2":
        spacing = 'uniform'
        cbounds = tks
        ticks = tks
        # ========== create the colormap ==========
        cmap = mpl.colors.ListedColormap(cmapHex)
        cmap.set_over(cmapHex[-1])
        norm = mpl.colors.BoundaryNorm(cbounds, cmap.N)
        mapdet.extend = "max"
    else:
        cmap, norm, ticks, cbounds, spacing = cf.ReplaceHexColor(
            cmapHex, mapdet.cmin, mapdet.cmax, ticks=tks, zeroR = 0.001)
        # print(cbounds)
        # cmap, norm, ticks, cbounds, spacing = pf.ReplaceHexColor(
        #     cmapHex, mapdet.cmin, mapdet.cmax, ticks=tks, zeroR = 0.001)

    # Missing data is rendered dark grey.
    cmap.set_bad('dimgrey',1.)
    mapdet.cmap = cmap
    mapdet.norm = norm
    mapdet.ticks = ticks
    mapdet.cbounds = cbounds
    mapdet.spacing = spacing

    # ========== Make a map ==========
    plotinfo, fname = cf.mapmaker(da, mapdet)

    # ========== Save the metadata ==========
    if fname:
        infomation = [maininfo, plotinfo, fname, gitinfo]
        cf.writemetadata(fname, infomation)
    # ipdb.set_trace()
def FDRSignificanceCorrection(ds, var, FDRmethod, alpha=0.10):
    """
    Apply a False Discovery Rate (FDR) correction to the p-values of an
    existing trend-detection result, to account for multiple comparisons.

    args:
        ds: xr.Dataset
            Dataset holding the trend-analysis results, including a
            "<var>.Pvalue" variable ("Obs.Pvalue" for ObservedChange).
        var: str
            Name of the component being corrected (e.g. "ObservedChange").
        FDRmethod: str
            Either "fdr_by" (Benjamini/Yekutieli) or "fdr_bh"
            (Benjamini/Hochberg).
        alpha: float
            Family-wise error rate passed to the correction. Default 0.10.
    returns:
        re: xr.DataArray
            Float significance mask (1.0 significant, 0.0 not) on the
            original (time, latitude, longitude) grid.
        t_method: str
            Human readable description of the correction applied.
    """
    if FDRmethod == "fdr_by":
        t_method = "Adjusting for multiple comparisons using Benjamini/Yekutieli"
    elif FDRmethod == "fdr_bh":
        t_method = "Adjusting for multiple comparisons using Benjamini/Hochberg"
    else:
        raise ValueError(
            "unknown MultipleComparisons method: %s, must be either fdr_by or fdr_bh " % FDRmethod)

    # ========== Work out the pvalue name ===========
    if var == "ObservedChange":
        pnm = "Obs.Pvalue"
    elif var == "OtherFactors":
        # OtherFactors has no p-value of its own; it is zero masked where
        # any of its components could not be calculated.
        t_method = "OtherFactors is Zero masked in areas where 1 or more components could not be calculated"
        print(t_method)
        return ds["OtherFactorsValid"].astype(float), t_method
    else:
        pnm = var + ".Pvalue"

    # ========== Locate the p values and reshape them into a 1d array ==========
    # ++++++++++ Find the pvalues ++++++++++
    try:
        ODimDa = ds[pnm].stack(loc=('time', 'latitude', 'longitude')).copy()
    except KeyError as err:
        # Was a bare "except: breakpoint()" debugging leftover; fail loudly
        # with a useful message instead of dropping into the debugger.
        raise KeyError("p-value variable %s not found in dataset" % pnm) from err
    isnan = np.isnan(ODimDa)
    # ++++++++++ pull out the non nan pvalues ++++++++++
    pvalue1d = ODimDa[~isnan]

    # =========== Perform the MC correction ===========
    pvalue_adj = smsM.multipletests(pvalue1d, method=FDRmethod, alpha=alpha)

    # =========== Get the significance in bool ===========
    sigfloat = pvalue_adj[0].astype(float)

    # Make an empty dataarray and fill the valid cells with the result
    re = xr.zeros_like(ODimDa)
    re[~isnan] = sigfloat
    print(t_method)
    return re.unstack(), t_method
# ==============================================================================
# FIX THE METADATA OF THE XR DATASET
# ==============================================================================
def GlobalAttributes(ds, info, ds_ref=None):
    """
    Creates the global attributes for the netcdf file that is being written
    these attributes come from :
    https://www.unidata.ucar.edu/software/thredds/current/netcdf-java/metadata/DataDiscoveryAttConvention.html
    args
        ds: xarray ds
            Dataset containing the infomation im intepereting
        info: dict
            Run provenance; its "history" and "TSSRESTREND.version" entries
            are read, and "history" is prepended to in place.
        ds_ref: xarray ds
            Dataset that contains the original data to copy coordinate
            metadata from. Defualt is None so it can be skipped
    returns:
        ds: xarray ds
            The same dataset with its global attributes filled in
    """
    # ========== Update the attributes dictionary in place ==========
    attr = ds.attrs

    # ========== Fill the Dictionary ==========
    # ++++++++++ Highly recomended ++++++++++
    attr["title"] = "TSS-RESTREND Attribution Results"
    attr["summary"] = "The restacked results of a TSSRattribution function applied to GIMMS and TERRCLIMATE DATA"
    attr["Conventions"] = "CF-1.7"

    # ++++++++++ Data Provinance ++++++++++
    info["history"] = "%s: Netcdf file created using %s (%s):%s. Script originally developed by %s" % (
        str(pd.Timestamp.now()), __title__, __file__, __version__, __author__) + info["history"]
    attr["history"] = info["history"]
    attr["creator_name"] = __author__
    attr["creator_url"] = "ardenburrell.com"
    attr["creator_email"] = __email__
    attr["institution"] = "WHRC"
    attr["date_created"] = str(pd.Timestamp.now())

    # ++++++++++ Netcdf Summary infomation ++++++++++
    # BUGFIX: start/end were swapped (start was 2015-12-31, end 1982-01-01)
    attr["time_coverage_start"] = str(pd.Timestamp("1982-01-01"))
    attr["time_coverage_end"] = str(pd.Timestamp("2015-12-31"))

    # ++++++++++ TSS.RESTREND INFOMATION ++++++++++
    attr["package_version"] = "TSSRESTREND version: " + info["TSSRESTREND.version"]
    attr["package_url"] = "https://cran.r-project.org/web/packages/TSS.RESTREND/index.html"

    if ds_ref is not None:
        # Copy the coordinate metadata across from the reference dataset
        ds.longitude.attrs = ds_ref.longitude.attrs
        ds.latitude.attrs = ds_ref.latitude.attrs
        ds.time.attrs = ds_ref.time.attrs
    return ds
# ==============================================================================
# FUNCTIONS FOR FIXING THE INPUT DATAFRAME
# ==============================================================================
def _unpackLatLon(index):
"""
Fixes the string formatting of the indexs
args:
index: str
string of format "(longitude, latitude)""
return:
longitude, latitude: Floats
"""
# fix the latitude and longitude
lonST, latST = index.split(", ")
return float(lonST[1:]), float(latST[:-1])
def _df_error(error):
"""
Takes in the error codes which are catogorical and encodes them into integers
"""
# ========== Keys for error messages ==========
keys = ({
"NANinCTSR.VI":1,
"NANinCTSR.RF":2,
"NANinCTSR.TM":3,
"AttributionFailed":4
})
# breakpoint()
if error in keys.keys():
return keys[error]
elif np.isnan(error):
return 0
else:
# unknow errror
return 5
def _combine_pvalue2d(pvals):
    """
    takes an array and removes nans then combines
    args:
        pvalues: np.array
            p-values to combine; may contain NaNs.
    returns:
        float: the combined p-value, np.NAN if every input is NaN,
        or 0 if the p-values sum to zero.
    """
    if bn.allnan(pvals):
        # nothing valid to combine
        return np.NAN
    elif np.sum(pvals) == 0:
        # all-zero p-values: short circuit rather than combining zeros
        return 0
    else:
        # NOTE(review): sps is presumably scipy.stats — confirm at import site.
        # combine_pvalues is applied to the non-NaN values only.
        ___, pv = sps.combine_pvalues(pvals[~np.isnan(pvals)])
        return pv
# ==============================================================================
# SCRIPT ENTRY POINT
# ==============================================================================
if __name__ == '__main__':
    # ========== Build and parse the command line arguments ==========
    description='Arguments that can be passed to the saving and plotting script'
    parser = argparse.ArgumentParser(description=description)
    # store_false: plots are displayed unless -p/--plots is passed
    parser.add_argument(
        "-p", "--plots", action="store_false",
        help="Quiet plots: if selected plots will not be displayed ")
    parser.add_argument(
        "-s", "--sig", action="store_true",
        help="Significance: Apply a zero mask using FDR adjustment and the Benjamini/Hochberg method")
    parser.add_argument(
        "--method", type=str,choices=["fdr_bh", 'fdr_by'], default="fdr_bh", help="The method used to adjust for False Discovery Rate. must be fdr_bh or fdr_by")
    parser.add_argument(
        "--use_archived", type=int, default=None, help="Use this argument to redo archived infomation.json files")
    args = parser.parse_args()
    # ========== Hand over to the main function ==========
    main(args)
| mit |
lancezlin/ml_template_py | lib/python2.7/site-packages/ipykernel/inprocess/ipkernel.py | 5 | 6838 | """An in-process kernel"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from contextlib import contextmanager
import logging
import sys
from IPython.core.interactiveshell import InteractiveShellABC
from ipykernel.jsonutil import json_clean
from traitlets import Any, Enum, Instance, List, Type, default
from ipykernel.ipkernel import IPythonKernel
from ipykernel.zmqshell import ZMQInteractiveShell
from .constants import INPROCESS_KEY
from .socket import DummySocket
from ..iostream import OutStream, BackgroundSocket, IOPubThread
#-----------------------------------------------------------------------------
# Main kernel class
#-----------------------------------------------------------------------------
class InProcessKernel(IPythonKernel):
    """An IPython kernel that runs in the same process as its frontends.

    Sockets are replaced by in-memory dummies and IOPub traffic is
    dispatched directly to the connected frontends' channels.
    """

    #-------------------------------------------------------------------------
    # InProcessKernel interface
    #-------------------------------------------------------------------------

    # The frontends connected to this kernel.
    frontends = List(
        Instance('ipykernel.inprocess.client.InProcessKernelClient',
                 allow_none=True)
    )

    # The GUI environment that the kernel is running under. This need not be
    # specified for the normal operation for the kernel, but is required for
    # IPython's GUI support (including pylab). The default is 'inline' because
    # it is safe under all GUI toolkits.
    gui = Enum(('tk', 'gtk', 'wx', 'qt', 'qt4', 'inline'),
               default_value='inline')

    # Buffer that _input_request polls for the frontend's reply.
    raw_input_str = Any()
    stdout = Any()
    stderr = Any()

    #-------------------------------------------------------------------------
    # Kernel interface
    #-------------------------------------------------------------------------

    shell_class = Type(allow_none=True)
    shell_streams = List()
    control_stream = Any()
    _underlying_iopub_socket = Instance(DummySocket, ())
    iopub_thread = Instance(IOPubThread)

    @default('iopub_thread')
    def _default_iopub_thread(self):
        # Publish IO through a background thread over the dummy socket.
        thread = IOPubThread(self._underlying_iopub_socket)
        thread.start()
        return thread

    iopub_socket = Instance(BackgroundSocket)

    @default('iopub_socket')
    def _default_iopub_socket(self):
        return self.iopub_thread.background_socket

    stdin_socket = Instance(DummySocket, ())

    def __init__(self, **traits):
        super(InProcessKernel, self).__init__(**traits)
        # Forward every message sent on the IOPub socket to the frontends.
        self._underlying_iopub_socket.observe(self._io_dispatch, names=['message_sent'])
        self.shell.kernel = self

    def execute_request(self, stream, ident, parent):
        """ Override for temporary IO redirection. """
        with self._redirected_io():
            super(InProcessKernel, self).execute_request(stream, ident, parent)

    def start(self):
        """ Override registration of dispatchers for streams. """
        self.shell.exit_now = False

    def _abort_queue(self, stream):
        """ The in-process kernel doesn't abort requests. """
        pass

    def _input_request(self, prompt, ident, parent, password=False):
        # Flush output before making the request.
        self.raw_input_str = None
        sys.stderr.flush()
        sys.stdout.flush()

        # Send the input request to the frontend that issued the execute.
        content = json_clean(dict(prompt=prompt, password=password))
        msg = self.session.msg(u'input_request', content, parent)
        for frontend in self.frontends:
            if frontend.session.session == parent['header']['session']:
                frontend.stdin_channel.call_handlers(msg)
                break
        else:
            logging.error('No frontend found for raw_input request')
            return str()

        # Await a response: the frontend handler sets raw_input_str while we
        # pump its stdin channel events.
        while self.raw_input_str is None:
            frontend.stdin_channel.process_events()
        return self.raw_input_str

    #-------------------------------------------------------------------------
    # Protected interface
    #-------------------------------------------------------------------------

    @contextmanager
    def _redirected_io(self):
        """ Temporarily redirect IO to the kernel.
        """
        sys_stdout, sys_stderr = sys.stdout, sys.stderr
        sys.stdout, sys.stderr = self.stdout, self.stderr
        # BUGFIX: restore the real streams even if the executed code raises;
        # previously an exception left stdout/stderr redirected forever.
        try:
            yield
        finally:
            sys.stdout, sys.stderr = sys_stdout, sys_stderr

    #------ Trait change handlers --------------------------------------------

    def _io_dispatch(self, change):
        """ Called when a message is sent to the IO socket.
        """
        # Receive the message this kernel just published and hand it to the
        # iopub channel of every connected frontend.
        ident, msg = self.session.recv(self.iopub_socket, copy=False)
        for frontend in self.frontends:
            frontend.iopub_channel.call_handlers(msg)

    #------ Trait initializers -----------------------------------------------

    @default('log')
    def _default_log(self):
        return logging.getLogger(__name__)

    @default('session')
    def _default_session(self):
        from jupyter_client.session import Session
        return Session(parent=self, key=INPROCESS_KEY)

    @default('shell_class')
    def _default_shell_class(self):
        return InProcessInteractiveShell

    @default('stdout')
    def _default_stdout(self):
        return OutStream(self.session, self.iopub_thread, u'stdout')

    @default('stderr')
    def _default_stderr(self):
        return OutStream(self.session, self.iopub_thread, u'stderr')
#-----------------------------------------------------------------------------
# Interactive shell subclass
#-----------------------------------------------------------------------------
class InProcessInteractiveShell(ZMQInteractiveShell):
    """Interactive shell bound to an in-process kernel."""

    # The owning kernel (assigned by InProcessKernel.__init__).
    kernel = Instance('ipykernel.inprocess.ipkernel.InProcessKernel',
                      allow_none=True)

    #-------------------------------------------------------------------------
    # InteractiveShell interface
    #-------------------------------------------------------------------------

    def enable_gui(self, gui=None):
        """Enable GUI integration for the kernel."""
        from ipykernel.eventloops import enable_gui
        if not gui:
            # Fall back to the kernel's configured GUI toolkit.
            gui = self.kernel.gui
        return enable_gui(gui, kernel=self.kernel)

    def enable_matplotlib(self, gui=None):
        """Enable matplotlib integration for the kernel."""
        if not gui:
            gui = self.kernel.gui
        return super(InProcessInteractiveShell, self).enable_matplotlib(gui)

    def enable_pylab(self, gui=None, import_all=True, welcome_message=False):
        """Activate pylab support at runtime."""
        if not gui:
            gui = self.kernel.gui
        return super(InProcessInteractiveShell, self).enable_pylab(gui, import_all,
                                                                   welcome_message)

# Register the shell as a virtual subclass of the InteractiveShell ABC.
InteractiveShellABC.register(InProcessInteractiveShell)
| mit |
kazemakase/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
# (estimator class, constructor kwargs) pairs exercised by every test below.
ESTIMATORS = [
    (label_propagation.LabelPropagation, {'kernel': 'rbf'}),
    (label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
    (label_propagation.LabelSpreading, {'kernel': 'rbf'}),
    (label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
    """The unlabelled third sample should be transduced to class 1."""
    samples = [[1., 0.], [0., 2.], [1., 3.]]
    labels = [0, 1, -1]
    for estimator, parameters in ESTIMATORS:
        model = estimator(**parameters)
        model.fit(samples, labels)
        nose.tools.assert_equal(model.transduction_[2], 1)
def test_distribution():
    """Check the learnt label distribution for the unlabelled sample."""
    samples = [[1., 0.], [0., 1.], [1., 1.]]
    labels = [0, 1, -1]
    for estimator, parameters in ESTIMATORS:
        clf = estimator(**parameters).fit(samples, labels)
        if parameters['kernel'] == 'knn':
            # unstable test; changes in k-NN ordering break it
            # (an assert previously sat *after* this continue and was
            # unreachable dead code -- removed)
            continue
        assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
                                  np.array([.5, .5]), 2)
def test_predict():
    """Hard prediction for a point close to the unlabelled sample."""
    samples = [[1., 0.], [0., 2.], [1., 3.]]
    labels = [0, 1, -1]
    for estimator, parameters in ESTIMATORS:
        model = estimator(**parameters)
        model.fit(samples, labels)
        assert_array_equal(model.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
    """Probabilistic prediction should be an even split on this toy set."""
    samples = [[1., 0.], [0., 1.], [1., 2.5]]
    labels = [0, 1, -1]
    for estimator, parameters in ESTIMATORS:
        model = estimator(**parameters)
        model.fit(samples, labels)
        assert_array_almost_equal(model.predict_proba([[1., 1.]]),
                                  np.array([[0.5, 0.5]]))
| bsd-3-clause |
shahankhatch/scikit-learn | examples/applications/plot_out_of_core_classification.py | 255 | 13919 | """
======================================================
Out-of-core classification of text documents
======================================================
This is an example showing how scikit-learn can be used for classification
using an out-of-core approach: learning from data that doesn't fit into main
memory. We make use of an online classifier, i.e., one that supports the
partial_fit method, that will be fed with batches of examples. To guarantee
that the features space remains the same over time we leverage a
HashingVectorizer that will project each example into the same feature space.
This is especially useful in the case of text classification where new
features (words) may appear in each batch.
The dataset used in this example is Reuters-21578 as provided by the UCI ML
repository. It will be automatically downloaded and uncompressed on first run.
The plot represents the learning curve of the classifier: the evolution
of classification accuracy over the course of the mini-batches. Accuracy is
measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before
feeding them to the learner.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# @FedericoV <https://github.com/FedericoV/>
# License: BSD 3 clause
from __future__ import print_function
from glob import glob
import itertools
import os.path
import re
import tarfile
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.externals.six.moves import html_parser
from sklearn.externals.six.moves import urllib
from sklearn.datasets import get_data_home
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import Perceptron
from sklearn.naive_bayes import MultinomialNB
def _not_in_sphinx():
    # Hack to detect whether we are running by the sphinx builder
    # (presumably __file__ is absent from the globals in that case --
    # TODO confirm against the sphinx execution environment).
    return '__file__' in globals()
###############################################################################
# Reuters Dataset related routines
###############################################################################
class ReutersParser(html_parser.HTMLParser):
    """Utility class to parse a SGML file and yield documents one at a time."""

    def __init__(self, encoding='latin-1'):
        html_parser.HTMLParser.__init__(self)
        self._reset()
        self.encoding = encoding

    def handle_starttag(self, tag, attrs):
        # Dispatch to start_<tag> if such a handler exists, else no-op.
        method = 'start_' + tag
        getattr(self, method, lambda x: None)(attrs)

    def handle_endtag(self, tag):
        # Dispatch to end_<tag> if such a handler exists, else no-op.
        method = 'end_' + tag
        getattr(self, method, lambda: None)()

    def _reset(self):
        # Clear the state flags and accumulators for the current document.
        self.in_title = 0
        self.in_body = 0
        self.in_topics = 0
        self.in_topic_d = 0
        self.title = ""
        self.body = ""
        self.topics = []
        self.topic_d = ""

    def parse(self, fd):
        """Feed the file chunk by chunk, yielding each finished document."""
        self.docs = []
        for chunk in fd:
            self.feed(chunk.decode(self.encoding))
            for doc in self.docs:
                yield doc
            self.docs = []
        self.close()

    def handle_data(self, data):
        # Route character data to whichever element we are currently inside.
        if self.in_body:
            self.body += data
        elif self.in_title:
            self.title += data
        elif self.in_topic_d:
            self.topic_d += data

    def start_reuters(self, attributes):
        pass

    def end_reuters(self):
        # Normalise whitespace and emit the completed document dict.
        self.body = re.sub(r'\s+', r' ', self.body)
        self.docs.append({'title': self.title,
                          'body': self.body,
                          'topics': self.topics})
        self._reset()

    def start_title(self, attributes):
        self.in_title = 1

    def end_title(self):
        self.in_title = 0

    def start_body(self, attributes):
        self.in_body = 1

    def end_body(self):
        self.in_body = 0

    def start_topics(self, attributes):
        self.in_topics = 1

    def end_topics(self):
        self.in_topics = 0

    def start_d(self, attributes):
        self.in_topic_d = 1

    def end_d(self):
        # A </D> inside <TOPICS> closes one topic label.
        self.in_topic_d = 0
        self.topics.append(self.topic_d)
        self.topic_d = ""
def stream_reuters_documents(data_path=None):
    """Iterate over documents of the Reuters dataset.

    The Reuters archive will automatically be downloaded and uncompressed if
    the `data_path` directory does not exist.

    Documents are represented as dictionaries with 'body' (str),
    'title' (str), 'topics' (list(str)) keys.
    """
    DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
                    'reuters21578-mld/reuters21578.tar.gz')
    ARCHIVE_FILENAME = 'reuters21578.tar.gz'

    if data_path is None:
        data_path = os.path.join(get_data_home(), "reuters")
    if not os.path.exists(data_path):
        """Download the dataset."""
        print("downloading dataset (once and for all) into %s" %
              data_path)
        os.mkdir(data_path)

        def progress(blocknum, bs, size):
            # urlretrieve reporthook: print a same-line download progress bar.
            total_sz_mb = '%.2f MB' % (size / 1e6)
            current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
            if _not_in_sphinx():
                print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
                      end='')

        archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
        urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
                                   reporthook=progress)
        if _not_in_sphinx():
            print('\r', end='')
        print("untarring Reuters dataset...")
        tarfile.open(archive_path, 'r:gz').extractall(data_path)
        print("done.")

    # Stream every document out of every .sgm file in the data directory.
    parser = ReutersParser()
    for filename in glob(os.path.join(data_path, "*.sgm")):
        for doc in parser.parse(open(filename, 'rb')):
            yield doc
###############################################################################
# Main
###############################################################################
# Create the vectorizer and limit the number of features to a reasonable
# maximum. Hashing keeps the feature space fixed across mini-batches.
vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
                               non_negative=True)

# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()

# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'

# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
    'SGD': SGDClassifier(),
    'Perceptron': Perceptron(),
    'NB Multinomial': MultinomialNB(alpha=0.01),
    'Passive-Aggressive': PassiveAggressiveClassifier(),
}
def get_minibatch(doc_iter, size, pos_class=positive_class):
    """Extract a minibatch of examples, return a tuple X_text, y.

    Note: size is before excluding invalid docs with no topics assigned.
    The binary label is whether `pos_class` appears in the doc's topics.
    """
    data = [(u'{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
            for doc in itertools.islice(doc_iter, size)
            if doc['topics']]
    if not len(data):
        # Stream exhausted: return empty arrays so callers can test len().
        return np.asarray([], dtype=int), np.asarray([], dtype=int)
    X_text, y = zip(*data)
    return X_text, np.asarray(y, dtype=int)
def iter_minibatches(doc_iter, minibatch_size):
    """Yield (X_text, y) minibatches until the document stream is exhausted."""
    while True:
        X_text, y = get_minibatch(doc_iter, minibatch_size)
        if not len(X_text):
            break
        yield X_text, y
# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}

# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
# Vectorize once; the hashed feature space is shared with the training batches.
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))
def progress(cls_name, stats):
    """Report progress information, return a string.

    NOTE(review): reads the module-level `test_stats` dict in addition to
    the per-classifier `stats` argument.
    """
    duration = time.time() - stats['t0']
    s = "%20s classifier : \t" % cls_name
    s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
    s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
    s += "accuracy: %(accuracy).3f " % stats
    s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
    return s
cls_stats = {}

for cls_name in partial_fit_classifiers:
    # Per-classifier bookkeeping: accuracy/runtime history over mini-batches.
    stats = {'n_train': 0, 'n_train_pos': 0,
             'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),
             'runtime_history': [(0, 0)], 'total_fit_time': 0.0}
    cls_stats[cls_name] = stats

get_minibatch(data_stream, n_test_documents)
# Discard test set
# NOTE(review): the held-out set was already consumed from the stream above;
# this call skips a *further* n_test_documents docs -- confirm intentional.

# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time. The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000

# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0
# Main loop : iterate on mini-batchs of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):

    tick = time.time()
    # Vectorize the raw text of the current mini-batch.
    X_train = vectorizer.transform(X_train_text)
    total_vect_time += time.time() - tick

    for cls_name, cls in partial_fit_classifiers.items():
        tick = time.time()
        # update estimator with examples in the current mini-batch
        cls.partial_fit(X_train, y_train, classes=all_classes)

        # accumulate test accuracy stats
        cls_stats[cls_name]['total_fit_time'] += time.time() - tick
        cls_stats[cls_name]['n_train'] += X_train.shape[0]
        cls_stats[cls_name]['n_train_pos'] += sum(y_train)
        tick = time.time()
        cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
        cls_stats[cls_name]['prediction_time'] = time.time() - tick
        # Track (accuracy, #train docs) and (accuracy, cumulative runtime)
        # so the curves below can be plotted against either axis.
        acc_history = (cls_stats[cls_name]['accuracy'],
                       cls_stats[cls_name]['n_train'])
        cls_stats[cls_name]['accuracy_history'].append(acc_history)
        run_history = (cls_stats[cls_name]['accuracy'],
                       total_vect_time + cls_stats[cls_name]['total_fit_time'])
        cls_stats[cls_name]['runtime_history'].append(run_history)

        # Report progress every 3 mini-batches.
        if i % 3 == 0:
            print(progress(cls_name, cls_stats[cls_name]))
    if i % 3 == 0:
        print('\n')
###############################################################################
# Plot results
###############################################################################
def plot_accuracy(x, y, x_legend):
    """Plot accuracy as a function of x.

    Draws onto the current matplotlib figure; `x_legend` labels the x axis.
    """
    x = np.array(x)
    y = np.array(y)
    plt.title('Classification accuracy as a function of %s' % x_legend)
    plt.xlabel('%s' % x_legend)
    plt.ylabel('Accuracy')
    plt.grid(True)
    plt.plot(x, y)
rcParams['legend.fontsize'] = 10
cls_names = list(sorted(cls_stats.keys()))
# Plot accuracy evolution
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with #examples
accuracy, n_examples = zip(*stats['accuracy_history'])
plot_accuracy(n_examples, accuracy, "training examples (#)")
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
plt.figure()
for _, stats in sorted(cls_stats.items()):
# Plot accuracy evolution with runtime
accuracy, runtime = zip(*stats['runtime_history'])
plot_accuracy(runtime, accuracy, 'runtime (s)')
ax = plt.gca()
ax.set_ylim((0.8, 1))
plt.legend(cls_names, loc='best')
# Plot fitting times
plt.figure()
fig = plt.gcf()
cls_runtime = []
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['total_fit_time'])
cls_runtime.append(total_vect_time)
cls_names.append('Vectorization')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=10)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Training Times')
def autolabel(rectangles):
    """Attach a text label above each bar rectangle with its height.

    NOTE(review): draws on the module-level `ax` axes object, not a parameter.
    """
    for rect in rectangles:
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width() / 2.,
                1.05 * height, '%.4f' % height,
                ha='center', va='bottom')
autolabel(rectangles)
plt.show()
# Plot prediction times
plt.figure()
#fig = plt.gcf()
cls_runtime = []
cls_names = list(sorted(cls_stats.keys()))
for cls_name, stats in sorted(cls_stats.items()):
cls_runtime.append(stats['prediction_time'])
cls_runtime.append(parsing_time)
cls_names.append('Read/Parse\n+Feat.Extr.')
cls_runtime.append(vectorizing_time)
cls_names.append('Hashing\n+Vect.')
bar_colors = rcParams['axes.color_cycle'][:len(cls_names)]
ax = plt.subplot(111)
rectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,
color=bar_colors)
ax.set_xticks(np.linspace(0.25, len(cls_names) - 0.75, len(cls_names)))
ax.set_xticklabels(cls_names, fontsize=8)
plt.setp(plt.xticks()[1], rotation=30)
ymax = max(cls_runtime) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('runtime (s)')
ax.set_title('Prediction Times (%d instances)' % n_test_documents)
autolabel(rectangles)
plt.show()
| bsd-3-clause |
nikitasingh981/scikit-learn | sklearn/covariance/robust_covariance.py | 6 | 30178 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state, check_array
# Minimum Covariance Determinant
# Implementing of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
           verbose=False, cov_computation_method=empirical_covariance,
           random_state=None):
    """C_step procedure described in [Rouseeuw1999]_ aiming at computing MCD.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Data set in which we look for the n_support observations whose
        scatter matrix has minimum determinant.

    n_support : int, > n_samples / 2
        Number of observations to compute the robust estimates of location
        and covariance from.

    remaining_iterations : int, optional
        Number of iterations to perform.
        According to [Rouseeuw1999]_, two iterations are sufficient to get
        close to the minimum, and we never need more than 30 to reach
        convergence.

    initial_estimates : 2-tuple, optional
        Initial estimates of location and shape from which to run the c_step
        procedure:
        - initial_estimates[0]: an initial location estimate
        - initial_estimates[1]: an initial covariance estimate

    verbose : boolean, optional
        Verbose mode.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    cov_computation_method : callable, default empirical_covariance
        The function which will be used to compute the covariance.
        Must return shape (n_features, n_features)

    Returns
    -------
    location : array-like, shape (n_features,)
        Robust location estimates.

    covariance : array-like, shape (n_features, n_features)
        Robust covariance estimates.

    support : array-like, shape (n_samples,)
        A mask for the `n_support` observations whose scatter matrix has
        minimum determinant.

    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS
    """
    # Validate inputs once, then delegate to the private implementation.
    X = np.asarray(X)
    random_state = check_random_state(random_state)
    return _c_step(X, n_support, remaining_iterations=remaining_iterations,
                   initial_estimates=initial_estimates, verbose=verbose,
                   cov_computation_method=cov_computation_method,
                   random_state=random_state)
def _c_step(X, n_support, random_state, remaining_iterations=30,
            initial_estimates=None, verbose=False,
            cov_computation_method=empirical_covariance):
    """Core C-step iteration; see ``c_step`` for parameter documentation.

    Returns (location, covariance, log-determinant, support mask,
    Mahalanobis distances) for the best support found.
    """
    n_samples, n_features = X.shape

    # Initialisation
    support = np.zeros(n_samples, dtype=bool)
    if initial_estimates is None:
        # compute initial robust estimates from a random subset
        support[random_state.permutation(n_samples)[:n_support]] = True
    else:
        # get initial robust estimates from the function parameters
        location = initial_estimates[0]
        covariance = initial_estimates[1]
        # run a special iteration for that case (to get an initial support)
        precision = pinvh(covariance)
        X_centered = X - location
        dist = (np.dot(X_centered, precision) * X_centered).sum(1)
        # compute new estimates: keep the n_support closest observations
        support[np.argsort(dist)[:n_support]] = True

    X_support = X[support]
    location = X_support.mean(0)
    covariance = cov_computation_method(X_support)

    # Iterative procedure for Minimum Covariance Determinant computation:
    # each pass keeps the n_support points closest (in Mahalanobis distance)
    # to the current estimates; the determinant is non-increasing.
    det = fast_logdet(covariance)
    previous_det = np.inf
    while (det < previous_det) and (remaining_iterations > 0):
        # save old estimates values
        previous_location = location
        previous_covariance = covariance
        previous_det = det
        previous_support = support
        # compute a new support from the full data set mahalanobis distances
        precision = pinvh(covariance)
        X_centered = X - location
        dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
        # compute new estimates
        support = np.zeros(n_samples, dtype=bool)
        support[np.argsort(dist)[:n_support]] = True
        X_support = X[support]
        location = X_support.mean(axis=0)
        covariance = cov_computation_method(X_support)
        det = fast_logdet(covariance)
        # update remaining iterations for early stopping
        remaining_iterations -= 1

    previous_dist = dist
    # NOTE(review): this recomputes distances with the *new* location but the
    # precision of the previous covariance; also, if the while loop never runs
    # (e.g. det is already inf) and initial_estimates is None, `dist` and
    # `precision` are unbound here -- confirm against upstream behaviour.
    dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
    # Catch computation errors
    if np.isinf(det):
        raise ValueError(
            "Singular covariance matrix. "
            "Please check that the covariance matrix corresponding "
            "to the dataset is full rank and that MinCovDet is used with "
            "Gaussian-distributed data (or at least data drawn from a "
            "unimodal, symmetric distribution.")
    # Check convergence
    if np.allclose(det, previous_det):
        # c_step procedure converged
        if verbose:
            print("Optimal couple (location, covariance) found before"
                  " ending iterations (%d left)" % (remaining_iterations))
        results = location, covariance, det, support, dist
    elif det > previous_det:
        # determinant has increased (should not happen)
        warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
                      % (det, previous_det), RuntimeWarning)
        results = previous_location, previous_covariance, \
            previous_det, previous_support, previous_dist

    # Check early stopping
    if remaining_iterations == 0:
        if verbose:
            print('Maximum number of iterations reached')
        results = location, covariance, det, support, dist

    return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
                      verbose=False,
                      cov_computation_method=empirical_covariance,
                      random_state=None):
    """Find the `select` best pure subsets of observations for the MCD.
    Runs `n_trials` independent c_step refinements (the concentration step
    of Rousseeuw and Van Driessen [Rouseeuw1999]_) and keeps the `select`
    candidates whose covariance matrices have the smallest determinants.
    Each candidate is a "pure" subset of `n_support` observations, i.e. one
    believed not to contain outliers.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Data (sub)set in which we look for the n_support purest observations.
    n_support : int, [(n + p + 1)/2] < n_support < n
        The number of samples the pure data set must contain.
    n_trials : int, nb_trials > 0 or 2-tuple
        Either the number of random initial supports to try, or a pair
        ``(locations, covariances)`` of arrays with shapes
        ``(n_trials, n_features)`` and ``(n_trials, n_features, n_features)``
        giving explicit initial estimates for each trial.
    select : int, int > 0
        Number of best candidates results to return.
    n_iter : int, nb_iter > 0
        Maximum number of iterations for the c_step procedure.
        (2 is enough to be close to the final solution. "Never" exceeds 20).
    verbose : boolean, default False
        Control the output verbosity.
    cov_computation_method : callable, default empirical_covariance
        The function which will be used to compute the covariance.
        Must return shape (n_features, n_features).
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the random initial supports.
    See Also
    ---------
    c_step
    Returns
    -------
    best_locations : array-like, shape (select, n_features)
        Location estimates of the `select` best candidates.
    best_covariances : array-like, shape (select, n_features, n_features)
        Covariance estimates of the `select` best candidates.
    best_supports : array-like, shape (select, n_samples)
        Supports (boolean masks) of the `select` best candidates.
    best_ds : array-like, shape (select, n_samples)
        Mahalanobis distances associated with the `select` best candidates.
    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS
    """
    random_state = check_random_state(random_state)
    n_samples, n_features = X.shape
    # Integer input -> trials start from random supports; tuple input ->
    # trials start from caller-supplied (location, covariance) estimates.
    if isinstance(n_trials, numbers.Integral):
        run_from_estimates = False
    elif isinstance(n_trials, tuple):
        run_from_estimates = True
        estimates_list = n_trials
        n_trials = estimates_list[0].shape[0]
    else:
        raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
                        " integer, got %s (%s)" % (n_trials, type(n_trials)))
    # Run one c_step refinement per trial.
    all_estimates = []
    for trial in range(n_trials):
        if run_from_estimates:
            # Initialize from the caller-provided estimates for this trial.
            initial_estimates = (estimates_list[0][trial],
                                 estimates_list[1][trial])
            all_estimates.append(_c_step(
                X, n_support, remaining_iterations=n_iter,
                initial_estimates=initial_estimates, verbose=verbose,
                cov_computation_method=cov_computation_method,
                random_state=random_state))
        else:
            # Initialize from a random support.
            all_estimates.append(_c_step(
                X, n_support, remaining_iterations=n_iter, verbose=verbose,
                cov_computation_method=cov_computation_method,
                random_state=random_state))
    # Keep only the `select` candidates with the smallest determinants.
    locs, covs, dets, supports, ds = \
        [np.asarray(field) for field in zip(*all_estimates)]
    index_best = np.argsort(dets)[:select]
    return locs[index_best], covs[index_best], supports[index_best], \
        ds[index_best]
def fast_mcd(X, support_fraction=None,
             cov_computation_method=empirical_covariance,
             random_state=None):
    """Estimates the Minimum Covariance Determinant matrix.
    Read more in the :ref:`User Guide <robust_covariance>`.
    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        The data matrix, with p features and n samples.
    support_fraction : float, 0 < support_fraction < 1
        The proportion of points to be included in the support of the raw
        MCD estimate. Default is None, which implies that the minimum
        value of support_fraction will be used within the algorithm:
        `[n_sample + n_features + 1] / 2`.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    cov_computation_method : callable, default empirical_covariance
        The function which will be used to compute the covariance.
        Must return shape (n_features, n_features)
    Notes
    -----
    The FastMCD algorithm has been introduced by Rousseuw and Van Driessen
    in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
    1999, American Statistical Association and the American Society
    for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into larger subsets, and finally into the full data set.
    Depending on the size of the initial sample, we have one, two or three
    such computation levels.
    Note that only raw estimates are returned. If one is interested in
    the correction and reweighting steps described in [Rouseeuw1999]_,
    see the MinCovDet object.
    References
    ----------
    .. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
        Determinant Estimator, 1999, American Statistical Association
        and the American Society for Quality, TECHNOMETRICS
    .. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
        Asymptotics For The Minimum Covariance Determinant Estimator,
        The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
    Returns
    -------
    location : array-like, shape (n_features,)
        Robust location of the data.
    covariance : array-like, shape (n_features, n_features)
        Robust covariance of the features.
    support : array-like, type boolean, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust location and covariance estimates of the data set.
    dist : array-like, shape (n_samples,)
        The squared Mahalanobis distances of all the observations, computed
        with the returned raw location and covariance estimates.
    """
    random_state = check_random_state(random_state)
    X = check_array(X, ensure_min_samples=2, estimator='fast_mcd')
    n_samples, n_features = X.shape
    # minimum breakdown value
    if support_fraction is None:
        n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
    else:
        n_support = int(support_fraction * n_samples)
    # 1-dimensional case quick computation
    # (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
    # Regression and Outlier Detection, John Wiley & Sons, chapter 4)
    if n_features == 1:
        if n_support < n_samples:
            # find the sample shortest halves
            X_sorted = np.sort(np.ravel(X))
            diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
            halves_start = np.where(diff == np.min(diff))[0]
            # take the middle points' mean to get the robust location estimate
            location = 0.5 * (X_sorted[n_support + halves_start]
                              + X_sorted[halves_start]).mean()
            support = np.zeros(n_samples, dtype=bool)
            X_centered = X - location
            support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
            covariance = np.asarray([[np.var(X[support])]])
            location = np.array([location])
            # get precision matrix in an optimized way
            precision = pinvh(covariance)
            dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
        else:
            # support covers all samples: plain (non-robust) estimates
            support = np.ones(n_samples, dtype=bool)
            covariance = np.asarray([[np.var(X)]])
            location = np.asarray([np.mean(X)])
            X_centered = X - location
            # get precision matrix in an optimized way
            precision = pinvh(covariance)
            dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
    # Starting FastMCD algorithm for p-dimensional case
    if (n_samples > 500) and (n_features > 1):
        # 1. Find candidate supports on subsets
        # a. split the set in subsets of size ~ 300
        n_subsets = n_samples // 300
        n_samples_subsets = n_samples // n_subsets
        samples_shuffle = random_state.permutation(n_samples)
        h_subset = int(np.ceil(n_samples_subsets *
                               (n_support / float(n_samples))))
        # b. perform a total of 500 trials
        n_trials_tot = 500
        # c. select 10 best (location, covariance) for each subset
        n_best_sub = 10
        n_trials = max(10, n_trials_tot // n_subsets)
        n_best_tot = n_subsets * n_best_sub
        all_best_locations = np.zeros((n_best_tot, n_features))
        try:
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
        except MemoryError:
            # The above is too big; fall back to fewer candidates per subset
            # (and a less optimal result).
            # BUG FIX: shrink the candidate counts *before* re-allocating.
            # The previous code retried the exact same oversized allocation
            # (which could only raise MemoryError again) and then set counts
            # inconsistent with the per-subset bookkeeping below.
            n_best_sub = 2
            n_best_tot = n_subsets * n_best_sub
            all_best_locations = all_best_locations[:n_best_tot]
            all_best_covariances = np.zeros((n_best_tot, n_features,
                                             n_features))
        for i in range(n_subsets):
            low_bound = i * n_samples_subsets
            high_bound = low_bound + n_samples_subsets
            current_subset = X[samples_shuffle[low_bound:high_bound]]
            best_locations_sub, best_covariances_sub, _, _ = select_candidates(
                current_subset, h_subset, n_trials,
                select=n_best_sub, n_iter=2,
                cov_computation_method=cov_computation_method,
                random_state=random_state)
            subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
            all_best_locations[subset_slice] = best_locations_sub
            all_best_covariances[subset_slice] = best_covariances_sub
        # 2. Pool the candidate supports into a merged set
        # (possibly the full dataset)
        n_samples_merged = min(1500, n_samples)
        h_merged = int(np.ceil(n_samples_merged *
                               (n_support / float(n_samples))))
        if n_samples > 1500:
            n_best_merged = 10
        else:
            n_best_merged = 1
        # find the best couples (location, covariance) on the merged set
        selection = random_state.permutation(n_samples)[:n_samples_merged]
        locations_merged, covariances_merged, supports_merged, d = \
            select_candidates(
                X[selection], h_merged,
                n_trials=(all_best_locations, all_best_covariances),
                select=n_best_merged,
                cov_computation_method=cov_computation_method,
                random_state=random_state)
        # 3. Finally get the overall best (locations, covariance) couple
        if n_samples < 1500:
            # directly get the best couple (location, covariance)
            location = locations_merged[0]
            covariance = covariances_merged[0]
            # map the merged-set support/distances back onto the full set
            support = np.zeros(n_samples, dtype=bool)
            dist = np.zeros(n_samples)
            support[selection] = supports_merged[0]
            dist[selection] = d[0]
        else:
            # select the best couple on the full dataset
            locations_full, covariances_full, supports_full, d = \
                select_candidates(
                    X, n_support,
                    n_trials=(locations_merged, covariances_merged),
                    select=1,
                    cov_computation_method=cov_computation_method,
                    random_state=random_state)
            location = locations_full[0]
            covariance = covariances_full[0]
            support = supports_full[0]
            dist = d[0]
    elif n_features > 1:
        # 1. Find the 10 best couples (location, covariance)
        # considering two iterations
        n_trials = 30
        n_best = 10
        locations_best, covariances_best, _, _ = select_candidates(
            X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
            cov_computation_method=cov_computation_method,
            random_state=random_state)
        # 2. Select the best couple on the full dataset amongst the 10
        locations_full, covariances_full, supports_full, d = select_candidates(
            X, n_support, n_trials=(locations_best, covariances_best),
            select=1, cov_computation_method=cov_computation_method,
            random_state=random_state)
        location = locations_full[0]
        covariance = covariances_full[0]
        support = supports_full[0]
        dist = d[0]
    return location, covariance, support, dist
class MinCovDet(EmpiricalCovariance):
    """Minimum Covariance Determinant (MCD): robust estimator of covariance.
    The Minimum Covariance Determinant covariance estimator is to be applied
    on Gaussian-distributed data, but could still be relevant on data
    drawn from a unimodal, symmetric distribution. It is not meant to be used
    with multi-modal data (the algorithm used to fit a MinCovDet object is
    likely to fail in such a case).
    One should consider projection pursuit methods to deal with multi-modal
    datasets.
    Read more in the :ref:`User Guide <robust_covariance>`.
    Parameters
    ----------
    store_precision : bool
        Specify if the estimated precision is stored.
    assume_centered : Boolean
        If True, the support of the robust location and the covariance
        estimates is computed, and a covariance estimate is recomputed from
        it, without centering the data.
        Useful to work with data whose mean is significantly equal to
        zero but is not exactly zero.
        If False, the robust location and covariance are directly computed
        with the FastMCD algorithm without additional treatment.
    support_fraction : float, 0 < support_fraction < 1
        The proportion of points to be included in the support of the raw
        MCD estimate. Default is None, which implies that the minimum
        value of support_fraction will be used within the algorithm:
        [n_sample + n_features + 1] / 2
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Attributes
    ----------
    raw_location_ : array-like, shape (n_features,)
        The raw robust estimated location before correction and re-weighting.
    raw_covariance_ : array-like, shape (n_features, n_features)
        The raw robust estimated covariance before correction and re-weighting.
    raw_support_ : array-like, shape (n_samples,)
        A mask of the observations that have been used to compute
        the raw robust estimates of location and shape, before correction
        and re-weighting.
    location_ : array-like, shape (n_features,)
        Estimated robust location
    covariance_ : array-like, shape (n_features, n_features)
        Estimated robust covariance matrix
    precision_ : array-like, shape (n_features, n_features)
        Estimated pseudo inverse matrix.
        (stored only if store_precision is True)
    support_ : array-like, shape (n_samples,)
        A mask of the observations that have been used to compute
        the robust estimates of location and shape.
    dist_ : array-like, shape (n_samples,)
        Mahalanobis distances of the training set (on which `fit` is called)
        observations.
    References
    ----------
    .. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
        J. Am Stat Ass, 79:871, 1984.`
    .. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
        Estimator, 1999, American Statistical Association and the American
        Society for Quality, TECHNOMETRICS`
    .. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
        Asymptotics For The Minimum Covariance Determinant Estimator,
        The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
    """
    # Plain (non-robust) covariance estimator applied to the selected
    # support; exposed as a staticmethod so subclasses can substitute
    # another estimator without overriding fit.
    _nonrobust_covariance = staticmethod(empirical_covariance)
    def __init__(self, store_precision=True, assume_centered=False,
                 support_fraction=None, random_state=None):
        self.store_precision = store_precision
        self.assume_centered = assume_centered
        self.support_fraction = support_fraction
        self.random_state = random_state
    def fit(self, X, y=None):
        """Fits a Minimum Covariance Determinant with the FastMCD algorithm.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training data, where n_samples is the number of samples
            and n_features is the number of features.
        y : not used, present for API consistence purpose.
        Returns
        -------
        self : object
            Returns self.
        """
        X = check_array(X, ensure_min_samples=2, estimator='MinCovDet')
        random_state = check_random_state(self.random_state)
        n_samples, n_features = X.shape
        # check that the empirical covariance is full rank
        if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
            warnings.warn("The covariance matrix associated to your dataset "
                          "is not full rank")
        # compute and store raw estimates
        raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
            X, support_fraction=self.support_fraction,
            cov_computation_method=self._nonrobust_covariance,
            random_state=random_state)
        if self.assume_centered:
            # Force a zero location: recompute the covariance and the
            # distances on the raw support without centering the data.
            raw_location = np.zeros(n_features)
            raw_covariance = self._nonrobust_covariance(X[raw_support],
                                                        assume_centered=True)
            # get precision matrix in an optimized way
            precision = pinvh(raw_covariance)
            raw_dist = np.sum(np.dot(X, precision) * X, 1)
        self.raw_location_ = raw_location
        self.raw_covariance_ = raw_covariance
        self.raw_support_ = raw_support
        self.location_ = raw_location
        self.support_ = raw_support
        self.dist_ = raw_dist
        # obtain consistency at normal models
        # (NOTE: correct_covariance rescales self.dist_ in place; its
        # returned corrected covariance is deliberately not stored here --
        # the correction only influences the re-weighting through dist_)
        self.correct_covariance(X)
        # re-weight estimator
        # (updates location_, support_, covariance_ and dist_ in place)
        self.reweight_covariance(X)
        return self
    def correct_covariance(self, data):
        """Apply a correction to raw Minimum Covariance Determinant estimates.
        Correction using the empirical correction factor suggested
        by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
        Parameters
        ----------
        data : array-like, shape (n_samples, n_features)
            The data matrix, with p features and n samples.
            The data set must be the one which was used to compute
            the raw estimates.
        Returns
        -------
        covariance_corrected : array-like, shape (n_features, n_features)
            Corrected robust covariance estimate.
        """
        # Consistency factor: observed median of the squared Mahalanobis
        # distances over the chi-squared median with n_features degrees of
        # freedom (assuming `chi2` is the scipy.stats chi-squared law).
        correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
        covariance_corrected = self.raw_covariance_ * correction
        # Rescale the stored distances in place so that they stay
        # consistent with the corrected covariance estimate.
        self.dist_ /= correction
        return covariance_corrected
    def reweight_covariance(self, data):
        """Re-weight raw Minimum Covariance Determinant estimates.
        Re-weight observations using Rousseeuw's method (equivalent to
        deleting outlying observations from the data set before
        computing location and covariance estimates). [Rouseeuw1984]_
        Parameters
        ----------
        data : array-like, shape (n_samples, n_features)
            The data matrix, with p features and n samples.
            The data set must be the one which was used to compute
            the raw estimates.
        Returns
        -------
        location_reweighted : array-like, shape (n_features, )
            Re-weighted robust location estimate.
        covariance_reweighted : array-like, shape (n_features, n_features)
            Re-weighted robust covariance estimate.
        support_reweighted : array-like, type boolean, shape (n_samples,)
            A mask of the observations that have been used to compute
            the re-weighted robust location and covariance estimates.
        """
        n_samples, n_features = data.shape
        # Keep only observations whose (corrected) squared Mahalanobis
        # distance falls below the upper 2.5% point of the chi-squared
        # distribution with n_features degrees of freedom.
        mask = self.dist_ < chi2(n_features).isf(0.025)
        if self.assume_centered:
            location_reweighted = np.zeros(n_features)
        else:
            location_reweighted = data[mask].mean(0)
        covariance_reweighted = self._nonrobust_covariance(
            data[mask], assume_centered=self.assume_centered)
        support_reweighted = np.zeros(n_samples, dtype=bool)
        support_reweighted[mask] = True
        # _set_covariance is inherited from EmpiricalCovariance;
        # presumably it also stores precision_ when store_precision is
        # True -- confirm against the base class.
        self._set_covariance(covariance_reweighted)
        self.location_ = location_reweighted
        self.support_ = support_reweighted
        # Recompute the distances with the re-weighted estimates.
        X_centered = data - self.location_
        self.dist_ = np.sum(
            np.dot(X_centered, self.get_precision()) * X_centered, 1)
        return location_reweighted, covariance_reweighted, support_reweighted
| bsd-3-clause |
michaellaier/pymor | src/pymortests/gui.py | 1 | 1974 | # This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
import multiprocessing
from pymor.grids.oned import OnedGrid
from time import sleep
from pymor.gui.qt import visualize_patch
import pytest
import numpy as np
from pymor.analyticalproblems.elliptic import EllipticProblem
from pymor.discretizers.elliptic import discretize_elliptic_cg
from pymor.domaindiscretizers.default import discretize_domain_default
from pymor.grids.rect import RectGrid
from pymortests.base import runmodule
from pymor.domaindescriptions.basic import RectDomain, LineDomain
from pymor.functions.basic import GenericFunction
@pytest.fixture(params=(('matplotlib', RectGrid), ('gl', RectGrid), ('matplotlib', OnedGrid)))
def backend_gridtype(request):
    """Parametrized fixture yielding (visualization backend name, grid class) pairs."""
    return request.param
def test_visualize_patch(backend_gridtype):
    """Smoke test: discretize a simple elliptic problem and visualize its solution.
    Runs once per (backend, gridtype) pair from the `backend_gridtype`
    fixture; only checks that visualization does not crash.
    """
    backend, gridtype = backend_gridtype
    # 1D problems live on a line domain, 2D problems on a rectangle.
    domain = LineDomain() if gridtype is OnedGrid else RectDomain()
    dim = 1 if gridtype is OnedGrid else 2
    # Constant coefficient functions defined on points of shape (..., dim).
    rhs = GenericFunction(lambda X: np.ones(X.shape[:-1]) * 10, dim)  # NOQA
    dirichlet = GenericFunction(lambda X: np.zeros(X.shape[:-1]), dim)  # NOQA
    diffusion = GenericFunction(lambda X: np.ones(X.shape[:-1]), dim)  # NOQA
    problem = EllipticProblem(domain=domain, rhs=rhs, dirichlet_data=dirichlet, diffusion_functions=(diffusion,))
    grid, bi = discretize_domain_default(problem.domain, grid_type=gridtype)
    discretization, data = discretize_elliptic_cg(analytical_problem=problem, grid=grid, boundary_info=bi)
    U = discretization.solve()
    visualize_patch(data['grid'], U=U, backend=backend)
    sleep(2)  # so gui has a chance to popup
    # Tear down any GUI subprocesses spawned by the visualization.
    for child in multiprocessing.active_children():
        child.terminate()
# Allow running this test module directly, outside of a pytest invocation.
if __name__ == "__main__":
    runmodule(filename=__file__)
| bsd-2-clause |
Moriadry/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/ops.py | 77 | 46403 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-core ops for LabeledTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import types
import numpy as np
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.ops import random_ops
from tensorflow.python.training import input # pylint: disable=redefined-builtin
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensor, ops.Tensor, core.Axis,
            tc.Optional(string_types))
def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None):
  """Gathers entries of `labeled_tensor` along one axis by integer indices.
  Args:
    labeled_tensor: The input tensor.
    indexer: An integer `Tensor` of indices into the gathered axis.
    axis: The `Axis` (name and labels) describing the gathered result.
    name: Optional op name.
  Returns:
    A `LabeledTensor` with `axis` replacing the input's axis of the same
    name, in the input's original axis order.
  """
  with ops.name_scope(name, 'lt_take', [labeled_tensor]) as scope:
    # Move the target axis to the front so a plain gather applies to it.
    other_axes = list(labeled_tensor.axes.remove(axis.name).values())
    front_axes = core.Axes([axis] + other_axes)
    front_first = core.transpose(labeled_tensor, front_axes.keys())
    gathered = core.LabeledTensor(
        array_ops.gather(front_first.tensor, indexer), front_axes)
    # Transpose back to the caller's original axis order.
    return core.transpose(gathered, labeled_tensor.axes.keys(), name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(string_types,
                       tc.Union(slice, collections.Hashable, list)),
            tc.Optional(string_types))
def select(labeled_tensor, selection, name=None):
  """Slice out a subset of the tensor.
  Args:
    labeled_tensor: The input tensor.
    selection: A dictionary mapping an axis name to a scalar, slice or list of
      values to select. Currently supports two types of selections:
        (a) Any number of scalar and/or slice selections.
        (b) Exactly one list selection, without any scalars or slices.
    name: Optional op name.
  Returns:
    The selection as a `LabeledTensor`.
  Raises:
    ValueError: If the tensor doesn't have an axis in the selection or if
      that axis lacks labels.
    KeyError: If any labels in a selection are not found in the original axis.
    NotImplementedError: If you attempt to combine a list selection with
      scalar selection or another list selection.
  """
  with ops.name_scope(name, 'lt_select', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    # Scalar/slice selections accumulate in `slices` (handled by
    # core.slice_function); a list selection accumulates in `indexers`
    # (handled by a gather).
    slices = {}
    indexers = {}
    for axis_name, value in selection.items():
      if axis_name not in labeled_tensor.axes:
        raise ValueError(
            'The tensor does not have an axis named %s. Its axes are: %r' %
            (axis_name, labeled_tensor.axes.keys()))
      axis = labeled_tensor.axes[axis_name]
      if axis.labels is None:
        raise ValueError(
            'The axis named %s does not have labels. The axis is: %r' %
            (axis_name, axis))
      if isinstance(value, slice):
        # TODO(shoyer): consider deprecating using slices in favor of lists
        if value.start is None:
          start = None
        else:
          # Translate the label into its integer position on the axis.
          start = axis.index(value.start)
        if value.stop is None:
          stop = None
        else:
          # For now, follow the pandas convention of making labeled slices
          # inclusive of both bounds.
          stop = axis.index(value.stop) + 1
        if value.step is not None:
          raise NotImplementedError('slicing with a step is not yet supported')
        slices[axis_name] = slice(start, stop)
      # Needs to be after checking for slices, since slice objects claim to be
      # instances of collections.Hashable but hash() on them fails.
      elif isinstance(value, collections.Hashable):
        slices[axis_name] = axis.index(value)
      elif isinstance(value, list):
        if indexers:
          raise NotImplementedError(
              'select does not yet support more than one list selection at '
              'the same time')
        # Translate each label into its integer position on the axis.
        indexer = [axis.index(v) for v in value]
        indexers[axis_name] = ops.convert_to_tensor(indexer, dtype=dtypes.int64)
      else:
        # If type checking is working properly, this shouldn't be possible.
        raise TypeError('cannot handle arbitrary types')
    if indexers and slices:
      raise NotImplementedError(
          'select does not yet support combined scalar and list selection')
    # For now, handle array selection separately, because tf.gather_nd does
    # not support gradients yet. Later, using gather_nd will let us combine
    # these paths.
    if indexers:
      (axis_name, indexer), = indexers.items()
      # Build a labeled axis carrying the selected labels for the result.
      axis = core.Axis(axis_name, selection[axis_name])
      return _gather_1d_on_axis(labeled_tensor, indexer, axis, name=scope)
    else:
      return core.slice_function(labeled_tensor, slices, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(
    tc.Collection(core.LabeledTensorLike), string_types,
    tc.Optional(string_types))
def concat(labeled_tensors, axis_name, name=None):
  """Concatenate tensors along a dimension.
  See tf.concat.
  Args:
    labeled_tensors: A list of input LabeledTensors.
    axis_name: The name of the axis along which to concatenate.
    name: Optional op name.
  Returns:
    The concatenated tensor. Its labels along `axis_name` are the
    concatenation of the inputs' labels, when available for every input.
  Raises:
    ValueError: If fewer than one tensor inputs is provided, if the tensors
      have incompatible axes, or if `axis_name` isn't the name of an axis.
  """
  with ops.name_scope(name, 'lt_concat', labeled_tensors) as scope:
    labeled_tensors = [
        core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
    ]
    if len(labeled_tensors) < 1:
      raise ValueError('concat expects at least 1 tensor, but received %s' %
                       labeled_tensors)
    # Every input must share all axes except (possibly) the concatenated one.
    reference_axes = labeled_tensors[0].axes
    axis_names = list(reference_axes.keys())
    if axis_name not in axis_names:
      raise ValueError('%s not in %s' % (axis_name, axis_names))
    shared_axes = reference_axes.remove(axis_name)
    raw_tensors = [labeled_tensors[0].tensor]
    axis_pieces = [reference_axes[axis_name]]
    for tensor_lt in labeled_tensors[1:]:
      other_axes = tensor_lt.axes.remove(axis_name)
      if other_axes != shared_axes:
        # TODO(shoyer): add more specific checks about what went wrong,
        # including raising AxisOrderError when appropriate
        raise ValueError('Mismatched shared axes: the first tensor '
                         'had axes %r but this tensor has axes %r.' %
                         (shared_axes, other_axes))
      # Accumulate the per-input labels for the concatenated axis.
      axis_pieces.append(tensor_lt.axes[axis_name])
      raw_tensors.append(tensor_lt.tensor)
    merged_axis = core.concat_axes(axis_pieces)
    concat_dimension = axis_names.index(axis_name)
    concat_tensor = array_ops.concat(raw_tensors, concat_dimension, name=scope)
    # Rebuild the axis list with the merged axis in the original position.
    axis_values = list(reference_axes.values())
    result_axes = (axis_values[:concat_dimension] + [merged_axis] +
                   axis_values[concat_dimension + 1:])
    return core.LabeledTensor(concat_tensor, result_axes)
# TODO(shoyer): rename pack/unpack to stack/unstack
@tc.returns(core.LabeledTensor)
@tc.accepts(
    tc.Collection(core.LabeledTensorLike),
    tc.Union(string_types, core.AxisLike), int, tc.Optional(string_types))
def pack(labeled_tensors, new_axis, axis_position=0, name=None):
  """Pack tensors along a new axis.
  See tf.pack.
  Args:
    labeled_tensors: The input tensors, which must have identical axes.
    new_axis: The name of the new axis, or a tuple containing the name
      and coordinate labels.
    axis_position: Optional integer position at which to insert the new axis.
    name: Optional op name.
  Returns:
    The packed tensors as a single LabeledTensor, with `new_axis` in the given
    `axis_position`.
  Raises:
    ValueError: If fewer than one input tensors is provided, or if the tensors
      don't have identical axes.
  """
  with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope:
    labeled_tensors = [
        core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
    ]
    if len(labeled_tensors) < 1:
      raise ValueError('pack expects at least 1 tensors, but received %s' %
                       labeled_tensors)
    # Every input must agree exactly on its axes with the first one.
    reference_axes = labeled_tensors[0].axes
    for tensor_lt in labeled_tensors:
      if tensor_lt.axes != reference_axes:
        raise ValueError('Non-identical axes. Expected %s but got %s' %
                         (reference_axes, tensor_lt.axes))
    stacked = array_ops.stack(
        [tensor_lt.tensor for tensor_lt in labeled_tensors],
        axis=axis_position, name=scope)
    # Splice the new axis into the shared axis list at the requested spot.
    result_axes = list(reference_axes.values())
    result_axes.insert(axis_position, new_axis)
    return core.LabeledTensor(stacked, result_axes)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(string_types), tc.Optional(string_types))
def unpack(labeled_tensor, axis_name=None, name=None):
  """Unpack the tensor.
  See tf.unpack.
  Args:
    labeled_tensor: The input tensor.
    axis_name: Optional name of axis to unpack. By default, the first axis is
      used.
    name: Optional op name.
  Returns:
    The list of unpacked LabeledTensors.
  Raises:
    ValueError: If `axis_name` is not an axis on the input.
  """
  with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    axis_names = list(labeled_tensor.axes.keys())
    # Default to the first axis when no name is requested.
    axis_name = axis_names[0] if axis_name is None else axis_name
    if axis_name not in axis_names:
      raise ValueError('%s not in %s' % (axis_name, axis_names))
    axis_index = axis_names.index(axis_name)
    pieces = array_ops.unstack(
        labeled_tensor.tensor, axis=axis_index, name=scope)
    # The unpacked axis disappears from every resulting tensor.
    kept_axes = [ax for position, ax in enumerate(labeled_tensor.axes.values())
                 if position != axis_index]
    return [core.LabeledTensor(piece, kept_axes) for piece in pieces]
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Collection(string_types),
            tc.Collection(tc.Union(string_types, core.AxisLike)),
            tc.Optional(string_types))
def reshape(labeled_tensor, existing_axes, new_axes, name=None):
  """Reshape specific axes of a LabeledTensor.

  Non-indicated axes remain in their original locations.

  Args:
    labeled_tensor: The input tensor.
    existing_axes: List of axis names found on the input tensor. These must
      appear sequentially in the list of axis names on the input. In other
      words, they must be a valid slice of `list(labeled_tensor.axes.keys())`.
    new_axes: List of strings, tuples of (axis_name, axis_value) or Axis objects
      providing new axes with which to replace `existing_axes` in the reshaped
      result. At most one element of `new_axes` may be a string, indicating an
      axis with unknown size.
    name: Optional op name.

  Returns:
    The reshaped LabeledTensor.

  Raises:
    ValueError: If `existing_axes` are not all axes on the input, or if more
      than one of `new_axes` has unknown size.
    AxisOrderError: If `existing_axes` are not a slice of axis names on the
      input.
  """
  with ops.name_scope(name, 'lt_reshape', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    original_axis_names = list(labeled_tensor.axes.keys())
    existing_axes = list(existing_axes)
    if not set(existing_axes) <= set(original_axis_names):
      raise ValueError('existing_axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (existing_axes, original_axis_names))

    # `existing_axes` must name a contiguous, in-order span of the input's
    # axes so the reshape can be expressed without a transpose.
    start = original_axis_names.index(existing_axes[0])
    stop = original_axis_names.index(existing_axes[-1]) + 1

    if existing_axes != original_axis_names[start:stop]:
      # We could support existing_axes that aren't a slice by using transpose,
      # but that could lead to unpredictable performance consequences because
      # transposes are not free in TensorFlow. If we did transpose
      # automatically, the user might never realize that their data is being
      # produced with the wrong order. (The later will occur with some frequency
      # because of how broadcasting automatically choose axis order.)
      # So for now we've taken the strict approach.
      raise core.AxisOrderError(
          'existing_axes %r are not a slice of axis names %r on the input '
          'labeled tensor. Use `transpose` or `impose_axis_order` to reorder '
          'axes on the input explicitly.' %
          (existing_axes, original_axis_names))

    if sum(isinstance(axis, string_types) for axis in new_axes) > 1:
      raise ValueError(
          'at most one axis in new_axes can have unknown size. All other '
          'axes must have an indicated integer size or labels: %r' % new_axes)

    original_values = list(labeled_tensor.axes.values())
    # tf.reshape uses -1 for "infer this dimension"; unknown-size axes map
    # onto that convention.
    axis_size = lambda axis: -1 if axis.size is None else axis.size
    shape = [axis_size(axis) for axis in original_values[:start]]
    for axis_ref in new_axes:
      if isinstance(axis_ref, string_types):
        # A bare string names an axis whose size should be inferred.
        shape.append(-1)
      else:
        axis = core.as_axis(axis_ref)
        shape.append(axis_size(axis))
    shape.extend(axis_size(axis) for axis in original_values[stop:])

    reshaped_tensor = array_ops.reshape(
        labeled_tensor.tensor, shape, name=scope)
    # The result keeps the untouched leading/trailing axes and splices the
    # caller-provided axes into the middle.
    axes = original_values[:start] + list(new_axes) + original_values[stop:]
    return core.LabeledTensor(reshaped_tensor, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, string_types,
            tc.Optional(string_types))
def rename_axis(labeled_tensor, existing_name, new_name, name=None):
  """Rename an axis of LabeledTensor.

  Args:
    labeled_tensor: The input tensor.
    existing_name: Name for an existing axis on the input.
    new_name: Desired replacement name.
    name: Optional op name.

  Returns:
    LabeledTensor with renamed axis.

  Raises:
    ValueError: If `existing_name` is not an axis on the input.
  """
  with ops.name_scope(name, 'lt_rename_axis', [labeled_tensor]) as scope:
    # Accept any LabeledTensorLike, consistent with the other ops in this
    # module (previously `.axes` was read off the raw, unconverted input).
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    if existing_name not in labeled_tensor.axes:
      # Message grammar fixed: a single name "is" (was "are") not contained.
      raise ValueError('existing_name %r is not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (existing_name, labeled_tensor.axes.keys()))
    # Renaming is a reshape of one axis onto an identically-sized axis with
    # the new name; `.value` preserves any tick labels.
    new_axis = core.Axis(new_name, labeled_tensor.axes[existing_name].value)
    return reshape(labeled_tensor, [existing_name], [new_axis], name=scope)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(string_types, collections.Callable, int, bool,
            tc.Collection(core.LabeledTensorLike), bool,
            tc.Optional(string_types))
def _batch_helper(default_name,
                  batch_fn,
                  batch_size,
                  enqueue_many,
                  labeled_tensors,
                  allow_smaller_final_batch,
                  name=None):
  """Shared implementation behind `batch` and `shuffle_batch`.

  Calls `batch_fn` on the raw tensors, then re-attaches axis metadata to the
  batched outputs so each has a leading 'batch' axis.
  """
  with ops.name_scope(name, default_name, labeled_tensors) as scope:
    labeled_tensors = [
        core.convert_to_labeled_tensor(lt) for lt in labeled_tensors
    ]

    batch_ops = batch_fn([t.tensor for t in labeled_tensors], scope)
    # TODO(shoyer): Remove this when they sanitize the TF API.
    if not isinstance(batch_ops, list):
      # batch_fn may return a single Tensor when given a single input;
      # normalize to a list so the zip with labeled_tensors below works.
      assert isinstance(batch_ops, ops.Tensor)
      batch_ops = [batch_ops]

    if allow_smaller_final_batch:
      # The final batch may be short, so its static size is unknown.
      batch_size = None

    @tc.returns(core.Axes)
    @tc.accepts(core.Axes)
    def output_axes(axes):
      # Compute the output axes: the 'batch' axis (new or resized) goes
      # first, followed by the input's remaining axes.
      if enqueue_many:
        if 'batch' not in axes or list(axes.keys()).index('batch') != 0:
          raise ValueError(
              'When enqueue_many is True, input tensors must have an axis '
              'called "batch" as their first dimension, '
              'but axes were %s' % axes)
        culled_axes = axes.remove('batch')
        return core.Axes([('batch', batch_size)] + list(culled_axes.values()))
      else:
        return core.Axes([('batch', batch_size)] + list(axes.values()))

    output_labeled_tensors = []
    for i, tensor in enumerate(batch_ops):
      axes = output_axes(labeled_tensors[i].axes)
      output_labeled_tensors.append(core.LabeledTensor(tensor, axes))

    return output_labeled_tensors
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
    tc.Collection(core.LabeledTensorLike), int, int, int, bool, bool,
    tc.Optional(string_types))
def batch(labeled_tensors,
          batch_size,
          num_threads=1,
          capacity=32,
          enqueue_many=False,
          allow_smaller_final_batch=False,
          name=None):
  """Rebatch a tensor.

  See tf.batch.

  Args:
    labeled_tensors: The input tensors.
    batch_size: The output batch size.
    num_threads: See tf.batch.
    capacity: See tf.batch.
    enqueue_many: If true, each input tensor must already have a 'batch' axis
      as its first axis; if false, it must not have one. See tf.batch.
    allow_smaller_final_batch: See tf.batch.
    name: Optional op name.

  Returns:
    The rebatched tensors. If enqueue_many is false, each output gains a new
    'batch' axis as its first axis.

  Raises:
    ValueError: If enqueue_many is True and the first axis of the tensors
      isn't "batch".
  """

  def queue_fn(tensors, scope):
    # All queue configuration is captured from the enclosing call; only the
    # raw tensors and the name scope come from _batch_helper.
    return input.batch(
        tensors,
        batch_size=batch_size,
        num_threads=num_threads,
        capacity=capacity,
        enqueue_many=enqueue_many,
        allow_smaller_final_batch=allow_smaller_final_batch,
        name=scope)

  return _batch_helper('lt_batch', queue_fn, batch_size, enqueue_many,
                       labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
    tc.Collection(core.LabeledTensorLike), int, int, int, bool, int,
    tc.Optional(int), bool, tc.Optional(string_types))
def shuffle_batch(labeled_tensors,
                  batch_size,
                  num_threads=1,
                  capacity=32,
                  enqueue_many=False,
                  min_after_dequeue=0,
                  seed=None,
                  allow_smaller_final_batch=False,
                  name=None):
  """Rebatch a tensor, with shuffling.

  See tf.batch.

  Args:
    labeled_tensors: The input tensors.
    batch_size: The output batch size.
    num_threads: See tf.batch.
    capacity: See tf.batch.
    enqueue_many: If true, each input tensor must already have a 'batch' axis
      as its first axis; if false, it must not have one. See tf.batch.
    min_after_dequeue: Minimum number of elements in the queue after a dequeue,
      used to ensure mixing.
    seed: Optional random seed.
    allow_smaller_final_batch: See tf.batch.
    name: Optional op name.

  Returns:
    The rebatched tensors. If enqueue_many is false, each output gains a new
    'batch' axis as its first axis.

  Raises:
    ValueError: If enqueue_many is True and the first axis of the tensors
      isn't "batch".
  """

  def queue_fn(tensors, scope):
    # Shuffling configuration is captured from the enclosing call; only the
    # raw tensors and the name scope come from _batch_helper.
    return input.shuffle_batch(
        tensors,
        batch_size=batch_size,
        num_threads=num_threads,
        capacity=capacity,
        enqueue_many=enqueue_many,
        min_after_dequeue=min_after_dequeue,
        seed=seed,
        allow_smaller_final_batch=allow_smaller_final_batch,
        name=scope)

  return _batch_helper('lt_shuffle_batch', queue_fn, batch_size, enqueue_many,
                       labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(string_types, int),
            tc.Optional(int), tc.Optional(string_types))
def random_crop(labeled_tensor, shape_map, seed=None, name=None):
  """Randomly crops a tensor to a given size.

  See tf.random_crop.

  Args:
    labeled_tensor: The input tensor.
    shape_map: A dictionary mapping axis names to the size of the random crop
      for that dimension.
    seed: An optional random seed.
    name: An optional op name.

  Returns:
    A tensor of the same rank as `labeled_tensor`, cropped randomly in the
    selected dimensions.

  Raises:
    ValueError: If the shape map contains an axis name not in the input tensor.
  """
  with ops.name_scope(name, 'lt_random_crop', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    for axis_name in shape_map:
      if axis_name not in labeled_tensor.axes:
        raise ValueError('Selection axis %s not in axes %s' %
                         (axis_name, labeled_tensor.axes))

    crop_shape = []
    result_axes = []
    for axis in labeled_tensor.axes.values():
      if axis.name not in shape_map:
        # Uncropped axes pass through unchanged, labels and all.
        crop_shape.append(len(axis))
        result_axes.append(axis)
      else:
        crop_size = shape_map[axis.name]
        crop_shape.append(crop_size)
        # We lose labels for the axes we crop, leaving just the size.
        result_axes.append((axis.name, crop_size))

    crop_op = random_ops.random_crop(
        labeled_tensor.tensor, crop_shape, seed=seed, name=scope)

    return core.LabeledTensor(crop_op, result_axes)
# TODO(shoyer): Allow the user to select the axis over which to map.
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
            tc.Optional(string_types))
def map_fn(fn, labeled_tensor, name=None):
  """Map on the list of tensors unpacked from labeled_tensor.

  See tf.map_fn.

  Args:
    fn: The function to apply to each unpacked LabeledTensor.
      It should have type LabeledTensor -> LabeledTensor.
    labeled_tensor: The input tensor.
    name: Optional op name.

  Returns:
    A tensor that packs the results of applying fn to the list of tensors
    unpacked from labeled_tensor.
  """
  with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    unpack_lts = unpack(labeled_tensor)

    # TODO(ericmc): Fix this upstream.
    if labeled_tensor.dtype == dtypes.string:
      # We must construct the full graph here, because functional_ops.map_fn
      # doesn't work for string-valued tensors.
      # Constructing the full graph may be slow.
      map_lts = [fn(t) for t in unpack_lts]
      return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope)
    else:
      # Figure out what the axis labels should be, but use tf.map_fn to
      # construct the graph because it's efficient.
      # It may be slow to construct the full graph, so we infer the labels from
      # the first element.
      # TODO(ericmc): This builds a subgraph which then gets thrown away.
      # Find a more elegant solution.
      first_map_lt = fn(unpack_lts[0])
      final_axes = list(labeled_tensor.axes.values())[:1] + list(
          first_map_lt.axes.values())

      @tc.returns(ops.Tensor)
      @tc.accepts(ops.Tensor)
      def tf_fn(tensor):
        # Re-attach the non-mapped axes so the user-supplied fn sees a
        # LabeledTensor, then strip the labels off again for tf.map_fn.
        original_axes = list(labeled_tensor.axes.values())[1:]
        tensor_lt = core.LabeledTensor(tensor, original_axes)
        return fn(tensor_lt).tensor

      map_op = functional_ops.map_fn(tf_fn, labeled_tensor.tensor)
      map_lt = core.LabeledTensor(map_op, final_axes)

      return core.identity(map_lt, name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
            core.LabeledTensorLike, tc.Optional(string_types))
def foldl(fn, labeled_tensor, initial_value, name=None):
  """Left fold on the list of tensors unpacked from labeled_tensor.

  See tf.foldl.

  Args:
    fn: The function to apply to each unpacked LabeledTensor.
      It should have type (LabeledTensor, LabeledTensor) -> LabeledTensor.
      Its arguments are (accumulated_value, next_value).
    labeled_tensor: The input tensor.
    initial_value: The initial value of the accumulator.
    name: Optional op name.

  Returns:
    The accumulated value.
  """
  with ops.name_scope(name, 'lt_foldl',
                      [labeled_tensor, initial_value]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    initial_value = core.convert_to_labeled_tensor(initial_value)

    # Axes of a single element along the folded (first) axis.
    element_axes = list(labeled_tensor.axes.values())[1:]

    @tc.returns(ops.Tensor)
    @tc.accepts(ops.Tensor, ops.Tensor)
    def wrapped_fn(acc, elem):
      # Re-attach axis metadata so the user-supplied fn sees LabeledTensors,
      # then strip it off again for tf.foldl.
      return fn(core.LabeledTensor(acc, initial_value.axes),
                core.LabeledTensor(elem, element_axes)).tensor

    folded = functional_ops.foldl(
        wrapped_fn, labeled_tensor.tensor, initializer=initial_value.tensor)
    return core.identity(
        core.LabeledTensor(folded, initial_value.axes), name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(tc.Collection(string_types)), tc.Optional(string_types))
def squeeze(labeled_tensor, axis_names=None, name=None):
  """Remove size-1 dimensions.

  See tf.squeeze.

  Args:
    labeled_tensor: The input tensor.
    axis_names: The names of the dimensions to remove, or None to remove
      all size-1 dimensions.
    name: Optional op name.

  Returns:
    A tensor with the specified dimensions removed.

  Raises:
    ValueError: If the named axes are not in the tensor, or if they are
      not size-1.
  """
  with ops.name_scope(name, 'lt_squeeze', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    if axis_names is None:
      # Default: every axis of length one is squeezable.
      axis_names = [
          axis.name for axis in labeled_tensor.axes.values() if len(axis) == 1
      ]

    for candidate in axis_names:
      if candidate not in labeled_tensor.axes:
        raise ValueError('axis %s is not in tensor axes %s' %
                         (candidate, labeled_tensor.axes))
      if len(labeled_tensor.axes[candidate]) != 1:
        raise ValueError(
            'cannot squeeze axis with size greater than 1: (%s, %s)' %
            (candidate, labeled_tensor.axes[candidate]))

    dims_to_drop = []
    kept_axes = []
    for position, axis in enumerate(labeled_tensor.axes.values()):
      if axis.name in axis_names:
        dims_to_drop.append(position)
      else:
        kept_axes.append(axis)

    if dims_to_drop:
      result_op = array_ops.squeeze(
          labeled_tensor.tensor, dims_to_drop, name=scope)
    else:
      # Nothing to squeeze; emit an identity so the op is still named.
      result_op = array_ops.identity(labeled_tensor.tensor, name=scope)

    return core.LabeledTensor(result_op, kept_axes)
# pylint: disable=invalid-name

# Type-check aliases for the `axes` argument of the reduction ops below.
# A ReduceAxis is either a bare axis name (that axis is squeezed out of the
# result) or an (axis_name, label) pair (that axis is kept, sized 1).
ReduceAxis = tc.Union(string_types,
                      tc.Tuple(string_types, collections.Hashable))

# `axes` may be a single ReduceAxis, a collection of them, or None
# (meaning: reduce over all axes).
ReduceAxes = tc.Optional(tc.Union(ReduceAxis, tc.Collection(ReduceAxis)))

# pylint: enable=invalid-name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
            tc.Optional(string_types))
def matmul(a, b, name=None):
  """Matrix multiply two tensors with rank 1 or 2.

  If both tensors have rank 2, a matrix-matrix product is performed.
  If one tensor has rank 1 and the other has rank 2, then a matrix-vector
  product is performed.
  If both tensors have rank 1, then a vector dot-product is performed.
  (This behavior matches that of `numpy.dot`.)

  Both tensors must share exactly one dimension in common, which is the
  dimension the operation is summed along. The inputs will be automatically
  transposed if necessary as part of the matmul op.

  We intend to eventually support `matmul` on higher rank input, and also
  eventually support summing over any number shared dimensions (via an `axis`
  argument), but neither of these features has been implemented yet.

  Args:
    a: First LabeledTensor.
    b: Second LabeledTensor.
    name: Optional op name.

  Returns:
    LabeledTensor with the result of matrix multiplication. Axes are ordered by
    the current axis_order_scope, if set, or in or order of appearance on the
    inputs.

  Raises:
    NotImplementedError: If inputs have rank >2 or share multiple axes.
    ValueError: If the inputs have rank 0 or do not share any axes.
  """
  with ops.name_scope(name, 'lt_matmul', [a, b]) as scope:
    a = core.convert_to_labeled_tensor(a)
    b = core.convert_to_labeled_tensor(b)

    if len(a.axes) > 2 or len(b.axes) > 2:
      # We could pass batched inputs to tf.matmul to make this work, but we
      # would also need to use tf.tile and/or tf.transpose. These are more
      # expensive than doing reshapes, so it's not clear if it's a good idea to
      # do this automatically.
      raise NotImplementedError(
          'matmul currently requires inputs with rank 2 or less, but '
          'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))

    if not a.axes or not b.axes:
      raise ValueError(
          'matmul currently requires inputs with at least rank 1, but '
          'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))

    # The contraction axis is the single axis name the inputs share.
    shared_axes = set(a.axes) & set(b.axes)
    if len(shared_axes) > 1:
      raise NotImplementedError(
          'matmul does not yet support summing over multiple shared axes: %r. '
          'Use transpose and reshape to create a single shared axis to sum '
          'over.' % shared_axes)
    if not shared_axes:
      raise ValueError('there must have exactly one axis in common between '
                       'input to matmul: %r, %r' %
                       (a.axes.keys(), b.axes.keys()))
    shared_axis, = shared_axes

    if a.axes[shared_axis] != b.axes[shared_axis]:
      raise ValueError('axis %r does not match on input arguments: %r vs %r' %
                       (shared_axis, a.axes[shared_axis].value,
                        b.axes[shared_axis].value))

    # The result keeps every non-shared axis, a's axes before b's.
    result_axes = []
    for axes in [a.axes, b.axes]:
      for axis in axes.values():
        if axis.name != shared_axis:
          result_axes.append(axis)

    axis_scope_order = core.get_axis_order()
    if axis_scope_order is not None:
      # If an axis_order_scope is active and disagrees with the natural
      # output order, swap the operands; this is valid because the
      # contraction axis is unique and matmul handles the transposes below.
      result_axis_names = [axis.name for axis in result_axes]
      new_axis_names = [
          name for name in axis_scope_order if name in result_axis_names
      ]
      if new_axis_names != result_axis_names:
        # switch a and b
        b, a = a, b
        # result_axes is a list of length 1 or 2
        result_axes = result_axes[::-1]

    squeeze_dims = []

    # Promote a rank-1 operand to rank 2 so tf.matmul applies; the inserted
    # singleton dimension is squeezed back out after the product.
    if len(a.axes) == 1:
      a_tensor = array_ops.reshape(a.tensor, (1, -1))
      squeeze_dims.append(0)
      transpose_a = False
    else:
      a_tensor = a.tensor
      # Transpose a when its contraction axis comes first.
      transpose_a = list(a.axes.keys()).index(shared_axis) == 0

    if len(b.axes) == 1:
      b_tensor = array_ops.reshape(b.tensor, (-1, 1))
      squeeze_dims.append(1)
      transpose_b = False
    else:
      b_tensor = b.tensor
      # Transpose b when its contraction axis comes last.
      transpose_b = list(b.axes.keys()).index(shared_axis) == 1

    result_op = math_ops.matmul(
        a_tensor, b_tensor, transpose_a=transpose_a, transpose_b=transpose_b)

    if squeeze_dims:
      result_op = array_ops.squeeze(result_op, squeeze_dims)
    result_op = array_ops.identity(result_op, name=scope)

    return core.LabeledTensor(result_op, result_axes)
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_reduce_op(op_name, reduce_fn):
  """Define a reduction op for labeled tensors.

  Args:
    op_name: string name of the TensorFlow op.
    reduce_fn: function to call to evaluate the op on a tf.Tensor.

  Returns:
    Function defining the given reduction op that acts on a LabeledTensor.
  """

  default_name = 'lt_%s' % op_name

  @tc.returns(core.LabeledTensor)
  @tc.accepts(core.LabeledTensorLike, ReduceAxes, tc.Optional(string_types))
  def op(labeled_tensor, axes=None, name=None):
    """Computes the given reduction across the given axes of a LabeledTensor.

    See `tf.{op_name}` for full details.

    Args:
      labeled_tensor: The input tensor.
      axes: A set of axes or None.
        If None, all axes will be reduced.
        Axes must all be strings, in which case those dimensions will be
        removed, or pairs of (name, None) or (name, label), in which case those
        dimensions will be kept.
      name: Optional op name.

    Returns:
      The reduced LabeledTensor.

    Raises:
      ValueError: if any of the axes to reduce over are not found on
        `labeled_tensor`.
    """
    with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
      labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

      if axes is None:
        axes = labeled_tensor.axes.keys()

      if isinstance(axes, (string_types, tuple)):
        # A single axis spec was passed; normalize to a list of specs.
        axes = [axes]

      # Maps each reduced axis name to its output axis spec (either the bare
      # name, which will be squeezed out, or an (name, labels) pair to keep).
      reduction_axes = {}
      axes_to_squeeze = []
      for a in axes:
        if isinstance(a, string_types):
          # We squeeze out this axis.
          reduction_axes[a] = a
          axes_to_squeeze.append(a)
        else:
          # We keep this axis, with the user-provided labels.
          (axis_name, label) = a
          if label is not None:
            # The input was a single label, so make it a list so it can be
            # turned into an Axis.
            label = [label]
          reduction_axes[axis_name] = (axis_name, label)

      for axis_name in reduction_axes:
        if axis_name not in labeled_tensor.axes:
          raise ValueError('Axis %s not in axes %s' %
                           (axis_name, labeled_tensor.axes))

      # Reduce with keep_dims=True so axis positions stay aligned, then
      # squeeze the bare-name axes out at the end.
      intermediate_axes = []
      reduction_dimensions = []
      for i, axis in enumerate(labeled_tensor.axes.values()):
        if axis.name in reduction_axes:
          intermediate_axes.append(reduction_axes[axis.name])
          reduction_dimensions.append(i)
        else:
          intermediate_axes.append(axis)

      reduce_op = reduce_fn(
          labeled_tensor.tensor, reduction_dimensions, keep_dims=True)
      reduce_lt = core.LabeledTensor(reduce_op, intermediate_axes)

      return squeeze(reduce_lt, axes_to_squeeze, name=scope)

  # Substitute the concrete op name into the docstring template above.
  op.__doc__ = op.__doc__.format(op_name=op_name)
  op.__name__ = op_name

  return op
# Reduction ops mirroring the tf.reduce_* family, built from the factory
# above so they all share the same axes-handling behavior.
reduce_all = define_reduce_op('reduce_all', math_ops.reduce_all)
reduce_any = define_reduce_op('reduce_any', math_ops.reduce_any)
reduce_logsumexp = define_reduce_op('reduce_logsumexp',
                                    math_ops.reduce_logsumexp)
reduce_max = define_reduce_op('reduce_max', math_ops.reduce_max)
reduce_mean = define_reduce_op('reduce_mean', math_ops.reduce_mean)
reduce_min = define_reduce_op('reduce_min', math_ops.reduce_min)
reduce_prod = define_reduce_op('reduce_prod', math_ops.reduce_prod)
reduce_sum = define_reduce_op('reduce_sum', math_ops.reduce_sum)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(str, tc.Union(int, ops.Tensor)),
            tc.Optional(string_types))
def tile(labeled_tensor, multiples, name=None):
  """Constructs a tensor by tiling a given tensor.

  Only axes without tick-labels can be tiled. (Otherwise, axis labels on tiled
  tensors would no longer be unique.)

  See lt.tile.

  Args:
    labeled_tensor: The input tensor.
    multiples: A mapping where the keys are axis names and the values are the
      integer number of times to tile along that axis. Only axes with a multiple
      different than 1 need be included.
    name: Optional op name.

  Returns:
    A tensor with the indicated axes tiled.

  Raises:
    ValueError: If the tiled axes are not axes in the input tensor, or if any
      axes in multiples have tick labels.
  """
  with ops.name_scope(name, 'lt_tile', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    if not set(multiples.keys()).issubset(labeled_tensor.axes.keys()):
      raise ValueError('tile axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (multiples.keys(), labeled_tensor.axes))

    with_labels = [
        axis_name for axis_name in multiples
        if labeled_tensor.axes[axis_name].labels is not None
    ]
    if with_labels:
      raise ValueError('cannot tile axes with tick labels: %r' % with_labels)

    # Axes absent from `multiples` default to a multiple of one.
    expansion = [multiples.get(axis_name, 1) for axis_name in
                 labeled_tensor.axes]
    tile_op = array_ops.tile(labeled_tensor.tensor, expansion, name=scope)

    # Unlabeled axes are passed by name so their new size is re-inferred;
    # labeled axes (which were necessarily untiled) keep their labels.
    result_axes = [
        axis.name if axis.labels is None else axis
        for axis in labeled_tensor.axes.values()
    ]
    return core.LabeledTensor(tile_op, result_axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Mapping(str, tc.Tuple(core.AxisValue, core.AxisValue)),
            string_types, tc.Optional(string_types))
def pad(labeled_tensor, paddings, mode='CONSTANT', name=None):
  """Pads a tensor.

  See tf.pad.

  Args:
    labeled_tensor: The input tensor.
    paddings: A mapping where the keys are axis names and the values are
      tuples where the first element is the padding to insert at the beginning
      of the axis and the second is the padding to insert at the end of the
      axis.
    mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
    name: Optional op name.

  Returns:
    A tensor with the indicated axes padded, optionally with those axes extended
    with the provided labels.

  Raises:
    ValueError: If the padded axes are not axes in the input tensor.
  """
  with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)

    if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()):
      raise ValueError('pad axes %r are not contained in the set of axis '
                       'names %r on the input labeled tensor' %
                       (paddings.keys(), labeled_tensor.axes))

    new_axes = []
    padding_pairs = []
    # Loop variable renamed from `name`, which shadowed this function's
    # `name` parameter.
    for axis_name, axis in labeled_tensor.axes.items():
      if axis_name in paddings:
        padding_before, padding_after = paddings[axis_name]
        axis_before = core.Axis(axis_name, padding_before)
        axis_after = core.Axis(axis_name, padding_after)
        # The padded axis is the concatenation before + original + after, so
        # its labels (if any) stay aligned with the padded data.
        new_axes.append(core.concat_axes([axis_before, axis, axis_after]))
        padding_pairs.append((len(axis_before), len(axis_after)))
      else:
        new_axes.append(axis)
        padding_pairs.append((0, 0))

    pad_op = array_ops.pad(labeled_tensor.tensor,
                           padding_pairs,
                           mode,
                           name=scope)

    return core.LabeledTensor(pad_op, new_axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(
    tc.Union(np.ndarray, list, tuple, core.Scalar),
    tc.Optional(dtypes.DType),
    tc.Optional(
        tc.Union(core.Axes, tc.Collection(
            tc.Union(string_types, core.AxisLike)))), tc.Optional(string_types))
def constant(value, dtype=None, axes=None, name=None):
  """Creates a constant tensor.

  If `axes` includes any strings, shape is inferred from `value`. Otherwise,
  the sizes of the given `axes` are used to set `shape` for `tf.constant`.

  See tf.constant for more details.

  Args:
    value: The input tensor.
    dtype: The type of the returned tensor.
    axes: Optional Axes, list of strings or list of objects coercible to Axis
      objects. By default, axes are assumed to be an empty list (i.e., `value`
      is treated as a scalar).
    name: Optional op name.

  Returns:
    The constant LabeledTensor.
  """
  with ops.name_scope(name, 'lt_constant', [value]) as scope:
    axes = [] if axes is None else axes
    if isinstance(axes, core.Axes):
      axes = axes.values()

    if any(isinstance(ax, string_types) for ax in axes):
      # At least one axis has unknown size, so let tf.constant infer the
      # shape from `value`.
      shape = None
    else:
      # Every axis carries a size; use them as the explicit shape.
      axes = [core.as_axis(a) for a in axes]
      shape = [a.size for a in axes]

    op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope)
    return core.LabeledTensor(op, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(dtypes.DType), tc.Optional(string_types))
def zeros_like(labeled_tensor, dtype=None, name=None):
  """Creates an identical tensor with all elements set to zero.

  Args:
    labeled_tensor: The input tensor.
    dtype: The type of the returned tensor.
    name: Optional op name.

  Returns:
    The tensor with elements set to zero.
  """
  with ops.name_scope(name, 'lt_zeros_like', [labeled_tensor]) as scope:
    lt = core.convert_to_labeled_tensor(labeled_tensor)
    # The axes carry over unchanged; only the values become zeros.
    zeros = array_ops.zeros_like(lt.tensor, dtype=dtype, name=scope)
    return core.LabeledTensor(zeros, lt.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(dtypes.DType), tc.Optional(string_types))
def ones_like(labeled_tensor, dtype=None, name=None):
  """Creates an identical tensor with all elements set to one.

  Args:
    labeled_tensor: The input tensor.
    dtype: The type of the returned tensor.
    name: Optional op name.

  Returns:
    The tensor with elements set to one.
  """
  with ops.name_scope(name, 'lt_ones_like', [labeled_tensor]) as scope:
    lt = core.convert_to_labeled_tensor(labeled_tensor)
    # The axes carry over unchanged; only the values become ones.
    ones = array_ops.ones_like(lt.tensor, dtype=dtype, name=scope)
    return core.LabeledTensor(ones, lt.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
            tc.Optional(dtypes.DType), tc.Optional(string_types))
def cast(labeled_tensor, dtype=None, name=None):
  """Casts a labeled tensor to a new type.

  Args:
    labeled_tensor: The input tensor.
    dtype: The type of the returned tensor.
    name: Optional op name.

  Returns:
    A labeled tensor with the new dtype.
  """
  with ops.name_scope(name, 'lt_cast', [labeled_tensor]) as scope:
    lt = core.convert_to_labeled_tensor(labeled_tensor)
    # Casting changes only the dtype; the axes carry over unchanged.
    converted = math_ops.cast(lt.tensor, dtype=dtype, name=scope)
    return core.LabeledTensor(converted, lt.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, tc.Optional(string_types))
def verify_tensor_all_finite(labeled_tensor, message, name=None):
  """Asserts a tensor doesn't contain NaNs or Infs.

  See tf.verify_tensor_all_finite.

  Args:
    labeled_tensor: The input tensor.
    message: Message to log on failure.
    name: Optional op name.

  Returns:
    The input tensor.
  """
  with ops.name_scope(name, 'lt_verify_tensor_all_finite',
                      [labeled_tensor]) as scope:
    lt = core.convert_to_labeled_tensor(labeled_tensor)
    # The check is a pass-through op: values and axes are unchanged.
    checked = numerics.verify_tensor_all_finite(
        lt.tensor, msg=message, name=scope)
    return core.LabeledTensor(checked, lt.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
            tc.Optional(string_types))
def boolean_mask(labeled_tensor, mask, name=None):
  """Apply a boolean mask to a labeled tensor.

  Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks.
  The mask is applied to the first axis of `labeled_tensor`. Labels on the first
  axis are removed, because True indices in `mask` may not be known dynamically.

  Args:
    labeled_tensor: The input tensor.
    mask: 1-D boolean LabeledTensor whose single axis must equal the first
      axis of `labeled_tensor`.
    name: Optional op name.

  Returns:
    The masked labeled tensor.

  Raises:
    NotImplementedError: if the mask has more than one axis.
    ValueError: if the first axis of the labeled tensor does not equal the
      axis of the mask.
  """
  with ops.name_scope(name, 'lt_boolean_mask', [labeled_tensor, mask]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    mask = core.convert_to_labeled_tensor(mask)

    if len(mask.axes) > 1:
      raise NotImplementedError(
          "LabeledTensor's boolean_mask currently only supports 1D masks")
    mask_axis = list(mask.axes.values())[0]
    lt_axis = list(labeled_tensor.axes.values())[0]
    if mask_axis != lt_axis:
      raise ValueError('the first axis of the labeled tensor and the mask '
                       'are not equal:\n%r\n%r' % (lt_axis, mask_axis))
    op = array_ops.boolean_mask(labeled_tensor.tensor, mask.tensor, name=scope)
    # TODO(shoyer): attempt to infer labels for the masked values, by calling
    # tf.contrib.util.constant_value on the mask?
    # Only the axis *name* survives on the first axis: its new size (and
    # hence any labels) depends on how many mask entries are True at runtime.
    axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:]
    return core.LabeledTensor(op, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
            core.LabeledTensorLike, tc.Optional(string_types))
def where(condition, x, y, name=None):
  """Return elements from x or y depending on condition.

  See `tf.where` for more details. This function currently only implements the
  three argument version of where.

  Args:
    condition: LabeledTensor of type `bool`.
    x: LabeledTensor for values where condition is true.
    y: LabeledTensor for values where condition is false.
    name: Optional op name.

  Returns:
    The labeled tensor with values according to condition.

  Raises:
    ValueError: if `condition`, `x` and `y` do not all have identical axes.
  """
  with ops.name_scope(name, 'lt_where', [condition, x, y]) as scope:
    condition = core.convert_to_labeled_tensor(condition)
    x = core.convert_to_labeled_tensor(x)
    y = core.convert_to_labeled_tensor(y)

    # All three inputs must agree exactly on axes (names, order, and labels).
    if not condition.axes == x.axes == y.axes:
      raise ValueError('all inputs to `where` must have equal axes')

    op = array_ops.where(condition.tensor, x.tensor, y.tensor, name=scope)
    return core.LabeledTensor(op, x.axes)
| apache-2.0 |
DougBurke/astropy | astropy/visualization/hist.py | 2 | 1834 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from inspect import signature
from ..stats import histogram
__all__ = ['hist']
def hist(x, bins=10, ax=None, **kwargs):
    """Enhanced histogram function.

    This is a histogram function that enables the use of more sophisticated
    algorithms for determining bins. Aside from the ``bins`` argument allowing
    a string specified how bins are computed, the parameters are the same
    as pylab.hist().

    This function was ported from astroML: http://astroML.org/

    Parameters
    ----------
    x : array_like
        array of data to be histogrammed
    bins : int or list or str (optional)
        If bins is a string, then it must be one of:

        - 'blocks' : use bayesian blocks for dynamic bin widths
        - 'knuth' : use Knuth's rule to determine bins
        - 'scott' : use Scott's rule to determine bins
        - 'freedman' : use the Freedman-diaconis rule to determine bins
    ax : Axes instance (optional)
        specify the Axes on which to draw the histogram. If not specified,
        then the current active axes will be used.
    **kwargs :
        other keyword arguments are described in ``plt.hist()``.

    Notes
    -----
    Return values are the same as for ``plt.hist()``

    See Also
    --------
    astropy.stats.histogram
    """
    # Forward only the kwargs that np.histogram understands (everything past
    # its first parameter) to astropy.stats.histogram.
    arglist = list(signature(np.histogram).parameters.keys())[1:]
    np_hist_kwds = dict((key, kwargs[key]) for key in arglist if key in kwargs)

    # Only the computed bin edges are needed; the counts are recomputed by
    # ax.hist below. (The counts were previously bound to a local named
    # `hist`, shadowing this function -- renamed to `_`.)
    _, bins = histogram(x, bins, **np_hist_kwds)

    if ax is None:
        # optional dependency; only import if strictly needed.
        import matplotlib.pyplot as plt
        ax = plt.gca()

    return ax.hist(x, bins, **kwargs)
| bsd-3-clause |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/linear_model/tests/test_passive_aggressive.py | 31 | 9812 | from sklearn.utils.testing import assert_true
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
# Shared test fixtures: the iris dataset, shuffled with a fixed RNG seed so
# that every test in this module sees the same sample ordering.
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
# Sparse (CSR) copy of the data, used to exercise the sparse code paths.
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
    """Minimal reference implementation of the passive-aggressive updates.

    Used as ground truth by the correctness tests below. Supports the
    hinge / squared_hinge classification losses and the
    (squared_)epsilon_insensitive regression losses.
    """

    def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
                 fit_intercept=True, n_iter=1, random_state=None):
        self.C = C
        self.epsilon = epsilon
        self.loss = loss
        self.fit_intercept = fit_intercept
        self.n_iter = n_iter

    def fit(self, X, y):
        n_samples, n_features = X.shape
        self.w = np.zeros(n_features, dtype=np.float64)
        self.b = 0.0

        for _ in range(self.n_iter):
            for idx in range(n_samples):
                pred = self.project(X[idx])

                # Per-sample loss value.
                if self.loss in ("hinge", "squared_hinge"):
                    loss = max(1 - y[idx] * pred, 0)
                else:
                    loss = max(np.abs(pred - y[idx]) - self.epsilon, 0)

                sqnorm = np.dot(X[idx], X[idx])

                # Step magnitude: PA-I style for the plain losses,
                # PA-II style for the squared variants.
                if self.loss in ("hinge", "epsilon_insensitive"):
                    step = min(self.C, loss / sqnorm)
                elif self.loss in ("squared_hinge",
                                   "squared_epsilon_insensitive"):
                    step = loss / (sqnorm + 1.0 / (2 * self.C))

                # Step direction: label for classification, residual sign
                # for regression.
                if self.loss in ("hinge", "squared_hinge"):
                    step *= y[idx]
                else:
                    step *= np.sign(y[idx] - pred)

                self.w += step * X[idx]
                if self.fit_intercept:
                    self.b += step

    def project(self, X):
        """Return the decision value for a single sample."""
        return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
    # Dense and sparse inputs reach a reasonable accuracy for every
    # combination of intercept fitting and averaging.
    averaged_attrs = ('average_coef_', 'average_intercept_',
                      'standard_intercept_', 'standard_coef_')
    for data in (X, X_csr):
        for fit_intercept in (True, False):
            for average in (False, True):
                clf = PassiveAggressiveClassifier(
                    C=1.0, max_iter=30, fit_intercept=fit_intercept,
                    random_state=0, average=average, tol=None)
                clf.fit(data, y)
                assert_greater(clf.score(data, y), 0.79)
                if average:
                    # Averaging must expose both averaged and standard
                    # parameter attributes.
                    for attr in averaged_attrs:
                        assert_true(hasattr(clf, attr))
def test_classifier_partial_fit():
    # Repeated partial_fit calls converge to a good score on dense and
    # sparse data, with and without averaging.
    averaged_attrs = ('average_coef_', 'average_intercept_',
                      'standard_intercept_', 'standard_coef_')
    classes = np.unique(y)
    for data in (X, X_csr):
        for average in (False, True):
            clf = PassiveAggressiveClassifier(
                C=1.0, fit_intercept=True, random_state=0,
                average=average, max_iter=5)
            # 30 incremental passes over the same data.
            for _ in range(30):
                clf.partial_fit(data, y, classes)
            assert_greater(clf.score(data, y), 0.79)
            if average:
                for attr in averaged_attrs:
                    assert_true(hasattr(clf, attr))
def test_classifier_refit():
    # A classifier can be retrained on different labels and features.
    clf = PassiveAggressiveClassifier(max_iter=5)
    clf.fit(X, y)
    assert_array_equal(clf.classes_, np.unique(y))

    # Refit with one feature dropped and string labels instead of ints.
    clf.fit(X[:, :-1], iris.target_names[y])
    assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
    # The sklearn implementation matches the reference implementation.
    y_bin = np.where(y == 1, 1, -1)

    for loss in ("hinge", "squared_hinge"):
        reference = MyPassiveAggressive(
            C=1.0, loss=loss, fit_intercept=True, n_iter=2)
        reference.fit(X, y_bin)

        for data in (X, X_csr):
            clf = PassiveAggressiveClassifier(
                C=1.0, loss=loss, fit_intercept=True, max_iter=2,
                shuffle=False, tol=None)
            clf.fit(data, y_bin)
            assert_array_almost_equal(reference.w, clf.coef_.ravel(),
                                      decimal=2)
def test_classifier_undefined_methods():
    # These attributes must not exist on a PA classifier.
    clf = PassiveAggressiveClassifier(max_iter=100)
    for meth in ("predict_proba", "predict_log_proba", "transform"):
        assert_raises(AttributeError, getattr, clf, meth)
def test_class_weights():
    # Class weights shift the decision boundary.
    samples = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                        [1.0, 1.0], [1.0, 0.0]])
    labels = [1, 1, 1, -1, -1]

    unweighted = PassiveAggressiveClassifier(C=0.1, max_iter=100,
                                             class_weight=None,
                                             random_state=100)
    unweighted.fit(samples, labels)
    assert_array_equal(unweighted.predict([[0.2, -1.0]]), np.array([1]))

    # Giving class 1 a tiny weight rotates the hyperplane clock-wise, so
    # the prediction at the probe point flips.
    weighted = PassiveAggressiveClassifier(C=0.1, max_iter=100,
                                           class_weight={1: 0.001},
                                           random_state=100)
    weighted.fit(samples, labels)
    assert_array_equal(weighted.predict([[0.2, -1.0]]), np.array([-1]))
def test_partial_fit_weight_class_balanced():
    # class_weight='balanced' is not supported together with partial_fit.
    clf = PassiveAggressiveClassifier(class_weight="balanced", max_iter=100)
    classes = np.unique(y)
    assert_raises(ValueError, clf.partial_fit, X, y, classes=classes)
def test_equal_class_weight():
    # On already balanced data, 'balanced' and explicit equal weights
    # should not change the learned coefficients.
    features = [[1, 0], [1, 0], [0, 1], [0, 1]]
    targets = [0, 0, 1, 1]

    def fitted(class_weight):
        # Helper: train one classifier with the given class_weight.
        clf = PassiveAggressiveClassifier(
            C=0.1, max_iter=1000, tol=None, class_weight=class_weight)
        clf.fit(features, targets)
        return clf

    baseline = fitted(None)
    balanced = fitted("balanced")
    equal_weights = fitted({0: 0.5, 1: 0.5})

    # Similar up to some epsilon due to the learning rate schedule.
    assert_almost_equal(baseline.coef_, equal_weights.coef_, decimal=2)
    assert_almost_equal(baseline.coef_, balanced.coef_, decimal=2)
def test_wrong_class_weight_label():
    # A class_weight key that is not an actual label raises ValueError.
    samples = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                        [1.0, 1.0], [1.0, 0.0]])
    labels = [1, 1, 1, -1, -1]

    clf = PassiveAggressiveClassifier(class_weight={0: 0.5}, max_iter=100)
    assert_raises(ValueError, clf.fit, samples, labels)
def test_wrong_class_weight_format():
    # class_weight arguments of the wrong type raise ValueError.
    samples = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
                        [1.0, 1.0], [1.0, 0.0]])
    labels = [1, 1, 1, -1, -1]

    for bad_weight in ([0.5], "the larch"):
        clf = PassiveAggressiveClassifier(class_weight=bad_weight,
                                          max_iter=100)
        assert_raises(ValueError, clf.fit, samples, labels)
def test_regressor_mse():
    # The regressor reaches a low MSE on a +/-1 target for every
    # combination of input format, intercept fitting and averaging.
    averaged_attrs = ('average_coef_', 'average_intercept_',
                      'standard_intercept_', 'standard_coef_')
    y_bin = np.where(y == 1, 1, -1)
    for data in (X, X_csr):
        for fit_intercept in (True, False):
            for average in (False, True):
                reg = PassiveAggressiveRegressor(
                    C=1.0, fit_intercept=fit_intercept,
                    random_state=0, average=average, max_iter=5)
                reg.fit(data, y_bin)
                mse = np.mean((reg.predict(data) - y_bin) ** 2)
                assert_less(mse, 1.7)
                if average:
                    for attr in averaged_attrs:
                        assert_true(hasattr(reg, attr))
def test_regressor_partial_fit():
    # Repeated partial_fit calls converge to a low MSE on dense and
    # sparse inputs, with and without averaging.
    averaged_attrs = ('average_coef_', 'average_intercept_',
                      'standard_intercept_', 'standard_coef_')
    y_bin = np.where(y == 1, 1, -1)
    for data in (X, X_csr):
        for average in (False, True):
            reg = PassiveAggressiveRegressor(
                C=1.0, fit_intercept=True, random_state=0,
                average=average, max_iter=100)
            # 50 incremental passes over the same data.
            for _ in range(50):
                reg.partial_fit(data, y_bin)
            mse = np.mean((reg.predict(data) - y_bin) ** 2)
            assert_less(mse, 1.7)
            if average:
                for attr in averaged_attrs:
                    assert_true(hasattr(reg, attr))
def test_regressor_correctness():
    # The sklearn regressor matches the reference implementation.
    y_bin = np.where(y == 1, 1, -1)

    for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
        reference = MyPassiveAggressive(
            C=1.0, loss=loss, fit_intercept=True, n_iter=2)
        reference.fit(X, y_bin)

        for data in (X, X_csr):
            reg = PassiveAggressiveRegressor(
                C=1.0, tol=None, loss=loss, fit_intercept=True, max_iter=2,
                shuffle=False)
            reg.fit(data, y_bin)
            assert_array_almost_equal(reference.w, reg.coef_.ravel(),
                                      decimal=2)
def test_regressor_undefined_methods():
    # PA regressors must not expose a transform method.
    reg = PassiveAggressiveRegressor(max_iter=100)
    assert_raises(AttributeError, getattr, reg, "transform")
| mit |
laranea/trading-with-python | cookbook/workingWithDatesAndTime.py | 77 | 1551 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 16 17:45:02 2011
@author: jev
"""
# NOTE(review): this is a Python 2-only demo script (print statements,
# xrange, the `<>` operator) that also relies on the long-removed
# pandas.core.datetools module; it will not run under Python 3 / modern
# pandas without porting.
import time
import datetime as dt
from pandas import *
from pandas.core import datetools

# --- basic clock/epoch functions ------------------------------------------
print 'Epoch start: %s' % time.asctime(time.gmtime(0))
print 'Seconds from epoch: %.2f' % time.time()

today = dt.date.today()
print type(today)
print 'Today is %s' % today.strftime('%Y.%m.%d')

# parse a datetime from a fixed-format string
d = dt.datetime.strptime('20120803 21:59:59',"%Y%m%d %H:%M:%S")

# time deltas: difference between two dates
someDate = dt.date(2011,8,1)
delta = today - someDate
print 'Delta :', delta

# shift today by a fixed number of days
delta = dt.timedelta(days=20)
print 'Today-delta=', today-delta

t = dt.datetime(*time.strptime('3/30/2004',"%m/%d/%Y")[0:5])
# the '*' operator unpacks the tuple, producing the argument list.
print t

# print every 3d wednesday of the month
# NOTE(review): weekday=4 selects Friday in pandas' Week offset (Mon=0),
# although the comment above says Wednesday -- confirm which was intended.
for month in xrange(1,13):
    t = dt.date(2013,month,1)+datetools.relativedelta(months=1)
    offset = datetools.Week(weekday=4)
    if t.weekday()<>4:
        t_new = t+3*offset
    else:
        t_new = t+2*offset
    t_new = t_new-datetools.relativedelta(days=30)
    print t_new.strftime("%B, %d %Y (%A)")

#rng = DateRange(t, t+datetools.YearEnd())
#print rng

# create a range of times: 30-minute steps across one (trading) day
start = dt.datetime(2012,8,1)+datetools.relativedelta(hours=9,minutes=30)
end = dt.datetime(2012,8,1)+datetools.relativedelta(hours=22)
rng = date_range(start,end,freq='30min')
for r in rng: print r.strftime("%Y%m%d %H:%M:%S") | bsd-3-clause |
hstau/manifold-cryo | manifold/gui/gui.py | 1 | 14009 | ''' Graphical user interface for manifold technique
.. Created 2015
.. codeauthor:: Hstau Y Liao <hstau.y.liao@gmail.com>
First input window:
Project name (Proj_name)
Stack file (Stack_file),
Snapshots info (snapshot_info_file),
Resolution (in Angstroms),
Bin size (in multiples of Shannon angle),
Number of comput nodes,
supply or compute the average vol
First output window:
Snapshot angles on the unit sphere (3D display),
Visible snapshot angles (2D display),
Button to see average vol
Average vol (Chimera display)
Second input window:
reaction coordinates for point cloud,
Button to compute topos and chronos
Second output window:
Point cloud (2D or 3D display),
Topos and chronos (display),
Button to trigger 2D movie
Third window:
Chosen reaction coordinates,
Energy landscape (display)
Trajectory selection
'''
import Tkinter
import random
import math
import chimera
from PIL import Image, ImageTk
import matplotlib, sys
import eulerangles as eu
import utilities as ut
import geometry as geo
matplotlib.use('TkAgg')
import numpy as np
import numpy.matlib as npm
from numpy import arange, sin, pi
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from mpl_toolkits.mplot3d import axes3d,Axes3D
import matplotlib.pyplot as plt
class first_input_window(Tkinter.Tk):
def __init__(self,parent):
Tkinter.Tk.__init__(self,parent)
self.parent = parent
self.initialize()
def initialize(self):
self.grid()
# Enter the project name
self.entryVariable = Tkinter.StringVar()
self.entry = Tkinter.Entr(self,textvariable=self.entryVariable)
self.entry.grid(column=0,row=1,sticky='EW')
self.entry.bind("<Return>", self.OnPressEnter)
#self.labelVariable = Tkinter.StringVar()
label = Tkinter.Label(self,text="Project Name", anchor="w")
#label = Tkinter.Label(self,textvariable=self.labelVariable,
# anchor="w")
#self.labelVariable.set(u"Project Name")
label.grid(column=0,row=0,columnspan=2,sticky='EW')
# buttons not needed
#button = Tkinter.Button(self,text=u"Browse",
# command=self.OnButtonClick)
#button.grid(column=1,row=1)
button = Tkinter.Button(self,text=u"Help",
command=self.OnButtonClick)
button.grid(column=2,row=1)
# Enter the snapshot stack name
self.entryVariable = Tkinter.StringVar()
self.entry = Tkinter.Entry(self,textvariable=self.entryVariable)
self.entry.grid(column=0,row=3,sticky='EW')
self.entry.bind("<Return>", self.OnPressEnter)
label = Tkinter.Label(self,text="Stack Name",anchor="w")
label.grid(column=0,row=2,columnspan=2,sticky='EW')
button = Tkinter.Button(self,text=u"Browse",
command=self.OnButtonClick)
button.grid(column=1,row=3)
button = Tkinter.Button(self,text=u"Help",
command=self.OnButtonClick)
button.grid(column=2,row=3)
# Enter the snapshots info file
self.entryVariable = Tkinter.StringVar()
self.entry = Tkinter.Entry(self,textvariable=self.entryVariable)
self.entry.grid(column=0,row=5,sticky='EW')
self.entry.bind("<Return>", self.OnPressEnter)
label = Tkinter.Label(self,text="Stack Info", anchor="w")
label.grid(column=0,row=4,columnspan=2,sticky='EW')
button = Tkinter.Button(self,text=u"Browse",
command=self.OnButtonClick)
button.grid(column=1,row=5)
button = Tkinter.Button(self,text=u"Help",
command=self.OnButtonClick)
button.grid(column=2,row=5)
# Resolution
self.entryVariable = Tkinter.StringVar()
self.entry = Tkinter.Entry(self,textvariable=self.entryVariable)
self.entry.grid(column=0,row=7,sticky='EW')
self.entry.bind("<Return>", self.OnPressEnter)
label = Tkinter.Label(self,text="Estimated Resolution in Angstroms",
anchor="w")
label.grid(column=0,row=6,columnspan=2,sticky='EW')
button = Tkinter.Button(self,text=u"Help",
command=self.OnButtonClick)
button.grid(column=2,row=7)
# Number of nodes
self.entryVariable = Tkinter.StringVar()
self.entry = Tkinter.Entry(self,textvariable=self.entryVariable)
self.entry.grid(column=0,row=15,sticky='EW')
self.entry.bind("<Return>", self.OnPressEnter)
label = Tkinter.Label(self,text="Number of Nodes", anchor="w")
label.grid(column=0,row=14,columnspan=2,sticky='EW')
button = Tkinter.Button(self,text=u"Help",
command=self.OnButtonClick)
button.grid(column=2,row=15)
self.grid_columnconfigure(0,weight=1)
self.resizable(True,False)
def OnButtonClick(self):
#self.labelVariable.set( self.entryVariable.get()+" (You clicked the button)" )
#self.labelVariable.set("You clicked the button !")
print "You clicked the button !"
def OnPressEnter(self,event):
#self.labelVariable.set( self.entryVariable.get()+" (You pressed ENTER)" )
#self.labelVariable.set("You pressed enter !")
print "You pressed enter !"
class CustomToolbar(NavigationToolbar2TkAgg):
    # Matplotlib Tk toolbar restricted to a fixed set of tools, with the
    # default cursor-coordinate readout suppressed.
    def __init__(self,canvas_,parent_):
        # Each row is (text, tooltip, image file, callback method name);
        # a row of Nones inserts a separator in the toolbar.
        self.toolitems = (
            ('Home', 'home', 'home', 'home'),
            ('Back', 'back', 'back', 'back'),
            ('Forward', 'forward', 'forward', 'forward'),
            (None, None, None, None),
            ('Pan', 'pan', 'move', 'pan'),
            ('Zoom', 'zoom', 'zoom_to_rect', 'zoom'),
            (None, None, None, None),
            ('Save', 'save', 'filesave', 'save_figure'),
            )
        NavigationToolbar2TkAgg.__init__(self,canvas_,parent_)

    def set_message(self, msg):
        # Intentionally a no-op: hides the x/y position text the base
        # toolbar would otherwise display.
        pass
class first_output_window(Tkinter.Tk):
    # First output window: shows snapshot angles on the unit sphere (3D),
    # the angles visible from the current viewpoint (2D), and drives a
    # Chimera display of the average volume.
    def __init__(self,parent):
        Tkinter.Tk.__init__(self,parent)
        self.parent = parent
        # Route the window-manager close button through dest() so the
        # whole process exits (not just this Tk window).
        self.protocol("WM_DELETE_WINDOW", self.dest)
        self.main()

    def main(self):
        """Build the widgets, load the volume in Chimera and plot the
        (currently randomly generated) snapshot angles."""
        # prepare figure, frame, canvas
        self.fig = plt.figure(figsize=(4,4))
        self.frame = Tkinter.Frame(self)
        self.frame.grid(padx=10,pady=10)
        self.canvas = FigureCanvasTkAgg(self.fig, master=self.frame)
        self.canvas.get_tk_widget().grid(column=0,row=0)
        # define 3D interactive plot
        self.ax = Axes3D(self.fig)
        # display 3D model on chimera's main window
        # NOTE(review): the volume filename and display level are
        # hard-coded -- presumably placeholders until the real average
        # volume is wired in.
        opened = chimera.openModels.open('class2_run10k_ct24_it031_class001.mrc')
        tt=chimera.runCommand('volume #0 level 0.02 color pink')
        # button widget to show the chosen angle
        self.btn = Tkinter.Button(self,text=u'Chosen angle',command=self.disp2D)
        self.btn.grid(column=0,row=2,sticky='EW')
        cid = self.fig.canvas.mpl_connect('button_press_event', self.onclick)
        # button widget for computing chronos and topos
        # NOTE(review): this button re-binds self.btn and shares
        # command=self.disp2D with the button above -- it presumably needs
        # its own callback; confirm intended behaviour.
        self.btn = Tkinter.Button(self,text=u'Calculate Topos and Chronos now',command=self.disp2D)
        self.btn.grid(column=0,row=3,sticky='EW')
        cid = self.fig.canvas.mpl_connect('button_press_event', self.onclick)
        # generate some fake data points (ss random angles plus tt copies
        # of three fixed reference angles)
        ss = 100
        tt = 50
        phi = np.ones((ss,1))
        theta = np.ones((ss,1))
        for i in range(phi.shape[0]):
            phi[i] = random.uniform(0,2*np.pi)
            theta[i] = random.uniform(0,np.pi)
        phi1 = np.array([[0],[np.pi/2],[np.pi/20]])
        theta1 = np.array([[0],[np.pi/2],[np.pi/20]])
        phi1 = npm.repmat(phi1,tt,1)
        theta1 = npm.repmat(theta1,tt,1)
        # Convert Euler angles to unit vectors for plotting on the sphere.
        self.p = geo.euler_to_vect(theta, phi)
        self.p1 = geo.euler_to_vect(theta1, phi1)
        # Clicked-point accumulator used by point().
        self.points=[]
        # display data points on the unit sphere
        self.ax.view_init(elev=0, azim=90)
        t = self.ax.scatter(self.p[:,0], self.p[:,1], self.p[:,2], marker='o', s=10, c="goldenrod", alpha=0.6)
        t = self.ax.scatter(self.p1[:,0],self.p1[:,1],self.p1[:,2], marker='+', s=10, c="red", alpha=0.6)
        # display data points that are in the front view
        self.disp2D()
        # Crosshair canvas overlaying the 2D plot for point selection.
        # NOTE(review): clicks here are handled by point(), which draws on
        # self.canvas1 (the matplotlib canvas) -- confirm this cross-wiring
        # is intentional.
        self.canvas2 = Tkinter.Canvas(self.frame,bg="white", width=4,height=4)
        self.canvas2.configure(cursor="crosshair")
        self.canvas2.grid(column=0,row=1)
        self.canvas2.bind('<Button-1>', self.point)
        # gui grid adjustments
        #self.grid_columnconfigure(0,weight=0)
        #self.resizable(False,False)

    # method to update the display of points in the front view
    def disp2D(self):
        """Re-project the sphere points onto the current 3D view plane,
        redraw the 2D scatter, and rotate the Chimera volume to match."""
        # print 1
        self.fig1 = Figure(figsize=(4,4))
        self.ax1 = self.fig1.add_subplot(111)
        # Project using the current elevation/azimuth of the 3D axes.
        pt = geo.proj_sphere(self.ax.elev,self.ax.azim,self.p)
        pt1 = geo.proj_sphere(self.ax.elev,self.ax.azim,self.p1)
        #print self.ax.azim, self.ax.elev
        # aa = pt[:,0]
        #print aa.ndim
        #print aa.shape
        self.ax1.scatter(pt[:,0],pt[:,1],marker = 'o',c="goldenrod")
        self.ax1.scatter(pt1[:,0],pt1[:,1],marker = '+',c="red")
        self.canvas1 = FigureCanvasTkAgg(self.fig1, master=self.frame)
        self.canvas1.get_tk_widget().grid(column=0,row=1)
        #self.canvas2 = Tkinter.Canvas(self.frame,width=4,height=4)
        #self.canvas2.configure(cursor="crosshair")
        #self.canvas2.grid(column=0,row=1)
        #self.canvas2.bind("<Button-1>", self.point)
        cid1 = self.canvas1.mpl_connect('button_press_event', self.onclick1)
        # Keep the Chimera volume orientation in sync with the plot view.
        # NOTE(review): only azim is applied (as a rotation about x) --
        # elevation is ignored; confirm this is the intended mapping.
        chimera.runCommand('turn x %f'%self.ax.azim)
        print 'elev = %f, azim=%f,'%(self.ax.elev,self.ax.azim)
        return

    def dest(self):
        """Destroy the window and terminate the interpreter."""
        self.destroy()
        sys.exit()

    def scroll(self):
        """Attach a vertical scrollbar to a fresh canvas (currently unused)."""
        self.canvas2=Tkinter.Canvas(self.frame)
        self.frame1=Tkinter.Frame(self.canvas2)
        self.vbar=Tkinter.Scrollbar(self.frame,orient="vertical",command=self.canvas2.yview)
        self.canvas2.configure(yscrollcommand=self.vbar.set)
        self.vbar.pack(side="right",fill="y")
        self.canvas2.pack(side='left')
        self.canvas2.create_window((0,0),window=self.frame1,anchor='nw')

    def point(self, event):
        """Mark a clicked pixel and record its (x, y) in self.points."""
        self.canvas1.create_oval(event.x, event.y, event.x+1, event.y+1, fill="black")
        self.points.append(event.x)
        self.points.append(event.y)
        print event.x
        return self.points

    # method that reports the coordinates of the clicked point
    def onclick(self,event):
        print 'button=%d, x=%d, y=%d, xdata=%f, ydata=%f,'%(
            event.button, event.x, event.y, event.xdata, event.ydata)

    def onclick1(self,event):
        # Same as onclick but without the button number (2D canvas clicks).
        print 'x=%d, y=%d, xdata=%f, ydata=%f,'%(
            event.x, event.y, event.xdata, event.ydata)
class second_input_window(Tkinter.Tk):
def __init__(self,parent):
Tkinter.Tk.__init__(self,parent)
self.parent = parent
self.initialize()
def initialize(self):
self.grid()
# ====== Reaction coordinates ====================
self.entryVariable = Tkinter.StringVar()
self.entry = Tkinter.Entry(self,textvariable=self.entryVariable)
self.entry.grid(column=0,row=9,sticky='EW')
self.entry.bind("<Return>", self.OnPressEnter)
self.labelVariable = Tkinter.StringVar()
label = Tkinter.Label(self,textvariable=self.labelVariable,
anchor="w")
label.grid(column=0,row=8,columnspan=2,sticky='EW')
self.labelVariable.set(u"Reaction Coordinates")
button = Tkinter.Button(self,text=u"?",
command=self.OnButtonClick)
button.grid(column=2,row=9)
class third_input_window(Tkinter.Tk):
def __init__(self,parent):
Tkinter.Tk.__init__(self,parent)
self.parent = parent
self.initialize()
def initialize(self):
self.grid()
# ====== Trajectory ====================
self.entryVariable = Tkinter.StringVar()
self.entry = Tkinter.Entry(self,textvariable=self.entryVariable)
self.entry.grid(column=0,row=11,sticky='EW')
self.entry.bind("<Return>", self.OnPressEnter)
self.labelVariable = Tkinter.StringVar()
label = Tkinter.Label(self,textvariable=self.labelVariable,
anchor="w")
label.grid(column=0,row=10,columnspan=2,sticky='EW')
self.labelVariable.set(u"Trajectory")
button = Tkinter.Button(self,text=u"Browse",
command=self.OnButtonClick)
button.grid(column=1,row=11)
button = Tkinter.Button(self,text=u"?",
command=self.OnButtonClick)
button.grid(column=2,row=11)
# ====== Number of movie frames ====================
self.entryVariable = Tkinter.StringVar()
self.entry = Tkinter.Entry(self,textvariable=self.entryVariable)
self.entry.grid(column=0,row=13,sticky='EW')
self.entry.bind("<Return>", self.OnPressEnter)
self.labelVariable = Tkinter.StringVar()
label = Tkinter.Label(self,textvariable=self.labelVariable,
anchor="w")
label.grid(column=0,row=12,columnspan=2,sticky='EW')
self.labelVariable.set(u"Number of movie frames")
button = Tkinter.Button(self,text=u"?",
command=self.OnButtonClick)
button.grid(column=2,row=13)
# Entry point. NOTE(review): this currently launches the first OUTPUT
# window directly -- the input windows defined above are not wired into
# the flow yet; confirm the intended startup sequence.
if __name__ == "__main__":
    app = first_output_window(None)
    app.title('MANIFOLD')
    #app.resizable(width=False, height=False)
    app.mainloop()
| gpl-2.0 |
rajat1994/scikit-learn | examples/classification/plot_digits_classification.py | 289 | 2397 | """
================================
Recognizing hand-written digits
================================
An example showing how the scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)

# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause

# Standard scientific Python imports
import matplotlib.pyplot as plt

# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics

# The digits dataset
digits = datasets.load_digits()

# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 4 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
    plt.subplot(2, 4, index + 1)
    plt.axis('off')
    plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
    plt.title('Training: %i' % label)

# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))

# BUG FIX: use floor division -- under Python 3, `n_samples / 2` is a float
# and float slice indices raise TypeError. `//` behaves identically on
# Python 2 for non-negative ints.
half = n_samples // 2

# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)

# We learn the digits on the first half of the digits
classifier.fit(data[:half], digits.target[:half])

# Now predict the value of the digit on the second half:
expected = digits.target[half:]
predicted = classifier.predict(data[half:])

print("Classification report for classifier %s:\n%s\n"
      % (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))

images_and_predictions = list(zip(digits.images[half:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
    plt.subplot(2, 4, index + 5)
    plt.axis('off')
    plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
    plt.title('Prediction: %i' % prediction)

plt.show()
| bsd-3-clause |
keflavich/scikit-image | doc/examples/plot_shapes.py | 22 | 1913 | """
======
Shapes
======
This example shows how to draw several different shapes:
- line
- Bezier curve
- polygon
- circle
- ellipse
Anti-aliased drawing for:
- line
- circle
"""
import math
import numpy as np
import matplotlib.pyplot as plt

from skimage.draw import (line, polygon, circle,
                          circle_perimeter,
                          ellipse, ellipse_perimeter,
                          bezier_curve)

# Left panel: shapes drawn without anti-aliasing into a 500x500 RGB image.
fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(10, 6))

img = np.zeros((500, 500, 3), dtype=np.double)

# draw line
# NOTE(review): the line is written with 255 into a float image whose other
# shapes use 1.0; matplotlib clips float RGB data to [0, 1], so both render
# at full intensity -- confirm 1.0 was intended here.
rr, cc = line(120, 123, 20, 400)
img[rr, cc, 0] = 255

# fill polygon
# NOTE(review): the vertex (220, 590) lies outside the 500x500 image; the
# `img.shape` argument below clips the filled region to the image bounds.
poly = np.array((
    (300, 300),
    (480, 320),
    (380, 430),
    (220, 590),
    (300, 300),
))
rr, cc = polygon(poly[:, 0], poly[:, 1], img.shape)
img[rr, cc, 1] = 1

# fill circle
rr, cc = circle(200, 200, 100, img.shape)
img[rr, cc, :] = (1, 1, 0)

# fill ellipse
rr, cc = ellipse(300, 300, 100, 200, img.shape)
img[rr, cc, 2] = 1

# circle outline
rr, cc = circle_perimeter(120, 400, 15)
img[rr, cc, :] = (1, 0, 0)

# Bezier curve
rr, cc = bezier_curve(70, 100, 10, 10, 150, 100, 1)
img[rr, cc, :] = (1, 0, 0)

# ellipse outlines at three different orientations
rr, cc = ellipse_perimeter(120, 400, 60, 20, orientation=math.pi / 4.)
img[rr, cc, :] = (1, 0, 1)
rr, cc = ellipse_perimeter(120, 400, 60, 20, orientation=-math.pi / 4.)
img[rr, cc, :] = (0, 0, 1)
rr, cc = ellipse_perimeter(120, 400, 60, 20, orientation=math.pi / 2.)
img[rr, cc, :] = (1, 1, 1)

ax1.imshow(img)
ax1.set_title('No anti-aliasing')
ax1.axis('off')

# Right panel: anti-aliased variants return per-pixel coverage values (val)
# in addition to coordinates.
from skimage.draw import line_aa, circle_perimeter_aa

img = np.zeros((100, 100), dtype=np.double)

# anti-aliased line
rr, cc, val = line_aa(12, 12, 20, 50)
img[rr, cc] = val

# anti-aliased circle
rr, cc, val = circle_perimeter_aa(60, 40, 30)
img[rr, cc] = val

ax2.imshow(img, cmap=plt.cm.gray, interpolation='nearest')
ax2.set_title('Anti-aliasing')
ax2.axis('off')

plt.show()
| bsd-3-clause |
adamgreenhall/scikit-learn | sklearn/ensemble/partial_dependence.py | 251 | 15097 | """Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
    """Generate a grid of points based on the ``percentiles`` of ``X``.

    The grid is generated by placing ``grid_resolution`` equally
    spaced points between the ``percentiles`` of each column
    of ``X``.

    Parameters
    ----------
    X : ndarray
        The data
    percentiles : tuple of floats
        The percentiles which are used to construct the extreme
        values of the grid axes.
    grid_resolution : int
        The number of equally spaced points that are placed
        on the grid.

    Returns
    -------
    grid : ndarray
        All data points on the grid; ``grid.shape[1] == X.shape[1]``
        and ``grid.shape[0] == grid_resolution * X.shape[1]``.
    axes : seq of ndarray
        The axes with which the grid has been created.
    """
    if len(percentiles) != 2:
        raise ValueError('percentile must be tuple of len 2')
    if not all(0. <= x <= 1. for x in percentiles):
        raise ValueError('percentile values must be in [0, 1]')

    # PERF FIX: mquantiles(X, ...) computes the percentiles of *all*
    # columns at once and is loop-invariant, yet the original re-computed
    # it inside the loop for every high-cardinality column. Compute it
    # lazily, at most once (and not at all if every column is low-res).
    emp_percentiles = None

    axes = []
    for col in range(X.shape[1]):
        uniques = np.unique(X[:, col])
        if uniques.shape[0] < grid_resolution:
            # feature has low resolution use unique vals
            axis = uniques
        else:
            if emp_percentiles is None:
                emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
            # create axis based on percentiles and grid resolution
            axis = np.linspace(emp_percentiles[0, col],
                               emp_percentiles[1, col],
                               num=grid_resolution, endpoint=True)
        axes.append(axis)

    # Cartesian product of the per-column axes gives the full grid.
    return cartesian(axes), axes
def partial_dependence(gbrt, target_variables, grid=None, X=None,
                       percentiles=(0.05, 0.95), grid_resolution=100):
    """Partial dependence of ``target_variables``.

    Partial dependence plots show the dependence between the joint values
    of the ``target_variables`` and the function represented
    by the ``gbrt``.

    Read more in the :ref:`User Guide <partial_dependence>`.

    Parameters
    ----------
    gbrt : BaseGradientBoosting
        A fitted gradient boosting model.

    target_variables : array-like, dtype=int
        The target features for which the partial dependecy should be
        computed (size should be smaller than 3 for visual renderings).

    grid : array-like, shape=(n_points, len(target_variables))
        The grid of ``target_variables`` values for which the
        partial dependecy should be evaluated (either ``grid`` or ``X``
        must be specified).

    X : array-like, shape=(n_samples, n_features)
        The data on which ``gbrt`` was trained. It is used to generate
        a ``grid`` for the ``target_variables``. The ``grid`` comprises
        ``grid_resolution`` equally spaced points between the two
        ``percentiles``.

    percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentile used create the extreme values
        for the ``grid``. Only if ``X`` is not None.

    grid_resolution : int, default=100
        The number of equally spaced points on the ``grid``.

    Returns
    -------
    pdp : array, shape=(n_classes, n_points)
        The partial dependence function evaluated on the ``grid``.
        For regression and binary classification ``n_classes==1``.

    axes : seq of ndarray or None
        The axes with which the grid has been created or None if
        the grid has been given.

    Examples
    --------
    >>> samples = [[0, 0, 2], [1, 0, 0]]
    >>> labels = [0, 1]
    >>> from sklearn.ensemble import GradientBoostingClassifier
    >>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
    >>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
    >>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
    (array([[-4.52...,  4.52...]]), [array([ 0.,  1.])])
    """
    # --- argument validation ---------------------------------------------
    if not isinstance(gbrt, BaseGradientBoosting):
        raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
    if gbrt.estimators_.shape[0] == 0:
        raise ValueError('Call %s.fit before partial_dependence' %
                         gbrt.__class__.__name__)
    # Exactly one of `grid` and `X` must be provided.
    if (grid is None and X is None) or (grid is not None and X is not None):
        raise ValueError('Either grid or X must be specified')

    target_variables = np.asarray(target_variables, dtype=np.int32,
                                  order='C').ravel()

    if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
        raise ValueError('target_variables must be in [0, %d]'
                         % (gbrt.n_features - 1))

    if X is not None:
        # Derive the evaluation grid from the training data percentiles.
        X = check_array(X, dtype=DTYPE, order='C')
        grid, axes = _grid_from_X(X[:, target_variables], percentiles,
                                  grid_resolution)
    else:
        assert grid is not None
        # dont return axes if grid is given
        axes = None
        # grid must be 2d
        if grid.ndim == 1:
            grid = grid[:, np.newaxis]
        if grid.ndim != 2:
            raise ValueError('grid must be 2d but is %dd' % grid.ndim)

    # C-contiguous DTYPE array as required by the Cython helper below.
    grid = np.asarray(grid, dtype=DTYPE, order='C')
    assert grid.shape[1] == target_variables.shape[0]

    # --- accumulate per-tree partial dependence --------------------------
    # n_trees_per_stage > 1 only for multi-class models (one tree per
    # class and stage); pdp[k] is updated in place by the Cython helper,
    # weighted by the learning rate.
    n_trees_per_stage = gbrt.estimators_.shape[1]
    n_estimators = gbrt.estimators_.shape[0]
    pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
                   order='C')
    for stage in range(n_estimators):
        for k in range(n_trees_per_stage):
            tree = gbrt.estimators_[stage, k].tree_
            _partial_dependence_tree(tree, grid, target_variables,
                                     gbrt.learning_rate, pdp[k])

    return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
| bsd-3-clause |
jjhelmus/wradlib | doc/source/conf.py | 1 | 10250 | # -*- coding: utf-8 -*-
#
# wradlib documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 26 13:48:08 2011.
# adapted with code from https://github.com/ARM-DOE/pyart/blob/master/doc/source/conf.py
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os, re
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.coverage',
#'sphinx.ext.pngmath',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinxcontrib.bibtex',
'numpydoc',
'matplotlib.sphinxext.plot_directive',
]
# just generate normal png
plot_formats = ['png']
mathjax_path = ("http://cdn.mathjax.org/mathjax/latest/MathJax.js?"
"config=TeX-AMS-MML_HTMLorMML")
pngmath_latex_preamble=r'\usepackage[active]{preview}' # + other custom stuff for inline math, such as non-default math fonts etc.
pngmath_use_preview=True
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'wradlib'
copyright = u'2011-2016, wradlib developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
import wradlib
# The short X.Y version (including the .devXXXX suffix if present)
version = re.sub(r'^(\d+\.\d+)\.\d+(.*)', r'\1\2', wradlib.__version__)
if 'dev' not in version:
# strip all other suffixes
version = re.sub(r'^(\d+\.\d+).*?$', r'\1', version)
else:
# retain the .dev suffix, but clean it up
#version = re.sub(r'(\.dev\d*).*?$', r'\1', version)
pass
# The full version, including alpha/beta/rc tags.
release = wradlib.__version__
# use the full wradlib version string for docs built on CI
if 'CI' in os.environ and os.environ['CI'] == 'true':
version = release
print("VERSION", wradlib.__version__, version)
# # get current version from file
# with open("../../version") as f:
# VERSION = f.read()
# VERSION = VERSION.strip()
# MAJOR, MINOR, BUGFIX = VERSION.split(".")
#
# # The short X.Y version.
# version = '%s.%s' % (MAJOR, MINOR)
# # The full version, including alpha/beta/rc tags.
# release = '%s.%s.%s' % (MAJOR, MINOR, BUGFIX)
#
# project = project + " v" + release
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {'sticky_navigation' : True}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
##html_theme_options = {"sidebarbgcolor": "black",
## "relbarbgcolor": "black",
## "headtextcolor": "#4A4344",
## "footerbgcolor": "black" }
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = project
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "wradlib"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = "images/wradliblogo_small.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
##html_sidebars = {
## '**': ['globaltoc.html', 'sourcelink.html', 'searchbox.html'],
## 'using/windows': ['windowssidebar.html', 'searchbox.html'],
##}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'wradlibdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'wradlib.tex', u'wradlib Documentation',
u'wradlib developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
intersphinx_mapping = {
'python': ('http://docs.python.org/2', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('http://matplotlib.org/', None),
'sphinx': ('http://sphinx-doc.org', None),
}
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'wradlib', u'wradlib Documentation',
[u'wradlib developers'], 1)
]
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = False
| mit |
magnusax/ml-meta-wrapper | gazer/classifiers/sgdescent.py | 1 | 2644 | from scipy.stats import uniform
from sklearn.linear_model import SGDClassifier
from ..sampling import Loguniform
from ..base import BaseClassifier
class MetaSGDClassifier(BaseClassifier):
    """BaseClassifier wrapper around sklearn's SGDClassifier.

    Holds the estimator together with the hyper-parameter search
    distributions (``cv_params``) used by the surrounding framework.
    """

    def __init__(self, loss='hinge', penalty='l2', alpha=0.0001, l1_ratio=0.15,
                 fit_intercept=True, max_iter=5, learning_rate='optimal', random_state=None):
        self.name = "sgd_%s" % str(loss)
        self.max_n_iter = 1000

        # Constructor arguments forwarded verbatim to SGDClassifier.
        self.init_params = {}
        self.init_params['loss'] = loss
        self.init_params['penalty'] = penalty
        self.init_params['alpha'] = alpha
        self.init_params['l1_ratio'] = l1_ratio
        self.init_params['fit_intercept'] = fit_intercept
        self.init_params['max_iter'] = max_iter
        self.init_params['learning_rate'] = learning_rate
        self.init_params['random_state'] = random_state

        # Initialize algorithm and make it available
        self.estimator = self._get_clf()
        # Initialize dictionary with trainable parameters
        self.cv_params = self._set_cv_params()
        # Initialize list which can be populated with params to tune
        self.cv_params_to_tune = []

    def _get_clf(self):
        """Instantiate the underlying sklearn estimator."""
        return SGDClassifier(**self.init_params)

    def get_info(self):
        """Describe the estimator's capabilities for the framework."""
        return {'does_classification': True,
                'does_multiclass': True,
                'does_regression': False,
                'predict_probas':
                    hasattr(self.estimator, 'predict_proba')}

    def adjust_params(self, par):
        """Delegate parameter adjustment to the BaseClassifier implementation."""
        return super().adjust_params(par)

    def set_tune_params(self, params, num_params=1, mode='random', keys=None):
        """Delegate tuning-parameter selection to the BaseClassifier implementation.

        BUG FIX: the default used to be the mutable ``keys=list()``, which is
        created once at definition time and shared across all calls; a ``None``
        sentinel gives every call its own fresh list.
        """
        if keys is None:
            keys = []
        return super().set_tune_params(params, num_params, mode, keys)

    def _set_cv_params(self):
        """ Dictionary containing all trainable parameters """
        # Trainable params available in:
        # self.cv_params[i].keys() for i in len(self.cv_params)
        return [
            {'penalty': ['l1', 'l2'],
             'alpha': Loguniform(low=1e-8, high=1e+8),
             'fit_intercept': [True, False],
             'class_weight': ['balanced', None],
             'max_iter': [5, 10, 25, 50, 100],
             'learning_rate': ['optimal']},
            {'penalty': ['elasticnet'],
             'l1_ratio': uniform(0, 1),
             'alpha': Loguniform(low=1e-8, high=1e+8),
             'fit_intercept': [True, False],
             'class_weight': ['balanced', None],
             'max_iter': [5, 10, 25, 50, 100],
             'learning_rate': ['optimal']}]
kevinjos/planet-kaggle | code/planetutils.py | 1 | 3161 | import pandas as pd
import numpy as np
import cv2
import os
import random
def mkdir(d):
    """Create directory ``d`` if it does not already exist.

    BUG FIX: the original ``if not os.path.exists(d): os.mkdir(d)`` has a
    check-then-act (TOCTOU) race — a concurrent creation of ``d`` between
    the two calls raised OSError.  EAFP avoids the race: attempt the
    creation and swallow the error only when ``d`` turned out to exist.
    """
    try:
        os.mkdir(d)
    except OSError:
        # Re-raise unless the directory is already there (e.g. created
        # concurrently, or by a previous run).
        if not os.path.isdir(d):
            raise
class FileHandler(object):
    """Locates the Planet-Kaggle input files and iterates over images."""

    def __init__(self, input_basepath="/Users/kjs/repos/planet"):
        # Directories
        self.basepath = input_basepath
        self.path = self.basepath + "/input"
        self.train_tif = "train-tif"
        self.test_jpg = "test-jpg"
        self.train_jpg = "train-jpg"
        self.test_tif = "test-tif"
        # Files
        self.train_labels_csv = "train_v2.csv"
        # Stats
        self.train_n = 40479
        self.test_n = 61191

    def _get_train_labels(self):
        """Parse the label CSV into ``{image_name: [tag, ...]}``."""
        labels = {}
        with open(self.path + "/" + self.train_labels_csv, "r") as fp:
            fp.readline()  # Skip the header
            for line in fp:
                name, tags = line.split(",")
                labels[name] = tags.strip().split(" ")
        return labels

    def _get_iter(self, samp, imgtyp="tif", h=256, w=256, maxn=None):
        """Yield ``(name, image)`` pairs from a shuffled sample directory.

        Images are read with OpenCV and resized to ``(h, w)``; at most
        ``maxn`` files are yielded when ``maxn`` is given.
        """
        expected_samp = ("train", "test")
        expected_imgtyp = ("jpg", "tif")
        if samp not in expected_samp or imgtyp not in expected_imgtyp:
            raise NameError(samp, "%s found but expected string in %s or %s" % (samp, expected_samp, expected_imgtyp))
        # Dispatch table replaces the original if/elif ladder.
        subdirs = {
            ("train", "tif"): self.train_tif,
            ("train", "jpg"): self.train_jpg,
            ("test", "tif"): self.test_tif,
            ("test", "jpg"): self.test_jpg,
        }
        path = self.path + "/" + subdirs[(samp, imgtyp)]
        files = os.listdir(path)
        random.shuffle(files)
        for fn in files[:maxn]:
            name = fn.split(".")[0]
            img = cv2.imread(path + "/" + fn, -1)
            img = cv2.resize(img, (h, w))
            yield (name, img)
class DataHandler(FileHandler):
    """FileHandler that additionally one-hot encodes the training labels."""

    def __init__(self, **kwargs):
        super(DataHandler, self).__init__(**kwargs)
        self.train_labels = None
        self.set_train_labels()

    def set_train_labels(self):
        """Build ``self.train_labels``: a DataFrame with a ``name`` column
        plus one boolean column per tag, one row per training image.

        BUG FIX: the original used ``dict.iteritems()``, which exists only
        on Python 2 and raises AttributeError on Python 3; ``.items()``
        behaves identically on both.
        """
        train_labels_raw = self._get_train_labels()
        pd_encoded = {"name": []}
        # First pass: discover the full tag vocabulary so every row gets a
        # value in every column.
        for k, v in train_labels_raw.items():
            for t in v:
                if t not in pd_encoded:
                    pd_encoded[t] = []
        # Second pass: one-hot encode each image's tag list.
        for k, v in train_labels_raw.items():
            for pd_k in pd_encoded.keys():
                if pd_k == "name":
                    pd_encoded["name"].append(k)
                    continue
                pd_encoded[pd_k].append(pd_k in v)
        pd_encoded = pd.DataFrame(pd_encoded)
        self.train_labels = pd_encoded

    def get_train_iter(self, imgtyp="tif", h=256, w=256, maxn=None):
        """Yield ``(image, label_row)`` pairs for the training set."""
        train_iter = self._get_iter("train", imgtyp=imgtyp, h=h, w=w, maxn=maxn)
        for name, X in train_iter:
            Y = self.train_labels.loc[self.train_labels["name"] == name]
            yield (X, Y)

    def get_test_iter(self, imgtyp="tif", h=256, w=256, maxn=None):
        """Return an iterator of ``(name, image)`` pairs for the test set."""
        test_iter = self._get_iter("test", imgtyp=imgtyp, h=h, w=w, maxn=maxn)
        return test_iter
| agpl-3.0 |
yashpungaliya/MailingListParser | lib/input/mbox/keyword_clustering.py | 1 | 7192 | import json
import mailbox
import numpy as np
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.cluster import KMeans
from sklearn.feature_extraction.text import TfidfVectorizer
from analysis.author import generate_author_ranking
from util import custom_stopwords
from util.read_utils import *
def get_top_authors(top_n, json_filename):
    """
    Return the ``top_n`` highest-ranked author addresses.

    :return: (set of email addresses, dict mapping email address -> 1-based rank)
    """
    author_scores = generate_author_ranking(json_filename, output_filename=None,
                                            active_score=2, passive_score=1,
                                            write_to_file=False)
    top_authors = set()
    top_authors_index = dict()
    # enumerate(..., start=1) replaces the original manual rank counter.
    for rank, (email_addr, author_score) in enumerate(author_scores, start=1):
        top_authors.add(email_addr)
        top_authors_index[email_addr] = rank
        if rank == top_n:
            break
    return top_authors, top_authors_index
def save_sparse_csr(filename, array):
    """Persist a scipy CSR matrix to ``filename`` (an ``.npz`` archive).

    The four defining arrays of the CSR format are stored under named
    keys so the matrix can be reconstructed later.
    """
    components = {
        "data": array.data,
        "indices": array.indices,
        "indptr": array.indptr,
        "shape": array.shape,
    }
    np.savez(filename, **components)
def get_message_body(message):
    """
    Extract and clean the body text of a mailbox message.

    Quoted/diff lines (and the attribution line immediately preceding
    them), patch separators, and boilerplate trailer lines such as
    "Signed-off-by:" are stripped out.

    :param message: a ``mailbox``/``email`` message object.
    :return: cleaned message body as a single newline-joined string.
    """
    # Boilerplate prefixes whose lines are dropped entirely.  A single
    # startswith() on a tuple replaces the original nine-branch elif chain.
    drop_prefixes = ("Cc:", "Signed-off-by:", "Acked-by:", "From:",
                     "Tested-by:", "Reported-by:", "Reviewed-by:",
                     "Link:", "Suggested-by:")

    msg_body = None
    if message.is_multipart():
        # NOTE(review): as in the original, only the payload of the *last*
        # part visited survives in msg_body — earlier parts are overwritten.
        for part in message.walk():
            if part.is_multipart():
                for subpart in part.walk():
                    msg_body = subpart.get_payload(decode=False)
            else:
                msg_body = part.get_payload(decode=False)
    else:
        msg_body = message.get_payload(decode=False)

    msg_body = msg_body.splitlines()
    for num in range(len(msg_body)):
        if msg_body[num]:
            if msg_body[num] == "---":
                # Signature/patch separator: everything after it is noise.
                msg_body = msg_body[:num]
                break
            if msg_body[num][0] in ('>', '+', '-', '@'):
                # Quoted or diff line: blank it and the attribution line
                # that usually precedes it.
                msg_body[num] = ""
                if num > 0:
                    msg_body[num - 1] = ""
            elif msg_body[num].startswith(drop_prefixes):
                msg_body[num] = ""
    msg_body = [x.strip() for x in msg_body]
    msg_body = [x for x in msg_body if x != ""]
    msg_body = '\n'.join(msg_body)
    return msg_body
def generate_kmeans_clustering(mbox_filename, output_filename, author_uid_filename, json_filename, top_n = None):
    """
    Cluster the top mailing-list authors by the vocabulary of their emails.

    Reads messages from the MBOX file, keeps only those sent by the
    ``top_n`` ranked authors, tokenizes and lemmatizes each body (stop
    words removed), builds one keyword document per author, runs tf-idf
    over those documents and finally k-means (8 clusters) on the tf-idf
    matrix.  The resulting mapping ``{cluster_label: [author_uid, ...]}``
    is written to ``output_filename`` as JSON.

    :param mbox_filename: path of the MBOX archive to read.
    :param output_filename: path of the JSON file the clustering is written to.
    :param author_uid_filename: path of a JSON file mapping author email -> uid.
    :param json_filename: path of the headers JSON used for author ranking.
    :param top_n: number of top-ranked authors to keep (capped at the number
        of known authors).  NOTE(review): if ``None`` is actually passed,
        ``min(len(author_uid_map), None)`` raises on Python 3 — confirm the
        callers always supply an int.
    :return: None; the clustering is persisted as a side effect.
    """
    english_stopwords = set(stopwords.words('english')) | custom_stopwords.common_words | custom_stopwords.custom_words
    email_re = re.compile(r'[\w\.-]+@[\w\.-]+')
    wnl = WordNetLemmatizer()
    print("Reading messages from MBOX file...")
    mailbox_obj = mailbox.mbox(mbox_filename)
    with open(author_uid_filename, 'r') as map_file:
        author_uid_map = json.load(map_file)
        map_file.close()
    top_n = min(len(author_uid_map), top_n)
    top_authors, top_authors_index = get_top_authors(top_n, json_filename)
    # Index 0 is unused: top_authors_index ranks are 1-based.
    keywords_list = [list() for x in range(top_n+1)]
    i = 0  # Number of emails processed
    for message in mailbox_obj:
        # Extract a bare email address from the From header when possible.
        temp = email_re.search(str(message['From']))
        from_addr = temp.group(0) if temp is not None else message['From']
        if top_n is not None and from_addr not in top_authors:
            continue
        if top_n is None and from_addr not in author_uid_map.keys():
            continue
        msg_body = get_message_body(message)
        if from_addr is None:
            from_addr = message['From']
        # Keep lowercase word tokens of length 3..29.
        msg_tokens = [x.lower() for x in re.sub('\W+', ' ', msg_body).split() if 2 < len(x) < 30]
        # Toggle comment below if numbers and underscores should also be removed.
        # msg_tokens = [x for x in re.sub('[^a-zA-Z]+', ' ', msg_body).split() if 2 < len(x) < 30]
        # Lemmatize; drop pure numbers and substrings of the sender address.
        msg_tokens = [wnl.lemmatize(x) for x in msg_tokens if not x.isdigit() and x not in from_addr]
        msg_tokens = [x for x in msg_tokens if x not in english_stopwords]
        keywords_list[top_authors_index[from_addr]].extend(msg_tokens)
        i += 1
        if not i % 10000:
            print(i, "of", len(mailbox_obj), "messages processed.")
    # Collapse each author's token list into a single document string.
    for num in range(len(keywords_list)):
        keywords_list[num] = " ".join(keywords_list[num])
    print("Performing tf-idf analysis on the term-document matrix...")
    vectorizer = TfidfVectorizer(analyzer='word', stop_words=english_stopwords, max_features=200000,
                                 use_idf=True, ngram_range=(1, 4))
    tfidf_matrix = vectorizer.fit_transform(keywords_list).toarray()
    # with open("author_top_index.json", 'w') as json_file:
    #     json.dump(top_authors_index, json_file)
    # print(feature_names)
    kmeans_classifier = KMeans(n_clusters=8, n_init=4)
    labels = kmeans_classifier.fit_predict(tfidf_matrix)
    # Invert author_uid_map per row to recover the email for each uid i.
    # NOTE(review): linear scan per row is O(n^2) overall; presumably
    # acceptable for the expected author counts.
    clustering = dict()
    for i in range(len(labels)):
        x = None
        for k, v in author_uid_map.items():
            if v == i:
                x = k
        if clustering.get(str(labels[i]), None) is None:
            clustering[str(labels[i])] = [x]
        else:
            clustering[str(labels[i])].append(x)
    with open(output_filename, 'w') as out_file:
        json.dump(clustering, out_file)
        out_file.close()
| gpl-3.0 |
plotly/plotly.py | packages/python/plotly/plotly/figure_factory/_dendrogram.py | 1 | 13956 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from collections import OrderedDict
from plotly import exceptions, optional_imports
from plotly.graph_objs import graph_objs
# Optional imports, may be None for users that only use our core functionality.
np = optional_imports.get_module("numpy")
scp = optional_imports.get_module("scipy")
sch = optional_imports.get_module("scipy.cluster.hierarchy")
scs = optional_imports.get_module("scipy.spatial")
def create_dendrogram(
    X,
    orientation="bottom",
    labels=None,
    colorscale=None,
    distfun=None,
    linkagefun=lambda x: sch.linkage(x, "complete"),
    hovertext=None,
    color_threshold=None,
):
    """
    Function that returns a dendrogram Plotly figure object. This is a thin
    wrapper around scipy.cluster.hierarchy.dendrogram.

    See also https://dash.plot.ly/dash-bio/clustergram.

    :param (ndarray) X: Matrix of observations as array of arrays
    :param (str) orientation: 'top', 'right', 'bottom', or 'left'
    :param (list) labels: List of axis category labels(observation labels)
    :param (list) colorscale: Optional colorscale for the dendrogram tree.
                              Requires 8 colors to be specified, the 7th of
                              which is ignored.  With scipy>=1.5.0, the 2nd, 3rd
                              and 6th are used twice as often as the others.
                              Given a shorter list, the missing values are
                              replaced with defaults and with a longer list the
                              extra values are ignored.
    :param (function) distfun: Function to compute the pairwise distance from
                               the observations
    :param (function) linkagefun: Function to compute the linkage matrix from
                                  the pairwise distances
    :param (list[list]) hovertext: List of hovertext for constituent traces of dendrogram
                                   clusters
    :param (double) color_threshold: Value at which the separation of clusters will be made

    :raises ImportError: if scipy is not installed.
    :raises PlotlyError: if ``X`` is not two-dimensional.

    Example 1: Simple bottom oriented dendrogram

    >>> from plotly.figure_factory import create_dendrogram

    >>> import numpy as np

    >>> X = np.random.rand(10,10)
    >>> fig = create_dendrogram(X)
    >>> fig.show()

    Example 2: Dendrogram to put on the left of the heatmap

    >>> from plotly.figure_factory import create_dendrogram

    >>> import numpy as np

    >>> X = np.random.rand(5,5)
    >>> names = ['Jack', 'Oxana', 'John', 'Chelsea', 'Mark']
    >>> dendro = create_dendrogram(X, orientation='right', labels=names)
    >>> dendro.update_layout({'width':700, 'height':500}) # doctest: +SKIP
    >>> dendro.show()

    Example 3: Dendrogram with Pandas

    >>> from plotly.figure_factory import create_dendrogram

    >>> import numpy as np
    >>> import pandas as pd

    >>> Index= ['A','B','C','D','E','F','G','H','I','J']
    >>> df = pd.DataFrame(abs(np.random.randn(10, 10)), index=Index)
    >>> fig = create_dendrogram(df, labels=Index)
    >>> fig.show()
    """
    if not scp or not scs or not sch:
        raise ImportError(
            "FigureFactory.create_dendrogram requires scipy, \
scipy.spatial and scipy.hierarchy"
        )

    s = X.shape
    if len(s) != 2:
        # BUG FIX: the PlotlyError used to be constructed but never raised,
        # so invalid input silently fell through to scipy with a confusing
        # downstream error.
        raise exceptions.PlotlyError("X should be 2-dimensional array.")

    if distfun is None:
        distfun = scs.distance.pdist

    dendrogram = _Dendrogram(
        X,
        orientation,
        labels,
        colorscale,
        distfun=distfun,
        linkagefun=linkagefun,
        hovertext=hovertext,
        color_threshold=color_threshold,
    )

    return graph_objs.Figure(data=dendrogram.data, layout=dendrogram.layout)
class _Dendrogram(object):
"""Refer to FigureFactory.create_dendrogram() for docstring."""
    def __init__(
        self,
        X,
        orientation="bottom",
        labels=None,
        colorscale=None,
        width=np.inf,
        height=np.inf,
        xaxis="xaxis",
        yaxis="yaxis",
        distfun=None,
        linkagefun=lambda x: sch.linkage(x, "complete"),
        hovertext=None,
        color_threshold=None,
    ):
        # See create_dendrogram() for the meaning of the parameters.
        self.orientation = orientation
        self.labels = labels
        self.xaxis = xaxis
        self.yaxis = yaxis
        self.data = []
        self.leaves = []
        # Sign flips mirror the tree for 'top'/'left' orientations so the
        # branches always grow away from the leaf axis.
        self.sign = {self.xaxis: 1, self.yaxis: 1}
        self.layout = {self.xaxis: {}, self.yaxis: {}}

        if self.orientation in ["left", "bottom"]:
            self.sign[self.xaxis] = 1
        else:
            self.sign[self.xaxis] = -1

        if self.orientation in ["right", "bottom"]:
            self.sign[self.yaxis] = 1
        else:
            self.sign[self.yaxis] = -1

        if distfun is None:
            # Default pairwise distance: scipy's pdist.
            distfun = scs.distance.pdist

        # Build the scipy dendrogram and convert it into Plotly traces.
        (dd_traces, xvals, yvals, ordered_labels, leaves) = self.get_dendrogram_traces(
            X, colorscale, distfun, linkagefun, hovertext, color_threshold
        )

        self.labels = ordered_labels
        self.leaves = leaves
        yvals_flat = yvals.flatten()
        xvals_flat = xvals.flatten()

        # Leaf tick positions are the x-coordinates where the dendrogram
        # height is exactly zero.
        self.zero_vals = []

        for i in range(len(yvals_flat)):
            if yvals_flat[i] == 0.0 and xvals_flat[i] not in self.zero_vals:
                self.zero_vals.append(xvals_flat[i])

        if len(self.zero_vals) > len(yvals) + 1:
            # If the length of zero_vals is larger than the length of yvals,
            # it means that there are wrong vals because of the identicial samples.
            # Three and more identicial samples will make the yvals of spliting
            # center into 0 and it will accidentally take it as leaves.
            l_border = int(min(self.zero_vals))
            r_border = int(max(self.zero_vals))
            correct_leaves_pos = range(
                l_border, r_border + 1, int((r_border - l_border) / len(yvals))
            )
            # Regenerating the leaves pos from the self.zero_vals with equally intervals.
            self.zero_vals = [v for v in correct_leaves_pos]

        self.zero_vals.sort()
        self.layout = self.set_figure_layout(width, height)
        self.data = dd_traces
def get_color_dict(self, colorscale):
"""
Returns colorscale used for dendrogram tree clusters.
:param (list) colorscale: Colors to use for the plot in rgb format.
:rtype (dict): A dict of default colors mapped to the user colorscale.
"""
# These are the color codes returned for dendrograms
# We're replacing them with nicer colors
# This list is the colors that can be used by dendrogram, which were
# determined as the combination of the default above_threshold_color and
# the default color palette (see scipy/cluster/hierarchy.py)
d = {
"r": "red",
"g": "green",
"b": "blue",
"c": "cyan",
"m": "magenta",
"y": "yellow",
"k": "black",
# TODO: 'w' doesn't seem to be in the default color
# palette in scipy/cluster/hierarchy.py
"w": "white",
}
default_colors = OrderedDict(sorted(d.items(), key=lambda t: t[0]))
if colorscale is None:
rgb_colorscale = [
"rgb(0,116,217)", # blue
"rgb(35,205,205)", # cyan
"rgb(61,153,112)", # green
"rgb(40,35,35)", # black
"rgb(133,20,75)", # magenta
"rgb(255,65,54)", # red
"rgb(255,255,255)", # white
"rgb(255,220,0)", # yellow
]
else:
rgb_colorscale = colorscale
for i in range(len(default_colors.keys())):
k = list(default_colors.keys())[i] # PY3 won't index keys
if i < len(rgb_colorscale):
default_colors[k] = rgb_colorscale[i]
# add support for cyclic format colors as introduced in scipy===1.5.0
# before this, the colors were named 'r', 'b', 'y' etc., now they are
# named 'C0', 'C1', etc. To keep the colors consistent regardless of the
# scipy version, we try as much as possible to map the new colors to the
# old colors
# this mapping was found by inpecting scipy/cluster/hierarchy.py (see
# comment above).
new_old_color_map = [
("C0", "b"),
("C1", "g"),
("C2", "r"),
("C3", "c"),
("C4", "m"),
("C5", "y"),
("C6", "k"),
("C7", "g"),
("C8", "r"),
("C9", "c"),
]
for nc, oc in new_old_color_map:
try:
default_colors[nc] = default_colors[oc]
except KeyError:
# it could happen that the old color isn't found (if a custom
# colorscale was specified), in this case we set it to an
# arbitrary default.
default_colors[n] = "rgb(0,116,217)"
return default_colors
def set_axis_layout(self, axis_key):
    """
    Sets and returns default axis object for dendrogram figure.

    :param (str) axis_key: E.g., 'xaxis', 'xaxis1', 'yaxis', yaxis1', etc.
    :rtype (dict): An axis_key dictionary with set parameters.
    """
    defaults = {
        "type": "linear",
        "ticks": "outside",
        "mirror": "allticks",
        "rangemode": "tozero",
        "showticklabels": True,
        "zeroline": False,
        "showgrid": False,
        "showline": True,
    }

    if len(self.labels) > 0:
        # Leaf labels live on the x axis unless the tree grows sideways,
        # in which case they belong to the y axis.
        if self.orientation in ("left", "right"):
            label_axis = self.yaxis
        else:
            label_axis = self.xaxis
        if label_axis not in self.layout:
            self.layout[label_axis] = {}
        self.layout[label_axis]["tickvals"] = [
            v * self.sign[axis_key] for v in self.zero_vals
        ]
        self.layout[label_axis]["ticktext"] = self.labels
        self.layout[label_axis]["tickmode"] = "array"

    self.layout[axis_key].update(defaults)
    return self.layout[axis_key]
def set_figure_layout(self, width, height):
    """
    Sets and returns default layout object for dendrogram figure.
    """
    base_layout = {
        "showlegend": False,
        "autosize": False,
        "hovermode": "closest",
        "width": width,
        "height": height,
    }
    self.layout.update(base_layout)

    # Apply the default axis settings to both axes, x first.
    for key in (self.xaxis, self.yaxis):
        self.set_axis_layout(key)

    return self.layout
def get_dendrogram_traces(
    self, X, colorscale, distfun, linkagefun, hovertext, color_threshold
):
    """
    Calculates all the elements needed for plotting a dendrogram.

    :param (ndarray) X: Matrix of observations as array of arrays
    :param (list) colorscale: Color scale for dendrogram tree clusters
    :param (function) distfun: Function to compute the pairwise distance
                               from the observations
    :param (function) linkagefun: Function to compute the linkage matrix
                                  from the pairwise distances
    :param (list) hovertext: List of hovertext for constituent traces of dendrogram
    :rtype (tuple): Contains all the traces in the following order:
        (a) trace_list: List of Plotly trace objects for dendrogram tree
        (b) icoord: All X points of the dendrogram tree as array of arrays
            with length 4
        (c) dcoord: All Y points of the dendrogram tree as array of arrays
            with length 4
        (d) ordered_labels: leaf labels in the order they are going to
            appear on the plot
        (e) P['leaves']: left-to-right traversal of the leaves
    """
    d = distfun(X)
    Z = linkagefun(d)
    P = sch.dendrogram(
        Z,
        orientation=self.orientation,
        labels=self.labels,
        no_plot=True,
        color_threshold=color_threshold,
    )

    # BUG FIX: scipy.array was a deprecated alias of numpy.array and was
    # removed in SciPy 1.9; use numpy directly.
    icoord = np.asarray(P["icoord"])
    dcoord = np.asarray(P["dcoord"])
    ordered_labels = np.asarray(P["ivl"])
    color_list = np.asarray(P["color_list"])
    colors = self.get_color_dict(colorscale)

    trace_list = []

    for i in range(len(icoord)):
        # xs and ys are arrays of 4 points that make up the '∩' shapes
        # of the dendrogram tree
        if self.orientation in ["top", "bottom"]:
            xs = icoord[i]
            ys = dcoord[i]
        else:
            xs = dcoord[i]
            ys = icoord[i]
        color_key = color_list[i]
        hovertext_label = None
        if hovertext:
            hovertext_label = hovertext[i]
        trace = dict(
            type="scatter",
            x=np.multiply(self.sign[self.xaxis], xs),
            y=np.multiply(self.sign[self.yaxis], ys),
            mode="lines",
            marker=dict(color=colors[color_key]),
            text=hovertext_label,
            hoverinfo="text",
        )

        # Attach the traces to the numbered axis ('x2', 'y2', ...) when the
        # axis name carries an index; plain 'xaxis'/'yaxis' map to 'x'/'y'.
        # BUG FIX: the old code computed int(...) and then evaluated
        # "x" + x_index, which raised TypeError (str + int) whenever the
        # axis name ended in a digit. Keep the index as a string.
        x_index = self.xaxis[-1] if self.xaxis[-1].isdigit() else ""
        y_index = self.yaxis[-1] if self.yaxis[-1].isdigit() else ""
        trace["xaxis"] = "x" + x_index
        trace["yaxis"] = "y" + y_index
        trace_list.append(trace)

    return trace_list, icoord, dcoord, ordered_labels, P["leaves"]
| mit |
IQuOD/AutoQC | catchall.py | 1 | 6808 | # algorithm:
# 0. remove from consideration any QC test that fails to produce TPR / FPR >= some tunable threshold
# 1. remove from consideration any bad profile not flagged by any test; put these aside for new qc test design
# 2. accept all individual qc tests with 0% fpr; remove these from consideration, along with all profiles they flag
# 3. form list of ntuple AND combos, add their decisions to consideration
# 4. identify profiles flagged by exactly one combination. Accept that combination, drop all profiles marked by this combination, and drop this combination from further consideration
# 5. drop the remaining combination with the highest false positive rate (note at this step all remaining profiles are flagged by at least two combination, so this will not raise the false negative rate).
# 6. go back to 4; loop until the list of accepted combinations flags all bad profiles not dropped in step 1.
ar = __import__('analyse-results')
import util.main as main
import util.dbutils as dbutils
import itertools, sys, json, getopt
from operator import itemgetter
def ntuples(names, n=2):
    '''
    given a list of names of tests, form every ntuple up to and including n combinations from the list
    return as a list of tuples.
    '''
    return [
        combo
        for size in range(2, n + 1)
        for combo in itertools.combinations(names, size)
    ]
def amend(combo, df):
    '''
    add a column to df describing the results of combo
    column title will be the combo elements joined with '&'

    The new column holds the logical AND of the member tests' columns.
    Returns a new frame (df itself is left unmodified) whose index labels
    are converted to strings, matching the original behaviour.
    '''
    decision = df[combo[0]]
    for test in combo[1:]:
        decision = decision & df[test]
    name = '&'.join(combo)
    # BUG FIX: the previous implementation staged the result in a column
    # literally named 'xx' and then renamed it, which would silently
    # clobber a real column called 'xx'. Assign under the final name
    # directly instead.
    return df.assign(**{name: decision}).rename(index=str)
# parse command line options
options, remainder = getopt.getopt(sys.argv[1:], 't:d:n:o:h')

targetdb = 'iquod.db'
dbtable = 'iquod'
outfile = 'htp.json'
samplesize = None

for opt, arg in options:
    if opt == '-d':
        dbtable = arg
    if opt == '-t':
        targetdb = arg
    if opt == '-n':
        samplesize = int(arg)
    if opt == '-o':
        outfile = arg
    if opt == '-h':
        print('usage:')
        print('-d <db table name to read from>')
        print('-t <name of db file>')
        print('-n <number of profiles to consider>')
        print('-o <filename to write json results out to>')
        print('-h print this help message and quit')
        # BUG FIX: '-h' promises to quit, but execution previously fell
        # through into the full analysis run.
        sys.exit(0)

if samplesize is None:
    print('please provide a sample size to consider with the -n flag')
    print('-h to print usage')
    # BUG FIX: the script previously carried on with samplesize=None after
    # reporting the missing required argument.
    sys.exit(1)
# Read QC test specifications if required.
groupdefinition = ar.read_qc_groups()

# Whether to flag the extracted profiles as the training set in the database.
# BUG FIX: 'mark_training' was previously only a literal keyword argument to
# db_to_df; the bare name in the 'if' below raised a NameError.
mark_training = False

# Read data from database into a pandas data frame.
df = dbutils.db_to_df(table=dbtable,
                      targetdb=targetdb,
                      filter_on_wire_break_test=False,
                      filter_on_tests=groupdefinition,
                      n_to_extract=samplesize,
                      pad=2,
                      XBTbelow=True,
                      mark_training=mark_training)
# First two columns are uid and Truth; the rest are individual QC tests.
testNames = df.columns[2:].values.tolist()

# declare some downstream constructs
accepted = []
unflagged = []
fprs = []
bad = df.loc[df['Truth']]
bad.reset_index(inplace=True, drop=True)

# mark chosen profiles as part of the training set
all_uids = main.dbinteract('SELECT uid from ' + dbtable + ';', targetdb=targetdb)
if mark_training:
    for uid in all_uids:
        uid = uid[0]
        is_training = int(uid in df['uid'].astype(int).to_numpy())
        # NOTE(review): SQL built by string concatenation; values come from
        # the database itself, but parameterized queries would be safer if
        # the dbinteract helper supports them.
        query = "UPDATE " + dbtable + " SET training=" + str(is_training) + " WHERE uid=" + str(uid) + ";"
        main.dbinteract(query, targetdb=targetdb)
# algo. step 0:
# demand individual QC tests have TPR/FPR > some threshold
perf_thresh = 2
drop_tests = []
for test in testNames:
    tpr, fpr, fnr, tnr = main.calcRates(df[test].tolist(), df['Truth'].tolist())
    if fpr > 0 and tpr / fpr < perf_thresh:
        print('dropping', test, '; tpr/fpr = ', tpr / fpr)
        # BUG FIX: DataFrame.drop is not in-place; the returned frame was
        # previously discarded, so rejected columns never left df / bad.
        df = df.drop([test], axis=1)
        bad = bad.drop([test], axis=1)
        drop_tests.append(test)
testNames = [x for x in testNames if x not in drop_tests]

# algo. step 1:
# note profiles that weren't flagged by any test
for i in range(len(bad)):
    if not any(bad.iloc[i][testNames]):
        unflagged.append(bad.iloc[i]['uid'])
# drop these from consideration
bad = bad[~bad['uid'].isin(unflagged)]

# algo. step 2:
# assess fprs for individual tests
for x in testNames:
    tpr, fpr, fnr, tnr = main.calcRates(df[x].to_numpy(), df['Truth'].to_numpy())
    fprs.append([x, fpr, tpr])

# accept tests that flag bad profiles with no false positives
print('number of bad profiles to consider:', len(bad))
# BUG FIX: corrected typo 'poitives' in the progress message.
print('these tests accepted for having no false positives and more than zero true positives:')
for test in fprs:
    if test[1] == 0 and test[2] > 0:
        accepted.append(test[0])
        print(test[0])
        bad = bad[bad[test[0]] == False]
        bad = bad.drop([test[0]], axis=1)
        testNames.remove(test[0])
fprs = [elt for elt in fprs if elt[0] not in accepted]
print('number of bad profiles remaining:', len(bad))

# algo. step 3
# add a column to df for each combo, summarizing its decision for each profile
combos = ntuples(testNames)
combonames = ['&'.join(x) for x in combos]
for combo in combos:
    bad = amend(combo, bad)
    df = amend(combo, df)

# assess tpr, fpr for each test and combo:
for x in combonames:
    tpr, fpr, fnr, tnr = main.calcRates(df[x].to_numpy(), df['Truth'].to_numpy())
    fprs.append([x, fpr, tpr])
fprs.sort(key=lambda tup: tup[1])

# algo. step 4
while len(bad) > 0:
    nosingleflags = True
    for i in range(len(bad)):
        x = bad.iloc[i][testNames + combonames]
        if sum(x) == 1:
            winner = x[x].keys()[0]
            accepted.append(winner)  # accept the combo as the only one flagging this bad profile
            ff = [elt for elt in fprs if elt[0] == winner][0][1]
            tf = [elt for elt in fprs if elt[0] == winner][0][2]
            print('accepted', winner, 'tpr=', tf, '; fpr=', ff)
            bad = bad[bad[winner] == False]  # drop all bad profiles flagged by this combo
            bad = bad.drop([winner], axis=1)  # remove the combo from consideration
            # BUG FIX: these filters previously compared strings with
            # 'is not' (identity), which is fragile; compare by value.
            testNames = [elt for elt in testNames if elt != winner]
            combonames = [elt for elt in combonames if elt != winner]
            fprs = [elt for elt in fprs if elt[0] != winner]
            nosingleflags = False
            break
    # algo. step 5
    if nosingleflags:
        # drop the remaining combination with the highest false positive
        # rate; fprs is sorted ascending by fpr, so that is the last entry.
        maxfpr = fprs[-1][0]
        bad = bad.drop([maxfpr], axis=1)
        testNames = [x for x in testNames if x != maxfpr]
        combonames = [x for x in combonames if x != maxfpr]
        del fprs[-1]

print('profiles not caught by any test:')
print(unflagged)

# BUG FIX: write the result through a context manager so the file handle is
# closed even if json.dump raises.
with open(outfile, 'w') as f:
    json.dump({'tests': accepted}, f)
| mit |
dkillick/iris | docs/iris/src/conf.py | 1 | 11165 | # (C) British Crown Copyright 2010 - 2018, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# -*- coding: utf-8 -*-
#
# Iris documentation build configuration file, created by
# sphinx-quickstart on Tue May 25 13:26:23 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('sphinxext'))
# add some sample files from the developers guide..
sys.path.append(os.path.abspath(os.path.join('developers_guide')))
# -- General configuration -----------------------------------------------------
# Temporary value for use by LaTeX and 'man' output.
# Deleted at the end of the module.
_authors = ('Iris developers')
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.graphviz',
'sphinx.ext.imgmath',
'sphinx.ext.intersphinx',
'matplotlib.sphinxext.mathmpl',
'matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive',
# better class documentation
'custom_class_autodoc',
# Data instance __repr__ filter.
'custom_data_autodoc',
'gen_example_directory',
'generate_package_rst',
'gen_gallery',
# Add labels to figures automatically
'auto_label_figures',
]
# list of packages to document
autopackage_name = ['iris']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'contents'
# General information about the project.
project = u'Iris'
# define the copyright information for latex builds. Note, for html builds,
# the copyright exists directly inside "_templates/layout.html"
upper_copy_year = datetime.datetime.now().year
copyright = u'British Crown Copyright 2010 - {}, Met Office'.format(upper_copy_year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Derive Sphinx's `version`/`release` strings from the installed iris
# package so the built docs always match the library being documented.
import iris

# The short X.Y version.
if iris.__version__ == 'dev':
    version = 'dev'
else:
    # major.feature(.minor)-dev -> major.minor
    version = '.'.join(iris.__version__.split('-')[0].split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = iris.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['sphinxext', 'build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Define the default highlight language. This also allows the >>> removal
# javascript (copybutton.js) to function.
highlight_language = 'default'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['iris']
intersphinx_mapping = {
'cartopy': ('http://scitools.org.uk/cartopy/docs/latest/', None),
'iris-grib': ('http://iris-grib.readthedocs.io/en/latest/', None),
'matplotlib': ('http://matplotlib.org/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'python': ('http://docs.python.org/2.7', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
}
# -- Extlinks extension -------------------------------------------------------
extlinks = {'issue': ('https://github.com/SciTools/iris/issues/%s',
'Issue #'),
'pull': ('https://github.com/SciTools/iris/pull/%s', 'PR #'),
}
# -- Doctest ------------------------------------------------------------------
doctest_global_setup = 'import iris'
# -- Autodoc ------------------------------------------------------------------
autodoc_member_order = 'groupwise'
autodoc_default_flags = ['show-inheritance']
# include the __init__ method when documenting classes
# document the init/new method at the top level of the class documentation rather than displaying the class docstring
autoclass_content='init'
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# (A redundant assignment of 'default' immediately overwritten by
# 'sphinxdoc' was removed; 'sphinxdoc' is the value that took effect.)
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_context = {'copyright_years': '2010 - {}'.format(upper_copy_year)}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {'index': 'index.html', 'gallery':'gallery.html'}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Irisdoc'
html_use_modindex = False
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'Iris.tex', u'Iris Documentation', ' \\and '.join(_authors), 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
latex_elements = {}
latex_elements['docclass'] = 'MO_report'
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'iris', u'Iris Documentation', _authors, 1)
]
##########################
# plot directive options #
##########################
plot_formats = [('png', 100),
#('hires.png', 200), ('pdf', 250)
]
# Delete the temporary value.
del _authors
| lgpl-3.0 |
mjudsp/Tsallis | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
    """A linear-kernel KernelRidge matches Ridge without an intercept."""
    ridge_pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
    kr_pred = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
    assert_array_almost_equal(ridge_pred, kr_pred)


def test_kernel_ridge_csr():
    """Agreement is preserved when the input is a CSR sparse matrix."""
    ridge = Ridge(alpha=1, fit_intercept=False, solver="cholesky")
    ridge_pred = ridge.fit(Xcsr, y).predict(Xcsr)
    kr_pred = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
    assert_array_almost_equal(ridge_pred, kr_pred)


def test_kernel_ridge_csc():
    """Agreement is preserved when the input is a CSC sparse matrix."""
    ridge = Ridge(alpha=1, fit_intercept=False, solver="cholesky")
    ridge_pred = ridge.fit(Xcsc, y).predict(Xcsc)
    kr_pred = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
    assert_array_almost_equal(ridge_pred, kr_pred)


def test_kernel_ridge_singular_kernel():
    # alpha=0 causes a LinAlgError in computing the dual coefficients,
    # which causes a fallback to a lstsq solver. This is tested here.
    ridge_pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
    kr = KernelRidge(kernel="linear", alpha=0)
    ignore_warnings(kr.fit)(X, y)
    assert_array_almost_equal(ridge_pred, kr.predict(X))


def test_kernel_ridge_precomputed():
    """A precomputed Gram matrix reproduces every built-in kernel."""
    for kernel in ["linear", "rbf", "poly", "cosine"]:
        gram = pairwise_kernels(X, X, metric=kernel)
        direct = KernelRidge(kernel=kernel).fit(X, y).predict(X)
        precomp = KernelRidge(kernel="precomputed").fit(gram, y).predict(gram)
        assert_array_almost_equal(direct, precomp)


def test_kernel_ridge_precomputed_kernel_unchanged():
    """Fitting on a precomputed kernel must not mutate the caller's matrix."""
    gram = np.dot(X, X.T)
    gram_copy = gram.copy()
    KernelRidge(kernel="precomputed").fit(gram, y)
    assert_array_almost_equal(gram, gram_copy)


def test_kernel_ridge_sample_weights():
    """Sample weights behave identically across Ridge and KernelRidge."""
    gram = np.dot(X, X.T)  # precomputed kernel
    weights = np.random.RandomState(0).rand(X.shape[0])
    ridge_pred = Ridge(alpha=1, fit_intercept=False).fit(
        X, y, sample_weight=weights).predict(X)
    kr_pred = KernelRidge(kernel="linear", alpha=1).fit(
        X, y, sample_weight=weights).predict(X)
    precomp_pred = KernelRidge(kernel="precomputed", alpha=1).fit(
        gram, y, sample_weight=weights).predict(gram)
    assert_array_almost_equal(ridge_pred, kr_pred)
    assert_array_almost_equal(ridge_pred, precomp_pred)


def test_kernel_ridge_multi_output():
    """Multi-target fits agree with Ridge and with stacked single fits."""
    ridge_pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
    kr_pred = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
    assert_array_almost_equal(ridge_pred, kr_pred)
    single = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
    stacked = np.array([single, single]).T
    assert_array_almost_equal(kr_pred, stacked)
| bsd-3-clause |
robin-lai/scikit-learn | sklearn/tests/test_base.py | 216 | 7045 | # Author: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""Sklearn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
| bsd-3-clause |
rymurr/q | q/unparser.py | 1 | 6684 | from bitstring import pack, ConstBitStream
import pandas
import numpy as np
import datetime
from protocol import types, inv_types, header_format, MILLIS, Y2KDAYS, NULL, BYTE, INT
from utils import str_convert, format, format_list, get_header, get_date_from_q, get_hour, format_raw_list
from collections import OrderedDict
def format_bits(data, endianness = 'le', with_index=False, sort_on=None, async=False, symbol = True, function = False, short = False):
    """Serialise *data* into a complete q/kdb+ IPC message bitstream.

    Builds the 8-byte wire header (endianness flag, sync/async flag and
    total message length) and appends the payload produced by
    parse_on_the_wire.

    NOTE(review): Python 2 only -- 'async' became a reserved keyword in
    Python 3.7, so this signature will not parse on Python 3.
    """
    # 1 flags a little-endian message in the q wire header, 0 big-endian.
    endian = 1 if endianness == 'le' else 0
    data_format = header_format.format(endianness)
    # Payload: the q-serialised form of *data*.
    data = parse_on_the_wire(data, endianness, with_index=with_index, sort_on=sort_on, symbol=symbol, function = function, short=short)
    # Total message length in bytes: payload bits / 8 plus the 8-byte header.
    # NOTE(review): relies on Python 2 integer division; under true division
    # this could yield a float -- confirm before any Python 3 port.
    length = len(data)/8 + 8
    objects = {'endian':endian, 'async':1 if async else 0, 'length': length, 'data':data}
    bstream = pack(data_format, **objects)
    return bstream
#This is looking like it needs a refactor!
def parse_on_the_wire(data, endianness, attributes = 0, with_index=False, sort_on = None, symbol = True, function = False, short = False):
    """Recursively serialise a Python object into a q/kdb+ IPC payload.

    Dispatches on the runtime type of *data* (DataFrame, numpy array, list,
    dict, OrderedDict, str, scalars) and packs the matching q type code,
    attribute byte and body via bitstring.pack. Type codes and format widths
    come from the module-level types/inv_types tables.

    NOTE(review): Python-2-era code (uses the long-removed
    DataFrame.iterkv); behaviour on Python 3 / modern pandas is unverified.
    """
    # Keyed table / dictionary-with-index: serialise the index as the keys
    # and the frame itself as the values. Type 127 marks a sorted keyed
    # table, 99 a plain dictionary.
    if with_index and type(data) == pandas.DataFrame:
        keys = parse_on_the_wire(pandas.DataFrame(data.index), endianness, attributes, False, True if sort_on else None, short=short)
        vals = parse_on_the_wire(data, endianness, attributes, False, short=short)
        data_format = 'int{0}:8=type, bits'.format(endianness)
        bstream = pack(data_format, (keys+vals), type='127' if sort_on else '99')
    # Typed numpy vector; object arrays fall back to element-wise recursion.
    elif isinstance(data,np.ndarray):
        dtype = inv_types[data.dtype.type]
        if data.dtype.type == np.object_:
            data_format = 'int{0}:8=type, int{0}:8=attributes, int{0}:32=length, bits'.format(endianness)
            bstream = pack(data_format, sum([parse_on_the_wire(i, endianness, short=short) for i in data]), type = dtype[0], attributes=attributes, length=len(data))
        else:
            data_format = 'int{0}:8=type, int{0}:8=attributes, int{0}:32=length, {3}*{1}{0}:{2}'.format(endianness, dtype[1], dtype[2], len(data))
            bstream = pack(data_format, *data, type=dtype[0], attributes=attributes, length=len(data))
    # Python list: homogeneous lists become typed q vectors (negative type
    # code); mixed lists a general list of recursively packed items.
    elif isinstance(data, list):
        type_set = set([type(i) for i in data])
        if len(type_set) == 1 and not list(type_set)[0] == np.ndarray:
            if short:
                # 'short' maps plain Python ints/floats onto the
                # string-keyed inv_types entries instead of the type-object
                # keyed ones.
                temp = list(type_set)[0]
                if temp == float:
                    dtype = inv_types['float']
                if temp == int:
                    dtype = inv_types['int']
            else:
                dtype = inv_types[list(type_set)[0]]
            if list(type_set)[0] == str or list(type_set)[0] == list:
                # Strings / nested lists: pack each element recursively.
                data_format = 'int{0}:8=type, int{0}:8=attributes, int{0}:32=length, bits'.format(endianness)
                bstream = pack(data_format, sum([parse_on_the_wire(i, endianness, short=short) for i in data]), type = -dtype[0], attributes=attributes, length=len(data))
            else:
                data_format = 'int{0}:8=type, int{0}:8=attributes, int{0}:32=length, {3}*{1}{0}:{2}'.format(endianness, dtype[1], dtype[2], len(data))
                bstream = pack(data_format, *data, type=-dtype[0], attributes=attributes, length=len(data))
        else:
            # Mixed-type list.
            # NOTE(review): this negates the list type code -- confirm
            # against the q IPC spec, where a general list is type 0.
            dtype = inv_types[type(data)]
            data_format = 'int{0}:8=type, int{0}:8=attributes, int{0}:32=length, bits=data'.format(endianness)
            bits = sum([parse_on_the_wire(i, endianness, short=short) for i in data])
            bstream = pack(data_format, data=bits, type=-dtype[0], attributes=attributes, length=len(data))
    # Plain dict: keys vector followed by values vector.
    elif type(data) == dict:
        dtype = inv_types[type(data)]
        data_format = 'int{0}:8=type, bits=data'.format(endianness)
        keys = parse_on_the_wire(data.keys(), endianness, short=short)
        vals = parse_on_the_wire(data.values(), endianness, short=short)
        bits = keys + vals
        bstream = pack(data_format, data=bits, type=dtype[0], attributes=attributes, length=len(data))
    # OrderedDict: as dict, but the key vector carries attribute byte 1
    # (presumably the sorted attribute -- confirm against the q IPC spec).
    elif type(data) == OrderedDict:
        dtype = inv_types[type(data)]
        data_format = 'int{0}:8=type, bits=data'.format(endianness)
        keys = parse_on_the_wire(data.keys(), endianness, 1, short=short)
        vals = parse_on_the_wire(data.values(), endianness, short=short)
        bits = keys + vals
        bstream = pack(data_format, data=bits, type=dtype[0], attributes=attributes, length=len(data))
    # String holding a q lambda, e.g. '.ctx{x+y}': type 100 with the context
    # (dots stripped) and the '{...}' body packed separately.
    elif isinstance(data, str) and function:
        context, function = data.split('{')
        context = context.replace('.','')
        function = '{' + function
        data_format = 'int{0}:8=lambdatype, bits=context, bits=function'
        bstream = pack(data_format.format(endianness), lambdatype=100, context = parse_on_the_wire(context, endianness), function = parse_on_the_wire(function, endianness, symbol=False) )
    # Symbol: raw characters with a trailing NUL terminator, no header.
    elif isinstance(data, str) and symbol:
        bstream = pack('{0}*hex:8'.format(len(data)),*[hex(ord(i)) for i in data]) + ConstBitStream(b'0x00')
    # Character vector (q string): typed vector of single bytes.
    elif isinstance(data, str):
        dtype = inv_types['str']
        data_format = 'int{1}:8=type, int{1}:8=attributes, int{1}:32=length, {2}*hex:8'
        bstream = pack(data_format.format('', endianness, len(data)),type=-dtype[0], attributes=attributes, length=len(data), *[hex(ord(i)) for i in data])
    # Unkeyed table (flip): column-name symbol vector then column vectors;
    # the column matching sort_on is marked with attribute byte 3.
    elif type(data) == pandas.DataFrame:
        is_sorted = 1 if sort_on else 0
        dtype = inv_types[type(data)]
        data_format = 'int{0}:8=type, int{0}:8=tabattrib, int{0}:8=dicttype, bits=cols,int{0}:8=typearray, int{0}:8=attributes, int{0}:32=length, bits=vals'.format(endianness)
        cols = parse_on_the_wire(data.columns.values, endianness, short=short)
        # NOTE(review): DataFrame.iterkv was removed from pandas; items()
        # is the modern spelling.
        vals = sum(parse_on_the_wire(col.values, endianness, 3 if i==sort_on else 0, short=short) for i,col in data.iterkv())
        # indexes = parse_on_the_wire(data.index.values, endianness, 3)
        bstream = pack(data_format, cols=cols, type=dtype[0], typearray=0, attributes=0, length=len(data.columns), vals=vals, tabattrib=is_sorted, dicttype=99)
    # Scalar float/int when 'short' is set: use the string-keyed inv_types
    # entries (narrower q numeric types).
    elif type(data) == float and short:
        dtype = inv_types['float']
        data_format = 'int{0}:8=type, {1}{0}:{2}'.format(endianness, dtype[1], dtype[2])
        bstream = pack(data_format, data, type=dtype[0])
    elif type(data) == int and short:
        dtype = inv_types['int']
        data_format = 'int{0}:8=type, {1}{0}:{2}'.format(endianness, dtype[1], dtype[2])
        bstream = pack(data_format, data, type=dtype[0])
    # Any other scalar: look up its q atom type directly from inv_types.
    else:
        dtype = inv_types[type(data)]
        data_format = 'int{0}:8=type, {1}{0}:{2}'.format(endianness, dtype[1], dtype[2])
        bstream = pack(data_format, data, type=dtype[0])
    return bstream
| mit |
arahuja/scikit-learn | examples/svm/plot_weighted_samples.py | 69 | 1942 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasis the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
    """Draw the decision surface of *classifier* on the given *axis*.

    Marker sizes are scaled by *sample_weight* so heavily weighted
    samples stand out.  Relies on the module-level data ``X`` and
    labels ``Y``.
    """
    # Evaluate the decision function on a dense 500x500 grid.
    grid = np.linspace(-4, 5, 500)
    xx, yy = np.meshgrid(grid, grid)
    scores = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
    scores = scores.reshape(xx.shape)

    # Filled contours for the surface, scatter for the weighted samples.
    axis.contourf(xx, yy, scores, alpha=0.75, cmap=plt.cm.bone)
    axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
                 cmap=plt.cm.bone)
    axis.axis('off')
    axis.set_title(title)
# we create 20 points
np.random.seed(0)
# Two clusters of 10 points: class +1 centered at (1, 1), class -1 at origin.
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
# Random positive per-sample weights, plus a uniform baseline for comparison.
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
# Side-by-side comparison of the two decision boundaries.
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
                       "Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
                       "Modified weights")
plt.show()
| bsd-3-clause |
gfyoung/scipy | scipy/stats/stats.py | 1 | 202270 | # Copyright 2002 Gary Strangman. All rights reserved
# Copyright 2002-2016 The SciPy Developers
#
# The original code from Gary Strangman was heavily adapted for
# use in SciPy by Travis Oliphant. The original code came with the
# following disclaimer:
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
"""
A collection of basic statistical functions for Python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package where they work on arbitrary sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
Central Tendency
----------------
.. autosummary::
:toctree: generated/
gmean
hmean
mode
Moments
-------
.. autosummary::
:toctree: generated/
moment
variation
skew
kurtosis
normaltest
Altered Versions
----------------
.. autosummary::
:toctree: generated/
tmean
tvar
tstd
tsem
describe
Frequency Stats
---------------
.. autosummary::
:toctree: generated/
itemfreq
scoreatpercentile
percentileofscore
cumfreq
relfreq
Variability
-----------
.. autosummary::
:toctree: generated/
obrientransform
sem
zmap
zscore
iqr
Trimming Functions
------------------
.. autosummary::
:toctree: generated/
trimboth
trim1
Correlation Functions
---------------------
.. autosummary::
:toctree: generated/
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
weightedtau
linregress
theilslopes
Inferential Stats
-----------------
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
power_divergence
ks_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
brunnermunzel
combine_pvalues
Statistical Distances
---------------------
.. autosummary::
:toctree: generated/
wasserstein_distance
energy_distance
ANOVA Functions
---------------
.. autosummary::
:toctree: generated/
f_oneway
Support Functions
-----------------
.. autosummary::
:toctree: generated/
rankdata
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
from collections import namedtuple
import numpy as np
from numpy import array, asarray, ma, zeros
from scipy._lib.six import callable, string_types
from scipy._lib._version import NumpyVersion
from scipy._lib._util import _lazywhere
import scipy.special as special
import scipy.linalg as linalg
from . import distributions
from . import mstats_basic
from ._stats_mstats_common import _find_repeats, linregress, theilslopes, siegelslopes
from ._stats import _kendall_dis, _toint64, _weightedrankedtau
# Names exported by ``from scipy.stats.stats import *`` — the module's
# declared public API surface.
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
           'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
           'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
           'normaltest', 'jarque_bera', 'itemfreq',
           'scoreatpercentile', 'percentileofscore',
           'cumfreq', 'relfreq', 'obrientransform',
           'sem', 'zmap', 'zscore', 'iqr',
           'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
           'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
           'kendalltau', 'weightedtau',
           'linregress', 'siegelslopes', 'theilslopes', 'ttest_1samp',
           'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
           'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
           'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
           'rankdata',
           'combine_pvalues', 'wasserstein_distance', 'energy_distance',
           'brunnermunzel']
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _contains_nan(a, nan_policy='propagate'):
policies = ['propagate', 'raise', 'omit']
if nan_policy not in policies:
raise ValueError("nan_policy must be one of {%s}" %
', '.join("'%s'" % s for s in policies))
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
# If the check cannot be properly performed we fallback to omitting
# nan values and raising a warning. This can happen when attempting to
# sum things that are not numbers (e.g. as in the function `mode`).
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly checked for nan "
"values. nan values will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return (contains_nan, nan_policy)
def gmean(a, axis=0, dtype=None):
    """
    Compute the geometric mean along the specified axis.

    The geometric mean of ``x1, ..., xn`` is the n-th root of their
    product, computed here as ``exp(mean(log(a)))``.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : int or None, optional
        Axis along which the geometric mean is computed. Default is 0.
        If None, compute over the whole array `a`.
    dtype : dtype, optional
        Type of the returned array and of the accumulator in which the
        elements are summed.  Defaults to the dtype of `a` (promoted to
        the default platform integer for lower-precision integer input).

    Returns
    -------
    gmean : ndarray
        See `dtype` parameter above.

    See Also
    --------
    numpy.mean : Arithmetic average
    numpy.average : Weighted average
    hmean : Harmonic mean

    Notes
    -----
    float64 intermediate and return values are used for integer inputs.
    Masked arrays carry their masks through the computation, so they can
    be used to ignore non-finite values.

    Examples
    --------
    >>> from scipy.stats import gmean
    >>> gmean([1, 4])
    2.0
    >>> gmean([1, 2, 3, 4, 5, 6, 7])
    3.3800151591412964
    """
    if not isinstance(a, np.ndarray):
        # Plain sequences: convert (honoring any requested dtype) first.
        log_a = np.log(np.array(a, dtype=dtype))
    elif dtype:
        # Re-view the existing array with the requested dtype,
        # preserving masked-ness when present.
        caster = np.ma.asarray if isinstance(a, np.ma.MaskedArray) else np.asarray
        log_a = np.log(caster(a, dtype=dtype))
    else:
        log_a = np.log(a)

    return np.exp(log_a.mean(axis=axis))
def hmean(a, axis=0, dtype=None):
    """
    Calculate the harmonic mean along the specified axis.

    The harmonic mean of ``x1, ..., xn`` is ``n / (1/x1 + ... + 1/xn)``
    and is only defined for strictly positive values.

    Parameters
    ----------
    a : array_like
        Input array, masked array or object that can be converted to an
        array.
    axis : int or None, optional
        Axis along which the harmonic mean is computed. Default is 0.
        If None, compute over the whole array `a`.
    dtype : dtype, optional
        Type of the returned array and of the accumulator in which the
        elements are summed.  Defaults to the dtype of `a` (promoted to
        the default platform integer for lower-precision integer input).

    Returns
    -------
    hmean : ndarray
        See `dtype` parameter above.

    Raises
    ------
    ValueError
        If any element of `a` is not strictly positive.

    See Also
    --------
    numpy.mean : Arithmetic average
    numpy.average : Weighted average
    gmean : Geometric mean

    Examples
    --------
    >>> from scipy.stats import hmean
    >>> hmean([1, 4])
    1.6000000000000001
    >>> hmean([1, 2, 3, 4, 5, 6, 7])
    2.6997245179063363
    """
    if not isinstance(a, np.ndarray):
        a = np.array(a, dtype=dtype)

    # Guard clause: the harmonic mean requires every element > 0.
    if not np.all(a > 0):
        raise ValueError("Harmonic mean only defined if all elements greater "
                         "than zero")

    # Count of contributing elements along the requested axis; masked
    # arrays exclude masked entries from the count.
    if isinstance(a, np.ma.MaskedArray):
        size = a.count(axis)
    elif axis is None:
        a = a.ravel()
        size = a.shape[0]
    else:
        size = a.shape[axis]

    return size / np.sum(1.0 / a, axis=axis, dtype=dtype)
ModeResult = namedtuple('ModeResult', ('mode', 'count'))


def mode(a, axis=0, nan_policy='propagate'):
    """
    Return an array of the modal (most common) value in the passed array.

    If there is more than one such value, only the smallest is returned.
    The bin-count for the modal bins is also returned.

    Parameters
    ----------
    a : array_like
        n-dimensional array of which to find mode(s).
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    mode : ndarray
        Array of modal values.
    count : ndarray
        Array of counts for each mode.

    Examples
    --------
    >>> a = np.array([[6, 8, 3, 0],
    ...               [3, 2, 1, 7],
    ...               [8, 1, 8, 4],
    ...               [5, 3, 0, 5],
    ...               [4, 7, 5, 9]])
    >>> from scipy import stats
    >>> stats.mode(a)
    (array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]]))

    To get mode of whole array, specify ``axis=None``:

    >>> stats.mode(a, axis=None)
    (array([3]), array([3]))
    """
    a, axis = _chk_asarray(a, axis)
    if a.size == 0:
        return ModeResult(np.array([]), np.array([]))

    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # Delegate to the masked-array implementation, which skips nans.
        a = ma.masked_invalid(a)
        return mstats_basic.mode(a, axis)

    if (NumpyVersion(np.__version__) < '1.9.0') or (a.dtype == object and np.nan in set(a)):
        # Fall back to a slower method since np.unique does not work with NaN
        # or for older numpy which does not support return_counts
        scores = set(np.ravel(a))  # get ALL unique values
        testshape = list(a.shape)
        testshape[axis] = 1
        oldmostfreq = np.zeros(testshape, dtype=a.dtype)
        oldcounts = np.zeros(testshape, dtype=int)

        for score in scores:
            template = (a == score)
            counts = np.expand_dims(np.sum(template, axis), axis)
            mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
            oldcounts = np.maximum(counts, oldcounts)
            oldmostfreq = mostfrequent

        return ModeResult(mostfrequent, oldcounts)

    def _mode1D(a):
        # np.unique sorts, so cnts.argmax() picks the smallest among ties.
        vals, cnts = np.unique(a, return_counts=True)
        return vals[cnts.argmax()], cnts.max()

    # np.apply_along_axis would convert the _mode1D tuples to a numpy
    # array, casting types in the process; build the result manually to
    # avoid that.

    # View of a, rotated so the requested axis is last
    in_dims = list(range(a.ndim))
    a_view = np.transpose(a, in_dims[:axis] + in_dims[axis+1:] + [axis])

    inds = np.ndindex(a_view.shape[:-1])
    modes = np.empty(a_view.shape[:-1], dtype=a.dtype)
    # BUG FIX: `np.int` was a deprecated alias of the builtin `int`
    # (deprecated in NumPy 1.20, removed in 1.24); use the builtin.
    counts = np.zeros(a_view.shape[:-1], dtype=int)
    for ind in inds:
        modes[ind], counts[ind] = _mode1D(a_view[ind])
    newshape = list(a.shape)
    newshape[axis] = 1
    return ModeResult(modes.reshape(newshape), counts.reshape(newshape))
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
    """
    Compute the trimmed mean.

    Returns the arithmetic mean of the values in `a`, ignoring those
    outside the given `limits`.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values below the lower limit or above the upper limit are
        ignored.  When None (default) all values are used; either bound
        may be None for a half-open interval.
    inclusive : (bool, bool), optional
        The (lower flag, upper flag) pair controlling whether values
        exactly equal to a limit are kept.  Default is (True, True).
    axis : int or None, optional
        Axis along which to compute. Default is None.

    Returns
    -------
    tmean : float

    See also
    --------
    trim_mean : returns mean after trimming a proportion from both tails.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tmean(x)
    9.5
    >>> stats.tmean(x, (3,17))
    10.0
    """
    a = asarray(a)
    if limits is None:
        # No trimming requested: plain mean over the flattened array.
        return np.mean(a, None)

    trimmed = _mask_to_limits(a.ravel(), limits, inclusive)
    return trimmed.mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
    """
    Compute the trimmed variance.

    Returns the sample variance of the values in `a`, ignoring those
    outside the given `limits`.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values below the lower limit or above the upper limit are
        ignored.  When None (default) all values are used; either bound
        may be None for a half-open interval.
    inclusive : (bool, bool), optional
        The (lower flag, upper flag) pair controlling whether values
        exactly equal to a limit are kept.  Default is (True, True).
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Delta degrees of freedom. Default is 1.

    Returns
    -------
    tvar : float
        Trimmed variance.

    Notes
    -----
    `tvar` computes the unbiased sample variance, i.e. it uses a
    correction factor ``n / (n - 1)``.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tvar(x)
    35.0
    >>> stats.tvar(x, (3,17))
    20.0
    """
    a = asarray(a).astype(float).ravel()

    if limits is None:
        # Untrimmed: unbias the population variance with n / (n - 1).
        n = len(a)
        return a.var() * n / (n - 1.)

    trimmed = _mask_to_limits(a, limits, inclusive)
    return np.ma.var(trimmed, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
    """
    Compute the trimmed minimum.

    Finds the minimum of `a` along `axis`, considering only values
    greater than `lowerlimit` (or greater-or-equal when ``inclusive``
    is True).

    Parameters
    ----------
    a : array_like
        array of values
    lowerlimit : None or float, optional
        Values below this limit are ignored; None (default) keeps every
        value.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    inclusive : {True, False}, optional
        Whether values exactly equal to `lowerlimit` are kept.  Default
        is True.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    tmin : float, int or ndarray

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tmin(x)
    0
    >>> stats.tmin(x, 13)
    13
    >>> stats.tmin(x, 13, inclusive=False)
    14
    """
    a, axis = _chk_asarray(a, axis)
    am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))

    contains_nan, nan_policy = _contains_nan(am, nan_policy)
    if contains_nan and nan_policy == 'omit':
        am = ma.masked_invalid(am)

    res = ma.minimum.reduce(am, axis).data
    # Unwrap 0-d results to a plain scalar.
    return res[()] if res.ndim == 0 else res
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
    """
    Compute the trimmed maximum.

    Finds the maximum of `a` along `axis`, considering only values
    smaller than `upperlimit` (or smaller-or-equal when ``inclusive``
    is True).

    Parameters
    ----------
    a : array_like
        array of values
    upperlimit : None or float, optional
        Values above this limit are ignored; None (default) keeps every
        value.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    inclusive : {True, False}, optional
        Whether values exactly equal to `upperlimit` are kept.  Default
        is True.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    tmax : float, int or ndarray

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tmax(x)
    19
    >>> stats.tmax(x, 13)
    13
    >>> stats.tmax(x, 13, inclusive=False)
    12
    """
    a, axis = _chk_asarray(a, axis)
    am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))

    contains_nan, nan_policy = _contains_nan(am, nan_policy)
    if contains_nan and nan_policy == 'omit':
        am = ma.masked_invalid(am)

    res = ma.maximum.reduce(am, axis).data
    # Unwrap 0-d results to a plain scalar.
    return res[()] if res.ndim == 0 else res
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
    """
    Compute the trimmed sample standard deviation.

    Returns the sample standard deviation of the values in `a`, ignoring
    those outside the given `limits`.

    Parameters
    ----------
    a : array_like
        array of values
    limits : None or (lower limit, upper limit), optional
        Values below the lower limit or above the upper limit are
        ignored.  When None (default) all values are used; either bound
        may be None for a half-open interval.
    inclusive : (bool, bool), optional
        The (lower flag, upper flag) pair controlling whether values
        exactly equal to a limit are kept.  Default is (True, True).
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Delta degrees of freedom. Default is 1.

    Returns
    -------
    tstd : float

    Notes
    -----
    `tstd` computes the unbiased sample standard deviation, i.e. it uses
    a correction factor ``n / (n - 1)``.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tstd(x)
    5.9160797830996161
    >>> stats.tstd(x, (3,17))
    4.4721359549995796
    """
    # The trimmed standard deviation is the square root of the trimmed
    # variance; all trimming logic lives in `tvar`.
    trimmed_variance = tvar(a, limits, inclusive, axis, ddof)
    return np.sqrt(trimmed_variance)
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
    """
    Compute the trimmed standard error of the mean.

    Returns the standard error of the mean of the values in `a`,
    ignoring those outside the given `limits`.

    Parameters
    ----------
    a : array_like
        array of values
    limits : None or (lower limit, upper limit), optional
        Values below the lower limit or above the upper limit are
        ignored.  When None (default) all values are used; either bound
        may be None for a half-open interval.
    inclusive : (bool, bool), optional
        The (lower flag, upper flag) pair controlling whether values
        exactly equal to a limit are kept.  Default is (True, True).
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Delta degrees of freedom. Default is 1.

    Returns
    -------
    tsem : float

    Notes
    -----
    `tsem` uses the unbiased sample standard deviation, i.e. it uses a
    correction factor ``n / (n - 1)``.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tsem(x)
    1.3228756555322954
    >>> stats.tsem(x, (3,17))
    1.1547005383792515
    """
    flat = np.asarray(a).ravel()

    if limits is None:
        # Untrimmed: classic s / sqrt(n).
        return flat.std(ddof=ddof) / np.sqrt(flat.size)

    trimmed = _mask_to_limits(flat, limits, inclusive)
    trimmed_sd = np.sqrt(np.ma.var(trimmed, ddof=ddof, axis=axis))
    return trimmed_sd / np.sqrt(trimmed.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
    r"""
    Calculate the nth moment about the mean for a sample.

    A moment is a quantitative measure of the shape of a set of points;
    the skewness and kurtosis coefficients are built from central
    moments.

    Parameters
    ----------
    a : array_like
        data
    moment : int or array_like of ints, optional
        order of central moment that is returned. Default is 1.
    axis : int or None, optional
        Axis along which the central moment is computed. Default is 0.
        If None, compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    n-th central moment : ndarray or float
        The appropriate moment along the given axis or over all values
        if axis is None.  The denominator is the raw observation count;
        no degrees-of-freedom correction is applied.

    See also
    --------
    kurtosis, skew, describe

    Notes
    -----
    The k-th central moment of a data sample is

    .. math::

        m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k

    where n is the number of samples and x-bar is the mean.  This
    function uses exponentiation by squares [1]_ for efficiency.

    References
    ----------
    .. [1] https://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms

    Examples
    --------
    >>> from scipy.stats import moment
    >>> moment([1, 2, 3, 4, 5], moment=1)
    0.0
    >>> moment([1, 2, 3, 4, 5], moment=2)
    2.0
    """
    a, axis = _chk_asarray(a, axis)

    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # Delegate nan-omitting computation to the masked implementation.
        a = ma.masked_invalid(a)
        return mstats_basic.moment(a, moment, axis)

    if a.size == 0:
        # Empty input: nan(s), shaped to match a scalar/array `moment`.
        if np.isscalar(moment):
            return np.nan
        return np.ones(np.asarray(moment).shape, dtype=np.float64) * np.nan

    if np.isscalar(moment):
        return _moment(a, moment, axis)

    # Array-like `moment`: compute one central moment per requested order.
    return np.array([_moment(a, order, axis) for order in moment])
def _moment(a, moment, axis):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0:
# When moment equals 0, the result is 1, by definition.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.ones(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return 1.0
elif moment == 1:
# By definition the first moment about the mean is 0.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.zeros(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return np.float64(0.0)
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n - 1) / 2
else:
current_n /= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis)
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
def variation(a, axis=0, nan_policy='propagate'):
    """
    Compute the coefficient of variation: the ratio of the biased
    standard deviation to the mean.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate the coefficient of variation.
        Default is 0. If None, compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    variation : ndarray
        The calculated variation along the requested axis.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.

    Examples
    --------
    >>> from scipy.stats import variation
    >>> variation([1, 2, 3, 4, 5])
    0.47140452079103173
    """
    a, axis = _chk_asarray(a, axis)

    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # Delegate nan-omitting computation to the masked implementation.
        a = ma.masked_invalid(a)
        return mstats_basic.variation(a, axis)

    std_dev = a.std(axis)
    mean_val = a.mean(axis)
    return std_dev / mean_val
def skew(a, axis=0, bias=True, nan_policy='propagate'):
    """
    Compute the skewness of a data set.
    For normally distributed data, the skewness should be about 0. For
    unimodal continuous distributions, a skewness value > 0 means that
    there is more weight in the right tail of the distribution. The
    function `skewtest` can be used to determine if the skewness value
    is close enough to 0, statistically speaking.
    Parameters
    ----------
    a : ndarray
        data
    axis : int or None, optional
        Axis along which skewness is calculated. Default is 0.
        If None, compute over the whole array `a`.
    bias : bool, optional
        If False, then the calculations are corrected for statistical bias.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.
    Returns
    -------
    skewness : ndarray
        The skewness of values along an axis, returning 0 where all values are
        equal.
    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.
       Section 2.2.24.1
    Examples
    --------
    >>> from scipy.stats import skew
    >>> skew([1, 2, 3, 4, 5])
    0.0
    >>> skew([2, 8, 0, 4, 1, 9, 9, 0])
    0.2650554122698573
    """
    a, axis = _chk_asarray(a, axis)
    # Sample count along the reduction axis; used by the bias correction.
    n = a.shape[axis]
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # Delegate nan-omitting computation to the masked implementation.
        a = ma.masked_invalid(a)
        return mstats_basic.skew(a, axis, bias)
    # Biased estimator g1 = m3 / m2**1.5, built from central moments.
    m2 = moment(a, 2, axis)
    m3 = moment(a, 3, axis)
    # Where the variance is zero (all values equal), define skewness as 0
    # instead of dividing by zero.
    zero = (m2 == 0)
    vals = _lazywhere(~zero, (m2, m3),
                      lambda m2, m3: m3 / m2**1.5,
                      0.)
    if not bias:
        # Bias-corrected G1 = sqrt(n(n-1)) / (n-2) * g1, computed only
        # where it is defined (n > 2 and positive variance); other
        # entries keep the biased value via np.place's selective write.
        can_correct = (n > 2) & (m2 > 0)
        if can_correct.any():
            m2 = np.extract(can_correct, m2)
            m3 = np.extract(can_correct, m3)
            nval = np.sqrt((n - 1.0) * n) / (n - 2.0) * m3 / m2**1.5
            np.place(vals, can_correct, nval)
    if vals.ndim == 0:
        # 0-d result: return a plain scalar rather than a rank-0 array.
        return vals.item()
    return vals
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
    """
    Compute the kurtosis (Fisher or Pearson) of a dataset.

    Kurtosis is the fourth central moment divided by the square of the
    variance.  With Fisher's definition, 3.0 is subtracted so a normal
    distribution scores 0.0.  Use `kurtosistest` to judge normality.

    Parameters
    ----------
    a : array
        Input data.
    axis : int or None, optional
        Axis along which kurtosis is computed (default 0).  ``None``
        flattens the input first.
    fisher : bool, optional
        If True use Fisher's definition (normal ==> 0.0); otherwise
        Pearson's (normal ==> 3.0).
    bias : bool, optional
        If False, apply the k-statistics bias correction.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        How to treat NaNs in the input.

    Returns
    -------
    kurtosis : array
        Kurtosis along the axis.  Where all values are equal, returns -3
        (Fisher) or 0 (Pearson).

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard Probability
           and Statistics Tables and Formulae.
    """
    a, axis = _chk_asarray(a, axis)

    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # Delegate the nan-aware computation to the masked-array version.
        return mstats_basic.kurtosis(ma.masked_invalid(a), axis, fisher, bias)

    n = a.shape[axis]
    mu2 = moment(a, 2, axis)
    mu4 = moment(a, 4, axis)
    degenerate = mu2 == 0

    # Silence 0/0 warnings; degenerate slices are mapped to 0 explicitly.
    old_settings = np.seterr(all='ignore')
    try:
        vals = np.where(degenerate, 0, mu4 / mu2**2.0)
    finally:
        np.seterr(**old_settings)

    if not bias:
        can_correct = (n > 3) & (mu2 > 0)
        if can_correct.any():
            sub2 = np.extract(can_correct, mu2)
            sub4 = np.extract(can_correct, mu4)
            nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*sub4/sub2**2.0 -
                                      3*(n-1)**2.0)
            np.place(vals, can_correct, nval + 3.0)

    if vals.ndim == 0:
        vals = vals.item()  # array scalar -> Python scalar

    return vals - 3 if fisher else vals
# Result container returned by `describe`: sample size, (min, max) pair,
# mean, variance, skewness and kurtosis along the requested axis.
DescribeResult = namedtuple('DescribeResult',
                            ('nobs', 'minmax', 'mean', 'variance', 'skewness',
                             'kurtosis'))
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
    """
    Compute several descriptive statistics of the passed array.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int or None, optional
        Axis along which statistics are computed (default 0).  ``None``
        flattens the input first.
    ddof : int, optional
        Delta degrees of freedom for the variance (default 1).
    bias : bool, optional
        If False, correct skewness and kurtosis for statistical bias.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        How to treat NaNs in the input.

    Returns
    -------
    DescribeResult
        Named tuple with fields ``nobs``, ``minmax``, ``mean``,
        ``variance`` (denominator ``nobs - ddof``), ``skewness`` and
        ``kurtosis`` (Fisher, no bias correction by default).

    Raises
    ------
    ValueError
        If `a` is empty.

    See Also
    --------
    skew, kurtosis
    """
    a, axis = _chk_asarray(a, axis)

    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # Masked-array implementation counts each column separately.
        return mstats_basic.describe(ma.masked_invalid(a), axis, ddof, bias)

    if a.size == 0:
        raise ValueError("The input must not be empty.")

    return DescribeResult(
        nobs=a.shape[axis],
        minmax=(np.min(a, axis=axis), np.max(a, axis=axis)),
        mean=np.mean(a, axis=axis),
        variance=np.var(a, axis=axis, ddof=ddof),
        skewness=skew(a, axis, bias=bias),
        kurtosis=kurtosis(a, axis, bias=bias))
#####################################
# NORMALITY TESTS #
#####################################
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0, nan_policy='propagate'):
    """
    Test whether the skew is different from the normal distribution.

    Tests the null hypothesis that the skewness of the population the
    sample was drawn from equals that of a corresponding normal
    distribution.

    Parameters
    ----------
    a : array
        The data to be tested.
    axis : int or None, optional
        Axis along which the test is computed (default 0).  ``None``
        flattens the input first.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        How to treat NaNs in the input.

    Returns
    -------
    statistic : float
        The computed z-score for this test.
    pvalue : float
        Two-sided p-value for the hypothesis test.

    Raises
    ------
    ValueError
        If fewer than 8 samples are given along the axis.

    References
    ----------
    .. [1] R. B. D'Agostino, A. J. Belanger and R. B. D'Agostino Jr.,
       "A suggestion for using powerful and informative tests of
       normality", American Statistician 44, pp. 316-321, 1990.
    """
    a, axis = _chk_asarray(a, axis)

    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        return mstats_basic.skewtest(ma.masked_invalid(a), axis)

    if axis is None:
        a = np.ravel(a)
        axis = 0

    b2 = skew(a, axis)
    n = a.shape[axis]
    if n < 8:
        raise ValueError(
            "skewtest is not valid with less than 8 samples; %i samples"
            " were given." % int(n))

    # Transform b2 to an approximately standard-normal statistic
    # (D'Agostino 1990, see [1]_).
    y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
    beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
             ((n-2.0) * (n+5) * (n+7) * (n+9)))
    w_sq = -1 + math.sqrt(2 * (beta2 - 1))
    delta = 1 / math.sqrt(0.5 * math.log(w_sq))
    alpha = math.sqrt(2.0 / (w_sq - 1))
    # Guard against log(0): a y of exactly 0 is replaced by 1.
    y = np.where(y == 0, 1, y)
    scaled = y / alpha
    Z = delta * np.log(scaled + np.sqrt(scaled**2 + 1))

    return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
def kurtosistest(a, axis=0, nan_policy='propagate'):
    """
    Test whether a dataset has normal kurtosis.

    Tests the null hypothesis that the kurtosis of the population from
    which the sample was drawn is that of the normal distribution:
    ``kurtosis = 3(n-1)/(n+1)``.  Uses the Anscombe-Glynn method [1]_;
    strictly valid only for n > 20.

    Parameters
    ----------
    a : array
        Array of sample data.
    axis : int or None, optional
        Axis along which the test is computed (default 0).  ``None``
        flattens the input first.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        How to treat NaNs in the input.

    Returns
    -------
    statistic : float
        The computed z-score for this test.
    pvalue : float
        Two-sided p-value for the hypothesis test.

    Raises
    ------
    ValueError
        If fewer than 5 observations are given along the axis.

    References
    ----------
    .. [1] F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis
       statistic b2 for normal samples", Biometrika, vol. 70,
       pp. 227-234, 1983.
    """
    a, axis = _chk_asarray(a, axis)

    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        return mstats_basic.kurtosistest(ma.masked_invalid(a), axis)

    n = a.shape[axis]
    if n < 5:
        raise ValueError(
            "kurtosistest requires at least 5 observations; %i observations"
            " were given." % int(n))
    if n < 20:
        warnings.warn("kurtosistest only valid for n>=20 ... continuing "
                      "anyway, n=%i" % int(n))

    b2 = kurtosis(a, axis, fisher=False)
    expected = 3.0*(n-1) / (n+1)
    var_b2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))  # [1]_ Eq. 1
    standardized = (b2 - expected) / np.sqrt(var_b2)  # [1]_ Eq. 4
    # [1]_ Eq. 2:
    root_beta1 = (6.0*(n*n-5*n+2)/((n+7)*(n+9)) *
                  np.sqrt((6.0*(n+3)*(n+5)) / (n*(n-2)*(n-3))))
    # [1]_ Eq. 3:
    A = 6.0 + 8.0/root_beta1 * (2.0/root_beta1 +
                                np.sqrt(1+4.0/(root_beta1**2)))
    term1 = 1 - 2/(9.0*A)
    denom = 1 + standardized*np.sqrt(2/(A-4.0))
    # Where denom is 0 the statistic is undefined -> nan (with a warning).
    term2 = np.sign(denom) * np.where(denom == 0.0, np.nan,
                                      np.power((1-2.0/A)/np.abs(denom),
                                               1/3.0))
    if np.any(denom == 0):
        msg = "Test statistic not defined in some cases due to division by " \
              "zero. Return nan in that case..."
        warnings.warn(msg, RuntimeWarning)

    Z = (term1 - term2) / np.sqrt(2/(9.0*A))  # [1]_ Eq. 5
    if Z.ndim == 0:
        Z = Z[()]

    # norm.sf gives the upper tail, so the statistic is made positive.
    return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0, nan_policy='propagate'):
    """
    Test whether a sample differs from a normal distribution.

    Tests the null hypothesis that a sample comes from a normal
    distribution, using D'Agostino and Pearson's omnibus test [1]_, [2]_
    which combines skew and kurtosis.

    Parameters
    ----------
    a : array_like
        The array containing the sample to be tested.
    axis : int or None, optional
        Axis along which the test is computed (default 0).  ``None``
        flattens the input first.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        How to treat NaNs in the input.

    Returns
    -------
    statistic : float or array
        ``s^2 + k^2``, where ``s`` is the z-score from `skewtest` and
        ``k`` is the z-score from `kurtosistest`.
    pvalue : float or array
        Two-sided chi-squared probability for the hypothesis test.

    References
    ----------
    .. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
           moderate and large sample size", Biometrika, 58, 341-348
    .. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure
           from normality", Biometrika, 60, 613-622
    """
    a, axis = _chk_asarray(a, axis)

    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        return mstats_basic.normaltest(ma.masked_invalid(a), axis)

    s, _ = skewtest(a, axis)
    k, _ = kurtosistest(a, axis)
    # Combine the two z-scores; k2 is chi-squared with 2 dof under H0.
    k2 = s*s + k*k
    return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
def jarque_bera(x):
    """
    Perform the Jarque-Bera goodness of fit test on sample data.

    The Jarque-Bera test tests whether the sample data has the skewness
    and kurtosis matching a normal distribution.  The statistic is only
    asymptotically chi-squared (2 dof), so the test requires a large
    number of samples (> 2000) to be reliable.

    Parameters
    ----------
    x : array_like
        Observations of a random variable.

    Returns
    -------
    jb_value : float
        The test statistic.
    p : float
        The p-value for the hypothesis test.

    Raises
    ------
    ValueError
        If `x` is empty.

    References
    ----------
    .. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
           homoscedasticity and serial independence of regression residuals",
           6 Econometric Letters 255-259.
    """
    x = np.asarray(x)
    n = x.size
    if n == 0:
        raise ValueError('At least one observation is required.')

    mu = x.mean()
    diffx = x - mu
    skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
    kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
    jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
    # Use the survival function rather than `1 - cdf`: the subtraction
    # cancels catastrophically and underflows to exactly 0.0 for large
    # statistics, whereas sf keeps full precision in the tail.
    p = distributions.chi2.sf(jb_value, 2)

    return jb_value, p
#####################################
# FREQUENCY FUNCTIONS #
#####################################
@np.deprecate(message="`itemfreq` is deprecated and will be removed in a "
              "future version. Use instead `np.unique(..., return_counts=True)`")
def itemfreq(a):
    """
    Return a 2-D array of item frequencies.

    Parameters
    ----------
    a : (N,) array_like
        Input array.

    Returns
    -------
    itemfreq : (K, 2) ndarray
        A 2-D frequency table: column 1 holds the sorted, unique values
        from `a`, column 2 their respective counts.

    See Also
    --------
    numpy.unique : with ``return_counts=True``, the recommended replacement.
    """
    values, inverse = np.unique(a, return_inverse=True)
    counts = np.bincount(inverse)
    return np.array([values, counts]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
                      axis=None):
    """
    Calculate the score at a given percentile of the input sequence.

    For example, the score at ``per=50`` is the median.  If the desired
    quantile lies between two data points, it is interpolated according
    to `interpolation_method`.

    Parameters
    ----------
    a : array_like
        A 1-D array of values from which to extract score.
    per : array_like
        Percentile(s) at which to extract score, in the range [0, 100].
    limit : tuple, optional
        (lower, upper) closed interval; values of `a` outside it are
        ignored.
    interpolation_method : {'fraction', 'lower', 'higher'}, optional
        How to resolve a quantile falling between data points ``i < j``:
        'fraction' interpolates linearly, 'lower' takes ``i``, 'higher'
        takes ``j``.
    axis : int, optional
        Axis along which percentiles are computed.  Default None
        (whole array).

    Returns
    -------
    score : float or ndarray
        Score at percentile(s).

    See Also
    --------
    percentileofscore, numpy.percentile

    Notes
    -----
    For NumPy >= 1.9, `numpy.percentile` provides the same functionality
    and is significantly faster; prefer it.
    """
    # Adapted from NumPy's percentile function.
    a = np.asarray(a)
    if a.size == 0:
        # Empty input: return nan(s) with a shape matching `per`.
        if np.isscalar(per):
            return np.nan
        return np.ones(np.asarray(per).shape, dtype=np.float64) * np.nan

    if limit:
        a = a[(limit[0] <= a) & (a <= limit[1])]

    sorted_data = np.sort(a, axis=axis)
    if axis is None:
        axis = 0

    return _compute_qth_percentile(sorted_data, per, interpolation_method,
                                   axis)
# Handle a sequence of per's without calling sort multiple times.
def _compute_qth_percentile(sorted_, per, interpolation_method, axis):
    """Return the `per`-th percentile of the pre-sorted array `sorted_`."""
    # A vector of percentiles: recurse per element, reusing the sort.
    if not np.isscalar(per):
        return np.array([_compute_qth_percentile(sorted_, p,
                                                 interpolation_method, axis)
                         for p in per])

    if not (0 <= per <= 100):
        raise ValueError("percentile must be in the range [0, 100]")

    indexer = [slice(None)] * sorted_.ndim
    idx = per / 100. * (sorted_.shape[axis] - 1)

    if int(idx) != idx:
        # Fractional index: resolve it per the interpolation method.
        if interpolation_method == 'lower':
            idx = int(np.floor(idx))
        elif interpolation_method == 'higher':
            idx = int(np.ceil(idx))
        elif interpolation_method == 'fraction':
            pass  # keep idx as a fraction and interpolate below
        else:
            raise ValueError("interpolation_method can only be 'fraction', "
                             "'lower' or 'higher'")

    lo = int(idx)
    if lo == idx:
        # Exact hit: a single element with unit weight.
        indexer[axis] = slice(lo, lo + 1)
        weights = array(1)
        sumval = 1.0
    else:
        # Linear interpolation between the two neighbouring elements.
        indexer[axis] = slice(lo, lo + 2)
        hi = lo + 1
        weights = array([(hi - idx), (idx - lo)], float)
        wshape = [1] * sorted_.ndim
        wshape[axis] = 2
        weights.shape = wshape
        sumval = weights.sum()

    # np.add.reduce (== np.sum but a little faster) coerces the data type.
    return np.add.reduce(sorted_[tuple(indexer)] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
    """
    The percentile rank of a score relative to a list of scores.

    A `percentileofscore` of, for example, 80% means that 80% of the
    scores in `a` are below the given score.  With gaps or ties, the
    exact definition depends on `kind`.

    Parameters
    ----------
    a : array_like
        Array of scores to which `score` is compared.
    score : int or float
        Score that is compared to the elements in `a`.
    kind : {'rank', 'weak', 'strict', 'mean'}, optional
        - "rank": average percentage ranking of score; ties are averaged.
        - "weak": fraction of values <= score (a CDF).
        - "strict": fraction of values strictly < score.
        - "mean": the average of "weak" and "strict"; see
          https://en.wikipedia.org/wiki/Percentile_rank

    Returns
    -------
    pcos : float
        Percentile-position of score (0-100) relative to `a`.

    See Also
    --------
    numpy.percentile
    """
    if np.isnan(score):
        return np.nan

    a = np.asarray(a)
    n = len(a)
    if n == 0:
        # Any score is trivially at the top of an empty collection.
        return 100.0

    if kind == 'rank':
        below = np.count_nonzero(a < score)
        at_or_below = np.count_nonzero(a <= score)
        has_tie = 1 if at_or_below > below else 0
        return (at_or_below + below + has_tie) * 50.0 / n
    elif kind == 'strict':
        return np.count_nonzero(a < score) / n * 100
    elif kind == 'weak':
        return np.count_nonzero(a <= score) / n * 100
    elif kind == 'mean':
        return (np.count_nonzero(a < score) +
                np.count_nonzero(a <= score)) / n * 50
    else:
        raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
# (bin counts, lower limit, bin width, out-of-range count) returned
# by `_histogram`.
HistogramResult = namedtuple('HistogramResult',
                             ('count', 'lowerlimit', 'binsize', 'extrapoints'))
def _histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
    """
    Separate the range into several bins and return the number of instances
    in each bin.

    Based on `numpy.histogram` but, when no limits are given, uses a
    range slightly larger than that of the data.

    Parameters
    ----------
    a : array_like
        Array of scores which will be put into bins.
    numbins : int, optional
        Number of bins to use (default 10).
    defaultlimits : tuple (lower, upper), optional
        Range of the histogram.  If not given, ``(a.min() - s, a.max() + s)``
        is used, where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        Weights for each value in `a` (default: each value weighs 1.0).
    printextras : bool, optional
        If True, warn about points falling outside the bin limits.

    Returns
    -------
    count : ndarray
        Number of points (or sum of weights) in each bin.
    lowerlimit : float
        Lower limit of the first bin.
    binsize : float
        The (uniform) bin width.
    extrapoints : int
        Number of points outside the histogram range.

    See Also
    --------
    numpy.histogram
    """
    a = np.ravel(a)
    if defaultlimits is None:
        if a.size == 0:
            # Empty input: undetermined range, fall back to [0, 1].
            defaultlimits = (0, 1)
        else:
            # Extend the bins slightly past the data's min and max.
            data_min = a.min()
            data_max = a.max()
            s = (data_max - data_min) / (2. * (numbins - 1.))
            defaultlimits = (data_min - s, data_max + s)

    # Let numpy compute the binning.
    hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
                                   weights=weights)
    # hist is not always float; convert to keep the old output type.
    hist = np.array(hist, dtype=float)
    # numpy's histogram gives fixed-width bins for an int `bins` argument.
    binsize = bin_edges[1] - bin_edges[0]

    # Count the points falling outside the histogram range.
    extrapoints = len([v for v in a
                       if defaultlimits[0] > v or v > defaultlimits[1]])
    if extrapoints > 0 and printextras:
        warnings.warn("Points outside given histogram range = %s"
                      % extrapoints)

    return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
# (cumulative counts, lower limit, bin width, out-of-range count)
# returned by `cumfreq`.
CumfreqResult = namedtuple('CumfreqResult',
                           ('cumcount', 'lowerlimit', 'binsize',
                            'extrapoints'))
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
    """
    Return a cumulative frequency histogram, using the histogram function.

    A cumulative histogram counts the cumulative number of observations
    in all of the bins up to the specified bin.

    Parameters
    ----------
    a : array_like
        Input array.
    numbins : int, optional
        Number of bins to use (default 10).
    defaultreallimits : tuple (lower, upper), optional
        Range of the histogram.  If not given, ``(a.min() - s, a.max() + s)``
        is used, where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        Weights for each value in `a` (default: each value weighs 1.0).

    Returns
    -------
    cumcount : ndarray
        Binned values of cumulative frequency.
    lowerlimit : float
        Lower real limit.
    binsize : float
        Width of each bin.
    extrapoints : int
        Number of points outside the histogram range.

    Examples
    --------
    >>> from scipy import stats
    >>> res = stats.cumfreq([1, 4, 2, 1, 3, 1], numbins=4,
    ...                     defaultreallimits=(1.5, 5))
    >>> res.cumcount
    array([ 1.,  2.,  3.,  3.])
    >>> res.extrapoints
    3
    """
    counts, lowlim, width, extra = _histogram(a, numbins, defaultreallimits,
                                              weights=weights)
    cumulative = np.cumsum(counts * 1, axis=0)
    return CumfreqResult(cumulative, lowlim, width, extra)
# (relative frequencies, lower limit, bin width, out-of-range count)
# returned by `relfreq`.
RelfreqResult = namedtuple('RelfreqResult',
                           ('frequency', 'lowerlimit', 'binsize',
                            'extrapoints'))
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
    """
    Return a relative frequency histogram, using the histogram function.

    A relative frequency histogram maps the number of observations in
    each bin relative to the total number of observations.

    Parameters
    ----------
    a : array_like
        Input array.
    numbins : int, optional
        Number of bins to use (default 10).
    defaultreallimits : tuple (lower, upper), optional
        Range of the histogram.  If not given, ``(a.min() - s, a.max() + s)``
        is used, where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        Weights for each value in `a` (default: each value weighs 1.0).

    Returns
    -------
    frequency : ndarray
        Binned values of relative frequency (sums to 1 when no points
        fall outside the range).
    lowerlimit : float
        Lower real limit.
    binsize : float
        Width of each bin.
    extrapoints : int
        Number of points outside the histogram range.

    Examples
    --------
    >>> from scipy import stats
    >>> res = stats.relfreq(np.array([2, 4, 1, 2, 3, 2]), numbins=4)
    >>> res.frequency
    array([ 0.16666667,  0.5       ,  0.16666667,  0.16666667])
    """
    a = np.asanyarray(a)
    counts, lowlim, width, extra = _histogram(a, numbins, defaultreallimits,
                                              weights=weights)
    return RelfreqResult(counts / a.shape[0], lowlim, width, extra)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
    """
    Compute the O'Brien transform on input data (any number of arrays).

    Used to test for homogeneity of variance prior to running one-way
    stats.  Each array in ``*args`` is one level of a factor.  If
    `f_oneway` is run on the transformed data and found significant, the
    variances are unequal.  From Maxwell and Delaney [1]_, p.112.

    Parameters
    ----------
    args : tuple of array_like
        Any number of arrays.

    Returns
    -------
    obrientransform : ndarray
        Transformed data for use in an ANOVA.  If the input arrays are
        all 1-D of the same length the result is 2-D; otherwise a 1-D
        object array of ndarrays.

    Raises
    ------
    ValueError
        If the mean of the transformed data does not reproduce the
        sample variance (lack of convergence).

    References
    ----------
    .. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
           Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
    """
    TINY = np.sqrt(np.finfo(float).eps)

    transformed = []
    for sample in args:
        data = np.asarray(sample)
        n = len(data)
        centered_sq = (data - np.mean(data))**2
        sumsq = centered_sq.sum()

        # The O'Brien transform.
        t = ((n - 1.5) * n * centered_sq - 0.5 * sumsq) / ((n - 1) * (n - 2))

        # Sanity check: the mean of the transformed data must equal the
        # original sample variance.
        if abs(sumsq / (n - 1) - np.mean(t)) > TINY:
            raise ValueError('Lack of convergence in obrientransform.')

        transformed.append(t)

    return np.array(transformed)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
    """
    Calculate the standard error of the mean (or standard error of
    measurement) of the values in the input array.

    Parameters
    ----------
    a : array_like
        Values for which the standard error is returned.
    axis : int or None, optional
        Axis along which to operate (default 0).  ``None`` flattens the
        input first.
    ddof : int, optional
        Delta degrees-of-freedom used in the variance estimate.
        Defaults to 1, unlike np.std/np.nanstd whose default is 0.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        How to treat NaNs in the input.

    Returns
    -------
    s : ndarray or float
        Standard error of the mean in the sample(s), along the axis.
    """
    a, axis = _chk_asarray(a, axis)

    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        return mstats_basic.sem(ma.masked_invalid(a), axis, ddof)

    n = a.shape[axis]
    return np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
def zscore(a, axis=0, ddof=0):
    """
    Compute the z score of each value in the sample, relative to the
    sample mean and standard deviation.

    Parameters
    ----------
    a : array_like
        Sample data.
    axis : int or None, optional
        Axis along which to operate. Default is 0; ``None`` computes over
        the whole array.
    ddof : int, optional
        Degrees-of-freedom correction for the standard deviation.
        Default is 0.

    Returns
    -------
    zscore : array_like
        The z-scores, standardized by the mean and standard deviation of
        `a` along `axis`.

    Notes
    -----
    ndarray subclasses are preserved: the input is converted with
    `np.asanyarray` rather than `np.asarray`, so matrices and masked
    arrays pass through.
    """
    arr = np.asanyarray(a)
    center = arr.mean(axis=axis)
    spread = arr.std(axis=axis, ddof=ddof)
    # When reducing along a non-leading axis the statistics lose a
    # dimension and must be re-expanded before broadcasting; axis 0 (and
    # axis=None) broadcast correctly as-is.
    if axis and center.ndim < arr.ndim:
        center = np.expand_dims(center, axis=axis)
        spread = np.expand_dims(spread, axis=axis)
    return (arr - center) / spread
def zmap(scores, compare, axis=0, ddof=0):
    """
    Compute relative z-scores.

    Standardizes `scores` to zero mean and unit variance, where the mean
    and variance are computed from the `compare` array rather than from
    `scores` itself.

    Parameters
    ----------
    scores : array_like
        The input for which z-scores are calculated.
    compare : array_like
        The input from which the normalizing mean and standard deviation
        are taken; assumed to have the same dimension as `scores`.
    axis : int or None, optional
        Axis over which the statistics of `compare` are computed.
        Default is 0; ``None`` computes over the whole array.
    ddof : int, optional
        Degrees-of-freedom correction for the standard deviation.
        Default is 0.

    Returns
    -------
    zscore : array_like
        Z-scores, in the same shape as `scores`.

    Notes
    -----
    ndarray subclasses are preserved (`np.asanyarray` is used), so
    matrices and masked arrays pass through.
    """
    scores = np.asanyarray(scores)
    compare = np.asanyarray(compare)
    mean = compare.mean(axis=axis)
    std = compare.std(axis=axis, ddof=ddof)
    # Re-expand the reduced dimension so the statistics broadcast against
    # `scores` when reducing along a non-leading axis.
    if axis and mean.ndim < compare.ndim:
        mean = np.expand_dims(mean, axis=axis)
        std = np.expand_dims(std, axis=axis)
    return (scores - mean) / std
# Private dictionary initialized only once at module level
# See https://en.wikipedia.org/wiki/Robust_measures_of_scale
# 'normal' rescales the IQR so it estimates the standard deviation of
# normally distributed data: 2 * sqrt(2) * erfinv(1/2) ~= 1.349.
_scale_conversions = {'raw': 1.0,
                      'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
def iqr(x, axis=None, rng=(25, 75), scale='raw', nan_policy='propagate',
        interpolation='linear', keepdims=False):
    """
    Compute the interquartile range of the data along the specified axis.

    The interquartile range (IQR) is the difference between the 75th and
    25th percentile of the data. It is a measure of the dispersion
    similar to standard deviation or variance, but is much more robust
    against outliers [2]_.

    The ``rng`` parameter allows this function to compute other
    percentile ranges than the actual IQR. For example, setting
    ``rng=(0, 100)`` is equivalent to `numpy.ptp`.

    The IQR of an empty array is `np.nan`.

    .. versionadded:: 0.18.0

    Parameters
    ----------
    x : array_like
        Input array or object that can be converted to an array.
    axis : int or sequence of int, optional
        Axis along which the range is computed. The default is to
        compute the IQR for the entire array.
    rng : Two-element sequence containing floats in range of [0,100] optional
        Percentiles over which to compute the range. Each must be
        between 0 and 100, inclusive. The default is the true IQR:
        `(25, 75)`. The order of the elements is not important.
    scale : scalar or str, optional
        The numerical value of scale will be divided out of the final
        result. The following string values are recognized:

        'raw' : No scaling, just return the raw IQR.
        'normal' : Scale by :math:`2 \\sqrt{2} erf^{-1}(\\frac{1}{2}) \\approx 1.349`.

        The default is 'raw'. Array-like scale is also allowed, as long
        as it broadcasts correctly to the output such that
        ``out / scale`` is a valid operation. The output dimensions
        depend on the input array, `x`, the `axis` argument, and the
        `keepdims` flag.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate'
        returns nan, 'raise' throws an error, 'omit' performs the
        calculations ignoring nan values. Default is 'propagate'.
    interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, optional
        Specifies the interpolation method to use when the percentile
        boundaries lie between two data points `i` and `j`:

        * 'linear' : `i + (j - i) * fraction`, where `fraction` is the
          fractional part of the index surrounded by `i` and `j`.
        * 'lower' : `i`.
        * 'higher' : `j`.
        * 'nearest' : `i` or `j` whichever is nearest.
        * 'midpoint' : `(i + j) / 2`.

        Default is 'linear'.
    keepdims : bool, optional
        If this is set to `True`, the reduced axes are left in the
        result as dimensions with size one. With this option, the result
        will broadcast correctly against the original array `x`.

    Returns
    -------
    iqr : scalar or ndarray
        If ``axis=None``, a scalar is returned. If the input contains
        integers or floats of smaller precision than ``np.float64``, then the
        output data-type is ``np.float64``. Otherwise, the output data-type is
        the same as that of the input.

    See Also
    --------
    numpy.std, numpy.var

    Examples
    --------
    >>> from scipy.stats import iqr
    >>> x = np.array([[10, 7, 4], [3, 2, 1]])
    >>> x
    array([[10,  7,  4],
           [ 3,  2,  1]])
    >>> iqr(x)
    4.0
    >>> iqr(x, axis=0)
    array([ 3.5,  2.5,  1.5])
    >>> iqr(x, axis=1)
    array([ 3.,  1.])
    >>> iqr(x, axis=1, keepdims=True)
    array([[ 3.],
           [ 1.]])

    Notes
    -----
    This function is heavily dependent on the version of `numpy` that is
    installed. Versions greater than 1.11.0b3 are highly recommended, as they
    include a number of enhancements and fixes to `numpy.percentile` and
    `numpy.nanpercentile` that affect the operation of this function. The
    following modifications apply:

    Below 1.10.0 : `nan_policy` is poorly defined.
        The default behavior of `numpy.percentile` is used for 'propagate'. This
        is a hybrid of 'omit' and 'propagate' that mostly yields a skewed
        version of 'omit' since NaNs are sorted to the end of the data. A
        warning is raised if there are NaNs in the data.
    Below 1.9.0: `numpy.nanpercentile` does not exist.
        This means that `numpy.percentile` is used regardless of `nan_policy`
        and a warning is issued. See previous item for a description of the
        behavior.
    Below 1.9.0: `keepdims` and `interpolation` are not supported.
        The keywords get ignored with a warning if supplied with non-default
        values. However, multiple axes are still supported.

    References
    ----------
    .. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range
    .. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale
    .. [3] "Quantile" https://en.wikipedia.org/wiki/Quantile
    """
    x = asarray(x)

    # This check prevents percentile from raising an error later. Also, it is
    # consistent with `np.var` and `np.std`.
    if not x.size:
        return np.nan

    # An error may be raised here, so fail-fast, before doing lengthy
    # computations, even though `scale` is not used until later
    if isinstance(scale, string_types):
        scale_key = scale.lower()
        if scale_key not in _scale_conversions:
            raise ValueError("{0} not a valid scale for `iqr`".format(scale))
        # Replace the string with the numeric divisor it names.
        scale = _scale_conversions[scale_key]

    # Select the percentile function to use based on nans and policy
    contains_nan, nan_policy = _contains_nan(x, nan_policy)

    if contains_nan and nan_policy == 'omit':
        percentile_func = _iqr_nanpercentile
    else:
        percentile_func = _iqr_percentile

    if len(rng) != 2:
        raise TypeError("quantile range must be two element sequence")

    # Sort so the percentile pair may be given in either order.
    rng = sorted(rng)
    pct = percentile_func(x, rng, axis=axis, interpolation=interpolation,
                          keepdims=keepdims, contains_nan=contains_nan)
    out = np.subtract(pct[1], pct[0])

    if scale != 1.0:
        out /= scale

    return out
def _iqr_percentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False):
    """
    Private wrapper around `np.percentile` that works around older
    versions of `numpy` (missing keywords, no multi-axis support).

    While this function is pretty much necessary for the moment, it
    should be removed as soon as the minimum supported numpy version
    allows.
    """
    if contains_nan and NumpyVersion(np.__version__) < '1.10.0a':
        # I see no way to avoid the version check to ensure that the corrected
        # NaN behavior has been implemented except to call `percentile` on a
        # small array.
        msg = "Keyword nan_policy='propagate' not correctly supported for " \
              "numpy versions < 1.10.x. The default behavior of " \
              "`numpy.percentile` will be used."
        warnings.warn(msg, RuntimeWarning)

    try:
        # For older versions of numpy, there are two things that can cause a
        # problem here: missing keywords and non-scalar axis. The former can be
        # partially handled with a warning, the latter can be handled fully by
        # hacking in an implementation similar to numpy's function for
        # providing multi-axis functionality
        # (`numpy.lib.function_base._ureduce` for the curious).
        result = np.percentile(x, q, axis=axis, keepdims=keepdims,
                               interpolation=interpolation)
    except TypeError:
        if interpolation != 'linear' or keepdims:
            # At time of writing, this means np.__version__ < 1.9.0
            warnings.warn("Keywords interpolation and keepdims not supported "
                          "for your version of numpy", RuntimeWarning)

        try:
            # Special processing if axis is an iterable
            original_size = len(axis)
        except TypeError:
            # Axis is a scalar at this point
            pass
        else:
            # Normalize negative axes and drop duplicates.
            axis = np.unique(np.asarray(axis) % x.ndim)
            if original_size > axis.size:
                # mimic numpy if axes are duplicated
                raise ValueError("duplicate value in axis")
            if axis.size == x.ndim:
                # axis includes all axes: revert to None
                axis = None
            elif axis.size == 1:
                # no rolling necessary
                axis = axis[0]
            else:
                # roll multiple axes to the end and flatten that part out
                for ax in axis[::-1]:
                    x = np.rollaxis(x, ax, x.ndim)
                x = x.reshape(x.shape[:-axis.size] +
                              (np.prod(x.shape[-axis.size:]),))
                axis = -1
        result = np.percentile(x, q, axis=axis)

    return result
def _iqr_nanpercentile(x, q, axis=None, interpolation='linear', keepdims=False,
                       contains_nan=False):
    """
    Private wrapper around `np.nanpercentile` that works around the following:

      1. A bug in `np.nanpercentile` that was around until numpy version
         1.11.0.
      2. A bug in `np.percentile` NaN handling that was fixed in numpy
         version 1.10.0.
      3. The non-existence of `np.nanpercentile` before numpy version
         1.9.0.

    While this function is pretty much necessary for the moment, it
    should be removed as soon as the minimum supported numpy version
    allows.
    """
    if hasattr(np, 'nanpercentile'):
        # `nanpercentile` is available, i.e. np.__version__ >= 1.9.0.
        result = np.nanpercentile(x, q, axis=axis,
                                  interpolation=interpolation,
                                  keepdims=keepdims)
        # If non-scalar result and nanpercentile does not do proper axis roll.
        # I see no way of avoiding the version test since dimensions may just
        # happen to match in the data.
        if result.ndim > 1 and NumpyVersion(np.__version__) < '1.11.0a':
            axis = np.asarray(axis)
            if axis.size == 1:
                # If only one axis specified, reduction happens along that dimension
                if axis.ndim == 0:
                    axis = axis[None]
                result = np.rollaxis(result, axis[0])
            else:
                # If multiple axes, reduced dimension is last
                result = np.rollaxis(result, -1)
    else:
        msg = "Keyword nan_policy='omit' not correctly supported for numpy " \
              "versions < 1.9.x. The default behavior of numpy.percentile " \
              "will be used."
        warnings.warn(msg, RuntimeWarning)
        result = _iqr_percentile(x, q, axis=axis)

    return result
#####################################
#         TRIMMING FUNCTIONS        #
#####################################

# Named result type returned by `sigmaclip`:
# (clipped data, lower clipping threshold, upper clipping threshold).
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))
def sigmaclip(a, low=4., high=4.):
    """
    Iterative sigma-clipping of array elements.

    Starting from the full (raveled) sample, every element outside the
    critical range is removed, i.e. every element ``c`` satisfying
    either of ::

        c < mean(c) - std(c)*low
        c > mean(c) + std(c)*high

    The statistics are recomputed on the surviving elements and the
    process repeats until an iteration removes nothing.

    Parameters
    ----------
    a : array_like
        Data array; raveled if not 1-D.
    low : float, optional
        Lower bound factor of sigma clipping. Default is 4.
    high : float, optional
        Upper bound factor of sigma clipping. Default is 4.

    Returns
    -------
    clipped : ndarray
        Input array with clipped elements removed.
    lower : float
        Lower threshold value used for clipping.
    upper : float
        Upper threshold value used for clipping.
    """
    clipped = np.asarray(a).ravel()
    removed = 1  # force at least one pass
    while removed:
        mean = clipped.mean()
        std = clipped.std()
        lower = mean - std * low
        upper = mean + std * high
        before = clipped.size
        clipped = clipped[(clipped >= lower) & (clipped <= upper)]
        removed = before - clipped.size
    return SigmaclipResult(clipped, lower, upper)
def trimboth(a, proportiontocut, axis=0):
    """
    Slice off a proportion of items from both ends of an array.

    With ``proportiontocut = 0.1`` the lowest 10% and the highest 10% of
    scores are removed. If the proportion yields a non-integer count,
    the function slices off conservatively (i.e. less).

    Parameters
    ----------
    a : array_like
        Data to trim.
    proportiontocut : float
        Proportion (in range 0-1) of the data set to trim off each end.
    axis : int or None, optional
        Axis along which to trim. Default is 0; ``None`` trims the
        raveled array.

    Returns
    -------
    out : ndarray
        Trimmed version of `a`. The order of the kept elements is
        undefined (a partial sort via `np.partition` is used).

    Raises
    ------
    ValueError
        If the proportion would remove everything.

    See Also
    --------
    trim_mean
    """
    arr = np.asarray(a)
    if arr.size == 0:
        return arr
    if axis is None:
        arr = arr.ravel()
        axis = 0

    count = arr.shape[axis]
    lower = int(proportiontocut * count)
    upper = count - lower
    if lower >= upper:
        raise ValueError("Proportion too big.")

    # Partition so the kept middle section is separated from both tails.
    partitioned = np.partition(arr, (lower, upper - 1), axis)
    keep = [slice(None)] * partitioned.ndim
    keep[axis] = slice(lower, upper)
    return partitioned[tuple(keep)]
def trim1(a, proportiontocut, tail='right', axis=0):
    """
    Slice off a proportion from ONE end of the passed array distribution.

    If ``proportiontocut = 0.1``, the 'leftmost' or 'rightmost' 10% of
    scores along `axis` are removed (lowest or highest values depending
    on `tail`). If the proportion yields a non-integer count, the
    function slices off conservatively (i.e. less).

    Parameters
    ----------
    a : array_like
        Input array.
    proportiontocut : float
        Fraction to cut off of the 'left' or 'right' of the distribution.
    tail : {'left', 'right'}, optional
        Which end to trim. Defaults to 'right'.
    axis : int or None, optional
        Axis along which to trim data. Default is 0. If None, compute
        over the whole (raveled) array.

    Returns
    -------
    trim1 : ndarray
        Trimmed version of array `a`. The order of the trimmed content is
        undefined (a partial sort via `np.partition` is used).

    Raises
    ------
    ValueError
        If `tail` is neither 'left' nor 'right'.
    """
    a = np.asarray(a)
    if axis is None:
        a = a.ravel()
        axis = 0

    nobs = a.shape[axis]

    # avoid possible corner case: trimming everything leaves nothing.
    # (Kept as an empty list for backward compatibility.)
    if proportiontocut >= 1:
        return []

    if tail.lower() == 'right':
        lowercut = 0
        uppercut = nobs - int(proportiontocut * nobs)
    elif tail.lower() == 'left':
        lowercut = int(proportiontocut * nobs)
        uppercut = nobs
    else:
        # Previously an unknown tail fell through and raised a confusing
        # NameError; fail explicitly instead.
        raise ValueError("tail must be 'left' or 'right'")

    atmp = np.partition(a, (lowercut, uppercut - 1), axis)

    # BUGFIX: slice along the requested axis. The old code sliced
    # `atmp[lowercut:uppercut]`, which always cut axis 0 regardless of
    # the `axis` argument used for the partition.
    sl = [slice(None)] * atmp.ndim
    sl[axis] = slice(lowercut, uppercut)
    return atmp[tuple(sl)]
def trim_mean(a, proportiontocut, axis=0):
    """
    Return the mean of an array after trimming both tails.

    With ``proportiontocut = 0.1`` the lowest and highest 10% of scores
    along `axis` are discarded before averaging. If the proportion
    yields a non-integer count, the function slices off conservatively
    (i.e. less).

    Parameters
    ----------
    a : array_like
        Input array.
    proportiontocut : float
        Fraction to cut off of both tails of the distribution.
    axis : int or None, optional
        Axis along which the trimmed means are computed. Default is 0;
        ``None`` computes over the raveled array.

    Returns
    -------
    trim_mean : ndarray
        Mean of the trimmed array (`np.nan` for empty input).

    Raises
    ------
    ValueError
        If the proportion would remove everything.

    See Also
    --------
    trimboth
    tmean : compute the trimmed mean ignoring values outside given `limits`.
    """
    arr = np.asarray(a)
    if arr.size == 0:
        return np.nan
    if axis is None:
        arr = arr.ravel()
        axis = 0

    nobs = arr.shape[axis]
    cut = int(proportiontocut * nobs)
    if cut > nobs - cut:
        raise ValueError("Proportion too big.")

    # Partial sort: everything below `cut` and above `nobs - cut - 1` is
    # pushed into the tails, so the middle slice holds the kept values.
    part = np.partition(arr, (cut, nobs - cut - 1), axis)
    window = [slice(None)] * part.ndim
    window[axis] = slice(cut, nobs - cut)
    return np.mean(part[tuple(window)], axis=axis)
# Named result type returned by `f_oneway`: (F statistic, p-value).
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
def f_oneway(*args):
    """
    Perform a one-way ANOVA.

    Tests the null hypothesis that two or more groups have the same
    population mean. The test is applied to samples from two or more
    groups, possibly with differing sizes.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The sample measurements for each group.

    Returns
    -------
    statistic : float
        The computed F-value of the test.
    pvalue : float
        The associated p-value from the F-distribution.

    Notes
    -----
    The p-value is only valid under the usual ANOVA assumptions:
    independent samples, each drawn from a normally distributed
    population, with equal population standard deviations
    (homoscedasticity). If those do not hold, `scipy.stats.kruskal` may
    be used instead, with some loss of power.

    References
    ----------
    .. [1] Lowry, Richard.  "Concepts and Applications of Inferential
           Statistics". Chapter 14.
    .. [2] Heiman, G.W.  Research Methods in Statistics. 2002.
    .. [3] McDonald, G. H. "Handbook of Biological Statistics", One-way ANOVA.
           http://www.biostathandbook.com/onewayanova.html
    """
    samples = [np.asarray(sample, dtype=float) for sample in args]
    num_groups = len(samples)
    pooled = np.concatenate(samples)
    total_n = len(pooled)

    # Center all data around zero before the sum_of_sq / sq_of_sum
    # calculation: variance is invariant to a shift in location, and
    # centering vastly improves numerical stability.
    grand_mean = pooled.mean()
    pooled -= grand_mean

    sstot = _sum_of_squares(pooled) - _square_of_sums(pooled) / total_n

    # "Between treatments" sum of squares; each group is centered with
    # the same offset used for the pooled data.
    ssbn = sum(_square_of_sums(sample - grand_mean) / len(sample)
               for sample in samples)
    ssbn -= _square_of_sums(pooled) / total_n

    # "Within treatments" sum of squares is the remainder.
    sswn = sstot - ssbn
    dfbn = num_groups - 1
    dfwn = total_n - num_groups
    f = (ssbn / dfbn) / (sswn / dfwn)
    prob = special.fdtrc(dfbn, dfwn, f)  # equivalent to stats.f.sf
    return F_onewayResult(f, prob)
def pearsonr(x, y):
    r"""
    Calculate a Pearson correlation coefficient and the p-value for
    testing non-correlation.

    The Pearson correlation coefficient measures the linear relationship
    between two datasets, varying between -1 and +1 with 0 implying no
    correlation. The p-value roughly indicates the probability of an
    uncorrelated system producing datasets with a correlation at least
    as extreme as the one computed; it is not entirely reliable but is
    probably reasonable for datasets larger than 500 or so.

    Parameters
    ----------
    x : (N,) array_like
        Input
    y : (N,) array_like
        Input

    Returns
    -------
    r : float
        Pearson's correlation coefficient
    p-value : float
        2-tailed p-value

    Notes
    -----
    The coefficient is computed as

    .. math::

        r_{pb} = \frac{\sum (x - m_x) (y - m_y)}
                      {\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}}

    where :math:`m_x` and :math:`m_y` are the means of `x` and `y`.
    """
    # x and y should have same length.
    x = np.asarray(x)
    y = np.asarray(y)
    n = len(x)
    x_dev = x - x.mean()
    y_dev = y - y.mean()
    numerator = np.add.reduce(x_dev * y_dev)
    denominator = np.sqrt(_sum_of_squares(x_dev) * _sum_of_squares(y_dev))
    # Clamp tiny floating-point excursions outside [-1, 1].
    r = max(min(numerator / denominator, 1.0), -1.0)

    df = n - 2
    if abs(r) == 1.0:
        return r, 0.0
    t_squared = r * r * (df / ((1.0 - r) * (1.0 + r)))
    prob = special.betainc(
        0.5 * df, 0.5, np.fmin(np.asarray(df / (df + t_squared)), 1.0)
    )
    return r, prob
def fisher_exact(table, alternative='two-sided'):
    """Performs a Fisher exact test on a 2x2 contingency table.

    Parameters
    ----------
    table : array_like of ints
        A 2x2 contingency table.  Elements should be non-negative integers.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Which alternative hypothesis to the null hypothesis the test uses.
        Default is 'two-sided'.

    Returns
    -------
    oddsratio : float
        This is prior odds ratio and not a posterior estimate.
    p_value : float
        P-value, the probability of obtaining a distribution at least as
        extreme as the one that was actually observed, assuming that the
        null hypothesis is true.

    See Also
    --------
    chi2_contingency : Chi-square test of independence of variables in a
        contingency table.

    Notes
    -----
    The calculated odds ratio is different from the one R uses. This scipy
    implementation returns the (more common) "unconditional Maximum
    Likelihood Estimate", while R uses the "conditional Maximum Likelihood
    Estimate".

    For tables with large numbers, the (inexact) chi-square test implemented
    in the function `chi2_contingency` can also be used.

    Examples
    --------
    Say we spend a few days counting whales and sharks in the Atlantic and
    Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
    Indian ocean 2 whales and 5 sharks. Then our contingency table is::

                Atlantic  Indian
        whales     8        2
        sharks     1        5

    We use this table to find the p-value:

    >>> import scipy.stats as stats
    >>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
    >>> pvalue
    0.0349...

    The probability that we would observe this or an even more imbalanced ratio
    by chance is about 3.5%.  A commonly used significance level is 5%--if we
    adopt that, we can therefore conclude that our observed imbalance is
    statistically significant; whales prefer the Atlantic while sharks prefer
    the Indian ocean.
    """
    hypergeom = distributions.hypergeom
    c = np.asarray(table, dtype=np.int64)  # int32 is not enough for the algorithm
    if not c.shape == (2, 2):
        raise ValueError("The input `table` must be of shape (2, 2).")

    if np.any(c < 0):
        raise ValueError("All values in `table` must be nonnegative.")

    if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
        # If both values in a row or column are zero, the p-value is 1 and
        # the odds ratio is NaN.
        return np.nan, 1.0

    if c[1, 0] > 0 and c[0, 1] > 0:
        oddsratio = c[0, 0] * c[1, 1] / (c[1, 0] * c[0, 1])
    else:
        # A zero off-diagonal entry makes the ratio infinite.
        oddsratio = np.inf

    # Margins of the table: n1/n2 are row sums, n is the first column sum.
    n1 = c[0, 0] + c[0, 1]
    n2 = c[1, 0] + c[1, 1]
    n = c[0, 0] + c[1, 0]

    def binary_search(n, n1, n2, side):
        """Binary search for where to begin lower/upper halves in two-sided
        test.

        Closes over `mode`, `pexact` and `epsilon` from the enclosing
        'two-sided' branch.
        """
        if side == "upper":
            minval = mode
            maxval = n
        else:
            minval = 0
            maxval = mode
        guess = -1
        while maxval - minval > 1:
            if maxval == minval + 1 and guess == minval:
                guess = maxval
            else:
                guess = (maxval + minval) // 2
            pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
            if side == "upper":
                ng = guess - 1
            else:
                ng = guess + 1
            if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
                break
            elif pguess < pexact:
                maxval = guess
            else:
                minval = guess
        if guess == -1:
            guess = minval
        # Nudge the guess so its pmf straddles pexact within the epsilon
        # tolerance (guards against floating-point ties).
        if side == "upper":
            while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
                guess -= 1
            while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
                guess += 1
        else:
            while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
                guess += 1
            while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
                guess -= 1
        return guess

    if alternative == 'less':
        pvalue = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
    elif alternative == 'greater':
        # Same formula as the 'less' case, but with the second column.
        pvalue = hypergeom.cdf(c[0, 1], n1 + n2, n1, c[0, 1] + c[1, 1])
    elif alternative == 'two-sided':
        mode = int((n + 1) * (n1 + 1) / (n1 + n2 + 2))
        pexact = hypergeom.pmf(c[0, 0], n1 + n2, n1, n)
        pmode = hypergeom.pmf(mode, n1 + n2, n1, n)

        # Tolerance for comparing pmf values in floating point.
        epsilon = 1 - 1e-4
        if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
            # Observed cell is (numerically) at the mode: nothing is more
            # extreme, so the two-sided p-value is 1.
            return oddsratio, 1.

        elif c[0, 0] < mode:
            plower = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
            if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
                return oddsratio, plower

            guess = binary_search(n, n1, n2, "upper")
            pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
        else:
            pupper = hypergeom.sf(c[0, 0] - 1, n1 + n2, n1, n)
            if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
                return oddsratio, pupper

            guess = binary_search(n, n1, n2, "lower")
            pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
    else:
        msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
        raise ValueError(msg)

    # Guard against accumulated floating-point overshoot.
    pvalue = min(pvalue, 1.0)
    return oddsratio, pvalue
# Named result type returned by `spearmanr`: (correlation, pvalue).
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))


def spearmanr(a, b=None, axis=0, nan_policy='propagate'):
    """
    Calculate a Spearman rank-order correlation coefficient and the p-value
    to test for non-correlation.

    The Spearman correlation is a nonparametric measure of the monotonicity
    of the relationship between two datasets. Unlike the Pearson correlation,
    the Spearman correlation does not assume that both datasets are normally
    distributed. Like other correlation coefficients, this one varies
    between -1 and +1 with 0 implying no correlation. Correlations of -1 or
    +1 imply an exact monotonic relationship. Positive correlations imply that
    as x increases, so does y. Negative correlations imply that as x
    increases, y decreases.

    The p-value roughly indicates the probability of an uncorrelated system
    producing datasets that have a Spearman correlation at least as extreme
    as the one computed from these datasets. The p-values are not entirely
    reliable but are probably reasonable for datasets larger than 500 or so.

    Parameters
    ----------
    a, b : 1D or 2D array_like, b is optional
        One or two 1-D or 2-D arrays containing multiple variables and
        observations. When these are 1-D, each represents a vector of
        observations of a single variable. For the behavior in the 2-D case,
        see under ``axis``, below.
        Both arrays need to have the same length in the ``axis`` dimension.
    axis : int or None, optional
        If axis=0 (default), then each column represents a variable, with
        observations in the rows. If axis=1, the relationship is transposed:
        each row represents a variable, while the columns contain observations.
        If axis=None, then both arrays will be raveled.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    correlation : float or ndarray (2-D square)
        Spearman correlation matrix or correlation coefficient (if only 2
        variables are given as parameters. Correlation matrix is square with
        length equal to total number of variables (columns or rows) in a and b
        combined.
    pvalue : float
        The two-sided p-value for a hypothesis test whose null hypothesis is
        that two sets of data are uncorrelated, has same dimension as rho.

    Notes
    -----
    Changes in scipy 0.8.0: rewrite to add tie-handling, and axis.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.
       Section  14.7
    """
    a, axisout = _chk_asarray(a, axis)

    a_contains_nan, nan_policy = _contains_nan(a, nan_policy)

    if a_contains_nan:
        a = ma.masked_invalid(a)

    if a.size <= 1:
        # Too few observations for a correlation.
        return SpearmanrResult(np.nan, np.nan)

    # Replace observations by their ranks along the observation axis.
    ar = np.apply_along_axis(rankdata, axisout, a)

    br = None
    if b is not None:
        b, axisout = _chk_asarray(b, axis)

        b_contains_nan, nan_policy = _contains_nan(b, nan_policy)

        if a_contains_nan or b_contains_nan:
            b = ma.masked_invalid(b)

            if nan_policy == 'propagate':
                # Compute with masked arrays, then poison the result with nan.
                rho, pval = mstats_basic.spearmanr(a, b, use_ties=True)
                return SpearmanrResult(rho * np.nan, pval * np.nan)

            if nan_policy == 'omit':
                return mstats_basic.spearmanr(a, b, use_ties=True)

        br = np.apply_along_axis(rankdata, axisout, b)
    n = a.shape[axisout]
    # Pearson correlation of the ranks; `rowvar=axisout` makes rows the
    # variables when axisout == 1 and columns when axisout == 0.
    rs = np.corrcoef(ar, br, rowvar=axisout)

    olderr = np.seterr(divide='ignore')  # rs can have elements equal to 1
    try:
        # clip the small negative values possibly caused by rounding
        # errors before taking the square root
        t = rs * np.sqrt(((n-2)/((rs+1.0)*(1.0-rs))).clip(0))
    finally:
        np.seterr(**olderr)

    prob = 2 * distributions.t.sf(np.abs(t), n-2)

    if rs.shape == (2, 2):
        # Exactly two variables: return scalars instead of matrices.
        return SpearmanrResult(rs[1, 0], prob[1, 0])
    else:
        return SpearmanrResult(rs, prob)
# Named result type returned by `pointbiserialr`: (correlation, pvalue).
PointbiserialrResult = namedtuple('PointbiserialrResult',
                                  ('correlation', 'pvalue'))
def pointbiserialr(x, y):
r"""
Calculate a point biserial correlation coefficient and its p-value.
The point biserial correlation is used to measure the relationship
between a binary variable, x, and a continuous variable, y. Like other
correlation coefficients, this one varies between -1 and +1 with 0
implying no correlation. Correlations of -1 or +1 imply a determinative
relationship.
This function uses a shortcut formula but produces the same result as
`pearsonr`.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value
pvalue : float
2-tailed p-value
Notes
-----
`pointbiserialr` uses a t-test with ``n-1`` degrees of freedom.
It is equivalent to `pearsonr.`
The value of the point-biserial correlation can be calculated from:
.. math::
r_{pb} = \frac{\overline{Y_{1}} -
\overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{1} N_{2}}{N (N - 1))}}
Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
are number of observations coded 0 and 1 respectively; :math:`N` is the
total number of observations and :math:`s_{y}` is the standard
deviation of all the metric observations.
A value of :math:`r_{pb}` that is significantly different from zero is
completely equivalent to a significant difference in means between the two
groups. Thus, an independent groups t Test with :math:`N-2` degrees of
freedom may be used to test whether :math:`r_{pb}` is nonzero. The
relation between the t-statistic for comparing two independent groups and
:math:`r_{pb}` is given by:
.. math::
t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
References
----------
.. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
Statist., Vol. 20, no.1, pp. 125-126, 1949.
.. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
np. 3, pp. 603-607, 1954.
.. [3] D. Kornbrot "Point Biserial Correlation", In Wiley StatsRef:
Statistics Reference Online (eds N. Balakrishnan, et al.), 2014.
https://doi.org/10.1002/9781118445112.stat06227
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pointbiserialr(a, b)
(0.8660254037844386, 0.011724811003954652)
>>> stats.pearsonr(a, b)
(0.86602540378443871, 0.011724811003954626)
>>> np.corrcoef(a, b)
array([[ 1. , 0.8660254],
[ 0.8660254, 1. ]])
"""
rpb, prob = pearsonr(x, y)
return PointbiserialrResult(rpb, prob)
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
def kendalltau(x, y, initial_lexsort=None, nan_policy='propagate', method='auto'):
    """
    Calculate Kendall's tau, a correlation measure for ordinal data.
    Kendall's tau is a measure of the correspondence between two rankings.
    Values close to 1 indicate strong agreement, values close to -1 indicate
    strong disagreement. This is the 1945 "tau-b" version of Kendall's
    tau [2]_, which can account for ties and which reduces to the 1938 "tau-a"
    version [1]_ in absence of ties.
    Parameters
    ----------
    x, y : array_like
        Arrays of rankings, of the same shape. If arrays are not 1-D, they will
        be flattened to 1-D.
    initial_lexsort : bool, optional
        Unused (deprecated).
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'. Note that if the input contains nan
        'omit' delegates to mstats_basic.kendalltau(), which has a different
        implementation.
    method: {'auto', 'asymptotic', 'exact'}, optional
        Defines which method is used to calculate the p-value [5]_.
        'asymptotic' uses a normal approximation valid for large samples.
        'exact' computes the exact p-value, but can only be used if no ties
        are present. 'auto' is the default and selects the appropriate
        method based on a trade-off between speed and accuracy.
    Returns
    -------
    correlation : float
        The tau statistic.
    pvalue : float
        The two-sided p-value for a hypothesis test whose null hypothesis is
        an absence of association, tau = 0.
    See also
    --------
    spearmanr : Calculates a Spearman rank-order correlation coefficient.
    theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
    weightedtau : Computes a weighted version of Kendall's tau.
    Notes
    -----
    The definition of Kendall's tau that is used is [2]_::
      tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
    where P is the number of concordant pairs, Q the number of discordant
    pairs, T the number of ties only in `x`, and U the number of ties only in
    `y`.  If a tie occurs for the same pair in both `x` and `y`, it is not
    added to either T or U.
    References
    ----------
    .. [1] Maurice G. Kendall, "A New Measure of Rank Correlation", Biometrika
           Vol. 30, No. 1/2, pp. 81-93, 1938.
    .. [2] Maurice G. Kendall, "The treatment of ties in ranking problems",
           Biometrika Vol. 33, No. 3, pp. 239-251. 1945.
    .. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John
           Wiley & Sons, 1967.
    .. [4] Peter M. Fenwick, "A new data structure for cumulative frequency
           tables", Software: Practice and Experience, Vol. 24, No. 3,
           pp. 327-336, 1994.
    .. [5] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition),
           Charles Griffin & Co., 1970.
    Examples
    --------
    >>> from scipy import stats
    >>> x1 = [12, 2, 1, 12, 2]
    >>> x2 = [1, 4, 7, 1, 0]
    >>> tau, p_value = stats.kendalltau(x1, x2)
    >>> tau
    -0.47140452079103173
    >>> p_value
    0.2827454599327748
    """
    x = np.asarray(x).ravel()
    y = np.asarray(y).ravel()
    if x.size != y.size:
        raise ValueError("All inputs to `kendalltau` must be of the same size, "
                         "found x-size %s and y-size %s" % (x.size, y.size))
    elif not x.size or not y.size:
        return KendalltauResult(np.nan, np.nan)  # Return NaN if arrays are empty
    # Check both x and y for nans; an 'omit' request on either side wins.
    cnx, npx = _contains_nan(x, nan_policy)
    cny, npy = _contains_nan(y, nan_policy)
    contains_nan = cnx or cny
    if npx == 'omit' or npy == 'omit':
        nan_policy = 'omit'
    if contains_nan and nan_policy == 'propagate':
        return KendalltauResult(np.nan, np.nan)
    elif contains_nan and nan_policy == 'omit':
        # Delegate nan-ignoring computation to the masked-array implementation
        # (note: its algorithm differs from the one below).
        x = ma.masked_invalid(x)
        y = ma.masked_invalid(y)
        return mstats_basic.kendalltau(x, y, method=method)
    if initial_lexsort is not None:  # deprecate to drop!
        warnings.warn('"initial_lexsort" is gone!')
    def count_rank_tie(ranks):
        # For each group of tied ranks of size t, accumulate:
        #   sum t(t-1)/2        (number of tied pairs),
        #   sum t(t-1)(t-2)     (used in the x0*y0 variance term),
        #   sum t(t-1)(2t+5)    (used in the main variance term [3]_).
        cnt = np.bincount(ranks).astype('int64', copy=False)
        cnt = cnt[cnt > 1]
        return ((cnt * (cnt - 1) // 2).sum(),
            (cnt * (cnt - 1.) * (cnt - 2)).sum(),
            (cnt * (cnt - 1.) * (2*cnt + 5)).sum())
    size = x.size
    perm = np.argsort(y)  # sort on y and convert y to dense ranks
    x, y = x[perm], y[perm]
    y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp)
    # stable sort on x and convert x to dense ranks
    # (stability preserves the y-order within groups of equal x)
    perm = np.argsort(x, kind='mergesort')
    x, y = x[perm], y[perm]
    x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp)
    dis = _kendall_dis(x, y)  # discordant pairs
    # Runs where both x and y repeat give the jointly-tied pair count ntie.
    obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True]
    cnt = np.diff(np.nonzero(obs)[0]).astype('int64', copy=False)
    ntie = (cnt * (cnt - 1) // 2).sum()  # joint ties
    xtie, x0, x1 = count_rank_tie(x)     # ties in x, stats
    ytie, y0, y1 = count_rank_tie(y)     # ties in y, stats
    tot = (size * (size - 1)) // 2
    # All-tied input on either side makes tau undefined (zero denominator).
    if xtie == tot or ytie == tot:
        return KendalltauResult(np.nan, np.nan)
    # Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie
    #               = con + dis + xtie + ytie - ntie
    con_minus_dis = tot - xtie - ytie + ntie - 2 * dis
    tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie)
    # Limit range to fix computational errors
    tau = min(1., max(-1., tau))
    if method == 'exact' and (xtie != 0 or ytie != 0):
        raise ValueError("Ties found, exact method cannot be used.")
    if method == 'auto':
        # Exact p-value is feasible only for small, tie-free samples or
        # near-extreme statistics; otherwise fall back to the normal approx.
        if (xtie == 0 and ytie == 0) and (size <= 33 or min(dis, tot-dis) <= 1):
            method = 'exact'
        else:
            method = 'asymptotic'
    if xtie == 0 and ytie == 0 and method == 'exact':
        # Exact p-value, see Maurice G. Kendall, "Rank Correlation Methods" (4th Edition), Charles Griffin & Co., 1970.
        c = min(dis, tot-dis)
        if size <= 0:
            raise ValueError
        elif c < 0 or 2*c > size*(size-1):
            raise ValueError
        elif size == 1:
            pvalue = 1.0
        elif size == 2:
            pvalue = 1.0
        elif c == 0:
            pvalue = 2.0/np.math.factorial(size)
        elif c == 1:
            pvalue = 2.0/np.math.factorial(size-1)
        else:
            # Dynamic program over the number of permutations of each
            # discordance count; new[k] counts permutations with k swaps.
            old = [0.0]*(c+1)
            new = [0.0]*(c+1)
            new[0] = 1.0
            new[1] = 1.0
            for j in range(3,size+1):
                old = new[:]
                for k in range(1,min(j,c+1)):
                    new[k] += new[k-1]
                for k in range(j,c+1):
                    new[k] += new[k-1] - old[k-j]
            pvalue = 2.0*sum(new)/np.math.factorial(size)
    elif method == 'asymptotic':
        # con_minus_dis is approx normally distributed with this variance [3]_
        var = (size * (size - 1) * (2.*size + 5) - x1 - y1) / 18. + (
            2. * xtie * ytie) / (size * (size - 1)) + x0 * y0 / (9. *
            size * (size - 1) * (size - 2))
        pvalue = special.erfc(np.abs(con_minus_dis) / np.sqrt(var) / np.sqrt(2))
    else:
        raise ValueError("Unknown method "+str(method)+" specified, please use auto, exact or asymptotic.")
    return KendalltauResult(tau, pvalue)
WeightedTauResult = namedtuple('WeightedTauResult', ('correlation', 'pvalue'))


def weightedtau(x, y, rank=True, weigher=None, additive=True):
    r"""
    Compute a weighted version of Kendall's :math:`\tau`.

    The weighted :math:`\tau` is a weighted version of Kendall's
    :math:`\tau` in which exchanges of high weight are more influential than
    exchanges of low weight. The default parameters compute the additive
    hyperbolic version of the index, :math:`\tau_\mathrm h`, which has
    been shown to provide the best balance between important and
    unimportant elements [1]_.

    The weighting is defined by means of a rank array, which assigns a
    nonnegative rank to each element, and a weigher function, which
    assigns a weight based from the rank to each element. The weight of an
    exchange is then the sum or the product of the weights of the ranks of
    the exchanged elements. The default parameters compute
    :math:`\tau_\mathrm h`: an exchange between elements with rank
    :math:`r` and :math:`s` (starting from zero) has weight
    :math:`1/(r+1) + 1/(s+1)`.

    Specifying a rank array is meaningful only if you have in mind an
    external criterion of importance. If, as it usually happens, you do
    not have in mind a specific rank, the weighted :math:`\tau` is
    defined by averaging the values obtained using the decreasing
    lexicographical rank by (`x`, `y`) and by (`y`, `x`). This is the
    behavior with default parameters.

    Note that if you are computing the weighted :math:`\tau` on arrays of
    ranks, rather than of scores (i.e., a larger value implies a lower
    rank) you must negate the ranks, so that elements of higher rank are
    associated with a larger value.

    Parameters
    ----------
    x, y : array_like
        Arrays of scores, of the same shape. If arrays are not 1-D, they will
        be flattened to 1-D.
    rank: array_like of ints or bool, optional
        A nonnegative rank assigned to each element. If it is None, the
        decreasing lexicographical rank by (`x`, `y`) will be used: elements of
        higher rank will be those with larger `x`-values, using `y`-values to
        break ties (in particular, swapping `x` and `y` will give a different
        result). If it is False, the element indices will be used
        directly as ranks. The default is True, in which case this
        function returns the average of the values obtained using the
        decreasing lexicographical rank by (`x`, `y`) and by (`y`, `x`).
    weigher : callable, optional
        The weigher function. Must map nonnegative integers (zero
        representing the most important element) to a nonnegative weight.
        The default, None, provides hyperbolic weighing, that is,
        rank :math:`r` is mapped to weight :math:`1/(r+1)`.
    additive : bool, optional
        If True, the weight of an exchange is computed by adding the
        weights of the ranks of the exchanged elements; otherwise, the weights
        are multiplied. The default is True.

    Returns
    -------
    correlation : float
        The weighted :math:`\tau` correlation index.
    pvalue : float
        Presently ``np.nan``, as the null statistics is unknown (even in the
        additive hyperbolic case).

    See also
    --------
    kendalltau : Calculates Kendall's tau.
    spearmanr : Calculates a Spearman rank-order correlation coefficient.
    theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).

    Notes
    -----
    This function uses an :math:`O(n \log n)`, mergesort-based algorithm
    [1]_ that is a weighted extension of Knight's algorithm for Kendall's
    :math:`\tau` [2]_. It can compute Shieh's weighted :math:`\tau` [3]_
    between rankings without ties (i.e., permutations) by setting
    `additive` and `rank` to False, as the definition given in [1]_ is a
    generalization of Shieh's.
    NaNs are considered the smallest possible score.

    .. versionadded:: 0.19.0

    References
    ----------
    .. [1] Sebastiano Vigna, "A weighted correlation index for rankings with
           ties", Proceedings of the 24th international conference on World
           Wide Web, pp. 1166-1176, ACM, 2015.
    .. [2] W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
           Ungrouped Data", Journal of the American Statistical Association,
           Vol. 61, No. 314, Part 1, pp. 436-439, 1966.
    .. [3] Grace S. Shieh. "A weighted Kendall's tau statistic", Statistics &
           Probability Letters, Vol. 39, No. 1, pp. 17-24, 1998.

    Examples
    --------
    >>> from scipy import stats
    >>> x = [12, 2, 1, 12, 2]
    >>> y = [1, 4, 7, 1, 0]
    >>> tau, p_value = stats.weightedtau(x, y)
    >>> tau
    -0.56694968153682723
    >>> p_value
    nan
    >>> tau, p_value = stats.weightedtau(x, y, additive=False)
    >>> tau
    -0.62205716951801038
    NaNs are considered the smallest possible score:
    >>> x = [12, 2, 1, 12, 2]
    >>> y = [1, 4, 7, 1, np.nan]
    >>> tau, _ = stats.weightedtau(x, y)
    >>> tau
    -0.56694968153682723
    This is exactly Kendall's tau:
    >>> x = [12, 2, 1, 12, 2]
    >>> y = [1, 4, 7, 1, 0]
    >>> tau, _ = stats.weightedtau(x, y, weigher=lambda x: 1)
    >>> tau
    -0.47140452079103173
    >>> x = [12, 2, 1, 12, 2]
    >>> y = [1, 4, 7, 1, 0]
    >>> stats.weightedtau(x, y, rank=None)
    WeightedTauResult(correlation=-0.4157652301037516, pvalue=nan)
    >>> stats.weightedtau(y, x, rank=None)
    WeightedTauResult(correlation=-0.7181341329699028, pvalue=nan)
    """
    x = np.asarray(x).ravel()
    y = np.asarray(y).ravel()
    if x.size != y.size:
        raise ValueError("All inputs to `weightedtau` must be of the same size, "
                         "found x-size %s and y-size %s" % (x.size, y.size))
    if not x.size:
        return WeightedTauResult(np.nan, np.nan)  # Return NaN if arrays are empty

    # If there are NaNs we apply _toint64(), which maps NaN below every
    # other value (NaNs are documented as the smallest possible score).
    if np.isnan(np.sum(x)):
        x = _toint64(x)
    # BUG FIX: this second check previously re-tested `x`, so NaNs present
    # only in `y` were never converted and leaked into the computation.
    if np.isnan(np.sum(y)):
        y = _toint64(y)

    # Reduce unsupported dtypes to int64 ranks; mixed dtypes are unified.
    if x.dtype != y.dtype:
        if x.dtype != np.int64:
            x = _toint64(x)
        if y.dtype != np.int64:
            y = _toint64(y)
    else:
        if x.dtype not in (np.int32, np.int64, np.float32, np.float64):
            x = _toint64(x)
            y = _toint64(y)

    if rank is True:
        # Default: average the two decreasing lexicographical rankings,
        # by (x, y) and by (y, x), as described in the docstring.
        return WeightedTauResult((
            _weightedrankedtau(x, y, None, weigher, additive) +
            _weightedrankedtau(y, x, None, weigher, additive)
            ) / 2, np.nan)

    if rank is False:
        # Use element positions directly as ranks.
        rank = np.arange(x.size, dtype=np.intp)
    elif rank is not None:
        rank = np.asarray(rank).ravel()
        if rank.size != x.size:
            raise ValueError("All inputs to `weightedtau` must be of the same size, "
                             "found x-size %s and rank-size %s" % (x.size, rank.size))
    # rank is None here iff the caller asked for the (x, y) lexicographical rank.
    return WeightedTauResult(_weightedrankedtau(x, y, rank, weigher, additive), np.nan)
#####################################
# INFERENTIAL STATISTICS #
#####################################
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))


def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'):
    """
    Calculate the T-test for the mean of ONE group of scores.

    Two-sided test of the null hypothesis that the expected value (mean)
    of the sample of independent observations `a` equals the given
    population mean, `popmean`.

    Parameters
    ----------
    a : array_like
        sample observation
    popmean : float or array_like
        expected value in null hypothesis. If array_like, then it must have the
        same shape as `a` excluding the axis dimension
    axis : int or None, optional
        Axis along which to compute test. If None, compute over the whole
        array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    statistic : float or array
        t-statistic
    pvalue : float or array
        two-tailed p-value

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(7654567)  # fix seed to get the same result
    >>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2))
    >>> stats.ttest_1samp(rvs,5.0)
    (array([-0.68014479, -0.04323899]), array([ 0.49961383,  0.96568674]))
    >>> stats.ttest_1samp(rvs,0.0)
    (array([ 2.77025808,  4.11038784]), array([ 0.00789095,  0.00014999]))
    >>> stats.ttest_1samp(rvs,[5.0,0.0])
    (array([-0.68014479,  4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
    >>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1)
    (array([-0.68014479,  4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
    """
    a, axis = _chk_asarray(a, axis)

    has_nan, nan_policy = _contains_nan(a, nan_policy)
    if has_nan and nan_policy == 'omit':
        # Delegate the nan-ignoring computation to the masked-array version.
        return mstats_basic.ttest_1samp(ma.masked_invalid(a), popmean, axis)

    nobs = a.shape[axis]
    dof = nobs - 1
    delta = np.mean(a, axis) - popmean
    # Standard error of the mean (sample variance, ddof=1).
    stderr = np.sqrt(np.var(a, axis, ddof=1) / nobs)
    with np.errstate(divide='ignore', invalid='ignore'):
        tstat = np.divide(delta, stderr)
    tstat, pval = _ttest_finish(dof, tstat)
    return Ttest_1sampResult(tstat, pval)
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
    """Core two-sample t computation shared by ttest_ind / ttest_ind_from_stats."""
    with np.errstate(divide='ignore', invalid='ignore'):
        # t statistic: difference of means over its standard error.
        t = np.divide(mean1 - mean2, denom)
    return tuple(_ttest_finish(df, t))
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2.0
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))


def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
                         equal_var=True):
    """
    T-test for means of two independent samples from descriptive statistics.

    Two-sided test of the null hypothesis that two independent samples
    have identical average (expected) values, given only their summary
    statistics.

    Parameters
    ----------
    mean1 : array_like
        The mean(s) of sample 1.
    std1 : array_like
        The standard deviation(s) of sample 1.
    nobs1 : array_like
        The number(s) of observations of sample 1.
    mean2 : array_like
        The mean(s) of sample 2
    std2 : array_like
        The standard deviations(s) of sample 2.
    nobs2 : array_like
        The number(s) of observations of sample 2.
    equal_var : bool, optional
        If True (default), perform a standard independent 2 sample test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.

    Returns
    -------
    statistic : float or array
        The calculated t-statistics
    pvalue : float or array
        The two-tailed p-value.

    See Also
    --------
    scipy.stats.ttest_ind

    Notes
    -----
    .. versionadded:: 0.16.0

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test

    Examples
    --------
    >>> from scipy.stats import ttest_ind_from_stats
    >>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,
    ...                      mean2=12.0, std2=np.sqrt(39.0), nobs2=11)
    Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487)
    """
    # Choose the pooled (classic) or Welch standard-error/df computation.
    denom_func = _equal_var_ttest_denom if equal_var else _unequal_var_ttest_denom
    df, denom = denom_func(std1**2, nobs1, std2**2, nobs2)
    return Ttest_indResult(*_ttest_ind_from_stats(mean1, mean2, denom, df))
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'):
    """
    Calculate the T-test for the means of *two independent* samples of scores.

    Two-sided test of the null hypothesis that the two independent samples
    have identical average (expected) values. By default the populations
    are assumed to have identical variances.

    Parameters
    ----------
    a, b : array_like
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int or None, optional
        Axis along which to compute test. If None, compute over the whole
        arrays, `a`, and `b`.
    equal_var : bool, optional
        If True (default), perform a standard independent 2 sample test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.
        .. versionadded:: 0.11.0
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    statistic : float or array
        The calculated t-statistic.
    pvalue : float or array
        The two-tailed p-value.

    Notes
    -----
    Use this test when two independent samples are observed, e.g. exam
    scores of two groups. A large p-value (e.g. above 0.05 or 0.1) means
    the null hypothesis of identical averages cannot be rejected; a
    p-value below the chosen threshold rejects it.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(12345678)
    >>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
    >>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500)
    >>> stats.ttest_ind(rvs1,rvs2)
    (0.26833823296239279, 0.78849443369564776)
    >>> stats.ttest_ind(rvs1,rvs2, equal_var = False)
    (0.26833823296239279, 0.78849452749500748)
    """
    a, b, axis = _chk2_asarray(a, b, axis)

    # NaN handling: an 'omit' request from either input forces 'omit'.
    a_nan, policy_a = _contains_nan(a, nan_policy)
    b_nan, policy_b = _contains_nan(b, nan_policy)
    if policy_a == 'omit' or policy_b == 'omit':
        nan_policy = 'omit'
    if (a_nan or b_nan) and nan_policy == 'omit':
        # Delegate nan-ignoring computation to the masked-array version.
        return mstats_basic.ttest_ind(ma.masked_invalid(a),
                                      ma.masked_invalid(b), axis, equal_var)

    if a.size == 0 or b.size == 0:
        return Ttest_indResult(np.nan, np.nan)

    n1, n2 = a.shape[axis], b.shape[axis]
    v1, v2 = np.var(a, axis, ddof=1), np.var(b, axis, ddof=1)
    if equal_var:
        df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
    else:
        df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
    return Ttest_indResult(
        *_ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis), denom, df))
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))


def ttest_rel(a, b, axis=0, nan_policy='propagate'):
    """
    Calculate the T-test on TWO RELATED samples of scores, a and b.

    This is a two-sided test for the null hypothesis that 2 related or
    repeated samples have identical average (expected) values.

    Parameters
    ----------
    a, b : array_like
        The arrays must have the same shape.
    axis : int or None, optional
        Axis along which to compute test. If None, compute over the whole
        arrays, `a`, and `b`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    statistic : float or array
        t-statistic
    pvalue : float or array
        two-tailed p-value

    Notes
    -----
    Examples for the use are scores of the same set of student in
    different exams, or repeated sampling from the same units. The
    test measures whether the average score differs significantly
    across samples (e.g. exams). If we observe a large p-value, for
    example greater than 0.05 or 0.1 then we cannot reject the null
    hypothesis of identical average scores. If the p-value is smaller
    than the threshold, e.g. 1%, 5% or 10%, then we reject the null
    hypothesis of equal averages. Small p-values are associated with
    large t-statistics.

    References
    ----------
    https://en.wikipedia.org/wiki/T-test#Dependent_t-test_for_paired_samples

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(12345678) # fix random seed to get same numbers
    >>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
    >>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) +
    ...         stats.norm.rvs(scale=0.2,size=500))
    >>> stats.ttest_rel(rvs1,rvs2)
    (0.24101764965300962, 0.80964043445811562)
    >>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) +
    ...         stats.norm.rvs(scale=0.2,size=500))
    >>> stats.ttest_rel(rvs1,rvs3)
    (-3.9995108708727933, 7.3082402191726459e-005)
    """
    a, b, axis = _chk2_asarray(a, b, axis)

    # NaN handling: an 'omit' request from either input forces 'omit'.
    cna, npa = _contains_nan(a, nan_policy)
    cnb, npb = _contains_nan(b, nan_policy)
    contains_nan = cna or cnb
    if npa == 'omit' or npb == 'omit':
        nan_policy = 'omit'

    if contains_nan and nan_policy == 'omit':
        a = ma.masked_invalid(a)
        b = ma.masked_invalid(b)
        # Pair-wise deletion: a nan in either sample masks the whole pair.
        m = ma.mask_or(ma.getmask(a), ma.getmask(b))
        aa = ma.array(a, mask=m, copy=True)
        bb = ma.array(b, mask=m, copy=True)
        return mstats_basic.ttest_rel(aa, bb, axis)

    if a.shape[axis] != b.shape[axis]:
        raise ValueError('unequal length arrays')

    if a.size == 0 or b.size == 0:
        # FIX: return the named result type for consistency with the
        # non-empty path and with ttest_ind / ttest_1samp (previously a
        # bare tuple; namedtuple subclasses tuple so this is compatible).
        return Ttest_relResult(np.nan, np.nan)

    n = a.shape[axis]
    df = n - 1

    # One-sample t-test on the paired differences against zero.
    d = (a - b).astype(np.float64)
    v = np.var(d, axis, ddof=1)
    dm = np.mean(d, axis)
    denom = np.sqrt(v / n)

    with np.errstate(divide='ignore', invalid='ignore'):
        t = np.divide(dm, denom)
    t, prob = _ttest_finish(df, t)

    return Ttest_relResult(t, prob)
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
    """
    Perform the Kolmogorov-Smirnov test for goodness of fit.
    This performs a test of the distribution G(x) of an observed
    random variable against a given distribution F(x). Under the null
    hypothesis the two distributions are identical, G(x)=F(x). The
    alternative hypothesis can be either 'two-sided' (default), 'less'
    or 'greater'. The KS test is only valid for continuous distributions.
    Parameters
    ----------
    rvs : str, array or callable
        If a string, it should be the name of a distribution in `scipy.stats`.
        If an array, it should be a 1-D array of observations of random
        variables.
        If a callable, it should be a function to generate random variables;
        it is required to have a keyword argument `size`.
    cdf : str or callable
        If a string, it should be the name of a distribution in `scipy.stats`.
        If `rvs` is a string then `cdf` can be False or the same as `rvs`.
        If a callable, that callable is used to calculate the cdf.
    args : tuple, sequence, optional
        Distribution parameters, used if `rvs` or `cdf` are strings.
    N : int, optional
        Sample size if `rvs` is string or callable.  Default is 20.
    alternative : {'two-sided', 'less','greater'}, optional
        Defines the alternative hypothesis (see explanation above).
        Default is 'two-sided'.
    mode : 'approx' (default) or 'asymp', optional
        Defines the distribution used for calculating the p-value.
          - 'approx' : use approximation to exact distribution of test statistic
          - 'asymp' : use asymptotic distribution of test statistic
    Returns
    -------
    statistic : float
        KS test statistic, either D, D+ or D-.
    pvalue :  float
        One-tailed or two-tailed p-value.
    Notes
    -----
    In the one-sided test, the alternative is that the empirical
    cumulative distribution function of the random variable is "less"
    or "greater" than the cumulative distribution function F(x) of the
    hypothesis, ``G(x)<=F(x)``, resp. ``G(x)>=F(x)``.
    Examples
    --------
    >>> from scipy import stats
    >>> x = np.linspace(-15, 15, 9)
    >>> stats.kstest(x, 'norm')
    (0.44435602715924361, 0.038850142705171065)
    >>> np.random.seed(987654321) # set random seed to get the same result
    >>> stats.kstest('norm', False, N=100)
    (0.058352892479417884, 0.88531190944151261)
    The above lines are equivalent to:
    >>> np.random.seed(987654321)
    >>> stats.kstest(stats.norm.rvs(size=100), 'norm')
    (0.058352892479417884, 0.88531190944151261)
    *Test against one-sided alternative hypothesis*
    Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``:
    >>> np.random.seed(987654321)
    >>> x = stats.norm.rvs(loc=0.2, size=100)
    >>> stats.kstest(x,'norm', alternative = 'less')
    (0.12464329735846891, 0.040989164077641749)
    Reject equal distribution against alternative hypothesis: less
    >>> stats.kstest(x,'norm', alternative = 'greater')
    (0.0072115233216311081, 0.98531158590396395)
    Don't reject equal distribution against alternative hypothesis: greater
    >>> stats.kstest(x,'norm', mode='asymp')
    (0.12464329735846891, 0.08944488871182088)
    *Testing t distributed random variables against normal distribution*
    With 100 degrees of freedom the t distribution looks close to the normal
    distribution, and the K-S test does not reject the hypothesis that the
    sample came from the normal distribution:
    >>> np.random.seed(987654321)
    >>> stats.kstest(stats.t.rvs(100,size=100),'norm')
    (0.072018929165471257, 0.67630062862479168)
    With 3 degrees of freedom the t distribution looks sufficiently different
    from the normal distribution, that we can reject the hypothesis that the
    sample came from the normal distribution at the 10% level:
    >>> np.random.seed(987654321)
    >>> stats.kstest(stats.t.rvs(3,size=100),'norm')
    (0.131016895759829, 0.058826222555312224)
    """
    # Resolve a string `rvs` to the named scipy.stats distribution; `cdf`
    # must then be falsy or name the same distribution.
    if isinstance(rvs, string_types):
        if (not cdf) or (cdf == rvs):
            cdf = getattr(distributions, rvs).cdf
            rvs = getattr(distributions, rvs).rvs
        else:
            raise AttributeError("if rvs is string, cdf has to be the "
                                 "same distribution")
    if isinstance(cdf, string_types):
        cdf = getattr(distributions, cdf).cdf
    if callable(rvs):
        # Draw a fresh sample of size N from the generator.
        kwds = {'size': N}
        vals = np.sort(rvs(*args, **kwds))
    else:
        # `rvs` is data; N is overridden by the actual sample size.
        vals = np.sort(rvs)
        N = len(vals)
    cdfvals = cdf(vals, *args)
    # to not break compatibility with existing code
    if alternative == 'two_sided':
        alternative = 'two-sided'
    if alternative in ['two-sided', 'greater']:
        # D+ : largest amount the empirical CDF exceeds the hypothesized CDF.
        Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()
        if alternative == 'greater':
            return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))
    if alternative in ['two-sided', 'less']:
        # D- : largest amount the hypothesized CDF exceeds the empirical CDF.
        Dmin = (cdfvals - np.arange(0.0, N)/N).max()
        if alternative == 'less':
            return KstestResult(Dmin, distributions.ksone.sf(Dmin, N))
    if alternative == 'two-sided':
        D = np.max([Dplus, Dmin])
        if mode == 'asymp':
            return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N)))
        if mode == 'approx':
            pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
            # For large N (or when the asymptotic p is large enough) the
            # asymptotic distribution is adequate; otherwise use twice the
            # one-sided exact tail as the approximation.
            if N > 2666 or pval_two > 0.80 - N*0.3/1000:
                return KstestResult(D, pval_two)
            else:
                return KstestResult(D, 2 * distributions.ksone.sf(D, N))
    # NOTE(review): an unrecognized `alternative` or `mode` falls through
    # and implicitly returns None — confirm whether that is intentional.
# Map from names to lambda_ values used in power_divergence().
# Each name selects a member of the Cressie-Read power divergence family.
_power_div_lambda_names = {
    "pearson": 1,               # Pearson's chi-squared statistic
    "log-likelihood": 0,        # log-likelihood ratio (the G-test)
    "freeman-tukey": -0.5,      # Freeman-Tukey statistic
    "mod-log-likelihood": -1,   # modified log-likelihood ratio
    "neyman": -2,               # Neyman's statistic
    "cressie-read": 2/3,        # power recommended by Cressie & Read (1984)
}
def _count(a, axis=None):
"""
Count the number of non-masked elements of an array.
This function behaves like np.ma.count(), but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
# Result type for `power_divergence` (and, via delegation, `chisquare`).
Power_divergenceResult = namedtuple('Power_divergenceResult',
                                    ('statistic', 'pvalue'))


def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
    """
    Cressie-Read power divergence statistic and goodness of fit test.

    This function tests the null hypothesis that the categorical data
    has the given frequencies, using the Cressie-Read power divergence
    statistic.

    Parameters
    ----------
    f_obs : array_like
        Observed frequencies in each category.
    f_exp : array_like, optional
        Expected frequencies in each category.  By default the categories are
        assumed to be equally likely.
    ddof : int, optional
        "Delta degrees of freedom": adjustment to the degrees of freedom
        for the p-value.  The p-value is computed using a chi-squared
        distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
        is the number of observed frequencies.  The default value of `ddof`
        is 0.
    axis : int or None, optional
        The axis of the broadcast result of `f_obs` and `f_exp` along which to
        apply the test.  If axis is None, all values in `f_obs` are treated
        as a single data set.  Default is 0.
    lambda_ : float or str, optional
        `lambda_` gives the power in the Cressie-Read power divergence
        statistic.  The default is 1.  For convenience, `lambda_` may be
        assigned one of the following strings, in which case the
        corresponding numerical value is used::

            String              Value   Description
            "pearson"             1     Pearson's chi-squared statistic.
                                        In this case, the function is
                                        equivalent to `stats.chisquare`.
            "log-likelihood"      0     Log-likelihood ratio. Also known as
                                        the G-test [3]_.
            "freeman-tukey"      -1/2   Freeman-Tukey statistic.
            "mod-log-likelihood" -1     Modified log-likelihood ratio.
            "neyman"             -2     Neyman's statistic.
            "cressie-read"        2/3   The power recommended in [5]_.

    Returns
    -------
    statistic : float or ndarray
        The Cressie-Read power divergence test statistic.  The value is
        a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
    pvalue : float or ndarray
        The p-value of the test.  The value is a float if `ddof` and the
        return value `stat` are scalars.

    See Also
    --------
    chisquare

    Notes
    -----
    This test is invalid when the observed or expected frequencies in each
    category are too small.  A typical rule is that all of the observed
    and expected frequencies should be at least 5.

    When `lambda_` is less than zero, the formula for the statistic involves
    dividing by `f_obs`, so a warning or error may be generated if any value
    in `f_obs` is 0.

    Similarly, a warning or error may be generated if any value in `f_exp` is
    zero when `lambda_` >= 0.

    The default degrees of freedom, k-1, are for the case when no parameters
    of the distribution are estimated.  If p parameters are estimated by
    efficient maximum likelihood then the correct degrees of freedom are
    k-1-p.  If the parameters are estimated in a different way, then the
    dof can be between k-1-p and k-1.  However, it is also possible that
    the asymptotic distribution is not a chisquare, in which case this
    test is not appropriate.

    This function handles masked arrays.  If an element of `f_obs` or `f_exp`
    is masked, then data at that position is ignored, and does not count
    towards the size of the data set.

    .. versionadded:: 0.13.0

    References
    ----------
    .. [1] Lowry, Richard.  "Concepts and Applications of Inferential
           Statistics". Chapter 8.
           https://web.archive.org/web/20171015035606/http://faculty.vassar.edu/lowry/ch8pt1.html
    .. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
    .. [3] "G-test", https://en.wikipedia.org/wiki/G-test
    .. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
           practice of statistics in biological research", New York: Freeman
           (1981)
    .. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
           Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
           pp. 440-464.

    Examples
    --------
    (See `chisquare` for more examples.)

    When just `f_obs` is given, it is assumed that the expected frequencies
    are uniform and given by the mean of the observed frequencies.  Here we
    perform a G-test (i.e. use the log-likelihood ratio statistic):

    >>> from scipy.stats import power_divergence
    >>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
    (2.006573162632538, 0.84823476779463769)

    The expected frequencies can be given with the `f_exp` argument:

    >>> power_divergence([16, 18, 16, 14, 12, 12],
    ...                  f_exp=[16, 16, 16, 16, 16, 8],
    ...                  lambda_='log-likelihood')
    (3.3281031458963746, 0.6495419288047497)

    When `f_obs` is 2-D, by default the test is applied to each column.

    >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
    >>> obs.shape
    (6, 2)
    >>> power_divergence(obs, lambda_="log-likelihood")
    (array([ 2.00657316,  6.77634498]), array([ 0.84823477,  0.23781225]))

    By setting ``axis=None``, the test is applied to all data in the array,
    which is equivalent to applying the test to the flattened array.

    >>> power_divergence(obs, axis=None)
    (23.31034482758621, 0.015975692534127565)
    >>> power_divergence(obs.ravel())
    (23.31034482758621, 0.015975692534127565)

    `ddof` is the change to make to the default degrees of freedom.

    >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
    (2.0, 0.73575888234288467)

    The calculation of the p-values is done by broadcasting the
    test statistic with `ddof`.

    >>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
    (2.0, array([ 0.84914504,  0.73575888,  0.5724067 ]))

    `f_obs` and `f_exp` are also broadcast.  In the following, `f_obs` has
    shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
    `f_obs` and `f_exp` has shape (2, 6).  To compute the desired chi-squared
    statistics, we must use ``axis=1``:

    >>> power_divergence([16, 18, 16, 14, 12, 12],
    ...                  f_exp=[[16, 16, 16, 16, 16, 8],
    ...                         [8, 20, 20, 16, 12, 12]],
    ...                  axis=1)
    (array([ 3.5 ,  9.25]), array([ 0.62338763,  0.09949846]))

    """
    # Convert the input argument `lambda_` to a numerical value.
    if isinstance(lambda_, string_types):
        if lambda_ not in _power_div_lambda_names:
            names = repr(list(_power_div_lambda_names.keys()))[1:-1]
            raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
                             "are {1}".format(lambda_, names))
        lambda_ = _power_div_lambda_names[lambda_]
    elif lambda_ is None:
        # Default is Pearson's chi-squared statistic (lambda_ == 1).
        lambda_ = 1

    f_obs = np.asanyarray(f_obs)

    if f_exp is not None:
        f_exp = np.atleast_1d(np.asanyarray(f_exp))
    else:
        # Compute the equivalent of
        #     f_exp = f_obs.mean(axis=axis, keepdims=True)
        # Older versions of numpy do not have the 'keepdims' argument, so
        # we have to do a little work to achieve the same result.
        # Ignore 'invalid' errors so the edge case of a data set with length 0
        # is handled without spurious warnings.
        with np.errstate(invalid='ignore'):
            f_exp = np.atleast_1d(f_obs.mean(axis=axis))
        if axis is not None:
            reduced_shape = list(f_obs.shape)
            reduced_shape[axis] = 1
            f_exp.shape = reduced_shape

    # `terms` is the array of terms that are summed along `axis` to create
    # the test statistic.  We use some specialized code for a few special
    # cases of lambda_.
    if lambda_ == 1:
        # Pearson's chi-squared statistic
        terms = (f_obs - f_exp)**2 / f_exp
    elif lambda_ == 0:
        # Log-likelihood ratio (i.e. G-test)
        terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
    elif lambda_ == -1:
        # Modified log-likelihood ratio
        terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
    else:
        # General Cressie-Read power divergence.
        terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
        terms /= 0.5 * lambda_ * (lambda_ + 1)

    stat = terms.sum(axis=axis)

    # Degrees of freedom: number of (unmasked) terms minus one, minus the
    # user-supplied adjustment `ddof` (broadcast against `stat`).
    num_obs = _count(terms, axis=axis)
    ddof = asarray(ddof)
    p = distributions.chi2.sf(stat, num_obs - 1 - ddof)

    return Power_divergenceResult(stat, p)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
    """
    Calculate a one-way chi square test.

    The null hypothesis under test is that the categorical data has the
    given expected frequencies.

    Parameters
    ----------
    f_obs : array_like
        Observed frequencies in each category.
    f_exp : array_like, optional
        Expected frequencies in each category.  When omitted, all
        categories are assumed equally likely.
    ddof : int, optional
        "Delta degrees of freedom": adjustment to the degrees of freedom
        for the p-value.  The p-value is computed from a chi-squared
        distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
        is the number of observed frequencies.  Default is 0.
    axis : int or None, optional
        Axis of the broadcast result of `f_obs` and `f_exp` along which
        to apply the test.  With ``axis=None`` all values in `f_obs` are
        treated as a single data set.  Default is 0.

    Returns
    -------
    chisq : float or ndarray
        The chi-squared test statistic.  A float when `axis` is None or
        when `f_obs` and `f_exp` are 1-D.
    p : float or ndarray
        The p-value of the test.  A float when `ddof` and the returned
        `chisq` are scalars.

    See Also
    --------
    power_divergence
    mstats.chisquare

    Notes
    -----
    The test is invalid when observed or expected frequencies are too
    small; a common rule of thumb requires every frequency to be at
    least 5.

    The default degrees of freedom, k-1, apply when no parameters of the
    distribution were estimated.  With p parameters estimated by
    efficient maximum likelihood the correct degrees of freedom are
    k-1-p; other estimation methods give a dof between k-1-p and k-1.
    The asymptotic distribution may also fail to be chi-squared at all,
    in which case this test is not appropriate.

    References
    ----------
    .. [1] Lowry, Richard.  "Concepts and Applications of Inferential
           Statistics". Chapter 8.
           https://web.archive.org/web/20171022032306/http://vassarstats.net:80/textbook/ch8pt1.html
    .. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test

    Examples
    --------
    With only `f_obs`, the expected frequencies are taken to be uniform
    (the mean of the observed frequencies):

    >>> from scipy.stats import chisquare
    >>> chisquare([16, 18, 16, 14, 12, 12])
    (2.0, 0.84914503608460956)

    Explicit expected frequencies:

    >>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
    (3.5, 0.62338762774958223)

    A 2-D `f_obs` is tested column by column:

    >>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
    >>> chisquare(obs)
    (array([ 2.        ,  6.66666667]), array([ 0.84914504,  0.24663415]))

    ``axis=None`` pools all the data, and `ddof` broadcasts against the
    statistic:

    >>> chisquare(obs, axis=None)
    (23.31034482758621, 0.015975692534127565)
    >>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
    (2.0, array([ 0.84914504,  0.73575888,  0.5724067 ]))
    """
    # The chi-squared statistic is exactly the lambda_ == 1 ("pearson")
    # member of the Cressie-Read power divergence family, so delegate.
    return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
                            lambda_="pearson")
Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue'))


def ks_2samp(data1, data2):
    """
    Compute the Kolmogorov-Smirnov statistic on 2 samples.

    A two-sided test of the null hypothesis that the two independent
    samples were drawn from the same continuous distribution.

    Parameters
    ----------
    data1, data2 : sequence of 1-D ndarrays
        Two arrays of sample observations, assumed drawn from a
        continuous distribution; the sample sizes may differ.

    Returns
    -------
    statistic : float
        KS statistic (largest absolute difference of the empirical CDFs).
    pvalue : float
        Two-tailed p-value.

    Notes
    -----
    As with the one-sample K-S test, the underlying distribution is
    assumed continuous.  Only the two-sided test is implemented; the
    p-value comes from the two-sided asymptotic Kolmogorov-Smirnov
    distribution.  A small statistic or large p-value means the
    hypothesis of identical distributions cannot be rejected.

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(12345678)
    >>> rvs1 = stats.norm.rvs(size=200, loc=0., scale=1)
    >>> rvs2 = stats.norm.rvs(size=300, loc=0.5, scale=1.5)
    >>> stats.ks_2samp(rvs1, rvs2)
    (0.20833333333333337, 4.6674975515806989e-005)
    """
    sorted1 = np.sort(data1)
    sorted2 = np.sort(data2)
    n1 = sorted1.shape[0]
    n2 = sorted2.shape[0]
    pooled = np.concatenate([sorted1, sorted2])
    # Empirical CDFs of both samples, evaluated at every pooled point.
    cdf1 = np.searchsorted(sorted1, pooled, side='right') / n1
    cdf2 = np.searchsorted(sorted2, pooled, side='right') / n2
    # KS statistic: maximum absolute distance between the ECDFs.
    d = np.max(np.absolute(cdf1 - cdf2))
    # Asymptotic two-sided p-value with small-sample correction factor.
    en = np.sqrt(n1 * n2 / (n1 + n2))
    try:
        prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d)
    except Exception:
        warnings.warn('This should not happen! Please open an issue at '
                      'https://github.com/scipy/scipy/issues and provide the code '
                      'you used to trigger this warning.\n')
        prob = 1.0
    return Ks_2sampResult(d, prob)
def tiecorrect(rankvals):
    """
    Tie correction factor for ties in the Mann-Whitney U and
    Kruskal-Wallis H tests.

    Parameters
    ----------
    rankvals : array_like
        A 1-D sequence of ranks, typically from `stats.rankdata`.

    Returns
    -------
    factor : float
        Correction factor for U or H (1.0 when there are no ties).

    See Also
    --------
    rankdata : Assign ranks to the data
    mannwhitneyu : Mann-Whitney rank test
    kruskal : Kruskal-Wallis H test

    References
    ----------
    .. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral
           Sciences.  New York: McGraw-Hill.

    Examples
    --------
    >>> from scipy.stats import tiecorrect, rankdata
    >>> tiecorrect([1, 2.5, 2.5, 4])
    0.9
    >>> tiecorrect(rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4]))
    0.9833333333333333
    """
    ranks = np.sort(rankvals)
    size = np.float64(ranks.size)
    if size < 2:
        # Fewer than two observations: no ties possible by definition.
        return 1.0
    # Positions where a new distinct value begins; consecutive
    # differences are therefore the sizes of the tie groups.
    boundaries = np.nonzero(np.r_[True, ranks[1:] != ranks[:-1], True])[0]
    group_sizes = np.diff(boundaries).astype(np.float64)
    return 1.0 - (group_sizes**3 - group_sizes).sum() / (size**3 - size)
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue'))


def mannwhitneyu(x, y, use_continuity=True, alternative=None):
    """
    Compute the Mann-Whitney rank test on samples x and y.

    Parameters
    ----------
    x, y : array_like
        Arrays of samples; should be one-dimensional.
    use_continuity : bool, optional
        Whether to apply the continuity correction (1/2.).  Default True.
    alternative : None (deprecated), 'less', 'two-sided', or 'greater'
        Which hypothesis to compute the p-value for.  ``None`` (the
        deprecated default) yields half the two-sided p-value and a
        different U statistic; it exists only for backward compatibility.

    Returns
    -------
    statistic : float
        The Mann-Whitney U statistic: ``min(U for x, U for y)`` when
        `alternative` is None (deprecated), otherwise U for y.
    pvalue : float
        p-value from the asymptotic normal approximation, one- or
        two-sided depending on `alternative`.

    Notes
    -----
    Use only with more than 20 observations per sample and two
    independent samples of ranks.  U is significant when the obtained u
    is less than or equal to the critical value.  Ties are corrected for
    and a continuity correction is applied by default.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Mann-Whitney_U_test
    .. [2] H.B. Mann and D.R. Whitney, "On a Test of Whether one of Two Random
           Variables is Stochastically Larger than the Other," The Annals of
           Mathematical Statistics, vol. 18, no. 1, pp. 50-60, 1947.
    """
    if alternative is None:
        warnings.warn("Calling `mannwhitneyu` without specifying "
                      "`alternative` is deprecated.", DeprecationWarning)

    x = np.asarray(x)
    y = np.asarray(y)
    n1 = len(x)
    n2 = len(y)

    # Rank the pooled data; the first n1 ranks belong to x.
    combined_ranks = rankdata(np.concatenate((x, y)))
    u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(combined_ranks[0:n1], axis=0)
    u2 = n1*n2 - u1  # the two U statistics always sum to n1*n2

    tie_factor = tiecorrect(combined_ranks)
    if tie_factor == 0:
        raise ValueError('All numbers are identical in mannwhitneyu')
    sd = np.sqrt(tie_factor * n1 * n2 * (n1+n2+1) / 12.0)

    meanrank = n1*n2/2.0 + 0.5 * use_continuity
    if alternative is None or alternative == 'two-sided':
        bigu = max(u1, u2)
    elif alternative == 'less':
        bigu = u1
    elif alternative == 'greater':
        bigu = u2
    else:
        raise ValueError("alternative should be None, 'less', 'greater' "
                         "or 'two-sided'")

    z = (bigu - meanrank) / sd
    if alternative is None:
        # Deprecated behavior: half the size of the two-sided p-value.
        p = distributions.norm.sf(abs(z))
    elif alternative == 'two-sided':
        p = 2 * distributions.norm.sf(abs(z))
    else:
        p = distributions.norm.sf(z)

    # Deprecated: with alternative=None report the smaller U.
    u = min(u1, u2) if alternative is None else u2
    return MannwhitneyuResult(u, p)
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))


def ranksums(x, y):
    """
    Compute the Wilcoxon rank-sum statistic for two samples.

    Tests the null hypothesis that both sets of measurements come from
    the same distribution, against the alternative that values in one
    sample tend to be larger than in the other.

    Intended for two samples from continuous distributions; ties between
    x and y are not handled.  For tie handling and an optional
    continuity correction see `scipy.stats.mannwhitneyu`.

    Parameters
    ----------
    x,y : array_like
        The data from the two samples

    Returns
    -------
    statistic : float
        The test statistic under the large-sample normal approximation
        of the rank-sum statistic.
    pvalue : float
        The two-sided p-value of the test

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
    """
    x, y = map(np.asarray, (x, y))
    n1 = len(x)
    n2 = len(y)
    ranked = rankdata(np.concatenate((x, y)))
    # Sum of the ranks belonging to the first sample.
    rank_sum = np.sum(ranked[:n1], axis=0)
    # Normalize against the null mean and standard deviation of the sum.
    expected = n1 * (n1+n2+1) / 2.0
    z = (rank_sum - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
    prob = 2 * distributions.norm.sf(abs(z))
    return RanksumsResult(z, prob)
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))


def kruskal(*args, **kwargs):
    """
    Compute the Kruskal-Wallis H-test for independent samples

    The Kruskal-Wallis H-test tests the null hypothesis that the population
    median of all of the groups are equal.  It is a non-parametric version of
    ANOVA.  The test works on 2 or more independent samples, which may have
    different sizes.  Note that rejecting the null hypothesis does not
    indicate which of the groups differs.  Post-hoc comparisons between
    groups are required to determine which groups are different.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        Two or more arrays with the sample measurements can be given as
        arguments.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    statistic : float
        The Kruskal-Wallis H statistic, corrected for ties
    pvalue : float
        The p-value for the test using the assumption that H has a chi
        square distribution

    See Also
    --------
    f_oneway : 1-way ANOVA
    mannwhitneyu : Mann-Whitney rank test on two samples.
    friedmanchisquare : Friedman test for repeated measurements

    Notes
    -----
    Due to the assumption that H has a chi square distribution, the number
    of samples in each group must not be too small.  A typical rule is
    that each sample must have at least 5 measurements.

    References
    ----------
    .. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
           One-Criterion Variance Analysis", Journal of the American Statistical
           Association, Vol. 47, Issue 260, pp. 583-621, 1952.
    .. [2] https://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance

    Examples
    --------
    >>> from scipy import stats
    >>> x = [1, 3, 5, 7, 9]
    >>> y = [2, 4, 6, 8, 10]
    >>> stats.kruskal(x, y)
    KruskalResult(statistic=0.2727272727272734, pvalue=0.6015081344405895)

    >>> x = [1, 1, 1]
    >>> y = [2, 2, 2]
    >>> z = [2, 2]
    >>> stats.kruskal(x, y, z)
    KruskalResult(statistic=7.0, pvalue=0.0301973834223185)
    """
    args = list(map(np.asarray, args))
    num_groups = len(args)
    if num_groups < 2:
        raise ValueError("Need at least two groups in stats.kruskal()")

    for arg in args:
        if arg.size == 0:
            # An empty group leaves the statistic undefined.
            return KruskalResult(np.nan, np.nan)
    n = np.asarray(list(map(len, args)))

    if 'nan_policy' in kwargs.keys():
        if kwargs['nan_policy'] not in ('propagate', 'raise', 'omit'):
            # (fixed: message previously read "'raise' or'omit'")
            raise ValueError("nan_policy must be 'propagate', "
                             "'raise' or 'omit'")
        else:
            nan_policy = kwargs['nan_policy']
    else:
        nan_policy = 'propagate'

    # `_contains_nan` raises for nan_policy == 'raise'; otherwise report
    # whether any group contains a NaN.
    contains_nan = False
    for arg in args:
        cn = _contains_nan(arg, nan_policy)
        if cn[0]:
            contains_nan = True
            break

    if contains_nan and nan_policy == 'omit':
        # Mask the invalid values and delegate to the masked-array
        # implementation.  (Fixed: the previous loop only rebound its
        # local variable, so the masking had no effect and raw NaNs were
        # passed straight through to mstats_basic.kruskal.)
        args = [ma.masked_invalid(a) for a in args]
        return mstats_basic.kruskal(*args)

    if contains_nan and nan_policy == 'propagate':
        return KruskalResult(np.nan, np.nan)

    alldata = np.concatenate(args)
    ranked = rankdata(alldata)
    ties = tiecorrect(ranked)
    if ties == 0:
        raise ValueError('All numbers are identical in kruskal')

    # Compute sum^2/n for each group and sum
    j = np.insert(np.cumsum(n), 0, 0)
    ssbn = 0
    for i in range(num_groups):
        ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / n[i]

    totaln = np.sum(n)
    h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
    df = num_groups - 1
    h /= ties  # correct the statistic for ties

    return KruskalResult(h, distributions.chi2.sf(h, df))
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
                                     ('statistic', 'pvalue'))


def friedmanchisquare(*args):
    """
    Compute the Friedman test for repeated measurements

    Tests the null hypothesis that repeated measurements of the same
    individuals have the same distribution.  Often used to check
    consistency between measurements obtained in different ways, e.g.
    two measurement techniques applied to the same set of individuals.

    Parameters
    ----------
    measurements1, measurements2, measurements3... : array_like
        Arrays of measurements.  All arrays must contain the same number
        of elements, and at least 3 sets of measurements are required.

    Returns
    -------
    statistic : float
        the test statistic, correcting for ties
    pvalue : float
        the associated p-value assuming that the test statistic has a chi
        squared distribution

    Notes
    -----
    The chi-squared approximation for the statistic is only reliable for
    n > 10 and more than 6 repeated measurements.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Friedman_test
    """
    k = len(args)
    if k < 3:
        raise ValueError('Less than 3 levels. Friedman test not appropriate.')

    n = len(args[0])
    for i in range(1, k):
        if len(args[i]) != n:
            raise ValueError('Unequal N in friedmanchisquare. Aborting.')

    # One row per individual, one column per treatment; rank within rows.
    data = np.vstack(args).T.astype(float)
    for i in range(len(data)):
        data[i] = rankdata(data[i])

    # Tie correction: accumulate t*(t^2-1) over every tie group per row.
    ties = 0
    for row in data:
        _, repnum = find_repeats(array(row))
        for t in repnum:
            ties += t * (t*t - 1)
    c = 1 - ties / (k*(k*k - 1)*n)

    ssbn = np.sum(data.sum(axis=0)**2)
    chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
    return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
# Result type for `brunnermunzel`.
BrunnerMunzelResult = namedtuple('BrunnerMunzelResult',
                                 ('statistic', 'pvalue'))


def brunnermunzel(x, y, alternative="two-sided", distribution="t",
                  nan_policy='propagate'):
    """
    Computes the Brunner-Munzel test on samples x and y

    The Brunner-Munzel test is a nonparametric test of the null hypothesis that
    when values are taken one by one from each group, the probabilities of
    getting large values in both groups are equal.
    Unlike the Wilcoxon-Mann-Whitney's U test, this does not require the
    assumption of equivariance of two groups.  Note that this does not assume
    the distributions are same.  This test works on two independent samples,
    which may have different sizes.

    Parameters
    ----------
    x, y : array_like
        Array of samples, should be one-dimensional.
    alternative : 'less', 'two-sided', or 'greater', optional
        Whether to get the p-value for the one-sided hypothesis ('less'
        or 'greater') or for the two-sided hypothesis ('two-sided').
        Default value is 'two-sided'.
    distribution : 't' or 'normal', optional
        Whether to get the p-value by t-distribution or by standard normal
        distribution.  Default value is 't'.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    statistic : float
        The Brunner-Munzel W statistic.
    pvalue : float
        p-value assuming a t distribution.  One-sided or two-sided,
        depending on the choice of `alternative` and `distribution`.

    See Also
    --------
    mannwhitneyu : Mann-Whitney rank test on two samples.

    Notes
    -----
    Brunner and Munzel recommended to estimate the p-value by t-distribution
    when the size of data is 50 or less.  If the size is lower than 10, it
    would be better to use permuted Brunner Munzel test (see [2]_).

    References
    ----------
    .. [1] Brunner, E. and Munzel, U. "The nonparametric Behrens-Fisher
           problem: Asymptotic theory and a small-sample approximation".
           Biometrical Journal. Vol. 42(2000): 17-25.
    .. [2] Neubert, K. and Brunner, E. "A studentized permutation test for the
           non-parametric Behrens-Fisher problem". Computational Statistics and
           Data Analysis. Vol. 51(2007): 5192-5204.

    Examples
    --------
    >>> from scipy import stats
    >>> x1 = [1,2,1,1,1,1,1,1,1,1,2,4,1,1]
    >>> x2 = [3,3,4,3,1,2,3,1,1,5,4]
    >>> w, p_value = stats.brunnermunzel(x1, x2)
    >>> w
    3.1374674823029505
    >>> p_value
    0.0057862086661515377
    """
    x = np.asarray(x)
    y = np.asarray(y)

    # check both x and y for NaN; `_contains_nan` raises when the
    # policy is 'raise'.
    cnx, npx = _contains_nan(x, nan_policy)
    cny, npy = _contains_nan(y, nan_policy)
    contains_nan = cnx or cny
    if npx == "omit" or npy == "omit":
        nan_policy = "omit"

    if contains_nan and nan_policy == "propagate":
        return BrunnerMunzelResult(np.nan, np.nan)
    elif contains_nan and nan_policy == "omit":
        # Delegate NaN-omitting behavior to the masked-array version.
        x = ma.masked_invalid(x)
        y = ma.masked_invalid(y)
        return mstats_basic.brunnermunzel(x, y, alternative, distribution)

    nx = len(x)
    ny = len(y)
    if nx == 0 or ny == 0:
        # Empty sample: statistic is undefined.
        return BrunnerMunzelResult(np.nan, np.nan)
    # Ranks in the combined sample and within each sample separately.
    rankc = rankdata(np.concatenate((x, y)))
    rankcx = rankc[0:nx]
    rankcy = rankc[nx:nx+ny]
    rankcx_mean = np.mean(rankcx)
    rankcy_mean = np.mean(rankcy)
    rankx = rankdata(x)
    ranky = rankdata(y)
    rankx_mean = np.mean(rankx)
    ranky_mean = np.mean(ranky)

    # Sample variances of the rank differences (Brunner & Munzel, 2000).
    Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0))
    Sx /= nx - 1
    Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0))
    Sy /= ny - 1

    # The Brunner-Munzel W statistic.
    wbfn = nx * ny * (rankcy_mean - rankcx_mean)
    wbfn /= (nx + ny) * np.sqrt(nx * Sx + ny * Sy)

    if distribution == "t":
        # Degrees of freedom via the Satterthwaite-type approximation.
        df_numer = np.power(nx * Sx + ny * Sy, 2.0)
        df_denom = np.power(nx * Sx, 2.0) / (nx - 1)
        df_denom += np.power(ny * Sy, 2.0) / (ny - 1)
        df = df_numer / df_denom
        p = distributions.t.cdf(wbfn, df)
    elif distribution == "normal":
        p = distributions.norm.cdf(wbfn)
    else:
        raise ValueError(
            "distribution should be 't' or 'normal'")

    # Convert the one-sided CDF value to the requested alternative.
    if alternative == "greater":
        p = p
    elif alternative == "less":
        p = 1 - p
    elif alternative == "two-sided":
        p = 2 * np.min([p, 1-p])
    else:
        raise ValueError(
            "alternative should be 'less', 'greater' or 'two-sided'")

    return BrunnerMunzelResult(wbfn, p)
def combine_pvalues(pvalues, method='fisher', weights=None):
    """
    Combine the p-values of independent tests bearing upon the same hypothesis.

    Parameters
    ----------
    pvalues : array_like, 1-D
        Array of p-values assumed to come from independent tests.
    method : {'fisher', 'stouffer'}, optional
        Name of method to use to combine p-values. The following methods are
        available:

        - "fisher": Fisher's method (Fisher's combined probability test),
          the default.
        - "stouffer": Stouffer's Z-score method.
    weights : array_like, 1-D, optional
        Optional array of weights used only for Stouffer's Z-score method.

    Returns
    -------
    statistic : float
        The statistic calculated by the specified method:
        - "fisher": The chi-squared statistic
        - "stouffer": The Z-score
    pval : float
        The combined p-value.

    Raises
    ------
    ValueError
        If `pvalues` (or `weights`) is not 1-D, if their lengths differ,
        or if `method` is not one of the supported names.

    Notes
    -----
    Fisher's method [1]_ uses a chi-squared statistic to compute a combined
    p-value. The closely related Stouffer's Z-score method [2]_ uses Z-scores
    rather than p-values; its advantage is that it is straightforward to
    introduce weights, which can make it more powerful than Fisher's method
    when the p-values are from studies of different size.

    .. versionadded:: 0.15.0

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
    .. [2] https://en.wikipedia.org/wiki/Fisher%27s_method#Relation_to_Stouffer.27s_Z-score_method
    """
    pvalues = np.asarray(pvalues)
    if pvalues.ndim != 1:
        raise ValueError("pvalues is not 1-D")

    if method == 'fisher':
        # -2 * sum(log(p)) follows a chi-squared distribution with
        # 2k degrees of freedom under the null (k = number of tests).
        Xsq = -2 * np.sum(np.log(pvalues))
        pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))
        return (Xsq, pval)
    elif method == 'stouffer':
        if weights is None:
            weights = np.ones_like(pvalues)
        elif len(weights) != len(pvalues):
            raise ValueError("pvalues and weights must be of the same size.")

        weights = np.asarray(weights)
        if weights.ndim != 1:
            raise ValueError("weights is not 1-D")

        # Weighted sum of Z-scores, normalised so that Z ~ N(0, 1) under
        # the null hypothesis.
        Zi = distributions.norm.isf(pvalues)
        Z = np.dot(weights, Zi) / np.linalg.norm(weights)
        pval = distributions.norm.sf(Z)
        return (Z, pval)
    else:
        # BUG FIX: the message and `method` were previously passed as two
        # separate ValueError arguments (stray comma instead of %), so the
        # placeholder was never substituted.
        raise ValueError(
            "Invalid method '%s'. Options are 'fisher' or 'stouffer'" % method)
#####################################
# STATISTICAL DISTANCES #
#####################################
def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None):
    r"""
    Compute the first Wasserstein distance between two 1D distributions.

    Also known as the earth mover's distance: the minimum amount of
    "distribution weight times distance moved" needed to transform
    :math:`u` into :math:`v`.  For one-dimensional distributions with
    CDFs :math:`U` and :math:`V` this equals

    .. math::

        l_1(u, v) = \int_{-\infty}^{+\infty} |U-V|

    .. versionadded:: 1.0.0

    Parameters
    ----------
    u_values, v_values : array_like
        Values observed in each (empirical) distribution.
    u_weights, v_weights : array_like, optional
        Weight for each value; equal weights if unspecified.  Each weight
        array must match the length of its value array, and its sum must be
        positive and finite (weights are normalized to sum to 1).

    Returns
    -------
    distance : float
        The computed distance between the distributions.

    References
    ----------
    .. [1] "Wasserstein metric",
           https://en.wikipedia.org/wiki/Wasserstein_metric
    .. [2] Ramdas, Garcia, Cuturi "On Wasserstein Two Sample Testing and
           Related Families of Nonparametric Tests" (2015).
           :arXiv:`1509.02237`.

    Examples
    --------
    >>> from scipy.stats import wasserstein_distance
    >>> wasserstein_distance([0, 1, 3], [5, 6, 8])
    5.0
    >>> wasserstein_distance([0, 1], [0, 1], [3, 1], [2, 2])
    0.25
    """
    # The Wasserstein distance is the p = 1 member of the family of
    # CDF-based distances implemented by _cdf_distance.
    return _cdf_distance(1, u_values, v_values, u_weights, v_weights)
def energy_distance(u_values, v_values, u_weights=None, v_weights=None):
    r"""
    Compute the energy distance between two 1D distributions.

    For distributions :math:`u` and :math:`v` with CDFs :math:`U` and
    :math:`V`:

    .. math::

        D(u, v) = \left( 2\mathbb E|X - Y| - \mathbb E|X - X'| -
        \mathbb E|Y - Y'| \right)^{1/2}
              = \sqrt{2} \left( \int_{-\infty}^{+\infty} (U-V)^2
        \right)^{1/2}

    i.e. :math:`\sqrt{2}` times the (non-distribution-free) Cramer-von
    Mises distance; see [2]_, section 2.

    .. versionadded:: 1.0.0

    Parameters
    ----------
    u_values, v_values : array_like
        Values observed in each (empirical) distribution.
    u_weights, v_weights : array_like, optional
        Weight for each value; equal weights if unspecified.  Each weight
        array must match the length of its value array, and its sum must be
        positive and finite (weights are normalized to sum to 1).

    Returns
    -------
    distance : float
        The computed distance between the distributions.

    References
    ----------
    .. [1] "Energy distance",
           https://en.wikipedia.org/wiki/Energy_distance
    .. [2] Szekely "E-statistics: The energy of statistical samples."
           Bowling Green State University, Department of Mathematics and
           Statistics, Technical Report 02-16 (2002).

    Examples
    --------
    >>> from scipy.stats import energy_distance
    >>> energy_distance([0], [2])
    2.0000000000000004
    >>> energy_distance([0, 8], [0, 8], [3, 1], [2, 2])
    1.0000000000000002
    """
    # The energy distance is sqrt(2) times the p = 2 member of the family
    # of CDF-based distances implemented by _cdf_distance.
    return np.sqrt(2) * _cdf_distance(2, u_values, v_values,
                                      u_weights, v_weights)
def _cdf_distance(p, u_values, v_values, u_weights=None, v_weights=None):
    r"""
    Compute the statistical distance

    .. math::

        l_p(u, v) = \left( \int_{-\infty}^{+\infty} |U-V|^p \right)^{1/p}

    between two one-dimensional distributions :math:`u` and :math:`v` with
    respective CDFs :math:`U` and :math:`V`.  ``p = 1`` gives the
    Wasserstein distance, ``p = 2`` the (scaled) energy distance.

    Parameters
    ----------
    p : positive number
        Order of the distance.
    u_values, v_values : array_like
        Values observed in each (empirical) distribution.
    u_weights, v_weights : array_like, optional
        Weight for each value; equal weights if unspecified.

    Returns
    -------
    distance : float
        The computed distance between the distributions.
    """
    u_values, u_weights = _validate_distribution(u_values, u_weights)
    v_values, v_weights = _validate_distribution(v_values, v_weights)

    # Pool both samples: the grid of points at which either ECDF can change.
    pooled = np.concatenate((u_values, v_values))
    pooled.sort(kind='mergesort')
    # Width of each interval between successive pooled values.
    gaps = np.diff(pooled)

    def ecdf_on_grid(values, weights):
        # Empirical CDF of `values` (optionally weighted), evaluated at the
        # left endpoint of every interval of the pooled grid.
        order = np.argsort(values)
        positions = values[order].searchsorted(pooled[:-1], 'right')
        if weights is None:
            return positions / values.size
        cumweights = np.concatenate(([0],
                                     np.cumsum(weights[order])))
        return cumweights[positions] / cumweights[-1]

    u_cdf = ecdf_on_grid(u_values, u_weights)
    v_cdf = ecdf_on_grid(v_values, v_weights)

    # Integrate |U - V|^p over the pooled grid.  The p = 1 and p = 2 cases
    # avoid np.power, which costs roughly 15% extra.
    diffs = np.abs(u_cdf - v_cdf)
    if p == 1:
        return np.sum(np.multiply(diffs, gaps))
    if p == 2:
        return np.sqrt(np.sum(np.multiply(np.square(diffs), gaps)))
    return np.power(np.sum(np.multiply(np.power(diffs, p), gaps)), 1/p)
def _validate_distribution(values, weights):
"""
Validate the values and weights from a distribution input of `cdf_distance`
and return them as ndarray objects.
Parameters
----------
values : array_like
Values observed in the (empirical) distribution.
weights : array_like
Weight for each value.
Returns
-------
values : ndarray
Values as ndarray.
weights : ndarray
Weights as ndarray.
"""
# Validate the value array.
values = np.asarray(values, dtype=float)
if len(values) == 0:
raise ValueError("Distribution can't be empty.")
# Validate the weight array, if specified.
if weights is not None:
weights = np.asarray(weights, dtype=float)
if len(weights) != len(values):
raise ValueError('Value and weight array-likes for the same '
'empirical distribution must be of the same size.')
if np.any(weights < 0):
raise ValueError('All weights must be non-negative.')
if not 0 < np.sum(weights) < np.inf:
raise ValueError('Weight array-like sum must be positive and '
'finite. Set as None for an equal distribution of '
'weight.')
return values, weights
return values, None
#####################################
# SUPPORT FUNCTIONS #
#####################################
# Result container for find_repeats: parallel arrays of the repeated values
# and the number of times each one occurs.
RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts'))
def find_repeats(arr):
    """
    Find repeated values and their repeat counts.

    Parameters
    ----------
    arr : array_like
        Input array. This is cast to float64.

    Returns
    -------
    values : ndarray
        The unique values from the (flattened) input that are repeated.
    counts : ndarray
        Number of times the corresponding 'value' is repeated.

    Notes
    -----
    In numpy >= 1.9 `numpy.unique` provides similar functionality; the main
    difference is that `find_repeats` only returns repeated values.

    Examples
    --------
    >>> from scipy import stats
    >>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
    RepeatedResults(values=array([2.]), counts=array([4]))
    >>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
    RepeatedResults(values=array([4., 5.]), counts=array([2, 2]))
    """
    # np.array (unlike np.asarray) always copies, so the caller's data is
    # never handed to _find_repeats directly.
    data = np.array(arr, dtype=np.float64)
    values, counts = _find_repeats(data)
    return RepeatedResults(values, counts)
def _sum_of_squares(a, axis=0):
    """
    Return the sum of the squared elements of `a` along `axis`.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    sum_of_squares : ndarray
        The sum along the given axis for (a**2).

    See also
    --------
    _square_of_sums : The square(s) of the sum(s) (the opposite of
    `_sum_of_squares`).
    """
    a, axis = _chk_asarray(a, axis)
    return np.sum(np.square(a), axis=axis)
def _square_of_sums(a, axis=0):
    """
    Sum the elements of `a` along `axis` and return the square of that sum.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    square_of_sums : float or ndarray
        The square of the sum over `axis`.

    See also
    --------
    _sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
    """
    a, axis = _chk_asarray(a, axis)
    total = np.sum(a, axis)
    # Promote to float before squaring to avoid integer overflow.
    if np.isscalar(total):
        return float(total) * total
    return total.astype(float) * total
def rankdata(a, method='average'):
    """
    Assign ranks (starting at 1) to the flattened data in `a`.

    Parameters
    ----------
    a : array_like
        The array of values to be ranked. The array is first flattened.
    method : {'average', 'min', 'max', 'dense', 'ordinal'}, optional
        Tie-handling rule (default 'average'):

        'average':
            Tied values share the mean of the ranks they would occupy.
        'min':
            Tied values share the smallest of those ranks ("competition"
            ranking).
        'max':
            Tied values share the largest of those ranks.
        'dense':
            Like 'min', but the next distinct value's rank is exactly one
            higher than that of the tied group.
        'ordinal':
            Every value receives a distinct rank, in order of appearance.

    Returns
    -------
    ranks : ndarray
        An array of length equal to the size of `a`, containing rank
        scores.

    References
    ----------
    .. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking

    Examples
    --------
    >>> from scipy.stats import rankdata
    >>> rankdata([0, 2, 3, 2])
    array([ 1. ,  2.5,  4. ,  2.5])
    >>> rankdata([0, 2, 3, 2], method='min')
    array([ 1,  2,  4,  2])
    """
    if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
        raise ValueError('unknown method "{0}"'.format(method))

    flat = np.ravel(np.asarray(a))
    # A stable sort is only needed for 'ordinal', where tied values must
    # keep their original order.
    kind = 'mergesort' if method == 'ordinal' else 'quicksort'
    order = np.argsort(flat, kind=kind)
    # inverse[i] = position of element i in the sorted sequence.
    inverse = np.empty(order.size, dtype=np.intp)
    inverse[order] = np.arange(order.size, dtype=np.intp)

    if method == 'ordinal':
        return inverse + 1

    ranked = flat[order]
    # True at the start of each run of equal values in sorted order.
    starts = np.r_[True, ranked[1:] != ranked[:-1]]
    dense = starts.cumsum()[inverse]

    if method == 'dense':
        return dense

    # Cumulative count of elements up to and including each distinct value.
    counts = np.r_[np.nonzero(starts)[0], len(starts)]
    if method == 'max':
        return counts[dense]
    if method == 'min':
        return counts[dense - 1] + 1
    # 'average': midpoint of the min and max ranks of the tied group.
    return .5 * (counts[dense] + counts[dense - 1] + 1)
| bsd-3-clause |
dmsul/econtools | econtools/metrics/tests/data/src_areg.py | 1 | 2317 | import pandas as pd
import numpy as np
class regout(object):
    """Lightweight record of canned Stata regression output.

    Every keyword argument passed to the constructor becomes an instance
    attribute (e.g. ``regout(N=51, r2=0.27).N == 51``).
    """
    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)
# Labels for the regression fixtures below: each `summary` DataFrame has one
# row per regressor (var_names) and one column per reported statistic
# (stat_names); `vce` is the variance-covariance matrix of the estimates.
stat_names=['coeff', 'se', 't', 'p>t', 'CI_low', 'CI_high']
var_names=['mpg', 'length', ]

# Reference `areg` output with default (classical) standard errors.
areg_std = regout(
    summary=pd.DataFrame(np.array([
        [-221.7118772462534,
         92.44982055711751,
         -2.398186128541752,
         .0217841023009911,
         -409.2088037098624,
         -34.21495078264431,
         ],
        [26.87059780428358,
         27.27089677764448,
         .9853213857752909,
         .3310425190804239,
         -28.43734435181426,
         82.17853996038141,
         ],
        ]),
        columns=stat_names,
        index=var_names),
    vce=pd.DataFrame(np.array([
        [8546.969321043227,
         1386.152836419678,
         ],
        [1386.152836419678,
         743.7018110569401,
         ],
        ]),
        columns=var_names,
        index=var_names),
    N=51,
    r2=.2706428078700199,
    r2_a=-.0332560221841385,
    mss=45779384.36530431,
    tss=np.nan,
    rss=123371182.4854894,
    kappa=np.nan,
    F=6.679265789418835,
    pF=.0034113388273972,
    )

# Reference `areg` output with heteroskedasticity-robust standard errors.
areg_robust = regout(
    summary=pd.DataFrame(np.array([
        [-221.7118772462534,
         107.6726039118124,
         -2.059129891832496,
         .0467716530581268,
         -440.0820393097441,
         -3.341715182762641,
         ],
        [26.87059780428358,
         25.37851363055508,
         1.058793205758594,
         .2967467758625452,
         -24.59941344364574,
         78.3406090522129,
         ],
        ]),
        columns=stat_names,
        index=var_names),
    vce=pd.DataFrame(np.array([
        [11593.38963315003,
         1661.217997284032,
         ],
        [1661.217997284032,
         644.0689540962698,
         ],
        ]),
        columns=var_names,
        index=var_names),
    N=51,
    r2=.2706428078700199,
    r2_a=-.0332560221841385,
    mss=45779384.36530431,
    tss=np.nan,
    rss=123371182.4854894,
    kappa=np.nan,
    F=6.354414352280316,
    pF=.0043301870908332,
    )

# Reference `areg` output with cluster-robust standard errors.
areg_cluster = regout(
    summary=pd.DataFrame(np.array([
        [-221.7118772462534,
         126.1113575899555,
         -1.758064313026729,
         .1041900769077557,
         -496.48492113,
         53.0611666374933,
         ],
        [26.87059780428358,
         20.23492532140688,
         1.327931651709962,
         .2089065255214493,
         -17.21751709335599,
         70.95871270192313,
         ],
        ]),
        columns=stat_names,
        index=var_names),
    vce=pd.DataFrame(np.array([
        [15904.07451318163,
         1754.847842924423,
         ],
        [1754.847842924423,
         409.4522027629133,
         ],
        ]),
        columns=var_names,
        index=var_names),
    N=51,
    r2=.2706428078700199,
    r2_a=-.0332560221841385,
    mss=45779384.36530431,
    tss=np.nan,
    rss=123371182.4854894,
    kappa=np.nan,
    F=7.650388737405658,
    pF=.0072116955739989,
    )
| bsd-3-clause |
bjsmith/motivation-simulation | SetupBaseStudentModel.py | 1 | 4249 | __author__ = 'benjaminsmith'
import numpy as np
import time
import os
import matplotlib.pyplot as plt #we might do some other tool later.
from ActionModel import ActionModel
import sys
from BisbasModel import BisbasModel
from UnitModel import *
# Channel indices shared by the states, actions and elicitors lists: each
# need, its corresponding action and its environmental elicitor occupy the
# same slot in their respective sequences.
i_food = 0
i_friends = 1
i_partner = 2
i_study = 3
i_fear_threat = 4
def setup_base_student_model():
    """Build and return the baseline BISBAS "student" agent.

    Configures a BisbasModel with five parallel need/action/elicitor
    channels (food, friends, partner, study, threat), then tunes per-action
    expectancies and consummatory parameters so that e.g. approaching a
    potential partner is high-gain/high-risk while meeting friends is
    low-gain/low-risk.  Food is placed in the environment so the model has
    something to act on.

    Returns
    -------
    BisbasModel
        The configured model.
    """
    # Design note (kept from the original): we would like a model with an
    # explicit desire to seek positive feedback, making "mood" an extra
    # Need.  That cannot be done under the current framework because there
    # is no explicit connection between states and elicitors (only their
    # independent connections to actions), so this remains the plain
    # five-channel layout.
    # CLEANUP: removed three dead local assignments
    # (baseline_pos_expectancy, baseline_neg_expectancy,
    # action_state_elicitations) -- the constructor below was passed
    # literals, never those locals.
    bb = BisbasModel(
        states=[
            {"name": "Hunger", "value": 1},
            {"name": "Need for affiliation", "value": 1},
            {"name": "RomanticNeed", "value": 1},
            {"name": "Need for achievement", "value": 0.7},
            {"name": "Fear", "value": 0}],
        actions=UnitModel.GetLayerOfUnits([
            {"name": "Eat", "value": 0},
            {"name": "Meet friends", "value": 0},
            {"name": "Approach potential partner", "value": 0},
            {"name": "Study", "value": 0},
            {"name": "Flee from threat", "value": 0}]),
        elicitors=[
            {"name": "Food", "value": 0},
            {"name": "Friends", "value": 0},
            {"name": "Potential partner", "value": 0},
            {"name": "Library", "value": 0},
            {"name": "Danger", "value": 0}],
        baseline_pos_expectancy=1,
        baseline_neg_expectancy=1,
        baseline_action_threshold=2,
        learning_rate=0.05,
        action_tendency_persistence=1 - 0.10,
        satiation_power=0.05,
        consummatory_power=0.05,
        gains_v_losses=1.5)
    bb.display_current_state_text()

    # The model does nothing without environmental elicitation: bring in food.
    bb.elicitors[i_food].value = 1.0
    bb.actions[i_food].neg_expectancy = 0.25
    bb.actions[i_food].neg_val = 0.25
    bb.actions[i_study].neg_expectancy = 0.36
    bb.actions[i_study].neg_val = 0.2

    # Approaching a potential partner carries a high risk of rejection,
    # while meeting friends carries very little negative but also lesser
    # positive expected gain.
    bb.actions[i_partner].pos_val = 2
    bb.actions[i_partner].pos_expectancy = 2
    bb.actions[i_partner].neg_val = 2.0
    bb.actions[i_partner].neg_expectancy = 0.825
    # Friend parameters.
    # NOTE(review): pos_expectancy is assigned twice (0.8 both times) and
    # neg_expectancy twice (0.05 then 0.2); the later assignments win.
    # Kept as-is -- confirm which values were intended.
    bb.actions[i_friends].pos_val = 0.8
    bb.actions[i_friends].pos_expectancy = 0.8
    bb.actions[i_friends].neg_val = 0.05
    bb.actions[i_friends].neg_expectancy = 0.05
    bb.actions[i_friends].pos_expectancy = 0.8
    bb.actions[i_friends].neg_expectancy = 0.2
    # Threat: fleeing is strongly positive with no downside.
    bb.actions[i_fear_threat].pos_val = 4
    bb.actions[i_fear_threat].pos_expectancy = 4
    bb.actions[i_fear_threat].neg_val = 0
    bb.actions[i_fear_threat].neg_expectancy = 0

    # Sensible consummatory values.
    # NOTE(review): the next two comments and index pairs appear swapped
    # relative to each other ("partner" comment on the i_friends line and
    # vice versa) -- confirm against BisbasModel semantics before changing.
    # "approach potential partner" will exhaust the opportunity after one turn.
    bb.action_elicitor[i_friends, i_friends] = 0
    # "meet friends" doesn't really get 'consumed' at all..
    bb.action_elicitor[i_partner, i_partner] = 1
    # Studying also doesn't get 'consumed'.
    bb.action_elicitor[i_study, i_study] = 0
    # Neither is the need for achievement satiated very quickly.
    bb.action_state[i_study, i_study] = bb.action_state[i_study, i_study] / 8
    # Fleeing will quickly remove the threat...
    bb.action_elicitor[i_fear_threat, i_fear_threat] = 0.5
    # ...but fear lasts a little longer.
    bb.action_state[i_fear_threat, i_fear_threat] = bb.action_state[i_fear_threat, i_fear_threat] * 2
    return bb
| gpl-3.0 |
haochenuw/eigen-mpc | plots/phase1_time.py | 2 | 2313 | from matplotlib import rc, pyplot as plt
import numpy as np
import os
import sys
import json
# Experiment grid: dataset sizes n, dimensions d and party counts, with
# `num_samples` repeats per configuration.  NOTE: Python 2 script
# (print statement, xrange, `except TypeError, e`).
ns = [i * 10**j for j in [3,4,5] for i in [1,2,5]] + [1000000]
ds = [10, 20, 50, 100, 200, 500]
num_samples = 4
ps = [2,3,5]#[1, 2, 5, 10, 20]

# n x d x num_parties x sample x party -> (cputime, waittime, realtime, bytes sent, flush count)
data = np.zeros((len(ns), len(ds), len(ps), num_samples, max(ps)+2, 5), dtype=np.float64)
# NaN marks configurations whose result files are missing or unparseable,
# so they can be skipped later via np.nanmean.
data[:] = np.NAN

# Load timing/traffic results from the per-run .out files.
for i_n, n in enumerate(ns):
    for i_d, d in enumerate(ds):
        # Skip the largest (n, d) combination -- presumably never run;
        # no result files exist for it.
        if d == 500 and n == 1000000:
            continue
        for i_p, num_parties in enumerate(ps):
            for sample in xrange(num_samples):
                for party in xrange(num_parties+2):
                    try:
                        filename = "../experiments/results/phase1/test_LR_{}x{}_{}_{}_p{}.out".format(n, d, num_parties, sample, party+1)
                        lines = open(filename).read().splitlines()
                        if party == 0:
                            lines = lines[1:] # ignore first line for CSP (contains only info we already know)
                        current_data = json.loads(lines[0]) # TODO make sure that the times are on the first line
                        data[i_n, i_d, i_p, sample, party, :3] = [current_data[key] for key in ['cputime', 'wait_time', 'realtime']]
                        # np.fromstring(..., sep=',') parses the bracketed
                        # comma-separated list of per-connection byte counts.
                        data[i_n, i_d, i_p, sample, party, 3] = np.sum(np.fromstring(lines[1][1:-1], sep=',')) # bytes sent
                    except TypeError, e:
                        #pass
                        print filename, e

# cpu + running time
# Emit a LaTeX table body: one \multirow block per n, one row per d, and per
# party count the TI cpu time, mean party cpu time and total real time.
for i_n, n in enumerate(ns):
    sys.stdout.write("\midrule\n\\multirow{%d}{*}{%d}\n" % (len(ds),n))
    for i_d, d in enumerate(ds):
        if d == 500 and n == 1000000:
            continue
        #if i_d != 0:
        #    sys.stdout.write(10 * " ")
        sys.stdout.write("& {:<10d}".format(d))
        for i_p, num_parties in enumerate(ps):
            # Averages over samples, ignoring NaN (missing) runs.
            time_ti = np.nanmean(data[i_n, i_d, i_p, :, 0, 0])
            time_parties = np.nanmean(data[i_n, i_d, i_p, :, 2:, 0])
            time_total = np.nanmean(data[i_n, i_d, i_p, :, num_parties+1, 2])
            sys.stdout.write("& {:9.3f} & {:9.3f} & {:9.3f} ".format(time_ti, time_parties, time_total))
        sys.stdout.write("\\\\\n")
| gpl-3.0 |
CnrLwlss/Colonyzer | scripts/parseAndRun2.py | 1 | 17440 | import colonyzer2 as c2
import json
import argparse
import shutil
import string
import os
import time
import numpy as np
import itertools
import cv2
from matplotlib.backends.backend_pdf import PdfPages, FigureCanvasPdf
from matplotlib import figure
from PIL import Image,ImageDraw
from scipy import ndimage
from pkg_resources import resource_filename, Requirement
def checkImages(fdir, fdict=None, barcRange=(0, -24), verbose=False):
    '''Discover barcodes in fdir (or those listed in the JSON file fdict) for which analysis has not started.'''
    if fdict is None:
        # Find image files which have yet to be analysed.
        # Lydall lab file naming convention (barcRange): the first 15
        # characters of a filename identify a unique plate; the remainder
        # can store date, time etc.
        return c2.getBarcodes(fdir, barcRange, verbose=verbose)

    with open(fdict, 'rb') as fp:
        barcdict = json.load(fp)
    # Drop any barcodes that are currently being analysed/already analysed.
    return {barc: ims for barc, ims in barcdict.items()
            if not c2.checkAnalysisStarted(ims[-1])}
def parseArgs(inp=''):
    '''Define console script behaviour, hints and documentation for setting off Colonyzer analysis.'''
    parser=argparse.ArgumentParser(description="Analyse timeseries of QFA images: locate cultures on plate, segment image into agar and cells, apply lighting correction, write report including cell density estimates for each location in each image. If you need to specify initial guesses for colony locations, you must provide a Colonyzer.txt file (as generated by ColonyzerParametryzer) describing initial guess for culture array in the directory containing the images to be analysed.")
    # Boolean switches controlling the analysis pipeline.
    parser.add_argument("-c","--lc", help="Enable lighting correction?", action="store_true")
    parser.add_argument("-m","--diffims", help="If lighting correction switched on, attempt to correct for lighting differences between images in timecourse (can induce slight negative cell density estimates).", action="store_true")
    parser.add_argument("-p","--plots", help="Plot pixel intensity distributions, segmentation thresholds and spot location traces?", action="store_true")
    parser.add_argument("-i","--initpos", help="Use intial guess for culture positions from Colonyzer.txt file?", action="store_true")
    parser.add_argument("-x","--cut", help="Cut culture signal from first image to make pseudo-empty plate?", action="store_true")
    parser.add_argument("-q","--quiet", help="Suppress messages printed to screen during analysis?", action="store_true")
    parser.add_argument("-e","--endpoint", help="Only analyse final image in series. Mostly for testing single image analysis.",action="store_true")
    parser.add_argument("-k","--edgemask", help="Use intensity gradient & morphology for image segmentation instead of thresholding.",action="store_true")
    parser.add_argument("-g","--greenlab", help="Check for presence of GreenLab lids on plates.",action="store_true")
    # Options taking values: search directories, threshold, image list, grid
    # format and grid-search effort.
    parser.add_argument("-d","--dir", type=str, help="Directory in which to search for image files that have not been analysed (current directory by default).",default=".")
    parser.add_argument("-l","--logsdir", type=str, help="Directory in which to search for JSON files listing images for analyis (e.g. LOGS3, root of HTS filestore). Only used when intending to specify images for analysis in .json file (see -u).",default=".")
    parser.add_argument("-f","--fixthresh", type=float, help="Image segmentation threshold value (default is automatic thresholding).")
    parser.add_argument("-u","--usedict", type=str, help="Load .json file specifying images to analyse. If argument has a .json extension, treat as filename. Otherwise assume argument is a HTS-style screen ID and return path to appropriate .json file from directory structure. See C2Find.py in HTSauto package.")
    parser.add_argument("-o","--fmt", type=str, nargs='+', help="Specify rectangular grid format, either using integer shorthand (e.g. -o 96, -o 384, -o 768 -o 1536) or explicitly specify number of rows followed by number of columns (e.g.: -o 24 16 or -o 24x16).", default=['384'])
    parser.add_argument("-t","--updates", type=int, help="Number of (quasi-)randomly distributed grid positions to assess in first phase of grid location. Ignored when -initpos specified.", default=144)
    # An empty `inp` means "parse the real command line"; otherwise parse the
    # supplied string (useful for calling from other scripts/tests).
    if inp=="":
        args = parser.parse_args()
    else:
        args = parser.parse_args(inp.split())
    return(args)
def buildVars(inp=''):
    '''Read user input, set up flags for analysis, report on options chosen and find files to be analysed.'''
    inp=parseArgs(inp)
    if inp.quiet:
        verbose=False
    else:
        verbose=True
    if inp.dir==None:
        fdir=os.getcwd()
    else:
        fdir=os.path.realpath(inp.dir)
    # fixedThresh of -99 is the sentinel for "automatic thresholding".
    if inp.fixthresh!=None and not inp.edgemask:
        fixedThresh=inp.fixthresh
    else:
        fixedThresh=-99
    # Grid format: either shorthand (e.g. "384") or explicit rows/columns.
    if len(inp.fmt)>2:
        print("Woah! Too many dimensions specified for rectangular grid format!")
        nrow,ncol=(0,0)
    elif len(inp.fmt)==1:
        nrow,ncol=c2.parsePlateFormat(inp.fmt[0])
    else:
        nrow,ncol=[int(x) for x in inp.fmt]
    print("Expecting {} rows and {} columns on plate.".format(nrow,ncol))
    # -u may be a .json filename or an HTS-style screen ID (resolved via
    # locateJSON).
    if inp.usedict is None:
        fdict=None
    elif inp.usedict[-5:] in [".json",".JSON"]:
        fdict=os.path.realpath(inp.usedict)
    else:
        fdict=locateJSON(inp.usedict,os.path.realpath(inp.logsdir),verbose)
    if fdict is not None and not os.path.exists(fdict): print("WARNING! "+fdict+" does not exist...")
    if inp.lc:
        diffIms=True
    else:
        diffIms=False
    # Cutting cell signal only makes sense with lighting correction enabled.
    if inp.lc and inp.cut:
        cut=True
    else:
        cut=False
    if verbose:
        # Report chosen options back to the user.
        if inp.lc:
            print("Lighting correction turned on.")
        else:
            print("Lighting correction turned off.")
        if inp.lc:
            if inp.diffims:
                print("Correcting for lighting differences between subsequent images of same plate.")
            else:
                print("Any lighting differences between plates will be ignored.")
        if inp.plots:
            print("Reports on spot location and thresholding will appear in Output_Reports directory.")
        else:
            print("No reports on spot location or thresholding will be generated.")
        if inp.initpos:
            print("Using user-specified initial guess for colony locations. NOTE: Colonyzer.txt file must be located in directory with images to be analysed. See Parametryzer for more information.")
        else:
            print("Searching for colony locations automatically.")
            print("Checking "+str(inp.updates)+" (quasi-random) candidate grid positions in first phase of grid location")
        if inp.greenlab:
            print("Removing images identified as containing GreenLab lids.")
        if inp.lc:
            if inp.cut:
                print("Cutting cell signal from first image to create pseudo-empty plate (for lighting correction).")
            else:
                print("Using first plate (without segmenting) as best estimate of pseudo-empty plate (for lighting correction).")
        if fixedThresh==-99 and not inp.edgemask:
            print("Image segmentation by automatic thresholding.")
        elif not inp.edgemask:
            print("Images will be segmented using fixed threshold: "+str(fixedThresh)+".")
        else:
            # (sic) "segemented" typo kept: runtime string left unchanged.
            print("Images will be segemented by intensity gradient and morphology instead of by thresholding.")
        if fdict is not None and os.path.exists(fdict):
            print("Preparing to load barcodes from "+fdict+".")
    # Bundle all derived settings into a single dictionary for downstream use.
    res={'lc':inp.lc,'fixedThresh':fixedThresh,'plots':inp.plots,'initpos':inp.initpos,'fdict':fdict,'fdir':fdir,'nrow':nrow,'ncol':ncol,'cut':cut,'verbose':verbose,'diffims':diffIms,'updates':inp.updates,'endpoint':inp.endpoint,'edgemask':inp.edgemask,'greenlab':inp.greenlab}
    return(res)
def locateJSON(scrID,dirHTS='.',verbose=False):
    '''Return the expected path of the Colonyzer C2 JSON file for screen ID scrID.

    The experiment type is the screen ID with its last four characters
    removed; the JSON lives under <dirHTS>/<type>_EXPERIMENTS/<scrID>/AUXILIARY.
    '''
    expt_type = scrID[0:-4]
    path_parts = (dirHTS, expt_type + "_EXPERIMENTS", scrID, "AUXILIARY", scrID + "_C2.json")
    return os.path.join(*path_parts)
def prepareTimecourse(barcdict,verbose=False,chklid=lambda x:False):
    '''In timecourse mode, prepares "next" batch of images for analysis from dictionary of image names (unique image barcodes are dictionary keys).

    Picks the alphabetically first barcode, filters out any images flagged
    by chklid (e.g. lid-obscured frames), and returns the batch summary:
    (barcode, image directory, latest image, earliest image, earliest-image root).
    Assumes barcdict[barcode] is sorted newest-first.
    '''
    barcode = sorted(barcdict)[0]
    image_dir = os.path.dirname(barcdict[barcode][0])
    usable = [im for im in barcdict[barcode] if not chklid(im)]
    latest = usable[0]
    earliest = usable[-1]
    root = earliest.split(".")[0]
    if verbose:
        print("Analysing images labelled with the barcode "+barcode+" in "+image_dir)
        print("Earliest image: "+earliest)
        print("Latest image: "+latest)
    return((barcode,image_dir,latest,earliest,root))
def loadLocationGuesses(IMAGE,InsData):
    '''Set up initial guesses for location of (centres of) spots on image by parsing data from Colonyzer.txt'''
    imname = os.path.basename(IMAGE)
    if imname in InsData:
        # A calibration recorded for exactly this filename takes precedence.
        (candx,candy,dx,dy) = c2.SetUp(InsData[imname])
    elif any(isinstance(el, list) for el in InsData['default']):
        # Multiple dated default calibrations are available: choose by the
        # capture date embedded in the filename.
        # NOTE(review): assumes the name stem ends ..._YYYY-MM-DD_hh-mm-ss,
        # so [-19:-9] is the date — confirm against the image naming scheme.
        imdate = imname.split(".")[0][-19:-9]
        (candx,candy,dx,dy) = c2.SetUp(InsData['default'], imdate)
    else:
        # Single default calibration.
        (candx,candy,dx,dy) = c2.SetUp(InsData['default'])
    return((candx,candy,dx,dy))
def cutEdgesFromMask(mask,locations,dx,dy):
    '''Mask for identifying culture areas (edge detection). Set all pixels outside culture grid to background, to aid binary filling later.

    mask is modified in place and also returned. locations must expose
    iterable .x and .y attributes holding spot centre coordinates; dx/dy
    are the grid spacings, so the kept window extends half a spacing
    beyond the outermost spot centres.

    Bug fix: the slice bounds are now cast to int. dy/2 and dx/2 are
    floats whenever dy/dx are, and float slice indices raise TypeError
    on Python 3 / modern numpy.
    '''
    top = int(min(locations.y-dy/2))
    bottom = int(max(locations.y+dy/2))
    left = int(min(locations.x-dx/2))
    right = int(max(locations.x+dx/2))
    mask[0:top,:]=False
    mask[bottom:mask.shape[0],:]=False
    mask[:,0:left]=False
    mask[:,right:mask.shape[1]]=False
    return(mask)
def edgeFill(arr,locations,dx,dy,cutoff=0.8):
    '''Segment cultures via edge detection and morphology: thicken the edge
    map, clip it to the culture grid, fill the closed outlines, then shrink
    the filled regions back towards culture size.'''
    edges = c2.getEdges(arr, cutoff=cutoff)
    thick = ndimage.morphology.binary_dilation(edges, iterations=2)
    # NOTE(review): this erosion result is never used downstream.
    shrunk = ndimage.morphology.binary_erosion(thick, iterations=1)
    thicker = ndimage.morphology.binary_dilation(thick, iterations=3)
    clipped = cutEdgesFromMask(thicker, locations, dx, dy)
    filled = ndimage.morphology.binary_fill_holes(clipped)
    return(ndimage.morphology.binary_erosion(filled, iterations=7))
def edgeFill2(arr,cutoff=0.8):
    '''Segment cultures by edge detection alone: erode the raw edge map to
    drop speckle, dilate it back, keep only raw edges supported by that
    neighbourhood, and fill the resulting closed outlines.'''
    raw_edges = c2.getEdges(arr, cutoff=cutoff)
    denoised = ndimage.morphology.binary_erosion(raw_edges, iterations=2)
    grown = ndimage.morphology.binary_dilation(denoised, iterations=4)
    supported = np.logical_and(grown, raw_edges)
    return(ndimage.morphology.binary_fill_holes(supported))
def main(inp=""):
    '''Top-level Colonyzer pipeline.

    Discovers image batches (one per plate barcode), locates the culture
    grid on each plate, optionally corrects lighting, segments and
    quantifies every culture, and writes .out/.dat files plus optional
    PDF reports. Loops until every barcode is analysed or locked by a
    parallel worker.
    '''
    print("Colonyzer "+c2.__version__)
    # Merge command-line and programmatic options into one flat dict.
    var=buildVars(inp=inp)
    correction,fixedThresh,plots,initpos,fdict,fdir,nrow,ncol,cut,verbose,diffIms,updates,endpoint,edgemask,greenlab=(var["lc"],var["fixedThresh"],var["plots"],var["initpos"],var["fdict"],var["fdir"],var["nrow"],var["ncol"],var["cut"],var["verbose"],var["diffims"],var["updates"],var["endpoint"],var["edgemask"],var["greenlab"])
    barcdict=checkImages(fdir,fdict,verbose=verbose)
    if greenlab:
        # Drop barcodes whose analysis has already started (lock files present).
        barcdict={x:barcdict[x] for x in barcdict.keys() if not c2.checkAnalysisStarted(barcdict[x][-1])}
    rept=c2.setupDirectories(barcdict,verbose=verbose)
    if greenlab:
        # Template images (shipped with the package) for detecting frames
        # that still show a GreenLab lid over the plate.
        posfiles=[resource_filename(Requirement.parse("colonyzer2"),os.path.join("data",f)) for f in ["GreenLabLid.png","CornerLid.png","BottomRightLid.png"]]
        negfiles=[resource_filename(Requirement.parse("colonyzer2"),os.path.join("data",f)) for f in ["GreenLabNoLid.png","CornerNoLid.png","BottomRightNoLid.png"]]
        frects=[[0.15,0.85,0.5,1.0],[0.0,0.8,0.125,1.0],[0.85,0.8,1.0,1.0]]
        pdistThresh,ndistThresh=50.3,53.333
        checkLid=c2.makeLidTest(posfiles,negfiles,frects,pdistThresh,ndistThresh,False)
    else:
        # Bug fix: the original body was the bare expression `False`, so the
        # function implicitly returned None; make the intent explicit.
        def checkLid(fname):
            return False
    start=time.time()
    while len(barcdict)>0:
        BARCODE,imdir,LATESTIMAGE,EARLIESTIMAGE,imRoot=prepareTimecourse(barcdict,verbose=verbose,chklid=checkLid)
        if plots:
            pdf=PdfPages(os.path.join(os.path.dirname(EARLIESTIMAGE),"Output_Reports",os.path.basename(EARLIESTIMAGE).split(".")[0]+".pdf"))
        else:
            pdf=None
        # Create empty file to indicate that barcode is currently being analysed, to allow parallel analysis (lock files)
        tmp=open(os.path.join(os.path.dirname(EARLIESTIMAGE),"Output_Data",os.path.basename(EARLIESTIMAGE).split(".")[0]+".out"),"w").close()
        # Get latest image for thresholding and detecting culture locations
        imN,arrN=c2.openImage(LATESTIMAGE)
        # Get earliest image for lighting gradient correction
        if (LATESTIMAGE==EARLIESTIMAGE) or endpoint:
            im0,arr0=imN,arrN
            arrloc=arrN
        else:
            im0,arr0=c2.openImage(EARLIESTIMAGE)
            arrloc=arrN-arr0
        #arrloc=np.maximum(0,arrloc)
        if initpos:
            InsData=c2.readInstructions(os.path.dirname(LATESTIMAGE),searchUpStream=True)
            # Load initial guesses from Colonyzer.txt file
            (candx,candy,dx,dy)=loadLocationGuesses(LATESTIMAGE,InsData)
            corner=[0,0]; com=[0,0]; guess=[0,0]
            # NOTE: assumes that grid returned by loadLocationGuesses is parallel to image edges
            ny=nrow=len(np.unique(candy))
            nx=ncol=len(np.unique(candx))
        else:
            # Automatically generate guesses for gridded array locations
            (candx,candy,dx,dy,corner,com,guess)=c2.estimateLocations(arrloc,ncol,nrow,showPlt=plots,pdf=pdf,glob=False,verbose=verbose,nsol=updates)
        # Update guesses and initialise locations data frame
        locationsN=c2.locateCultures([int(round(cx-dx/2.0)) for cx in candx],[int(round(cy-dy/2.0)) for cy in candy],dx,dy,arrloc,ncol,nrow,update=True)
        mask=edgeFill2(arrN,0.9)
        # NOTE(review): showIm opens an interactive window for every barcode;
        # this looks like a debugging leftover — confirm before removing.
        c2.showIm(mask)
        # Restrict the segmentation mask to the bounding box of the culture grid.
        grd=mask.copy()
        grd[:,:]=False
        # Bug fix: cast slice bounds to int — dy/2 and dx/2 are floats and
        # float slice indices fail on Python 3 / modern numpy.
        grd[int(min(locationsN.y-dy/2)):int(max(locationsN.y+dy/2)),int(min(locationsN.x-dx/2)):int(max(locationsN.x+dx/2))]=True
        spots=np.logical_and(grd,mask)
        agar=np.logical_and(grd,~mask)
        ave0=np.mean(arr0[agar])
        thresh=-99
        if correction:
            if cut:
                # Build a pseudo-empty plate: inpaint the culture pixels of the
                # earliest image, blur, and paste the blurred values back so
                # only the lighting field remains.
                pseudoempty = np.array(np.round(arr0.copy()),dtype=np.uint8)
                bytemask = np.asarray(spots*255, dtype=np.uint8)
                filled = cv2.inpaint(pseudoempty,bytemask,3,cv2.INPAINT_NS)
                blurred = cv2.GaussianBlur(filled,(21,21),0)
                pseudoempty[spots] = blurred[spots]
            else:
                pseudoempty=arr0
        else:
            pseudoempty=0
        for FILENAME in barcdict[BARCODE]:
            startim=time.time()
            im,arr=c2.openImage(FILENAME)
            # Local updates on individual images to allow for slight movement of plate
            #locationsN=c2.locateCultures([int(round(cx-dx/2.0)) for cx in candx],[int(round(cy-dy/2.0)) for cy in candy],dx,dy,arr,ncol,nrow,update=True)
            ave=np.mean(arr[agar])
            # Correct for minor lighting differences between images
            arr=np.maximum(0,np.minimum(255,arr-(ave-ave0)))
            # Subtract background (corrects for lighting differences within plate/image as well as making agar intensity correspond to zero signal)
            arr=np.maximum(arr-pseudoempty,0)
            # Measure culture phenotypes
            locations=c2.measureSizeAndColour(locationsN,arr,im,spots,0,BARCODE,FILENAME[0:-4])
            # Write results to file
            # NOTE(review): pandas DataFrame.to_csv has no `engine` keyword in
            # modern pandas — confirm against the pinned pandas version.
            locations.to_csv(os.path.join(os.path.dirname(FILENAME),"Output_Data",os.path.basename(FILENAME).split(".")[0]+".out"),"\t",index=False,engine='python')
            dataf=c2.saveColonyzer(os.path.join(os.path.dirname(FILENAME),"Output_Data",os.path.basename(FILENAME).split(".")[0]+".dat"),locations,thresh,dx,dy)
            # Visual check of culture locations
            imthresh=c2.threshPreview(locations,mask,None)
            r=5
            draw=ImageDraw.Draw(imthresh)
            draw.ellipse((com[1]-r,com[0]-r,com[1]+r,com[0]+r),fill=(255,0,0))
            draw.ellipse((corner[1]-r,corner[0]-r,corner[1]+r,corner[0]+r),fill=(255,0,0))
            draw.ellipse((guess[1]-r,guess[0]-r,guess[1]+r,guess[0]+r),fill=(255,0,0))
            draw.ellipse((candx[0]-r,candy[0]-r,candx[0]+r,candy[0]+r),fill=(255,0,0))
            imthresh.save(os.path.join(os.path.dirname(FILENAME),"Output_Images",os.path.basename(FILENAME).split(".")[0]+".png"))
            if verbose: print("Finished {0} in {1:.2f}s".format(os.path.basename(FILENAME),time.time()-startim))
        # Barcode done: report, then re-scan for barcodes still unclaimed.
        if verbose: print("Finished {0} in {1:.2f}s".format(os.path.basename(BARCODE),time.time()-start))
        barcdict={x:barcdict[x] for x in barcdict.keys() if not c2.checkAnalysisStarted(barcdict[x][-1])}
        if plots:
            pdf.close()
    print("No more barcodes to analyse... I'm done.")
# Script entry point: run the full Colonyzer analysis with default settings.
if __name__ == '__main__':
    main()
| gpl-3.0 |
BhallaLab/moose-full | moose-examples/snippets/ionchannel.py | 2 | 8640 | # ionchannel.py ---
#
# Filename: ionchannel.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Wed Sep 17 10:33:20 2014 (+0530)
# Version:
# Last-Updated:
# By:
# Update #: 0
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
"""This demo shows how to set the parameters for a Hodgkin-Huxley type ion channel.
Hodgkin-Huxley type ion channels are composed of one or more gates
that allow ions to cross the membrane. The gates transition between
open and closed states and this, taken over a large population of
ion channels over a patch of membrane has first order kinetics, where
the rate of change of fraction of open gates (n) is given by::
dn/dt = alpha(Vm) * (1 - n) - beta(Vm) * n
where alpha and beta are rate parameters for gate opening and
closing respectively that depend on the membrane potential.
The final channel conductance is computed as::
Gbar * m^x * h^y ...
where m, n are the fraction of open gates of different types and x,
y are the number of such gates in each channel. We can define the
channel by specifying the alpha and beta parameters as functions of
membrane potential and the exponents for each gate.
The number gates is commonly one or two.
Gate opening/closing rates have the form::
y(x) = (A + B * x) / (C + exp((x + D) / F))
where x is membrane voltage and y is the rate parameter for gate
closing or opening.
"""
import numpy as np
import matplotlib.pyplot as plt
import moose
#: Resting membrane potential (V); all gate voltage parameters below are
#: expressed relative to this value.
EREST_ACT = -70e-3

#: The parameters for defining m as a function of Vm.
#: Each list holds [A_A..A_F, B_A..B_F] for the generic rate form
#: y(V) = (A + B*V) / (C + exp((V + D) / F)) — first five entries
#: parameterise alpha (opening), last five beta (closing).
Na_m_params = [1e5 * (25e-3 + EREST_ACT),  # 'A_A':
               -1e5,  # 'A_B':
               -1.0,  # 'A_C':
               -25e-3 - EREST_ACT,  # 'A_D':
               -10e-3,  # 'A_F':
               4e3,  # 'B_A':
               0.0,  # 'B_B':
               0.0,  # 'B_C':
               0.0 - EREST_ACT,  # 'B_D':
               18e-3  # 'B_F':
               ]
#: Parameters for defining h gate of Na+ channel
Na_h_params = [70.0,  # 'A_A':
               0.0,  # 'A_B':
               0.0,  # 'A_C':
               0.0 - EREST_ACT,  # 'A_D':
               0.02,  # 'A_F':
               1000.0,  # 'B_A':
               0.0,  # 'B_B':
               1.0,  # 'B_C':
               -30e-3 - EREST_ACT,  # 'B_D':
               -0.01  # 'B_F':
               ]
#: K+ channel in Hodgkin-Huxley model has only one gate, n, and these
#: are the parameters for the same
K_n_params = [1e4 * (10e-3 + EREST_ACT),  # 'A_A':
              -1e4,  # 'A_B':
              -1.0,  # 'A_C':
              -10e-3 - EREST_ACT,  # 'A_D':
              -10e-3,  # 'A_F':
              0.125e3,  # 'B_A':
              0.0,  # 'B_B':
              0.0,  # 'B_C':
              0.0 - EREST_ACT,  # 'B_D':
              80e-3  # 'B_F':
              ]
#: We define the rate parameters, which are functions of Vm, as
#: interpolation tables looked up by membrane potential.
#: Minimum x-value for the interpolation table
VMIN = -30e-3 + EREST_ACT
#: Maximum x-value for the interpolation table
VMAX = 120e-3 + EREST_ACT
#: Number of divisions in the interpolation table
VDIVS = 3000
def create_na_proto():
    """Create and return a Na+ channel prototype '/library/na'.

    The Na+ channel conductance has the equation::

        g = Gbar * m^3 * h

    For each gate, HHGate.setupAlpha fills the alpha/beta interpolation
    tables over [VMIN, VMAX] with VDIVS divisions.
    """
    moose.Neutral('/library')
    channel = moose.HHChannel('/library/na')
    channel.tick = -1
    # Setting Xpower to a positive value auto-creates channel/gateX (the m
    # gate); exponent 3 gives the m^3 term.
    channel.Xpower = 3
    m_gate = moose.element(channel.path + '/gateX')
    m_gate.setupAlpha(Na_m_params + [VDIVS, VMIN, VMAX])
    # channel/gateY is the h gate; exponent 1.
    channel.Ypower = 1
    h_gate = moose.element(channel.path + '/gateY')
    h_gate.setupAlpha(Na_h_params + [VDIVS, VMIN, VMAX])
    return channel
def create_k_proto():
    """Create and return a K+ channel prototype '/library/k'.

    The K+ channel conductance has the equation::

        g = Gbar * n^4
    """
    lib = moose.Neutral('/library')
    k = moose.HHChannel('/library/k')
    k.tick = -1
    # Setting Xpower auto-creates channel/gateX (the n gate).
    k.Xpower = 4.0
    # Consistency fix: fetch the auto-created gate with moose.element, as
    # create_na_proto does, instead of constructing a fresh HHGate wrapper.
    xGate = moose.element(k.path + '/gateX')
    xGate.setupAlpha(K_n_params +
                     [VDIVS, VMIN, VMAX])
    return k
def create_1comp_neuron(path, number=1):
    """Create single-compartmental neuron with Na+ and K+ channels.

    Parameters
    ----------
    path : str
        path of the compartment to be created
    number : int
        number of compartments to be created. If n is greater than 1,
        we create a vec with that size, each having the same property.

    Returns
    -------
    comp : moose.Compartment
        a compartment vec with `number` elements.
    """
    cells = moose.vec(path=path, n=number, dtype='Compartment')
    diameter = 30e-6
    length = 50e-6
    # Surface and cross-sectional areas of the cylindrical compartment.
    sarea = np.pi * diameter * length
    xarea = np.pi * diameter * diameter / 4.0
    cells.Em = EREST_ACT + 10.613e-3
    cells.initVm = EREST_ACT
    #: CM = 1 uF/cm^2 — scale the specific capacitance by membrane area.
    cells.Cm = 1e-6 * sarea * 1e4
    #: RM = 0.3 mS/cm^2
    cells.Rm = 1 / (0.3e-3 * sarea * 1e4)
    container = cells[0].parent.path
    # Copy the channel prototypes next to the compartments and scale their
    # maximal conductances by membrane area.
    na = moose.copy(create_na_proto(), container, 'na_{}'.format(cells.name), number)
    #: Gbar_Na = 120 mS/cm^2
    na.Gbar = [120e-3 * sarea * 1e4] * len(na)
    na.Ek = 115e-3 + EREST_ACT
    moose.connect(na, 'channel', cells, 'channel', 'OneToOne')
    k = moose.copy(create_k_proto(), container, 'k_{}'.format(cells.name), number)
    #: Gbar_K = 36 mS/cm^2
    k.Gbar = 36e-3 * sarea * 1e4
    k.Ek = -12e-3 + EREST_ACT
    moose.connect(k, 'channel', cells, 'channel', 'OneToOne')
    return cells
def current_step_test(simtime, simdt, plotdt):
    """Create a single compartment and set it up for applying a step
    current injection.

    A PulseGen object generates a 40 ms wide 1 nA current pulse that
    starts 20 ms after the start of the simulation. Returns the time
    axis plus the recorded current and Vm traces.
    """
    model = moose.Neutral('/model')
    neuron = create_1comp_neuron('/model/neuron')
    # Stimulus: one finite pulse; the second delay is effectively infinite
    # so the pulse does not repeat.
    stim = moose.PulseGen('/model/stimulus')
    stim.delay[0] = 20e-3
    stim.level[0] = 1e-9
    stim.width[0] = 40e-3
    stim.delay[1] = 1e9
    moose.connect(stim, 'output', neuron, 'injectMsg')
    # Recording tables for the injected current and the membrane potential.
    data = moose.Neutral('/data')
    current_tab = moose.Table('/data/current')
    moose.connect(current_tab, 'requestOut', stim, 'getOutputValue')
    vm_tab = moose.Table('/data/Vm')
    moose.connect(vm_tab, 'requestOut', neuron, 'getVm')
    for tick in range(10):
        moose.setClock(tick, simdt)
    # Tick 8 drives the tables, so sample at the plotting interval.
    moose.setClock(8, plotdt)
    moose.reinit()
    moose.start(simtime)
    ts = np.linspace(0, simtime, len(vm_tab.vector))
    return ts, current_tab.vector, vm_tab.vector
if __name__ == '__main__':
    # Simulation timing: 100 ms of model time, 2.5 us integration step,
    # 0.25 ms sampling interval for the recorded tables.
    simtime = 0.1
    simdt = 0.25e-5
    plotdt = 0.25e-3
    ts, current, vm = current_step_test(simtime, simdt, plotdt)
    # Rescale to conventional display units: V -> mV, A -> nA.
    plt.plot(ts, vm * 1e3, label='Vm (mV)')
    plt.plot(ts, current * 1e9, label='current (nA)')
    plt.legend()
    plt.show()
#
# ionchannel.py ends here
| gpl-2.0 |
xinfang/face-recognize | demos/classifier_webcam.py | 4 | 7059 | #!/usr/bin/env python2
#
# Example to run classifier on webcam stream.
# Brandon Amos & Vijayenthiran
# 2016/06/21
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Contrib: Vijayenthiran
# This example file shows to run a classifier on webcam stream. You need to
# run the classifier.py to generate classifier with your own dataset.
# To run this file from the openface home dir:
# ./demo/classifier_webcam.py <path-to-your-classifier>
import time
start = time.time()
import argparse
import cv2
import os
import pickle
import sys
import numpy as np
np.set_printoptions(precision=2)
from sklearn.mixture import GMM
import openface
# Directory layout: the dlib and openface model files are looked up
# relative to this script's location (../models/{dlib,openface}).
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
def getRep(bgrImg):
    """Detect, align and embed every face in a BGR frame.

    Returns a list of neural-network embeddings, one per detected face;
    an empty list when no face is found. Raises if the frame is None.
    Uses the module-level `args`, `align` and `net` objects.
    """
    start = time.time()
    if bgrImg is None:
        raise Exception("Unable to load image/frame")

    # dlib/openface expect RGB; OpenCV captures BGR.
    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)

    if args.verbose:
        print(" + Original size: {}".format(rgbImg.shape))
        print("Loading the image took {} seconds.".format(time.time() - start))

    start = time.time()
    # Get all bounding boxes
    bb = align.getAllFaceBoundingBoxes(rgbImg)
    if bb is None or len(bb) == 0:
        # Bug fix: previously returned None here, which crashed infer()
        # when it iterated over the result. An empty list is the safe
        # "no faces detected" answer.
        return []

    if args.verbose:
        print("Face detection took {} seconds.".format(time.time() - start))

    start = time.time()
    alignedFaces = []
    for box in bb:
        alignedFaces.append(
            align.align(
                args.imgDim,
                rgbImg,
                box,
                landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE))

    if args.verbose:
        print("Alignment took {} seconds.".format(time.time() - start))

    start = time.time()
    # Forward pass through the Torch network for each aligned face.
    reps = []
    for alignedFace in alignedFaces:
        reps.append(net.forward(alignedFace))
    if args.verbose:
        print("Neural network forward pass took {} seconds.".format(
            time.time() - start))
    return reps
def infer(img, args):
    """Classify every face in `img` with the pickled classifier.

    Returns (persons, confidences): parallel lists of predicted labels
    and their probabilities, or (None, None) if a face embedding cannot
    be reshaped for prediction.
    """
    # NOTE(review): the classifier is re-loaded from disk on every frame;
    # hoisting this out of the per-frame path would speed up the webcam loop.
    # Bug fix: pickle files must be opened in binary mode — text mode 'r'
    # fails on Python 3.
    with open(args.classifierModel, 'rb') as f:
        if sys.version_info[0] < 3:
            (le, clf) = pickle.load(f)  # le - label and clf - classifer
        else:
            (le, clf) = pickle.load(f, encoding='latin1')  # le - label and clf - classifer

    reps = getRep(img)
    if reps is None:
        # Defensive: treat a missing detection result as "no faces".
        reps = []
    persons = []
    confidences = []
    for rep in reps:
        try:
            rep = rep.reshape(1, -1)
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            print("No Face detected")
            return (None, None)
        start = time.time()
        predictions = clf.predict_proba(rep).ravel()
        # Best class and its probability.
        maxI = np.argmax(predictions)
        persons.append(le.inverse_transform(maxI))
        confidences.append(predictions[maxI])
        if args.verbose:
            print("Prediction took {} seconds.".format(time.time() - start))
        if isinstance(clf, GMM):
            # Extra diagnostic for GMM-based classifiers.
            dist = np.linalg.norm(rep - clf.means_[maxI])
            print(" + Distance from the mean: {}".format(dist))
    return (persons, confidences)
if __name__ == '__main__':
    # Command-line interface: model paths, capture settings and the
    # (positional) pickled classifier produced by classifier.py.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--dlibFacePredictor',
        type=str,
        help="Path to dlib's face predictor.",
        default=os.path.join(
            dlibModelDir,
            "shape_predictor_68_face_landmarks.dat"))
    parser.add_argument(
        '--networkModel',
        type=str,
        help="Path to Torch network model.",
        default=os.path.join(
            openfaceModelDir,
            'nn4.small2.v1.t7'))
    parser.add_argument('--imgDim', type=int,
                        help="Default image dimension.", default=96)
    parser.add_argument(
        '--captureDevice',
        type=int,
        default=0,
        help='Capture device. 0 for latop webcam and 1 for usb webcam')
    parser.add_argument('--width', type=int, default=320)
    parser.add_argument('--height', type=int, default=240)
    parser.add_argument('--threshold', type=float, default=0.5)
    parser.add_argument('--cuda', action='store_true')
    parser.add_argument('--verbose', action='store_true')
    parser.add_argument(
        'classifierModel',
        type=str,
        help='The Python pickle representing the classifier. This is NOT the Torch network model, which can be set with --networkModel.')

    args = parser.parse_args()

    # Globals used by getRep(): the dlib aligner and the Torch embedder.
    align = openface.AlignDlib(args.dlibFacePredictor)
    net = openface.TorchNeuralNet(
        args.networkModel,
        imgDim=args.imgDim,
        cuda=args.cuda)

    # Capture device. Usually 0 will be webcam and 1 will be usb cam.
    video_capture = cv2.VideoCapture(args.captureDevice)
    video_capture.set(3, args.width)   # property 3: frame width
    video_capture.set(4, args.height)  # property 4: frame height

    confidenceList = []
    # Main loop: grab a frame, classify faces, overlay results; 'q' quits.
    while True:
        ret, frame = video_capture.read()
        persons, confidences = infer(frame, args)
        print ("P: " + str(persons) + " C: " + str(confidences))
        try:
            # append with two floating point precision
            confidenceList.append('%.2f' % confidences[0])
        except:
            # If there is no face detected, confidences matrix will be empty.
            # We can simply ignore it.
            # NOTE(review): bare `except:` — consider narrowing to
            # (IndexError, TypeError).
            pass

        # Low-confidence predictions are relabelled as unknown.
        for i, c in enumerate(confidences):
            if c <= args.threshold:  # 0.5 is kept as threshold for known face.
                persons[i] = "_unknown"

        # Print the person name and conf value on the frame
        cv2.putText(frame, "P: {} C: {}".format(persons, confidences),
                    (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
        cv2.imshow('', frame)
        # quit the program on the press of key 'q'
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything is done, release the capture
    video_capture.release()
    cv2.destroyAllWindows()
mitkin/pyresample | pyresample/plot.py | 1 | 9821 | # pyresample, Resampling of remote sensing image data in python
#
# Copyright (C) 2010-2015
#
# Authors:
# Esben S. Nielsen
# Thomas Lavergne
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import numpy as np
def ellps2axis(ellps_name):
    """Get semi-major and semi-minor axis from an ellipsoid definition.

    Parameters
    ----------
    ellps_name : str
        Standard (proj4-style) name of the ellipsoid; lookup is
        case-insensitive.

    Returns
    -------
    (a, b) : tuple of float
        Semi-major and semi-minor axis in metres.

    Raises
    ------
    ValueError
        If the ellipsoid name is not in the table below.
    """
    # Axis table keyed by lower-cased proj4 ellipsoid name.
    ellps = {'helmert': {'a': 6378200.0, 'b': 6356818.1696278909},
             'intl': {'a': 6378388.0, 'b': 6356911.9461279465},
             'merit': {'a': 6378137.0, 'b': 6356752.2982159676},
             'wgs72': {'a': 6378135.0, 'b': 6356750.5200160937},
             'sphere': {'a': 6370997.0, 'b': 6370997.0},
             'clrk66': {'a': 6378206.4000000004, 'b': 6356583.7999999998},
             'nwl9d': {'a': 6378145.0, 'b': 6356759.7694886839},
             'lerch': {'a': 6378139.0, 'b': 6356754.2915103417},
             'evrstss': {'a': 6377298.5559999999, 'b': 6356097.5503008962},
             'evrst30': {'a': 6377276.3449999997, 'b': 6356075.4131402401},
             'mprts': {'a': 6397300.0, 'b': 6363806.2827225132},
             'krass': {'a': 6378245.0, 'b': 6356863.0187730473},
             'walbeck': {'a': 6376896.0, 'b': 6355834.8466999996},
             'kaula': {'a': 6378163.0, 'b': 6356776.9920869097},
             'wgs66': {'a': 6378145.0, 'b': 6356759.7694886839},
             'evrst56': {'a': 6377301.2429999998, 'b': 6356100.2283681016},
             'new_intl': {'a': 6378157.5, 'b': 6356772.2000000002},
             'airy': {'a': 6377563.3959999997, 'b': 6356256.9100000001},
             'bessel': {'a': 6377397.1550000003, 'b': 6356078.9628181886},
             'seasia': {'a': 6378155.0, 'b': 6356773.3205000004},
             'aust_sa': {'a': 6378160.0, 'b': 6356774.7191953054},
             'wgs84': {'a': 6378137.0, 'b': 6356752.3142451793},
             'hough': {'a': 6378270.0, 'b': 6356794.3434343431},
             'wgs60': {'a': 6378165.0, 'b': 6356783.2869594367},
             'engelis': {'a': 6378136.0499999998, 'b': 6356751.3227215428},
             'apl4.9': {'a': 6378137.0, 'b': 6356751.796311819},
             'andrae': {'a': 6377104.4299999997, 'b': 6355847.4152333336},
             'sgs85': {'a': 6378136.0, 'b': 6356751.301568781},
             'delmbr': {'a': 6376428.0, 'b': 6355957.9261637237},
             'fschr60m': {'a': 6378155.0, 'b': 6356773.3204827355},
             'iau76': {'a': 6378140.0, 'b': 6356755.2881575283},
             'plessis': {'a': 6376523.0, 'b': 6355863.0},
             'cpm': {'a': 6375738.7000000002, 'b': 6356666.221912113},
             'fschr68': {'a': 6378150.0, 'b': 6356768.3372443849},
             'mod_airy': {'a': 6377340.1890000002, 'b': 6356034.4460000005},
             'grs80': {'a': 6378137.0, 'b': 6356752.3141403561},
             'bess_nam': {'a': 6377483.8650000002, 'b': 6356165.3829663256},
             'fschr60': {'a': 6378166.0, 'b': 6356784.2836071067},
             'clrk80': {'a': 6378249.1449999996, 'b': 6356514.9658284895},
             'evrst69': {'a': 6377295.6639999999, 'b': 6356094.6679152036},
             'grs67': {'a': 6378160.0, 'b': 6356774.5160907144},
             'evrst48': {'a': 6377304.0630000001, 'b': 6356103.0389931547}}
    try:
        ellps_axis = ellps[ellps_name.lower()]
        a = ellps_axis['a']
        b = ellps_axis['b']
    except KeyError as e:
        # Unknown name: surface a clearer error than a bare KeyError.
        raise ValueError(('Could not determine semi-major and semi-minor axis '
                          'of specified ellipsis %s') % ellps_name)
    return a, b
def area_def2basemap(area_def, **kwargs):
    """Get Basemap object from AreaDefinition.

    Parameters
    ----------
    area_def : object
        geometry.AreaDefinition object
    \*\*kwargs : Keyword arguments
        Additional initialization arguments for Basemap

    Returns
    -------
    bmap : Basemap object
    """
    from mpl_toolkits.basemap import Basemap
    proj_def = area_def.proj_dict

    # Ellipsoid: prefer a named ellipsoid, then explicit a/b axes, and
    # fall back to WGS84 when neither is present.
    if 'ellps' in proj_def:
        rsphere = ellps2axis(proj_def['ellps'])
    elif 'a' in proj_def:
        semi_major = float(proj_def['a'])
        if 'b' in proj_def:
            rsphere = (semi_major, float(proj_def['b']))
        else:
            rsphere = semi_major
    else:
        # Default to WGS84 ellipsoid
        rsphere = ellps2axis('wgs84')

    # Add projection specific basemap args to args passed to function
    basemap_args = kwargs
    basemap_args['rsphere'] = rsphere

    # Corner coordinates: projected metres for ortho/geos/nsper,
    # lon/lat degrees for everything else.
    if proj_def['proj'] in ('ortho', 'geos', 'nsper'):
        corner_keys = ('llcrnrx', 'llcrnry', 'urcrnrx', 'urcrnry')
        corner_values = area_def.area_extent
    else:
        corner_keys = ('llcrnrlon', 'llcrnrlat', 'urcrnrlon', 'urcrnrlat')
        corner_values = area_def.area_extent_ll
    basemap_args.update(zip(corner_keys, corner_values))

    # Basemap has no 'eqc' projection; plate carree is spelled 'cyl' there.
    basemap_args['projection'] = 'cyl' if proj_def['proj'] == 'eqc' else proj_def['proj']

    # Forward any remaining projection parameters Basemap understands.
    for key in ('lon_0', 'lat_0', 'lon_1', 'lat_1', 'lon_2', 'lat_2',
                'lat_ts'):
        if key in proj_def:
            basemap_args[key] = float(proj_def[key])

    return Basemap(**basemap_args)
def _get_quicklook(area_def, data, vmin=None, vmax=None,
                   label='Variable (units)', num_meridians=45,
                   num_parallels=10, coast_res='c', cmap='jet'):
    """Build the default Basemap/matplotlib quicklook plot and return pyplot."""
    if area_def.shape != data.shape:
        raise ValueError('area_def shape %s does not match data shape %s' %
                         (list(area_def.shape), list(data.shape)))
    import matplotlib.pyplot as plt
    bmap = area_def2basemap(area_def, resolution=coast_res)
    bmap.drawcoastlines()
    # Optional graticule; pass 0 (or negative) to suppress either set of lines.
    if num_meridians > 0:
        bmap.drawmeridians(np.arange(-180, 180, num_meridians))
    if num_parallels > 0:
        bmap.drawparallels(np.arange(-90, 90, num_parallels))
    # Skip the image (and colorbar) entirely when every pixel is masked.
    fully_masked = np.ma.isMaskedArray(data) and data.mask.all()
    if not fully_masked:
        col = bmap.imshow(data, origin='upper', vmin=vmin, vmax=vmax, cmap=cmap)
        plt.colorbar(col, shrink=0.5, pad=0.05).set_label(label)
    return plt
def show_quicklook(area_def, data, vmin=None, vmax=None,
                   label='Variable (units)', num_meridians=45,
                   num_parallels=10, coast_res='c', cmap='jet'):
    """Display default quicklook plot in a blocking window, then close it.

    Parameters
    ----------
    area_def : object
        geometry.AreaDefinition object
    data : numpy array | numpy masked array
        2D array matching area_def. Use masked array for transparent values
    vmin : float, optional
        Min value for luminescence scaling
    vmax : float, optional
        Max value for luminescence scaling
    label : str, optional
        Label for data
    num_meridians : int, optional
        Number of meridians to plot on the globe
    num_parallels : int, optional
        Number of parallels to plot on the globe
    coast_res : {'c', 'l', 'i', 'h', 'f'}, optional
        Resolution of coastlines
    cmap : str, optional
        Colormap name passed on to matplotlib

    Notes
    -----
    Returns None; the figure is shown and then closed. (The previous
    docstring incorrectly documented a Basemap return value.)
    """
    plt = _get_quicklook(area_def, data, vmin=vmin, vmax=vmax,
                         label=label, num_meridians=num_meridians,
                         num_parallels=num_parallels, coast_res=coast_res,
                         cmap=cmap)
    plt.show()
    plt.close()
def save_quicklook(filename, area_def, data, vmin=None, vmax=None,
                   label='Variable (units)', num_meridians=45,
                   num_parallels=10, coast_res='c', backend='AGG',
                   cmap='jet'):
    """Save a default quicklook plot to file.

    Parameters
    ----------
    filename : str
        path to output file
    area_def : object
        geometry.AreaDefinition object
    data : numpy array | numpy masked array
        2D array matching area_def. Use masked array for transparent values
    vmin : float, optional
        Min value for luminescence scaling
    vmax : float, optional
        Max value for luminescence scaling
    label : str, optional
        Label for data
    num_meridians : int, optional
        Number of meridians to plot on the globe
    num_parallels : int, optional
        Number of parallels to plot on the globe
    coast_res : {'c', 'l', 'i', 'h', 'f'}, optional
        Resolution of coastlines
    backend : str, optional
        matplotlib backend to use
    cmap : str, optional
        Colormap name passed on to matplotlib
    """
    import matplotlib
    # Select the (non-interactive) backend before pyplot is imported.
    # NOTE(review): the `warn` keyword was removed in matplotlib 3.x —
    # confirm against the pinned matplotlib version.
    matplotlib.use(backend, warn=False)
    # Bug fix: forward cmap to _get_quicklook — previously the parameter
    # was accepted but silently ignored.
    plt = _get_quicklook(area_def, data, vmin=vmin, vmax=vmax,
                         label=label, num_meridians=num_meridians,
                         num_parallels=num_parallels, coast_res=coast_res,
                         cmap=cmap)
    plt.savefig(filename, bbox_inches='tight')
    plt.close()
| lgpl-3.0 |
jlegendary/scikit-learn | sklearn/decomposition/tests/test_factor_analysis.py | 222 | 3055 | # Author: Christian Osendorfer <osendorf@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
    # Test FactorAnalysis ability to recover the data covariance structure
    rng = np.random.RandomState(0)
    n_samples, n_features, n_components = 20, 5, 3

    # Some random settings for the generative model
    W = rng.randn(n_components, n_features)
    # latent variable of dim 3, 20 of it
    h = rng.randn(n_samples, n_components)
    # using gamma to model different noise variance
    # per component
    noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)

    # generate observations
    # wlog, mean is 0
    X = np.dot(h, W) + noise

    # An unknown svd_method must be rejected — both when set at
    # construction and when mutated afterwards (checked at fit time).
    assert_raises(ValueError, FactorAnalysis, svd_method='foo')
    fa_fail = FactorAnalysis()
    fa_fail.svd_method = 'foo'
    assert_raises(ValueError, fa_fail.fit, X)
    fas = []
    for method in ['randomized', 'lapack']:
        fa = FactorAnalysis(n_components=n_components, svd_method=method)
        fa.fit(X)
        fas.append(fa)

        X_t = fa.transform(X)
        assert_equal(X_t.shape, (n_samples, n_components))

        # score_samples/score must be consistent with the stored
        # per-iteration log-likelihood trace.
        assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
        assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))

        # EM iterations must increase the log-likelihood monotonically.
        diff = np.all(np.diff(fa.loglike_))
        assert_greater(diff, 0., 'Log likelihood dif not increase')

        # Sample Covariance
        scov = np.cov(X, rowvar=0., bias=1.)

        # Model Covariance
        mcov = fa.get_covariance()
        diff = np.sum(np.abs(scov - mcov)) / W.size
        assert_less(diff, 0.1, "Mean absolute difference is %f" % diff)
        # noise_variance_init length must match the number of features;
        # fitting on fewer columns must raise.
        fa = FactorAnalysis(n_components=n_components,
                            noise_variance_init=np.ones(n_features))
        assert_raises(ValueError, fa.fit, X[:, :2])

    # Both SVD methods must agree (up to sign) on the fitted quantities.
    f = lambda x, y: np.abs(getattr(x, y))  # sign will not be equal
    fa1, fa2 = fas
    for attr in ['loglike_', 'components_', 'noise_variance_']:
        assert_almost_equal(f(fa1, attr), f(fa2, attr))

    # Hitting max_iter without convergence must emit ConvergenceWarning.
    fa1.max_iter = 1
    fa1.verbose = True
    assert_warns(ConvergenceWarning, fa1.fit, X)

    # Test get_covariance and get_precision with n_components == n_features
    # with n_components < n_features and with n_components == 0
    for n_components in [0, 2, X.shape[1]]:
        fa.n_components = n_components
        fa.fit(X)
        cov = fa.get_covariance()
        precision = fa.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)
| bsd-3-clause |
nikitasingh981/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more
than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
    """Draw the separating line of a fitted linear binary classifier.

    Solves ``w0*x + w1*y + intercept = 0`` for ``y`` and plots the result
    over a range slightly wider than ``[min_x, max_x]``.
    """
    coef = clf.coef_[0]
    slope = -coef[0] / coef[1]
    # extend 5 units past the data range so the line spans the whole axes
    xs = np.linspace(min_x - 5, max_x + 5)
    ys = slope * xs - clf.intercept_[0] / coef[1]
    plt.plot(xs, ys, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
    """Project X to 2D (PCA or CCA), fit one-vs-rest linear SVCs and plot.

    ``transform`` selects the projection: "pca" (unsupervised) or "cca"
    (supervised, uses Y). Any other value raises ValueError.
    """
    if transform == "pca":
        X = PCA(n_components=2).fit_transform(X)
    elif transform == "cca":
        X = CCA(n_components=2).fit(X, Y).transform(X)
    else:
        raise ValueError

    min_x, max_x = np.min(X[:, 0]), np.max(X[:, 0])
    min_y, max_y = np.min(X[:, 1]), np.max(X[:, 1])

    classif = OneVsRestClassifier(SVC(kernel='linear'))
    classif.fit(X, Y)

    plt.subplot(2, 2, subplot)
    plt.title(title)

    # Indices of the samples that carry each of the two labels; a sample
    # may appear in both sets (multi-label) or in neither (unlabeled).
    zero_class = np.where(Y[:, 0])
    one_class = np.where(Y[:, 1])
    # Gray dots mark every sample; hollow circles mark label membership.
    plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
    plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
                facecolors='none', linewidths=2, label='Class 1')
    plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
                facecolors='none', linewidths=2, label='Class 2')

    plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
                    'Boundary\nfor class 1')
    plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
                    'Boundary\nfor class 2')
    plt.xticks(())
    plt.yticks(())

    plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
    plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
    # Only the second panel gets axis labels and a legend.
    if subplot == 2:
        plt.xlabel('First principal component')
        plt.ylabel('Second principal component')
        plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
# Top row (subplots 1-2): dataset that may contain unlabeled samples.
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=True,
                                      random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
# Bottom row (subplots 3-4): every sample carries at least one label.
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
                                      allow_unlabeled=False,
                                      random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
GuessWhoSamFoo/pandas | pandas/tests/tseries/offsets/test_offsets.py | 1 | 130620 | from datetime import date, datetime, timedelta
from distutils.version import LooseVersion
import numpy as np
import pytest
from pandas._libs.tslibs import (
NaT, OutOfBoundsDatetime, Timestamp, conversion, timezones)
from pandas._libs.tslibs.frequencies import (
INVALID_FREQ_ERR_MSG, get_freq_code, get_freq_str)
import pandas._libs.tslibs.offsets as liboffsets
import pandas.compat as compat
from pandas.compat import range
from pandas.compat.numpy import np_datetime64_compat
from pandas.core.indexes.datetimes import DatetimeIndex, _to_M8, date_range
from pandas.core.series import Series
import pandas.util.testing as tm
from pandas.io.pickle import read_pickle
from pandas.tseries.frequencies import _offset_map, get_offset
from pandas.tseries.holiday import USFederalHolidayCalendar
import pandas.tseries.offsets as offsets
from pandas.tseries.offsets import (
FY5253, BDay, BMonthBegin, BMonthEnd, BQuarterBegin, BQuarterEnd,
BusinessHour, BYearBegin, BYearEnd, CBMonthBegin, CBMonthEnd, CDay,
CustomBusinessHour, DateOffset, Day, Easter, FY5253Quarter,
LastWeekOfMonth, MonthBegin, MonthEnd, Nano, QuarterBegin, QuarterEnd,
SemiMonthBegin, SemiMonthEnd, Tick, Week, WeekOfMonth, YearBegin, YearEnd)
from .common import assert_offset_equal, assert_onOffset
class WeekDay(object):
    """Integer weekday constants, Monday == 0 through Sunday == 6."""
    # TODO: Remove: This is not used outside of tests
    MON, TUE, WED, THU, FRI, SAT, SUN = range(7)
####
# Misc function tests
####
def test_to_M8():
    # _to_M8 should turn a stdlib datetime into a numpy datetime64 scalar.
    stamp = datetime(2007, 10, 1)
    converted = _to_M8(stamp)
    assert isinstance(converted, np.datetime64)
#####
# DateOffset Tests
#####
class Base(object):
    """Mixin with tests shared by all DateOffset test classes.

    Subclasses set ``_offset`` to the offset class under test; several
    tests also expect ``offset1``/``offset2`` instances created in the
    subclass's ``setup_method`` and skip themselves when those attributes
    are missing.
    """
    _offset = None
    # Reference timestamp used by the arithmetic tests below.
    d = Timestamp(datetime(2008, 1, 2))
    # Naive plus pytz- and dateutil-backed zones for timezone coverage.
    timezones = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern',
                 'dateutil/Asia/Tokyo', 'dateutil/US/Pacific']
    def _get_offset(self, klass, value=1, normalize=False):
        # create instance from offset class; classes with required extra
        # constructor arguments get fixed test values here
        if klass is FY5253:
            klass = klass(n=value, startingMonth=1, weekday=1,
                          variation='last', normalize=normalize)
        elif klass is FY5253Quarter:
            klass = klass(n=value, startingMonth=1, weekday=1,
                          qtr_with_extra_week=1, variation='last',
                          normalize=normalize)
        elif klass is LastWeekOfMonth:
            klass = klass(n=value, weekday=5, normalize=normalize)
        elif klass is WeekOfMonth:
            klass = klass(n=value, week=1, weekday=5, normalize=normalize)
        elif klass is Week:
            klass = klass(n=value, weekday=5, normalize=normalize)
        elif klass is DateOffset:
            klass = klass(days=value, normalize=normalize)
        else:
            try:
                klass = klass(value, normalize=normalize)
            except Exception:
                # offset class does not accept a positional n
                klass = klass(normalize=normalize)
        return klass
    def test_apply_out_of_range(self, tz_naive_fixture):
        tz = tz_naive_fixture
        if self._offset is None:
            return
        # try to create an out-of-bounds result timestamp; if we can't create
        # the offset skip
        try:
            if self._offset in (BusinessHour, CustomBusinessHour):
                # Using 10000 in BusinessHour fails in tz check because of DST
                # difference
                offset = self._get_offset(self._offset, value=100000)
            else:
                offset = self._get_offset(self._offset, value=10000)
            result = Timestamp('20080101') + offset
            assert isinstance(result, datetime)
            assert result.tzinfo is None
            # Check tz is preserved
            t = Timestamp('20080101', tz=tz)
            result = t + offset
            assert isinstance(result, datetime)
            assert t.tzinfo == result.tzinfo
        except OutOfBoundsDatetime:
            raise
        except (ValueError, KeyError):
            # we are creating an invalid offset
            # so ignore
            pass
    def test_offsets_compare_equal(self):
        # root cause of GH#456: __ne__ was not implemented
        if self._offset is None:
            return
        offset1 = self._offset()
        offset2 = self._offset()
        assert not offset1 != offset2
        assert offset1 == offset2
    def test_rsub(self):
        if self._offset is None or not hasattr(self, "offset2"):
            # i.e. skip for TestCommon and YQM subclasses that do not have
            # offset2 attr
            return
        # subtracting an offset equals applying its negation
        assert self.d - self.offset2 == (-self.offset2).apply(self.d)
    def test_radd(self):
        if self._offset is None or not hasattr(self, "offset2"):
            # i.e. skip for TestCommon and YQM subclasses that do not have
            # offset2 attr
            return
        # offset addition commutes with a timestamp
        assert self.d + self.offset2 == self.offset2 + self.d
    def test_sub(self):
        if self._offset is None or not hasattr(self, "offset2"):
            # i.e. skip for TestCommon and YQM subclasses that do not have
            # offset2 attr
            return
        off = self.offset2
        # offset - timestamp is undefined and must raise
        with pytest.raises(Exception):
            off - self.d
        assert 2 * off - off == off
        # NOTE: assumes offset2 == self._offset(2); subclasses where that
        # does not hold (e.g. BusinessHour) override this test
        assert self.d - self.offset2 == self.d + self._offset(-2)
        assert self.d - self.offset2 == self.d - (2 * off - off)
    def testMult1(self):
        if self._offset is None or not hasattr(self, "offset1"):
            # i.e. skip for TestCommon and YQM subclasses that do not have
            # offset1 attr
            return
        assert self.d + 10 * self.offset1 == self.d + self._offset(10)
        assert self.d + 5 * self.offset1 == self.d + self._offset(5)
    def testMult2(self):
        if self._offset is None:
            return
        # multiplying a negative offset by a negative int flips the sign
        assert self.d + (-5 * self._offset(-10)) == self.d + self._offset(50)
        assert self.d + (-3 * self._offset(-2)) == self.d + self._offset(6)
    def test_compare_str(self):
        # GH#23524
        # comparing to strings that cannot be cast to DateOffsets should
        # not raise for __eq__ or __ne__
        if self._offset is None:
            return
        off = self._get_offset(self._offset)
        assert not off == "infer"
        assert off != "foo"
        # Note: inequalities are only implemented for Tick subclasses;
        # tests for this are in test_ticks
class TestCommon(Base):
    """Tests run against every offset class via the ``offset_types`` fixture."""
    # expected values created by Base._get_offset
    # are applied to 2011/01/01 09:00 (Saturday)
    # used for .apply and .rollforward
    expecteds = {'Day': Timestamp('2011-01-02 09:00:00'),
                 'DateOffset': Timestamp('2011-01-02 09:00:00'),
                 'BusinessDay': Timestamp('2011-01-03 09:00:00'),
                 'CustomBusinessDay': Timestamp('2011-01-03 09:00:00'),
                 'CustomBusinessMonthEnd': Timestamp('2011-01-31 09:00:00'),
                 'CustomBusinessMonthBegin': Timestamp('2011-01-03 09:00:00'),
                 'MonthBegin': Timestamp('2011-02-01 09:00:00'),
                 'BusinessMonthBegin': Timestamp('2011-01-03 09:00:00'),
                 'MonthEnd': Timestamp('2011-01-31 09:00:00'),
                 'SemiMonthEnd': Timestamp('2011-01-15 09:00:00'),
                 'SemiMonthBegin': Timestamp('2011-01-15 09:00:00'),
                 'BusinessMonthEnd': Timestamp('2011-01-31 09:00:00'),
                 'YearBegin': Timestamp('2012-01-01 09:00:00'),
                 'BYearBegin': Timestamp('2011-01-03 09:00:00'),
                 'YearEnd': Timestamp('2011-12-31 09:00:00'),
                 'BYearEnd': Timestamp('2011-12-30 09:00:00'),
                 'QuarterBegin': Timestamp('2011-03-01 09:00:00'),
                 'BQuarterBegin': Timestamp('2011-03-01 09:00:00'),
                 'QuarterEnd': Timestamp('2011-03-31 09:00:00'),
                 'BQuarterEnd': Timestamp('2011-03-31 09:00:00'),
                 'BusinessHour': Timestamp('2011-01-03 10:00:00'),
                 'CustomBusinessHour': Timestamp('2011-01-03 10:00:00'),
                 'WeekOfMonth': Timestamp('2011-01-08 09:00:00'),
                 'LastWeekOfMonth': Timestamp('2011-01-29 09:00:00'),
                 'FY5253Quarter': Timestamp('2011-01-25 09:00:00'),
                 'FY5253': Timestamp('2011-01-25 09:00:00'),
                 'Week': Timestamp('2011-01-08 09:00:00'),
                 'Easter': Timestamp('2011-04-24 09:00:00'),
                 'Hour': Timestamp('2011-01-01 10:00:00'),
                 'Minute': Timestamp('2011-01-01 09:01:00'),
                 'Second': Timestamp('2011-01-01 09:00:01'),
                 'Milli': Timestamp('2011-01-01 09:00:00.001000'),
                 'Micro': Timestamp('2011-01-01 09:00:00.000001'),
                 'Nano': Timestamp(np_datetime64_compat(
                     '2011-01-01T09:00:00.000000001Z'))}
    def test_immutable(self, offset_types):
        # GH#21341 check that __setattr__ raises
        offset = self._get_offset(offset_types)
        with pytest.raises(AttributeError):
            offset.normalize = True
        with pytest.raises(AttributeError):
            offset.n = 91
    def test_return_type(self, offset_types):
        offset = self._get_offset(offset_types)
        # make sure that we are returning a Timestamp
        result = Timestamp('20080101') + offset
        assert isinstance(result, Timestamp)
        # make sure that we are returning NaT
        assert NaT + offset is NaT
        assert offset + NaT is NaT
        assert NaT - offset is NaT
        assert (-offset).apply(NaT) is NaT
    def test_offset_n(self, offset_types):
        # n should scale/negate under integer multiplication
        offset = self._get_offset(offset_types)
        assert offset.n == 1
        neg_offset = offset * -1
        assert neg_offset.n == -1
        mul_offset = offset * 3
        assert mul_offset.n == 3
    def test_offset_timedelta64_arg(self, offset_types):
        # check that offset._validate_n raises TypeError on a timedelt64
        # object
        off = self._get_offset(offset_types)
        td64 = np.timedelta64(4567, 's')
        with pytest.raises(TypeError, match="argument must be an integer"):
            type(off)(n=td64, **off.kwds)
    def test_offset_mul_ndarray(self, offset_types):
        # elementwise multiplication against an ndarray, both operand orders
        off = self._get_offset(offset_types)
        expected = np.array([[off, off * 2], [off * 3, off * 4]])
        result = np.array([[1, 2], [3, 4]]) * off
        tm.assert_numpy_array_equal(result, expected)
        result = off * np.array([[1, 2], [3, 4]])
        tm.assert_numpy_array_equal(result, expected)
    def test_offset_freqstr(self, offset_types):
        # round-trip: freqstr -> get_offset -> rule_code, where supported
        offset = self._get_offset(offset_types)
        freqstr = offset.freqstr
        if freqstr not in ('<Easter>',
                           "<DateOffset: days=1>",
                           'LWOM-SAT', ):
            code = get_offset(freqstr)
            assert offset.rule_code == code
    def _check_offsetfunc_works(self, offset, funcname, dt, expected,
                                normalize=False):
        # Shared checker: applies ``funcname`` (apply/rollforward/rollback)
        # to datetime, Timestamp and tz-aware inputs and compares results.
        if normalize and issubclass(offset, Tick):
            # normalize=True disallowed for Tick subclasses GH#21427
            return
        offset_s = self._get_offset(offset, normalize=normalize)
        func = getattr(offset_s, funcname)
        result = func(dt)
        assert isinstance(result, Timestamp)
        assert result == expected
        result = func(Timestamp(dt))
        assert isinstance(result, Timestamp)
        assert result == expected
        # see gh-14101
        exp_warning = None
        ts = Timestamp(dt) + Nano(5)
        if (offset_s.__class__.__name__ == 'DateOffset' and
                (funcname == 'apply' or normalize) and
                ts.nanosecond > 0):
            exp_warning = UserWarning
        # test nanosecond is preserved
        with tm.assert_produces_warning(exp_warning,
                                        check_stacklevel=False):
            result = func(ts)
        assert isinstance(result, Timestamp)
        if normalize is False:
            assert result == expected + Nano(5)
        else:
            assert result == expected
        if isinstance(dt, np.datetime64):
            # test tz when input is datetime or Timestamp
            return
        for tz in self.timezones:
            expected_localize = expected.tz_localize(tz)
            tz_obj = timezones.maybe_get_tz(tz)
            dt_tz = conversion.localize_pydatetime(dt, tz_obj)
            result = func(dt_tz)
            assert isinstance(result, Timestamp)
            assert result == expected_localize
            result = func(Timestamp(dt, tz=tz))
            assert isinstance(result, Timestamp)
            assert result == expected_localize
            # see gh-14101
            exp_warning = None
            ts = Timestamp(dt, tz=tz) + Nano(5)
            if (offset_s.__class__.__name__ == 'DateOffset' and
                    (funcname == 'apply' or normalize) and
                    ts.nanosecond > 0):
                exp_warning = UserWarning
            # test nanosecond is preserved
            with tm.assert_produces_warning(exp_warning,
                                            check_stacklevel=False):
                result = func(ts)
            assert isinstance(result, Timestamp)
            if normalize is False:
                assert result == expected_localize + Nano(5)
            else:
                assert result == expected_localize
    def test_apply(self, offset_types):
        sdt = datetime(2011, 1, 1, 9, 0)
        ndt = np_datetime64_compat('2011-01-01 09:00Z')
        for dt in [sdt, ndt]:
            expected = self.expecteds[offset_types.__name__]
            self._check_offsetfunc_works(offset_types, 'apply', dt, expected)
            # with normalize=True the expected result drops the time component
            expected = Timestamp(expected.date())
            self._check_offsetfunc_works(offset_types, 'apply', dt, expected,
                                         normalize=True)
    def test_rollforward(self, offset_types):
        expecteds = self.expecteds.copy()
        # result will not be changed if the target is on the offset
        no_changes = ['Day', 'MonthBegin', 'SemiMonthBegin', 'YearBegin',
                      'Week', 'Hour', 'Minute', 'Second', 'Milli', 'Micro',
                      'Nano', 'DateOffset']
        for n in no_changes:
            expecteds[n] = Timestamp('2011/01/01 09:00')
        expecteds['BusinessHour'] = Timestamp('2011-01-03 09:00:00')
        expecteds['CustomBusinessHour'] = Timestamp('2011-01-03 09:00:00')
        # but be changed when normalize=True
        norm_expected = expecteds.copy()
        for k in norm_expected:
            norm_expected[k] = Timestamp(norm_expected[k].date())
        normalized = {'Day': Timestamp('2011-01-02 00:00:00'),
                      'DateOffset': Timestamp('2011-01-02 00:00:00'),
                      'MonthBegin': Timestamp('2011-02-01 00:00:00'),
                      'SemiMonthBegin': Timestamp('2011-01-15 00:00:00'),
                      'YearBegin': Timestamp('2012-01-01 00:00:00'),
                      'Week': Timestamp('2011-01-08 00:00:00'),
                      'Hour': Timestamp('2011-01-01 00:00:00'),
                      'Minute': Timestamp('2011-01-01 00:00:00'),
                      'Second': Timestamp('2011-01-01 00:00:00'),
                      'Milli': Timestamp('2011-01-01 00:00:00'),
                      'Micro': Timestamp('2011-01-01 00:00:00')}
        norm_expected.update(normalized)
        sdt = datetime(2011, 1, 1, 9, 0)
        ndt = np_datetime64_compat('2011-01-01 09:00Z')
        for dt in [sdt, ndt]:
            expected = expecteds[offset_types.__name__]
            self._check_offsetfunc_works(offset_types, 'rollforward', dt,
                                         expected)
            expected = norm_expected[offset_types.__name__]
            self._check_offsetfunc_works(offset_types, 'rollforward', dt,
                                         expected, normalize=True)
    def test_rollback(self, offset_types):
        expecteds = {'BusinessDay': Timestamp('2010-12-31 09:00:00'),
                     'CustomBusinessDay': Timestamp('2010-12-31 09:00:00'),
                     'CustomBusinessMonthEnd':
                     Timestamp('2010-12-31 09:00:00'),
                     'CustomBusinessMonthBegin':
                     Timestamp('2010-12-01 09:00:00'),
                     'BusinessMonthBegin': Timestamp('2010-12-01 09:00:00'),
                     'MonthEnd': Timestamp('2010-12-31 09:00:00'),
                     'SemiMonthEnd': Timestamp('2010-12-31 09:00:00'),
                     'BusinessMonthEnd': Timestamp('2010-12-31 09:00:00'),
                     'BYearBegin': Timestamp('2010-01-01 09:00:00'),
                     'YearEnd': Timestamp('2010-12-31 09:00:00'),
                     'BYearEnd': Timestamp('2010-12-31 09:00:00'),
                     'QuarterBegin': Timestamp('2010-12-01 09:00:00'),
                     'BQuarterBegin': Timestamp('2010-12-01 09:00:00'),
                     'QuarterEnd': Timestamp('2010-12-31 09:00:00'),
                     'BQuarterEnd': Timestamp('2010-12-31 09:00:00'),
                     'BusinessHour': Timestamp('2010-12-31 17:00:00'),
                     'CustomBusinessHour': Timestamp('2010-12-31 17:00:00'),
                     'WeekOfMonth': Timestamp('2010-12-11 09:00:00'),
                     'LastWeekOfMonth': Timestamp('2010-12-25 09:00:00'),
                     'FY5253Quarter': Timestamp('2010-10-26 09:00:00'),
                     'FY5253': Timestamp('2010-01-26 09:00:00'),
                     'Easter': Timestamp('2010-04-04 09:00:00')}
        # result will not be changed if the target is on the offset
        for n in ['Day', 'MonthBegin', 'SemiMonthBegin', 'YearBegin', 'Week',
                  'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano',
                  'DateOffset']:
            expecteds[n] = Timestamp('2011/01/01 09:00')
        # but be changed when normalize=True
        norm_expected = expecteds.copy()
        for k in norm_expected:
            norm_expected[k] = Timestamp(norm_expected[k].date())
        normalized = {'Day': Timestamp('2010-12-31 00:00:00'),
                      'DateOffset': Timestamp('2010-12-31 00:00:00'),
                      'MonthBegin': Timestamp('2010-12-01 00:00:00'),
                      'SemiMonthBegin': Timestamp('2010-12-15 00:00:00'),
                      'YearBegin': Timestamp('2010-01-01 00:00:00'),
                      'Week': Timestamp('2010-12-25 00:00:00'),
                      'Hour': Timestamp('2011-01-01 00:00:00'),
                      'Minute': Timestamp('2011-01-01 00:00:00'),
                      'Second': Timestamp('2011-01-01 00:00:00'),
                      'Milli': Timestamp('2011-01-01 00:00:00'),
                      'Micro': Timestamp('2011-01-01 00:00:00')}
        norm_expected.update(normalized)
        sdt = datetime(2011, 1, 1, 9, 0)
        ndt = np_datetime64_compat('2011-01-01 09:00Z')
        for dt in [sdt, ndt]:
            expected = expecteds[offset_types.__name__]
            self._check_offsetfunc_works(offset_types, 'rollback', dt,
                                         expected)
            expected = norm_expected[offset_types.__name__]
            self._check_offsetfunc_works(offset_types, 'rollback', dt,
                                         expected, normalize=True)
    def test_onOffset(self, offset_types):
        dt = self.expecteds[offset_types.__name__]
        offset_s = self._get_offset(offset_types)
        assert offset_s.onOffset(dt)
        # when normalize=True, onOffset checks time is 00:00:00
        if issubclass(offset_types, Tick):
            # normalize=True disallowed for Tick subclasses GH#21427
            return
        offset_n = self._get_offset(offset_types, normalize=True)
        assert not offset_n.onOffset(dt)
        if offset_types in (BusinessHour, CustomBusinessHour):
            # In default BusinessHour (9:00-17:00), normalized time
            # cannot be in business hour range
            return
        date = datetime(dt.year, dt.month, dt.day)
        assert offset_n.onOffset(date)
    def test_add(self, offset_types, tz_naive_fixture):
        tz = tz_naive_fixture
        dt = datetime(2011, 1, 1, 9, 0)
        offset_s = self._get_offset(offset_types)
        expected = self.expecteds[offset_types.__name__]
        result_dt = dt + offset_s
        result_ts = Timestamp(dt) + offset_s
        for result in [result_dt, result_ts]:
            assert isinstance(result, Timestamp)
            assert result == expected
        expected_localize = expected.tz_localize(tz)
        result = Timestamp(dt, tz=tz) + offset_s
        assert isinstance(result, Timestamp)
        assert result == expected_localize
        # normalize=True, disallowed for Tick subclasses GH#21427
        if issubclass(offset_types, Tick):
            return
        offset_s = self._get_offset(offset_types, normalize=True)
        expected = Timestamp(expected.date())
        result_dt = dt + offset_s
        result_ts = Timestamp(dt) + offset_s
        for result in [result_dt, result_ts]:
            assert isinstance(result, Timestamp)
            assert result == expected
        expected_localize = expected.tz_localize(tz)
        result = Timestamp(dt, tz=tz) + offset_s
        assert isinstance(result, Timestamp)
        assert result == expected_localize
    def test_pickle_v0_15_2(self, datapath):
        # backwards compatibility: unpickle offsets written by pandas 0.15.2
        offsets = {'DateOffset': DateOffset(years=1),
                   'MonthBegin': MonthBegin(1),
                   'Day': Day(1),
                   'YearBegin': YearBegin(1),
                   'Week': Week(1)}
        pickle_path = datapath('tseries', 'offsets', 'data',
                               'dateoffset_0_15_2.pickle')
        # This code was executed once on v0.15.2 to generate the pickle:
        # with open(pickle_path, 'wb') as f: pickle.dump(offsets, f)
        #
        tm.assert_dict_equal(offsets, read_pickle(pickle_path))
class TestDateOffset(Base):
    """Tests for the generic DateOffset class."""

    def setup_method(self, method):
        self.d = Timestamp(datetime(2008, 1, 2))
        _offset_map.clear()

    def test_repr(self):
        # Smoke test: repr must not raise for plain or multiplied offsets.
        for off in (DateOffset(), DateOffset(2),
                    2 * DateOffset(), 2 * DateOffset(months=2)):
            repr(off)

    def test_mul(self):
        # Integer multiplication scales n regardless of operand order.
        assert 2 * DateOffset(1) == DateOffset(2)
        assert DateOffset(1) * 2 == DateOffset(2)

    def test_constructor(self):
        assert self.d + DateOffset(months=2) == datetime(2008, 3, 2)
        assert self.d - DateOffset(months=2) == datetime(2007, 11, 2)
        assert self.d + DateOffset(2) == datetime(2008, 1, 4)
        assert not DateOffset(2).isAnchored()
        assert DateOffset(1).isAnchored()
        # Month-end clipping: Jan 31 + 1 month lands on leap-year Feb 29.
        month_end = datetime(2008, 1, 31)
        assert month_end + DateOffset(months=1) == datetime(2008, 2, 29)

    def test_copy(self):
        assert DateOffset(months=2).copy() == DateOffset(months=2)

    def test_eq(self):
        # Offsets with different kwds must not compare equal.
        assert DateOffset(days=1) != DateOffset(days=365)
class TestBusinessDay(Base):
    """Tests for the BusinessDay (BDay) offset."""
    _offset = BDay
    def setup_method(self, method):
        # self.d is a Tuesday, so it is already on-offset
        self.d = datetime(2008, 1, 1)
        self.offset = BDay()
        self.offset1 = self.offset
        self.offset2 = BDay(2)
    def test_different_normalize_equals(self):
        # GH#21404 changed __eq__ to return False when `normalize` doesnt match
        offset = self._offset()
        offset2 = self._offset(normalize=True)
        assert offset != offset2
    def test_repr(self):
        assert repr(self.offset) == '<BusinessDay>'
        assert repr(self.offset2) == '<2 * BusinessDays>'
        # timedelta repr changed in Python 3.7
        if compat.PY37:
            expected = '<BusinessDay: offset=datetime.timedelta(days=1)>'
        else:
            expected = '<BusinessDay: offset=datetime.timedelta(1)>'
        assert repr(self.offset + timedelta(1)) == expected
    def test_with_offset(self):
        # a timedelta can be attached to a BDay as an extra offset
        offset = self.offset + timedelta(hours=2)
        assert (self.d + offset) == datetime(2008, 1, 2, 2)
    def test_eq(self):
        assert self.offset2 == self.offset2
    def test_mul(self):
        pass
    def test_hash(self):
        assert hash(self.offset2) == hash(self.offset2)
    def test_call(self):
        # calling an offset applies it
        assert self.offset2(self.d) == datetime(2008, 1, 3)
    def testRollback1(self):
        assert BDay(10).rollback(self.d) == self.d
    def testRollback2(self):
        # 2008-01-05 is a Saturday; rollback lands on the preceding Friday
        assert (BDay(10).rollback(datetime(2008, 1, 5)) ==
                datetime(2008, 1, 4))
    def testRollforward1(self):
        assert BDay(10).rollforward(self.d) == self.d
    def testRollforward2(self):
        # 2008-01-05 is a Saturday; rollforward lands on the next Monday
        assert (BDay(10).rollforward(datetime(2008, 1, 5)) ==
                datetime(2008, 1, 7))
    def test_roll_date_object(self):
        offset = BDay()
        dt = date(2012, 9, 15)
        result = offset.rollback(dt)
        assert result == datetime(2012, 9, 14)
        result = offset.rollforward(dt)
        assert result == datetime(2012, 9, 17)
        offset = offsets.Day()
        result = offset.rollback(dt)
        assert result == datetime(2012, 9, 15)
        result = offset.rollforward(dt)
        assert result == datetime(2012, 9, 15)
    def test_onOffset(self):
        tests = [(BDay(), datetime(2008, 1, 1), True),
                 (BDay(), datetime(2008, 1, 5), False)]
        for offset, d, expected in tests:
            assert_onOffset(offset, d, expected)
    # (offset, {input: expected}) cases for the parametrized test_apply below
    apply_cases = []
    apply_cases.append((BDay(), {
        datetime(2008, 1, 1): datetime(2008, 1, 2),
        datetime(2008, 1, 4): datetime(2008, 1, 7),
        datetime(2008, 1, 5): datetime(2008, 1, 7),
        datetime(2008, 1, 6): datetime(2008, 1, 7),
        datetime(2008, 1, 7): datetime(2008, 1, 8)}))
    apply_cases.append((2 * BDay(), {
        datetime(2008, 1, 1): datetime(2008, 1, 3),
        datetime(2008, 1, 4): datetime(2008, 1, 8),
        datetime(2008, 1, 5): datetime(2008, 1, 8),
        datetime(2008, 1, 6): datetime(2008, 1, 8),
        datetime(2008, 1, 7): datetime(2008, 1, 9)}))
    apply_cases.append((-BDay(), {
        datetime(2008, 1, 1): datetime(2007, 12, 31),
        datetime(2008, 1, 4): datetime(2008, 1, 3),
        datetime(2008, 1, 5): datetime(2008, 1, 4),
        datetime(2008, 1, 6): datetime(2008, 1, 4),
        datetime(2008, 1, 7): datetime(2008, 1, 4),
        datetime(2008, 1, 8): datetime(2008, 1, 7)}))
    apply_cases.append((-2 * BDay(), {
        datetime(2008, 1, 1): datetime(2007, 12, 28),
        datetime(2008, 1, 4): datetime(2008, 1, 2),
        datetime(2008, 1, 5): datetime(2008, 1, 3),
        datetime(2008, 1, 6): datetime(2008, 1, 3),
        datetime(2008, 1, 7): datetime(2008, 1, 3),
        datetime(2008, 1, 8): datetime(2008, 1, 4),
        datetime(2008, 1, 9): datetime(2008, 1, 7)}))
    apply_cases.append((BDay(0), {
        datetime(2008, 1, 1): datetime(2008, 1, 1),
        datetime(2008, 1, 4): datetime(2008, 1, 4),
        datetime(2008, 1, 5): datetime(2008, 1, 7),
        datetime(2008, 1, 6): datetime(2008, 1, 7),
        datetime(2008, 1, 7): datetime(2008, 1, 7)}))
    @pytest.mark.parametrize('case', apply_cases)
    def test_apply(self, case):
        offset, cases = case
        for base, expected in compat.iteritems(cases):
            assert_offset_equal(offset, base, expected)
    def test_apply_large_n(self):
        dt = datetime(2012, 10, 23)
        result = dt + BDay(10)
        assert result == datetime(2012, 11, 6)
        # adding and subtracting the same large offset round-trips
        result = dt + BDay(100) - BDay(100)
        assert result == dt
        off = BDay() * 6
        rs = datetime(2012, 1, 1) - off
        xp = datetime(2011, 12, 23)
        assert rs == xp
        st = datetime(2011, 12, 18)
        rs = st + off
        xp = datetime(2011, 12, 26)
        assert rs == xp
        off = BDay() * 10
        rs = datetime(2014, 1, 5) + off  # see #5890
        xp = datetime(2014, 1, 17)
        assert rs == xp
    def test_apply_corner(self):
        # applying a BDay to another offset is a type error
        pytest.raises(TypeError, BDay().apply, BMonthEnd())
class TestBusinessHour(Base):
_offset = BusinessHour
def setup_method(self, method):
self.d = datetime(2014, 7, 1, 10, 00)
self.offset1 = BusinessHour()
self.offset2 = BusinessHour(n=3)
self.offset3 = BusinessHour(n=-1)
self.offset4 = BusinessHour(n=-4)
from datetime import time as dt_time
self.offset5 = BusinessHour(start=dt_time(11, 0), end=dt_time(14, 30))
self.offset6 = BusinessHour(start='20:00', end='05:00')
self.offset7 = BusinessHour(n=-2, start=dt_time(21, 30),
end=dt_time(6, 30))
def test_constructor_errors(self):
from datetime import time as dt_time
with pytest.raises(ValueError):
BusinessHour(start=dt_time(11, 0, 5))
with pytest.raises(ValueError):
BusinessHour(start='AAA')
with pytest.raises(ValueError):
BusinessHour(start='14:00:05')
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` doesnt match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset1) == '<BusinessHour: BH=09:00-17:00>'
assert repr(self.offset2) == '<3 * BusinessHours: BH=09:00-17:00>'
assert repr(self.offset3) == '<-1 * BusinessHour: BH=09:00-17:00>'
assert repr(self.offset4) == '<-4 * BusinessHours: BH=09:00-17:00>'
assert repr(self.offset5) == '<BusinessHour: BH=11:00-14:30>'
assert repr(self.offset6) == '<BusinessHour: BH=20:00-05:00>'
assert repr(self.offset7) == '<-2 * BusinessHours: BH=21:30-06:30>'
def test_with_offset(self):
expected = Timestamp('2014-07-01 13:00')
assert self.d + BusinessHour() * 3 == expected
assert self.d + BusinessHour(n=3) == expected
def test_eq(self):
for offset in [self.offset1, self.offset2, self.offset3, self.offset4]:
assert offset == offset
assert BusinessHour() != BusinessHour(-1)
assert BusinessHour(start='09:00') == BusinessHour()
assert BusinessHour(start='09:00') != BusinessHour(start='09:01')
assert (BusinessHour(start='09:00', end='17:00') !=
BusinessHour(start='17:00', end='09:01'))
def test_hash(self):
for offset in [self.offset1, self.offset2, self.offset3, self.offset4]:
assert hash(offset) == hash(offset)
def test_call(self):
assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
assert self.offset2(self.d) == datetime(2014, 7, 1, 13)
assert self.offset3(self.d) == datetime(2014, 6, 30, 17)
assert self.offset4(self.d) == datetime(2014, 6, 30, 14)
def test_sub(self):
# we have to override test_sub here becasue self.offset2 is not
# defined as self._offset(2)
off = self.offset2
with pytest.raises(Exception):
off - self.d
assert 2 * off - off == off
assert self.d - self.offset2 == self.d + self._offset(-3)
def testRollback1(self):
assert self.offset1.rollback(self.d) == self.d
assert self.offset2.rollback(self.d) == self.d
assert self.offset3.rollback(self.d) == self.d
assert self.offset4.rollback(self.d) == self.d
assert self.offset5.rollback(self.d) == datetime(2014, 6, 30, 14, 30)
assert self.offset6.rollback(self.d) == datetime(2014, 7, 1, 5, 0)
assert self.offset7.rollback(self.d) == datetime(2014, 7, 1, 6, 30)
d = datetime(2014, 7, 1, 0)
assert self.offset1.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset2.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset3.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset4.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset5.rollback(d) == datetime(2014, 6, 30, 14, 30)
assert self.offset6.rollback(d) == d
assert self.offset7.rollback(d) == d
assert self._offset(5).rollback(self.d) == self.d
def testRollback2(self):
assert (self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) ==
datetime(2014, 7, 4, 17, 0))
def testRollforward1(self):
assert self.offset1.rollforward(self.d) == self.d
assert self.offset2.rollforward(self.d) == self.d
assert self.offset3.rollforward(self.d) == self.d
assert self.offset4.rollforward(self.d) == self.d
assert (self.offset5.rollforward(self.d) ==
datetime(2014, 7, 1, 11, 0))
assert (self.offset6.rollforward(self.d) ==
datetime(2014, 7, 1, 20, 0))
assert (self.offset7.rollforward(self.d) ==
datetime(2014, 7, 1, 21, 30))
d = datetime(2014, 7, 1, 0)
assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset3.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset4.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset5.rollforward(d) == datetime(2014, 7, 1, 11)
assert self.offset6.rollforward(d) == d
assert self.offset7.rollforward(d) == d
assert self._offset(5).rollforward(self.d) == self.d
def testRollforward2(self):
assert (self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) ==
datetime(2014, 7, 7, 9))
def test_roll_date_object(self):
offset = BusinessHour()
dt = datetime(2014, 7, 6, 15, 0)
result = offset.rollback(dt)
assert result == datetime(2014, 7, 4, 17)
result = offset.rollforward(dt)
assert result == datetime(2014, 7, 7, 9)
normalize_cases = []
normalize_cases.append((BusinessHour(normalize=True), {
datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
datetime(2014, 7, 6, 10): datetime(2014, 7, 7)}))
normalize_cases.append((BusinessHour(-1, normalize=True), {
datetime(2014, 7, 1, 8): datetime(2014, 6, 30),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
datetime(2014, 7, 1, 10): datetime(2014, 6, 30),
datetime(2014, 7, 1, 0): datetime(2014, 6, 30),
datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
datetime(2014, 7, 6, 10): datetime(2014, 7, 4)}))
normalize_cases.append((BusinessHour(1, normalize=True, start='17:00',
end='04:00'), {
datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
datetime(2014, 7, 2, 3): datetime(2014, 7, 2),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
datetime(2014, 7, 7, 17): datetime(2014, 7, 7)}))
@pytest.mark.parametrize('case', normalize_cases)
def test_normalize(self, case):
offset, cases = case
for dt, expected in compat.iteritems(cases):
assert offset.apply(dt) == expected
on_offset_cases = []
on_offset_cases.append((BusinessHour(), {
datetime(2014, 7, 1, 9): True,
datetime(2014, 7, 1, 8, 59): False,
datetime(2014, 7, 1, 8): False,
datetime(2014, 7, 1, 17): True,
datetime(2014, 7, 1, 17, 1): False,
datetime(2014, 7, 1, 18): False,
datetime(2014, 7, 5, 9): False,
datetime(2014, 7, 6, 12): False}))
on_offset_cases.append((BusinessHour(start='10:00', end='15:00'), {
datetime(2014, 7, 1, 9): False,
datetime(2014, 7, 1, 10): True,
datetime(2014, 7, 1, 15): True,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12): False,
datetime(2014, 7, 6, 12): False}))
on_offset_cases.append((BusinessHour(start='19:00', end='05:00'), {
datetime(2014, 7, 1, 9, 0): False,
datetime(2014, 7, 1, 10, 0): False,
datetime(2014, 7, 1, 15): False,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12, 0): False,
datetime(2014, 7, 6, 12, 0): False,
datetime(2014, 7, 1, 19, 0): True,
datetime(2014, 7, 2, 0, 0): True,
datetime(2014, 7, 4, 23): True,
datetime(2014, 7, 5, 1): True,
datetime(2014, 7, 5, 5, 0): True,
datetime(2014, 7, 6, 23, 0): False,
datetime(2014, 7, 7, 3, 0): False}))
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
    """onOffset reports whether a timestamp lies within business hours."""
    offset, truth_table = case
    for stamp, within_hours in compat.iteritems(truth_table):
        assert offset.onOffset(stamp) == within_hours
# Opening-time tables: each entry pairs a list of offsets (that must all
# behave identically) with a mapping
#   dt -> (expected _next_opening_time(dt), expected _prev_opening_time(dt))
# consumed by test_opening_time below.
opening_time_cases = []
# opening time should be affected by sign of n, not by n's value and
# end
opening_time_cases.append(([BusinessHour(), BusinessHour(n=2),
BusinessHour(n=4), BusinessHour(end='10:00'),
BusinessHour(n=2, end='4:00'),
BusinessHour(n=4, end='15:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9)),
# if timestamp is on opening time, next opening time is
# as it is
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 10): (datetime(2014, 7, 3, 9),
datetime(2014, 7, 2, 9)),
# 2014-07-05 is saturday
datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 8, 9),
datetime(2014, 7, 7, 9))}))
# Positive n with a custom opening time of 11:15.
opening_time_cases.append(([BusinessHour(start='11:15'),
BusinessHour(n=2, start='11:15'),
BusinessHour(n=3, start='11:15'),
BusinessHour(start='11:15', end='10:00'),
BusinessHour(n=2, start='11:15', end='4:00'),
BusinessHour(n=3, start='11:15',
end='15:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 11, 15),
datetime(2014, 6, 30, 11, 15)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 10): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 11, 15): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 2, 11, 15)),
datetime(2014, 7, 2, 11, 15, 1): (datetime(2014, 7, 3, 11, 15),
datetime(2014, 7, 2, 11, 15)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 11, 15),
datetime(2014, 7, 3, 11, 15)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15))}))
# Negative n: the (next, prev) expectations mirror the positive-n table.
opening_time_cases.append(([BusinessHour(-1), BusinessHour(n=-2),
BusinessHour(n=-4),
BusinessHour(n=-1, end='10:00'),
BusinessHour(n=-2, end='4:00'),
BusinessHour(n=-4, end='15:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 10): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 3, 9)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 9): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 8, 9))}))
# Overnight hours (opening at 17:00), positive n.
opening_time_cases.append(([BusinessHour(start='17:00', end='05:00'),
BusinessHour(n=3, start='17:00',
end='03:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 17),
datetime(2014, 6, 30, 17)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 4, 17): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 3, 17)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 7, 17, 1): (datetime(2014, 7, 8, 17),
datetime(2014, 7, 7, 17)), }))
# Overnight hours (opening at 17:00), negative n.
opening_time_cases.append(([BusinessHour(-1, start='17:00', end='05:00'),
BusinessHour(n=-2, start='17:00',
end='03:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 16, 59): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 3, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 7, 18): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 8, 17))}))
@pytest.mark.parametrize('case', opening_time_cases)
def test_opening_time(self, case):
    """_next/_prev_opening_time must match the tabulated expectations."""
    offsets_to_check, table = case
    for offset in offsets_to_check:
        for stamp, (want_next, want_prev) in compat.iteritems(table):
            assert offset._next_opening_time(stamp) == want_next
            assert offset._prev_opening_time(stamp) == want_prev
# apply() expectation tables: mapping base datetime -> expected result of
# adding the offset (consumed by test_apply below).
apply_cases = []
# Default 09:00-17:00 hours, n=1.
apply_cases.append((BusinessHour(), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 2, 9, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 10),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 12),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30)}))
# n=4: results roll over to the next business day when hours run out.
apply_cases.append((BusinessHour(4), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30)}))
# n=-1: subtracting one business hour.
apply_cases.append((BusinessHour(-1), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 10),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 10): datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
datetime(2014, 7, 1, 9, 30, 15): datetime(2014, 6, 30, 16, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 5): datetime(2014, 6, 30, 16),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 10),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 16),
datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 16),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 16),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 16),
datetime(2014, 7, 7, 9): datetime(2014, 7, 4, 16),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 16, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 16, 30, 30)}))
# n=-4: subtracting four business hours crosses day boundaries.
apply_cases.append((BusinessHour(-4), {
datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 15),
datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 13),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 13, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 13, 30, 30)}))
# Custom daytime window 13:00-16:00, n=1.
apply_cases.append((BusinessHour(start='13:00', end='16:00'), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 13),
datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 14),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 14),
datetime(2014, 7, 1, 15, 30, 15): datetime(2014, 7, 2, 13, 30, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 14),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14)}))
# Custom window 13:00-16:00, n=2.
apply_cases.append((BusinessHour(n=2, start='13:00', end='16:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 14): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),
datetime(2014, 7, 2, 14, 30): datetime(2014, 7, 3, 13, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 15),
datetime(2014, 7, 4, 14, 30): datetime(2014, 7, 7, 13, 30),
datetime(2014, 7, 4, 14, 30, 30): datetime(2014, 7, 7, 13, 30, 30)}))
# Custom window 13:00-16:00, n=-1.
apply_cases.append((BusinessHour(n=-1, start='13:00', end='16:00'), {
datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 14): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 15): datetime(2014, 7, 2, 14),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 16): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 13, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 15),
datetime(2014, 7, 7, 11): datetime(2014, 7, 4, 15)}))
# Custom window 10:00-16:00, n=-3.
apply_cases.append((BusinessHour(n=-3, start='10:00', end='16:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 11),
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 11, 30): datetime(2014, 7, 1, 14, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
datetime(2014, 7, 4, 10): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 16): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 12, 30): datetime(2014, 7, 3, 15, 30),
datetime(2014, 7, 4, 12, 30, 30): datetime(2014, 7, 3, 15, 30, 30)}))
# Overnight window 19:00-05:00, n=1.
apply_cases.append((BusinessHour(start='19:00', end='05:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 20),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 4, 30): datetime(2014, 7, 2, 19, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 1),
datetime(2014, 7, 4, 10): datetime(2014, 7, 4, 20),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5, 0),
datetime(2014, 7, 5, 0): datetime(2014, 7, 5, 1),
datetime(2014, 7, 5, 4): datetime(2014, 7, 7, 19),
datetime(2014, 7, 5, 4, 30): datetime(2014, 7, 7, 19, 30),
datetime(2014, 7, 5, 4, 30, 30): datetime(2014, 7, 7, 19, 30, 30)}))
# Overnight window 19:00-05:00, n=-1.
apply_cases.append((BusinessHour(n=-1, start='19:00', end='05:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),
datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),
datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),
datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),
datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 3),
datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 5, 4, 30),
datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 5, 4, 30, 30)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
    """Adding the offset to each base timestamp gives the tabulated result."""
    offset, table = case
    for start, want in compat.iteritems(table):
        assert_offset_equal(offset, start, want)
# Large-|n| apply() tables: offsets spanning several business days/weeks,
# mapping base datetime -> expected result (consumed by test_apply_large_n).
apply_large_n_cases = []
# A week later
apply_large_n_cases.append((BusinessHour(40), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 8, 11),
datetime(2014, 7, 1, 13): datetime(2014, 7, 8, 13),
datetime(2014, 7, 1, 15): datetime(2014, 7, 8, 15),
datetime(2014, 7, 1, 16): datetime(2014, 7, 8, 16),
datetime(2014, 7, 1, 17): datetime(2014, 7, 9, 9),
datetime(2014, 7, 2, 11): datetime(2014, 7, 9, 11),
datetime(2014, 7, 2, 8): datetime(2014, 7, 9, 9),
datetime(2014, 7, 2, 19): datetime(2014, 7, 10, 9),
datetime(2014, 7, 2, 23): datetime(2014, 7, 10, 9),
datetime(2014, 7, 3, 0): datetime(2014, 7, 10, 9),
datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 9),
datetime(2014, 7, 4, 18): datetime(2014, 7, 14, 9),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 14, 9, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 14, 9, 30, 30)}))
# 3 days and 1 hour before
apply_large_n_cases.append((BusinessHour(-25), {
datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),
datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 12),
datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 16),
datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 17),
datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),
datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 16),
datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 16),
datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 16),
datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 16),
datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 16),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 16, 30),
datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30)}))
# 5 days and 3 hours later
apply_large_n_cases.append((BusinessHour(28, start='21:00', end='02:00'), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),
datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),
datetime(2014, 7, 2, 2): datetime(2014, 7, 10, 0),
datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),
datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),
datetime(2014, 7, 4, 2): datetime(2014, 7, 12, 0),
datetime(2014, 7, 4, 3): datetime(2014, 7, 12, 0),
datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),
datetime(2014, 7, 5, 15): datetime(2014, 7, 15, 0),
datetime(2014, 7, 6, 18): datetime(2014, 7, 15, 0),
datetime(2014, 7, 7, 1): datetime(2014, 7, 15, 0),
datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30)}))
@pytest.mark.parametrize('case', apply_large_n_cases)
def test_apply_large_n(self, case):
    """Offsets with large |n| still produce the tabulated timestamps."""
    offset, expectations = case
    for origin, target in compat.iteritems(expectations):
        assert_offset_equal(offset, origin, target)
def test_apply_nanoseconds(self):
    """Nanosecond components must survive BusinessHour arithmetic."""
    cases = [
        (BusinessHour(),
         {Timestamp('2014-07-04 15:00') + Nano(5):
              Timestamp('2014-07-04 16:00') + Nano(5),
          Timestamp('2014-07-04 16:00') + Nano(5):
              Timestamp('2014-07-07 09:00') + Nano(5),
          Timestamp('2014-07-04 16:00') - Nano(5):
              Timestamp('2014-07-04 17:00') - Nano(5)}),
        (BusinessHour(-1),
         {Timestamp('2014-07-04 15:00') + Nano(5):
              Timestamp('2014-07-04 14:00') + Nano(5),
          Timestamp('2014-07-04 10:00') + Nano(5):
              Timestamp('2014-07-04 09:00') + Nano(5),
          Timestamp('2014-07-04 10:00') - Nano(5):
              Timestamp('2014-07-03 17:00') - Nano(5)}),
    ]
    for offset, mapping in cases:
        for base, expected in compat.iteritems(mapping):
            assert_offset_equal(offset, base, expected)
def test_datetimeindex(self):
# date_range with freq='BH' should produce identical indexes whether built
# from start+end, start+periods, or end+periods.
idx1 = date_range(start='2014-07-04 15:00', end='2014-07-08 10:00',
freq='BH')
idx2 = date_range(start='2014-07-04 15:00', periods=12, freq='BH')
idx3 = date_range(end='2014-07-08 10:00', periods=12, freq='BH')
expected = DatetimeIndex(['2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00',
'2014-07-07 10:00', '2014-07-07 11:00',
'2014-07-07 12:00',
'2014-07-07 13:00', '2014-07-07 14:00',
'2014-07-07 15:00',
'2014-07-07 16:00', '2014-07-08 09:00',
'2014-07-08 10:00'],
freq='BH')
for idx in [idx1, idx2, idx3]:
tm.assert_index_equal(idx, expected)
# Same three constructions, anchored at minute 45.
idx1 = date_range(start='2014-07-04 15:45', end='2014-07-08 10:45',
freq='BH')
idx2 = date_range(start='2014-07-04 15:45', periods=12, freq='BH')
idx3 = date_range(end='2014-07-08 10:45', periods=12, freq='BH')
expected = DatetimeIndex(['2014-07-04 15:45', '2014-07-04 16:45',
'2014-07-07 09:45',
'2014-07-07 10:45', '2014-07-07 11:45',
'2014-07-07 12:45',
'2014-07-07 13:45', '2014-07-07 14:45',
'2014-07-07 15:45',
'2014-07-07 16:45', '2014-07-08 09:45',
'2014-07-08 10:45'],
freq='BH')
# NOTE(review): the DatetimeIndex literal constructed just above is
# immediately overwritten here, so it is dead code and the idx1 comparison
# below is trivially true -- confirm whether this overwrite is intentional
# or the literal should be the expected value.
expected = idx1
for idx in [idx1, idx2, idx3]:
tm.assert_index_equal(idx, expected)
class TestCustomBusinessHour(Base):
"""Tests for CustomBusinessHour: business-hour arithmetic with a custom
weekmask and/or custom holidays."""
_offset = CustomBusinessHour
# Holidays used throughout: given in mixed forms (str, datetime,
# np.datetime64) to exercise input normalization.
holidays = ['2014-06-27', datetime(2014, 6, 30),
np.datetime64('2014-07-02')]
def setup_method(self, method):
# 2014 Calendar to check custom holidays
# Sun Mon Tue Wed Thu Fri Sat
# 6/22 23 24 25 26 27 28
# 29 30 7/1 2 3 4 5
# 6 7 8 9 10 11 12
self.d = datetime(2014, 7, 1, 10, 00)
self.offset1 = CustomBusinessHour(weekmask='Tue Wed Thu Fri')
self.offset2 = CustomBusinessHour(holidays=self.holidays)
def test_constructor_errors(self):
# start times with a seconds component or unparseable strings must raise
from datetime import time as dt_time
with pytest.raises(ValueError):
CustomBusinessHour(start=dt_time(11, 0, 5))
with pytest.raises(ValueError):
CustomBusinessHour(start='AAA')
with pytest.raises(ValueError):
CustomBusinessHour(start='14:00:05')
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` doesnt match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset1) == '<CustomBusinessHour: CBH=09:00-17:00>'
assert repr(self.offset2) == '<CustomBusinessHour: CBH=09:00-17:00>'
def test_with_offset(self):
# multiplying the offset and passing n= should be equivalent
expected = Timestamp('2014-07-01 13:00')
assert self.d + CustomBusinessHour() * 3 == expected
assert self.d + CustomBusinessHour(n=3) == expected
def test_eq(self):
# equality depends on n, start/end and the custom weekmask/holidays
for offset in [self.offset1, self.offset2]:
assert offset == offset
assert CustomBusinessHour() != CustomBusinessHour(-1)
assert (CustomBusinessHour(start='09:00') ==
CustomBusinessHour())
assert (CustomBusinessHour(start='09:00') !=
CustomBusinessHour(start='09:01'))
assert (CustomBusinessHour(start='09:00', end='17:00') !=
CustomBusinessHour(start='17:00', end='09:01'))
assert (CustomBusinessHour(weekmask='Tue Wed Thu Fri') !=
CustomBusinessHour(weekmask='Mon Tue Wed Thu Fri'))
assert (CustomBusinessHour(holidays=['2014-06-27']) !=
CustomBusinessHour(holidays=['2014-06-28']))
def test_sub(self):
# override the Base.test_sub implementation because self.offset2 is
# defined differently in this class than the test expects
pass
def test_hash(self):
assert hash(self.offset1) == hash(self.offset1)
assert hash(self.offset2) == hash(self.offset2)
def test_call(self):
assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
assert self.offset2(self.d) == datetime(2014, 7, 1, 11)
def testRollback1(self):
assert self.offset1.rollback(self.d) == self.d
assert self.offset2.rollback(self.d) == self.d
d = datetime(2014, 7, 1, 0)
# 2014/07/01 is Tuesday, 06/30 is Monday(holiday)
assert self.offset1.rollback(d) == datetime(2014, 6, 27, 17)
# 2014/6/30 and 2014/6/27 are holidays
assert self.offset2.rollback(d) == datetime(2014, 6, 26, 17)
def testRollback2(self):
assert (self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) ==
datetime(2014, 7, 4, 17, 0))
def testRollforward1(self):
assert self.offset1.rollforward(self.d) == self.d
assert self.offset2.rollforward(self.d) == self.d
d = datetime(2014, 7, 1, 0)
assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
def testRollforward2(self):
assert (self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) ==
datetime(2014, 7, 7, 9))
def test_roll_date_object(self):
# NOTE(review): this constructs plain BusinessHour(), not
# CustomBusinessHour, even though this class tests the latter --
# confirm whether that is intentional.
offset = BusinessHour()
dt = datetime(2014, 7, 6, 15, 0)
result = offset.rollback(dt)
assert result == datetime(2014, 7, 4, 17)
result = offset.rollforward(dt)
assert result == datetime(2014, 7, 7, 9)
# normalize=True cases: mapping dt -> expected midnight-normalized result
# of apply() (consumed by test_normalize below).
normalize_cases = []
normalize_cases.append((
CustomBusinessHour(normalize=True, holidays=holidays),
{datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3),
datetime(2014, 7, 1, 23): datetime(2014, 7, 3),
datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
datetime(2014, 7, 6, 10): datetime(2014, 7, 7)}))
normalize_cases.append((
CustomBusinessHour(-1, normalize=True, holidays=holidays),
{datetime(2014, 7, 1, 8): datetime(2014, 6, 26),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
datetime(2014, 7, 1, 10): datetime(2014, 6, 26),
datetime(2014, 7, 1, 0): datetime(2014, 6, 26),
datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
datetime(2014, 7, 6, 10): datetime(2014, 7, 4)}))
normalize_cases.append((
CustomBusinessHour(1, normalize=True,
start='17:00', end='04:00',
holidays=holidays),
{datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
datetime(2014, 7, 2, 3): datetime(2014, 7, 3),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
datetime(2014, 7, 7, 17): datetime(2014, 7, 7)}))
@pytest.mark.parametrize('norm_cases', normalize_cases)
def test_normalize(self, norm_cases):
offset, cases = norm_cases
for dt, expected in compat.iteritems(cases):
assert offset.apply(dt) == expected
def test_onOffset(self):
# dt -> whether dt falls inside the custom business hours
tests = []
tests.append((CustomBusinessHour(start='10:00', end='15:00',
holidays=self.holidays),
{datetime(2014, 7, 1, 9): False,
datetime(2014, 7, 1, 10): True,
datetime(2014, 7, 1, 15): True,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12): False,
datetime(2014, 7, 6, 12): False}))
for offset, cases in tests:
for dt, expected in compat.iteritems(cases):
assert offset.onOffset(dt) == expected
# apply() tables with the custom holidays (7/2 is skipped as a holiday),
# mapping base datetime -> expected result.
apply_cases = []
apply_cases.append((
CustomBusinessHour(holidays=holidays),
{datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 9),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 3, 9, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 10),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30)}))
apply_cases.append((
CustomBusinessHour(4, holidays=holidays),
{datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 13): datetime(2014, 7, 3, 9),
datetime(2014, 7, 1, 15): datetime(2014, 7, 3, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30)}))
@pytest.mark.parametrize('apply_case', apply_cases)
def test_apply(self, apply_case):
offset, cases = apply_case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
# Nanosecond components must survive CustomBusinessHour arithmetic.
nano_cases = []
nano_cases.append(
(CustomBusinessHour(holidays=holidays),
{Timestamp('2014-07-01 15:00') + Nano(5):
Timestamp('2014-07-01 16:00') + Nano(5),
Timestamp('2014-07-01 16:00') + Nano(5):
Timestamp('2014-07-03 09:00') + Nano(5),
Timestamp('2014-07-01 16:00') - Nano(5):
Timestamp('2014-07-01 17:00') - Nano(5)}))
nano_cases.append(
(CustomBusinessHour(-1, holidays=holidays),
{Timestamp('2014-07-01 15:00') + Nano(5):
Timestamp('2014-07-01 14:00') + Nano(5),
Timestamp('2014-07-01 10:00') + Nano(5):
Timestamp('2014-07-01 09:00') + Nano(5),
Timestamp('2014-07-01 10:00') - Nano(5):
Timestamp('2014-06-26 17:00') - Nano(5)}))
@pytest.mark.parametrize('nano_case', nano_cases)
def test_apply_nanoseconds(self, nano_case):
offset, cases = nano_case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
class TestCustomBusinessDay(Base):
"""Tests for CustomBusinessDay (CDay): business-day arithmetic with
custom weekmasks, holiday lists and holiday calendars."""
_offset = CDay
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
# numpy datetime64 form of the same date, for np input coverage
self.nd = np_datetime64_compat('2008-01-01 00:00:00Z')
self.offset = CDay()
self.offset1 = self.offset
self.offset2 = CDay(2)
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` doesnt match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessDay>'
assert repr(self.offset2) == '<2 * CustomBusinessDays>'
# timedelta repr differs between Python versions
if compat.PY37:
expected = '<BusinessDay: offset=datetime.timedelta(days=1)>'
else:
expected = '<BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
# NOTE(review): overrides Base.test_mul with a no-op -- multiplication
# is exercised via the `2 * CDay()` entries in apply_cases below.
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_call(self):
# calling the offset applies it; both datetime and datetime64 inputs
assert self.offset2(self.d) == datetime(2008, 1, 3)
assert self.offset2(self.nd) == datetime(2008, 1, 3)
def testRollback1(self):
assert CDay(10).rollback(self.d) == self.d
def testRollback2(self):
assert (CDay(10).rollback(datetime(2008, 1, 5)) ==
datetime(2008, 1, 4))
def testRollforward1(self):
assert CDay(10).rollforward(self.d) == self.d
def testRollforward2(self):
assert (CDay(10).rollforward(datetime(2008, 1, 5)) ==
datetime(2008, 1, 7))
def test_roll_date_object(self):
# 2012-09-15 is a Saturday: CDay rolls to Fri/Mon, plain Day does not
offset = CDay()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 14)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 17)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
# (offset, date, expected onOffset result)
on_offset_cases = [(CDay(), datetime(2008, 1, 1), True),
(CDay(), datetime(2008, 1, 5), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, d, expected = case
assert_onOffset(offset, d, expected)
# apply() tables: mapping base datetime -> expected result
# (2008-01-05/06 fall on a weekend).
apply_cases = []
apply_cases.append((CDay(), {
datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
apply_cases.append((2 * CDay(), {
datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
apply_cases.append((-CDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
apply_cases.append((-2 * CDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
apply_cases.append((CDay(0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CDay(10)
assert result == datetime(2012, 11, 6)
# adding then subtracting the same large offset round-trips
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CDay() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 12, 23)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2011, 12, 26)
assert rs == xp
def test_apply_corner(self):
# applying CDay to another offset is invalid and must raise
pytest.raises(Exception, CDay().apply, BMonthEnd())
def test_holidays(self):
# Define a TradingDay offset
holidays = ['2012-05-01', datetime(2013, 5, 1),
np.datetime64('2014-05-01')]
tday = CDay(holidays=holidays)
for year in range(2012, 2015):
dt = datetime(year, 4, 30)
xp = datetime(year, 5, 2)
rs = dt + tday
assert rs == xp
def test_weekmask(self):
# weekmasks may be given as day-name strings, bit strings, or int lists
weekmask_saudi = 'Sat Sun Mon Tue Wed' # Thu-Fri Weekend
weekmask_uae = '1111001' # Fri-Sat Weekend
weekmask_egypt = [1, 1, 1, 1, 0, 0, 1] # Fri-Sat Weekend
bday_saudi = CDay(weekmask=weekmask_saudi)
bday_uae = CDay(weekmask=weekmask_uae)
bday_egypt = CDay(weekmask=weekmask_egypt)
dt = datetime(2013, 5, 1)
xp_saudi = datetime(2013, 5, 4)
xp_uae = datetime(2013, 5, 2)
xp_egypt = datetime(2013, 5, 2)
assert xp_saudi == dt + bday_saudi
assert xp_uae == dt + bday_uae
assert xp_egypt == dt + bday_egypt
xp2 = datetime(2013, 5, 5)
assert xp2 == dt + 2 * bday_saudi
assert xp2 == dt + 2 * bday_uae
assert xp2 == dt + 2 * bday_egypt
def test_weekmask_and_holidays(self):
weekmask_egypt = 'Sun Mon Tue Wed Thu' # Fri-Sat Weekend
holidays = ['2012-05-01', datetime(2013, 5, 1),
np.datetime64('2014-05-01')]
bday_egypt = CDay(holidays=holidays, weekmask=weekmask_egypt)
dt = datetime(2013, 4, 30)
xp_egypt = datetime(2013, 5, 5)
assert xp_egypt == dt + 2 * bday_egypt
@pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
def test_calendar(self):
# 2014-01-20 is MLK day (US federal holiday), so Fri 1/17 + 1 CDay
# lands on Tue 1/21
calendar = USFederalHolidayCalendar()
dt = datetime(2014, 1, 17)
assert_offset_equal(CDay(calendar=calendar), dt, datetime(2014, 1, 21))
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self.offset)
_check_roundtrip(self.offset2)
_check_roundtrip(self.offset * 2)
def test_pickle_compat_0_14_1(self, datapath):
# a pickle written by pandas 0.14.1 must still compare equal
hdays = [datetime(2013, 1, 1) for ele in range(4)]
pth = datapath('tseries', 'offsets', 'data', 'cday-0.14.1.pickle')
cday0_14_1 = read_pickle(pth)
cday = CDay(holidays=hdays)
assert cday == cday0_14_1
class CustomBusinessMonthBase(object):
    """Shared test scaffolding for custom business month offsets.

    Subclasses provide ``_offset`` (CBMonthBegin or CBMonthEnd).
    """

    def setup_method(self, method):
        self.d = datetime(2008, 1, 1)
        self.offset = self._offset()
        self.offset1 = self.offset
        self.offset2 = self._offset(2)

    def test_eq(self):
        # NOTE(review): compares the offset with itself; presumably a smoke
        # test that __eq__ is defined and reflexive.
        assert self.offset2 == self.offset2

    def test_mul(self):
        pass

    def test_hash(self):
        # hash() must be defined and deterministic for the offset.
        assert hash(self.offset2) == hash(self.offset2)

    def test_roundtrip_pickle(self):
        """Offsets compare equal after a pickle round trip."""
        for candidate in (self._offset(), self._offset(2),
                          self._offset() * 2):
            assert tm.round_trip_pickle(candidate) == candidate

    def test_copy(self):
        # GH 17452: copy() must preserve the weekmask.
        off = self._offset(weekmask='Mon Wed Fri')
        assert off == off.copy()
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
    """Tests for CBMonthEnd: last business day of the month, honoring
    custom weekmasks, holidays, and holiday calendars."""
    _offset = CBMonthEnd

    def test_different_normalize_equals(self):
        # GH#21404 changed __eq__ to return False when `normalize` doesnt match
        offset = self._offset()
        offset2 = self._offset(normalize=True)
        assert offset != offset2

    def test_repr(self):
        assert repr(self.offset) == '<CustomBusinessMonthEnd>'
        assert repr(self.offset2) == '<2 * CustomBusinessMonthEnds>'

    def testCall(self):
        # Calling the offset applies it: two business month-ends after Jan 1.
        assert self.offset2(self.d) == datetime(2008, 2, 29)

    def testRollback1(self):
        # NOTE(review): uses CDay rather than CBMonthEnd; 2007-12-31 is a
        # business day, so rollback is a no-op.
        assert (CDay(10).rollback(datetime(2007, 12, 31)) ==
                datetime(2007, 12, 31))

    def testRollback2(self):
        # rollback from a non-anchor date moves to the prior month end.
        assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)

    def testRollforward1(self):
        assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)

    def test_roll_date_object(self):
        # rollback/rollforward accept plain datetime.date objects too.
        offset = CBMonthEnd()
        dt = date(2012, 9, 15)
        result = offset.rollback(dt)
        assert result == datetime(2012, 8, 31)
        result = offset.rollforward(dt)
        assert result == datetime(2012, 9, 28)
        offset = offsets.Day()
        result = offset.rollback(dt)
        assert result == datetime(2012, 9, 15)
        result = offset.rollforward(dt)
        assert result == datetime(2012, 9, 15)

    on_offset_cases = [(CBMonthEnd(), datetime(2008, 1, 31), True),
                       (CBMonthEnd(), datetime(2008, 1, 1), False)]

    @pytest.mark.parametrize('case', on_offset_cases)
    def test_onOffset(self, case):
        offset, d, expected = case
        assert_onOffset(offset, d, expected)

    # (offset, {input_date: expected_result}) pairs consumed by test_apply.
    apply_cases = []
    apply_cases.append((CBMonthEnd(), {
        datetime(2008, 1, 1): datetime(2008, 1, 31),
        datetime(2008, 2, 7): datetime(2008, 2, 29)}))
    apply_cases.append((2 * CBMonthEnd(), {
        datetime(2008, 1, 1): datetime(2008, 2, 29),
        datetime(2008, 2, 7): datetime(2008, 3, 31)}))
    apply_cases.append((-CBMonthEnd(), {
        datetime(2008, 1, 1): datetime(2007, 12, 31),
        datetime(2008, 2, 8): datetime(2008, 1, 31)}))
    apply_cases.append((-2 * CBMonthEnd(), {
        datetime(2008, 1, 1): datetime(2007, 11, 30),
        datetime(2008, 2, 9): datetime(2007, 12, 31)}))
    apply_cases.append((CBMonthEnd(0), {
        datetime(2008, 1, 1): datetime(2008, 1, 31),
        datetime(2008, 2, 7): datetime(2008, 2, 29)}))

    @pytest.mark.parametrize('case', apply_cases)
    def test_apply(self, case):
        offset, cases = case
        for base, expected in compat.iteritems(cases):
            assert_offset_equal(offset, base, expected)

    def test_apply_large_n(self):
        dt = datetime(2012, 10, 23)
        result = dt + CBMonthEnd(10)
        assert result == datetime(2013, 7, 31)
        # Adding and removing the same CDay offset must round-trip.
        result = dt + CDay(100) - CDay(100)
        assert result == dt
        off = CBMonthEnd() * 6
        rs = datetime(2012, 1, 1) - off
        xp = datetime(2011, 7, 29)
        assert rs == xp
        st = datetime(2011, 12, 18)
        rs = st + off
        xp = datetime(2012, 5, 31)
        assert rs == xp

    def test_holidays(self):
        # Define a TradingDay offset
        holidays = ['2012-01-31', datetime(2012, 2, 28),
                    np.datetime64('2012-02-29')]
        bm_offset = CBMonthEnd(holidays=holidays)
        dt = datetime(2012, 1, 1)
        # Month ends that are holidays resolve to the prior business day.
        assert dt + bm_offset == datetime(2012, 1, 30)
        assert dt + 2 * bm_offset == datetime(2012, 2, 27)

    @pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
    def test_datetimeindex(self):
        from pandas.tseries.holiday import USFederalHolidayCalendar
        hcal = USFederalHolidayCalendar()
        freq = CBMonthEnd(calendar=hcal)
        assert (date_range(start='20120101', end='20130101',
                           freq=freq).tolist()[0] == datetime(2012, 1, 31))
class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
    """Tests for CBMonthBegin: first business day of the month, honoring
    custom weekmasks, holidays, and holiday calendars."""
    _offset = CBMonthBegin

    def test_different_normalize_equals(self):
        # GH#21404 changed __eq__ to return False when `normalize` doesnt match
        offset = self._offset()
        offset2 = self._offset(normalize=True)
        assert offset != offset2

    def test_repr(self):
        assert repr(self.offset) == '<CustomBusinessMonthBegin>'
        assert repr(self.offset2) == '<2 * CustomBusinessMonthBegins>'

    def testCall(self):
        # Two business month-begins after 2008-01-01 (Mar 1-2 are weekend).
        assert self.offset2(self.d) == datetime(2008, 3, 3)

    def testRollback1(self):
        # NOTE(review): uses CDay rather than CBMonthBegin; 2007-12-31 is a
        # business day, so rollback is a no-op.
        assert (CDay(10).rollback(datetime(2007, 12, 31)) ==
                datetime(2007, 12, 31))

    def testRollback2(self):
        # 2008-01-01 is already a business month begin: rollback is a no-op.
        assert CBMonthBegin(10).rollback(self.d) == datetime(2008, 1, 1)

    def testRollforward1(self):
        assert CBMonthBegin(10).rollforward(self.d) == datetime(2008, 1, 1)

    def test_roll_date_object(self):
        # rollback/rollforward accept plain datetime.date objects too.
        offset = CBMonthBegin()
        dt = date(2012, 9, 15)
        result = offset.rollback(dt)
        assert result == datetime(2012, 9, 3)
        result = offset.rollforward(dt)
        assert result == datetime(2012, 10, 1)
        offset = offsets.Day()
        result = offset.rollback(dt)
        assert result == datetime(2012, 9, 15)
        result = offset.rollforward(dt)
        assert result == datetime(2012, 9, 15)

    on_offset_cases = [(CBMonthBegin(), datetime(2008, 1, 1), True),
                       (CBMonthBegin(), datetime(2008, 1, 31), False)]

    @pytest.mark.parametrize('case', on_offset_cases)
    def test_onOffset(self, case):
        offset, dt, expected = case
        assert_onOffset(offset, dt, expected)

    # (offset, {input_date: expected_result}) pairs consumed by test_apply.
    apply_cases = []
    apply_cases.append((CBMonthBegin(), {
        datetime(2008, 1, 1): datetime(2008, 2, 1),
        datetime(2008, 2, 7): datetime(2008, 3, 3)}))
    apply_cases.append((2 * CBMonthBegin(), {
        datetime(2008, 1, 1): datetime(2008, 3, 3),
        datetime(2008, 2, 7): datetime(2008, 4, 1)}))
    apply_cases.append((-CBMonthBegin(), {
        datetime(2008, 1, 1): datetime(2007, 12, 3),
        datetime(2008, 2, 8): datetime(2008, 2, 1)}))
    apply_cases.append((-2 * CBMonthBegin(), {
        datetime(2008, 1, 1): datetime(2007, 11, 1),
        datetime(2008, 2, 9): datetime(2008, 1, 1)}))
    apply_cases.append((CBMonthBegin(0), {
        datetime(2008, 1, 1): datetime(2008, 1, 1),
        datetime(2008, 1, 7): datetime(2008, 2, 1)}))

    @pytest.mark.parametrize('case', apply_cases)
    def test_apply(self, case):
        offset, cases = case
        for base, expected in compat.iteritems(cases):
            assert_offset_equal(offset, base, expected)

    def test_apply_large_n(self):
        dt = datetime(2012, 10, 23)
        result = dt + CBMonthBegin(10)
        assert result == datetime(2013, 8, 1)
        # Adding and removing the same CDay offset must round-trip.
        result = dt + CDay(100) - CDay(100)
        assert result == dt
        off = CBMonthBegin() * 6
        rs = datetime(2012, 1, 1) - off
        xp = datetime(2011, 7, 1)
        assert rs == xp
        st = datetime(2011, 12, 18)
        rs = st + off
        xp = datetime(2012, 6, 1)
        assert rs == xp

    def test_holidays(self):
        # Define a TradingDay offset
        holidays = ['2012-02-01', datetime(2012, 2, 2),
                    np.datetime64('2012-03-01')]
        bm_offset = CBMonthBegin(holidays=holidays)
        dt = datetime(2012, 1, 1)
        # Month begins that are holidays resolve to the next business day.
        assert dt + bm_offset == datetime(2012, 1, 2)
        assert dt + 2 * bm_offset == datetime(2012, 2, 3)

    @pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
    def test_datetimeindex(self):
        hcal = USFederalHolidayCalendar()
        cbmb = CBMonthBegin(calendar=hcal)
        assert (date_range(start='20120101', end='20130101',
                           freq=cbmb).tolist()[0] == datetime(2012, 1, 3))
class TestWeek(Base):
    """Tests for the Week offset, optionally anchored on a weekday."""
    _offset = Week
    d = Timestamp(datetime(2008, 1, 2))
    offset1 = _offset()
    offset2 = _offset(2)

    def test_repr(self):
        assert repr(Week(weekday=0)) == "<Week: weekday=0>"
        assert repr(Week(n=-1, weekday=0)) == "<-1 * Week: weekday=0>"
        assert repr(Week(n=-2, weekday=0)) == "<-2 * Weeks: weekday=0>"

    def test_corner(self):
        # weekday must be in 0..6.
        with pytest.raises(ValueError):
            Week(weekday=7)
        with pytest.raises(ValueError, match="Day must be"):
            Week(weekday=-1)

    def test_isAnchored(self):
        # Anchored only when a weekday is given and n == 1.
        assert Week(weekday=0).isAnchored()
        assert not Week().isAnchored()
        assert not Week(2, weekday=2).isAnchored()
        assert not Week(2).isAnchored()

    # (offset, {input_date: expected_result}) pairs consumed by test_offset.
    offset_cases = []
    # not business week
    offset_cases.append((Week(), {
        datetime(2008, 1, 1): datetime(2008, 1, 8),
        datetime(2008, 1, 4): datetime(2008, 1, 11),
        datetime(2008, 1, 5): datetime(2008, 1, 12),
        datetime(2008, 1, 6): datetime(2008, 1, 13),
        datetime(2008, 1, 7): datetime(2008, 1, 14)}))
    # Mon
    offset_cases.append((Week(weekday=0), {
        datetime(2007, 12, 31): datetime(2008, 1, 7),
        datetime(2008, 1, 4): datetime(2008, 1, 7),
        datetime(2008, 1, 5): datetime(2008, 1, 7),
        datetime(2008, 1, 6): datetime(2008, 1, 7),
        datetime(2008, 1, 7): datetime(2008, 1, 14)}))
    # n=0 -> roll forward. Mon
    offset_cases.append((Week(0, weekday=0), {
        datetime(2007, 12, 31): datetime(2007, 12, 31),
        datetime(2008, 1, 4): datetime(2008, 1, 7),
        datetime(2008, 1, 5): datetime(2008, 1, 7),
        datetime(2008, 1, 6): datetime(2008, 1, 7),
        datetime(2008, 1, 7): datetime(2008, 1, 7)}))
    # n=-2 -> two weeks back, anchored on Tuesday (weekday=1)
    offset_cases.append((Week(-2, weekday=1), {
        datetime(2010, 4, 6): datetime(2010, 3, 23),
        datetime(2010, 4, 8): datetime(2010, 3, 30),
        datetime(2010, 4, 5): datetime(2010, 3, 23)}))

    @pytest.mark.parametrize('case', offset_cases)
    def test_offset(self, case):
        offset, cases = case
        for base, expected in compat.iteritems(cases):
            assert_offset_equal(offset, base, expected)

    @pytest.mark.parametrize('weekday', range(7))
    def test_onOffset(self, weekday):
        offset = Week(weekday=weekday)
        for day in range(1, 8):
            date = datetime(2008, 1, day)
            # 2008-01-07 is a Monday, so within this week day % 7 equals
            # datetime.weekday() (Monday == 0).
            if day % 7 == weekday:
                expected = True
            else:
                expected = False
            assert_onOffset(offset, date, expected)
class TestWeekOfMonth(Base):
    """Tests for WeekOfMonth: e.g. week=2, weekday=1 -> 3rd Tuesday."""
    _offset = WeekOfMonth
    offset1 = _offset()
    offset2 = _offset(2)

    def test_constructor(self):
        # week must be in 0..3, weekday in 0..6.
        with pytest.raises(ValueError, match="^Week"):
            WeekOfMonth(n=1, week=4, weekday=0)
        with pytest.raises(ValueError, match="^Week"):
            WeekOfMonth(n=1, week=-1, weekday=0)
        with pytest.raises(ValueError, match="^Day"):
            WeekOfMonth(n=1, week=0, weekday=-1)
        with pytest.raises(ValueError, match="^Day"):
            WeekOfMonth(n=1, week=0, weekday=-7)

    def test_repr(self):
        assert (repr(WeekOfMonth(weekday=1, week=2)) ==
                "<WeekOfMonth: week=2, weekday=1>")

    def test_offset(self):
        date1 = datetime(2011, 1, 4)  # 1st Tuesday of Month
        date2 = datetime(2011, 1, 11)  # 2nd Tuesday of Month
        date3 = datetime(2011, 1, 18)  # 3rd Tuesday of Month
        date4 = datetime(2011, 1, 25)  # 4th Tuesday of Month
        # tuple layout: (n, week, weekday, input date, expected result)
        test_cases = [
            (-2, 2, 1, date1, datetime(2010, 11, 16)),
            (-2, 2, 1, date2, datetime(2010, 11, 16)),
            (-2, 2, 1, date3, datetime(2010, 11, 16)),
            (-2, 2, 1, date4, datetime(2010, 12, 21)),
            (-1, 2, 1, date1, datetime(2010, 12, 21)),
            (-1, 2, 1, date2, datetime(2010, 12, 21)),
            (-1, 2, 1, date3, datetime(2010, 12, 21)),
            (-1, 2, 1, date4, datetime(2011, 1, 18)),
            (0, 0, 1, date1, datetime(2011, 1, 4)),
            (0, 0, 1, date2, datetime(2011, 2, 1)),
            (0, 0, 1, date3, datetime(2011, 2, 1)),
            (0, 0, 1, date4, datetime(2011, 2, 1)),
            (0, 1, 1, date1, datetime(2011, 1, 11)),
            (0, 1, 1, date2, datetime(2011, 1, 11)),
            (0, 1, 1, date3, datetime(2011, 2, 8)),
            (0, 1, 1, date4, datetime(2011, 2, 8)),
            (0, 0, 1, date1, datetime(2011, 1, 4)),
            (0, 1, 1, date2, datetime(2011, 1, 11)),
            (0, 2, 1, date3, datetime(2011, 1, 18)),
            (0, 3, 1, date4, datetime(2011, 1, 25)),
            (1, 0, 0, date1, datetime(2011, 2, 7)),
            (1, 0, 0, date2, datetime(2011, 2, 7)),
            (1, 0, 0, date3, datetime(2011, 2, 7)),
            (1, 0, 0, date4, datetime(2011, 2, 7)),
            (1, 0, 1, date1, datetime(2011, 2, 1)),
            (1, 0, 1, date2, datetime(2011, 2, 1)),
            (1, 0, 1, date3, datetime(2011, 2, 1)),
            (1, 0, 1, date4, datetime(2011, 2, 1)),
            (1, 0, 2, date1, datetime(2011, 1, 5)),
            (1, 0, 2, date2, datetime(2011, 2, 2)),
            (1, 0, 2, date3, datetime(2011, 2, 2)),
            (1, 0, 2, date4, datetime(2011, 2, 2)),
            (1, 2, 1, date1, datetime(2011, 1, 18)),
            (1, 2, 1, date2, datetime(2011, 1, 18)),
            (1, 2, 1, date3, datetime(2011, 2, 15)),
            (1, 2, 1, date4, datetime(2011, 2, 15)),
            (2, 2, 1, date1, datetime(2011, 2, 15)),
            (2, 2, 1, date2, datetime(2011, 2, 15)),
            (2, 2, 1, date3, datetime(2011, 3, 15)),
            (2, 2, 1, date4, datetime(2011, 3, 15))]
        for n, week, weekday, dt, expected in test_cases:
            offset = WeekOfMonth(n, week=week, weekday=weekday)
            assert_offset_equal(offset, dt, expected)
        # try subtracting
        result = datetime(2011, 2, 1) - WeekOfMonth(week=1, weekday=2)
        assert result == datetime(2011, 1, 12)
        result = datetime(2011, 2, 3) - WeekOfMonth(week=0, weekday=2)
        assert result == datetime(2011, 2, 2)

    # tuple layout: (week, weekday, date, expected onOffset result)
    on_offset_cases = [(0, 0, datetime(2011, 2, 7), True),
                       (0, 0, datetime(2011, 2, 6), False),
                       (0, 0, datetime(2011, 2, 14), False),
                       (1, 0, datetime(2011, 2, 14), True),
                       (0, 1, datetime(2011, 2, 1), True),
                       (0, 1, datetime(2011, 2, 8), False)]

    @pytest.mark.parametrize('case', on_offset_cases)
    def test_onOffset(self, case):
        week, weekday, dt, expected = case
        offset = WeekOfMonth(week=week, weekday=weekday)
        assert offset.onOffset(dt) == expected
class TestLastWeekOfMonth(Base):
    """Tests for LastWeekOfMonth: the last given weekday of each month."""
    _offset = LastWeekOfMonth
    offset1 = _offset()
    offset2 = _offset(2)

    def test_constructor(self):
        # n must be nonzero; weekday must be in 0..6.
        with pytest.raises(ValueError, match="^N cannot be 0"):
            LastWeekOfMonth(n=0, weekday=1)
        with pytest.raises(ValueError, match="^Day"):
            LastWeekOfMonth(n=1, weekday=-1)
        with pytest.raises(ValueError, match="^Day"):
            LastWeekOfMonth(n=1, weekday=7)

    def test_offset(self):
        # Saturday
        last_sat = datetime(2013, 8, 31)
        next_sat = datetime(2013, 9, 28)
        offset_sat = LastWeekOfMonth(n=1, weekday=5)
        one_day_before = (last_sat + timedelta(days=-1))
        assert one_day_before + offset_sat == last_sat
        one_day_after = (last_sat + timedelta(days=+1))
        assert one_day_after + offset_sat == next_sat
        # Test On that day
        assert last_sat + offset_sat == next_sat
        # Thursday
        offset_thur = LastWeekOfMonth(n=1, weekday=3)
        last_thurs = datetime(2013, 1, 31)
        next_thurs = datetime(2013, 2, 28)
        one_day_before = last_thurs + timedelta(days=-1)
        assert one_day_before + offset_thur == last_thurs
        one_day_after = last_thurs + timedelta(days=+1)
        assert one_day_after + offset_thur == next_thurs
        # Test on that day
        assert last_thurs + offset_thur == next_thurs
        three_before = last_thurs + timedelta(days=-3)
        assert three_before + offset_thur == last_thurs
        two_after = last_thurs + timedelta(days=+2)
        assert two_after + offset_thur == next_thurs
        offset_sunday = LastWeekOfMonth(n=1, weekday=WeekDay.SUN)
        assert datetime(2013, 7, 31) + offset_sunday == datetime(2013, 8, 25)

    # tuple layout: (weekday, date, expected onOffset result)
    on_offset_cases = [
        (WeekDay.SUN, datetime(2013, 1, 27), True),
        (WeekDay.SAT, datetime(2013, 3, 30), True),
        (WeekDay.MON, datetime(2013, 2, 18), False),  # Not the last Mon
        (WeekDay.SUN, datetime(2013, 2, 25), False),  # Not a SUN
        (WeekDay.MON, datetime(2013, 2, 25), True),
        (WeekDay.SAT, datetime(2013, 11, 30), True),
        (WeekDay.SAT, datetime(2006, 8, 26), True),
        (WeekDay.SAT, datetime(2007, 8, 25), True),
        (WeekDay.SAT, datetime(2008, 8, 30), True),
        (WeekDay.SAT, datetime(2009, 8, 29), True),
        (WeekDay.SAT, datetime(2010, 8, 28), True),
        (WeekDay.SAT, datetime(2011, 8, 27), True),
        (WeekDay.SAT, datetime(2019, 8, 31), True)]

    @pytest.mark.parametrize('case', on_offset_cases)
    def test_onOffset(self, case):
        weekday, dt, expected = case
        offset = LastWeekOfMonth(weekday=weekday)
        assert offset.onOffset(dt) == expected
class TestSemiMonthEnd(Base):
    """Tests for SemiMonthEnd: twice-monthly offset anchored on
    ``day_of_month`` (default 15) and the calendar month end."""
    _offset = SemiMonthEnd
    offset1 = _offset()
    offset2 = _offset(2)

    def test_offset_whole_year(self):
        # Consecutive anchors through 2008: every 15th and month end.
        dates = (datetime(2007, 12, 31),
                 datetime(2008, 1, 15),
                 datetime(2008, 1, 31),
                 datetime(2008, 2, 15),
                 datetime(2008, 2, 29),
                 datetime(2008, 3, 15),
                 datetime(2008, 3, 31),
                 datetime(2008, 4, 15),
                 datetime(2008, 4, 30),
                 datetime(2008, 5, 15),
                 datetime(2008, 5, 31),
                 datetime(2008, 6, 15),
                 datetime(2008, 6, 30),
                 datetime(2008, 7, 15),
                 datetime(2008, 7, 31),
                 datetime(2008, 8, 15),
                 datetime(2008, 8, 31),
                 datetime(2008, 9, 15),
                 datetime(2008, 9, 30),
                 datetime(2008, 10, 15),
                 datetime(2008, 10, 31),
                 datetime(2008, 11, 15),
                 datetime(2008, 11, 30),
                 datetime(2008, 12, 15),
                 datetime(2008, 12, 31))
        # Each anchor advances to the next one.
        for base, exp_date in zip(dates[:-1], dates[1:]):
            assert_offset_equal(SemiMonthEnd(), base, exp_date)
        # ensure .apply_index works as expected
        s = DatetimeIndex(dates[:-1])
        with tm.assert_produces_warning(None):
            # GH#22535 check that we don't get a FutureWarning from adding
            # an integer array to PeriodIndex
            result = SemiMonthEnd().apply_index(s)
        exp = DatetimeIndex(dates[1:])
        tm.assert_index_equal(result, exp)
        # ensure generating a range with DatetimeIndex gives same result
        result = date_range(start=dates[0], end=dates[-1], freq='SM')
        exp = DatetimeIndex(dates)
        tm.assert_index_equal(result, exp)

    # (offset, {input: expected}) cases shared by test_offset and
    # test_apply_index.
    offset_cases = []
    offset_cases.append((SemiMonthEnd(), {
        datetime(2008, 1, 1): datetime(2008, 1, 15),
        datetime(2008, 1, 15): datetime(2008, 1, 31),
        datetime(2008, 1, 31): datetime(2008, 2, 15),
        datetime(2006, 12, 14): datetime(2006, 12, 15),
        datetime(2006, 12, 29): datetime(2006, 12, 31),
        datetime(2006, 12, 31): datetime(2007, 1, 15),
        datetime(2007, 1, 1): datetime(2007, 1, 15),
        datetime(2006, 12, 1): datetime(2006, 12, 15),
        datetime(2006, 12, 15): datetime(2006, 12, 31)}))
    offset_cases.append((SemiMonthEnd(day_of_month=20), {
        datetime(2008, 1, 1): datetime(2008, 1, 20),
        datetime(2008, 1, 15): datetime(2008, 1, 20),
        datetime(2008, 1, 21): datetime(2008, 1, 31),
        datetime(2008, 1, 31): datetime(2008, 2, 20),
        datetime(2006, 12, 14): datetime(2006, 12, 20),
        datetime(2006, 12, 29): datetime(2006, 12, 31),
        datetime(2006, 12, 31): datetime(2007, 1, 20),
        datetime(2007, 1, 1): datetime(2007, 1, 20),
        datetime(2006, 12, 1): datetime(2006, 12, 20),
        datetime(2006, 12, 15): datetime(2006, 12, 20)}))
    # n=0: no-op on an anchor, otherwise roll forward to the next anchor.
    offset_cases.append((SemiMonthEnd(0), {
        datetime(2008, 1, 1): datetime(2008, 1, 15),
        datetime(2008, 1, 16): datetime(2008, 1, 31),
        datetime(2008, 1, 15): datetime(2008, 1, 15),
        datetime(2008, 1, 31): datetime(2008, 1, 31),
        datetime(2006, 12, 29): datetime(2006, 12, 31),
        datetime(2006, 12, 31): datetime(2006, 12, 31),
        datetime(2007, 1, 1): datetime(2007, 1, 15)}))
    offset_cases.append((SemiMonthEnd(0, day_of_month=16), {
        datetime(2008, 1, 1): datetime(2008, 1, 16),
        datetime(2008, 1, 16): datetime(2008, 1, 16),
        datetime(2008, 1, 15): datetime(2008, 1, 16),
        datetime(2008, 1, 31): datetime(2008, 1, 31),
        datetime(2006, 12, 29): datetime(2006, 12, 31),
        datetime(2006, 12, 31): datetime(2006, 12, 31),
        datetime(2007, 1, 1): datetime(2007, 1, 16)}))
    offset_cases.append((SemiMonthEnd(2), {
        datetime(2008, 1, 1): datetime(2008, 1, 31),
        datetime(2008, 1, 31): datetime(2008, 2, 29),
        datetime(2006, 12, 29): datetime(2007, 1, 15),
        datetime(2006, 12, 31): datetime(2007, 1, 31),
        datetime(2007, 1, 1): datetime(2007, 1, 31),
        datetime(2007, 1, 16): datetime(2007, 2, 15),
        datetime(2006, 11, 1): datetime(2006, 11, 30)}))
    # NOTE(review): the three negative-n dicts below repeat the key
    # datetime(2007, 1, 1); the duplicate maps to the same value, so it is
    # redundant (the last occurrence wins) but harmless.
    offset_cases.append((SemiMonthEnd(-1), {
        datetime(2007, 1, 1): datetime(2006, 12, 31),
        datetime(2008, 6, 30): datetime(2008, 6, 15),
        datetime(2008, 12, 31): datetime(2008, 12, 15),
        datetime(2006, 12, 29): datetime(2006, 12, 15),
        datetime(2006, 12, 30): datetime(2006, 12, 15),
        datetime(2007, 1, 1): datetime(2006, 12, 31)}))
    offset_cases.append((SemiMonthEnd(-1, day_of_month=4), {
        datetime(2007, 1, 1): datetime(2006, 12, 31),
        datetime(2007, 1, 4): datetime(2006, 12, 31),
        datetime(2008, 6, 30): datetime(2008, 6, 4),
        datetime(2008, 12, 31): datetime(2008, 12, 4),
        datetime(2006, 12, 5): datetime(2006, 12, 4),
        datetime(2006, 12, 30): datetime(2006, 12, 4),
        datetime(2007, 1, 1): datetime(2006, 12, 31)}))
    offset_cases.append((SemiMonthEnd(-2), {
        datetime(2007, 1, 1): datetime(2006, 12, 15),
        datetime(2008, 6, 30): datetime(2008, 5, 31),
        datetime(2008, 3, 15): datetime(2008, 2, 15),
        datetime(2008, 12, 31): datetime(2008, 11, 30),
        datetime(2006, 12, 29): datetime(2006, 11, 30),
        datetime(2006, 12, 14): datetime(2006, 11, 15),
        datetime(2007, 1, 1): datetime(2006, 12, 15)}))

    @pytest.mark.parametrize('case', offset_cases)
    def test_offset(self, case):
        offset, cases = case
        for base, expected in compat.iteritems(cases):
            assert_offset_equal(offset, base, expected)

    @pytest.mark.parametrize('case', offset_cases)
    def test_apply_index(self, case):
        # Vectorized application must agree with the scalar cases.
        offset, cases = case
        s = DatetimeIndex(cases.keys())
        with tm.assert_produces_warning(None):
            # GH#22535 check that we don't get a FutureWarning from adding
            # an integer array to PeriodIndex
            result = offset.apply_index(s)
        exp = DatetimeIndex(cases.values())
        tm.assert_index_equal(result, exp)

    on_offset_cases = [(datetime(2007, 12, 31), True),
                       (datetime(2007, 12, 15), True),
                       (datetime(2007, 12, 14), False),
                       (datetime(2007, 12, 1), False),
                       (datetime(2008, 2, 29), True)]

    @pytest.mark.parametrize('case', on_offset_cases)
    def test_onOffset(self, case):
        dt, expected = case
        assert_onOffset(SemiMonthEnd(), dt, expected)

    @pytest.mark.parametrize('klass', [Series, DatetimeIndex])
    def test_vectorized_offset_addition(self, klass):
        # Timezone-aware values: addition preserves the tz and time of day.
        s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
                   Timestamp('2000-02-15', tz='US/Central')], name='a')
        with tm.assert_produces_warning(None):
            # GH#22535 check that we don't get a FutureWarning from adding
            # an integer array to PeriodIndex
            result = s + SemiMonthEnd()
            result2 = SemiMonthEnd() + s
        exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
                     Timestamp('2000-02-29', tz='US/Central')], name='a')
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
        s = klass([Timestamp('2000-01-01 00:15:00', tz='US/Central'),
                   Timestamp('2000-02-01', tz='US/Central')], name='a')
        with tm.assert_produces_warning(None):
            # GH#22535 check that we don't get a FutureWarning from adding
            # an integer array to PeriodIndex
            result = s + SemiMonthEnd()
            result2 = SemiMonthEnd() + s
        exp = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
                     Timestamp('2000-02-15', tz='US/Central')], name='a')
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
class TestSemiMonthBegin(Base):
    """Tests for SemiMonthBegin: twice-monthly offset anchored on the 1st
    and ``day_of_month`` (default 15)."""
    _offset = SemiMonthBegin
    offset1 = _offset()
    offset2 = _offset(2)

    def test_offset_whole_year(self):
        # Consecutive anchors through 2008: every 1st and 15th.
        dates = (datetime(2007, 12, 15),
                 datetime(2008, 1, 1),
                 datetime(2008, 1, 15),
                 datetime(2008, 2, 1),
                 datetime(2008, 2, 15),
                 datetime(2008, 3, 1),
                 datetime(2008, 3, 15),
                 datetime(2008, 4, 1),
                 datetime(2008, 4, 15),
                 datetime(2008, 5, 1),
                 datetime(2008, 5, 15),
                 datetime(2008, 6, 1),
                 datetime(2008, 6, 15),
                 datetime(2008, 7, 1),
                 datetime(2008, 7, 15),
                 datetime(2008, 8, 1),
                 datetime(2008, 8, 15),
                 datetime(2008, 9, 1),
                 datetime(2008, 9, 15),
                 datetime(2008, 10, 1),
                 datetime(2008, 10, 15),
                 datetime(2008, 11, 1),
                 datetime(2008, 11, 15),
                 datetime(2008, 12, 1),
                 datetime(2008, 12, 15))
        # Each anchor advances to the next one.
        for base, exp_date in zip(dates[:-1], dates[1:]):
            assert_offset_equal(SemiMonthBegin(), base, exp_date)
        # ensure .apply_index works as expected
        s = DatetimeIndex(dates[:-1])
        with tm.assert_produces_warning(None):
            # GH#22535 check that we don't get a FutureWarning from adding
            # an integer array to PeriodIndex
            result = SemiMonthBegin().apply_index(s)
        exp = DatetimeIndex(dates[1:])
        tm.assert_index_equal(result, exp)
        # ensure generating a range with DatetimeIndex gives same result
        result = date_range(start=dates[0], end=dates[-1], freq='SMS')
        exp = DatetimeIndex(dates)
        tm.assert_index_equal(result, exp)

    # (offset, {input: expected}) cases shared by test_offset and
    # test_apply_index.
    offset_cases = []
    offset_cases.append((SemiMonthBegin(), {
        datetime(2008, 1, 1): datetime(2008, 1, 15),
        datetime(2008, 1, 15): datetime(2008, 2, 1),
        datetime(2008, 1, 31): datetime(2008, 2, 1),
        datetime(2006, 12, 14): datetime(2006, 12, 15),
        datetime(2006, 12, 29): datetime(2007, 1, 1),
        datetime(2006, 12, 31): datetime(2007, 1, 1),
        datetime(2007, 1, 1): datetime(2007, 1, 15),
        datetime(2006, 12, 1): datetime(2006, 12, 15),
        datetime(2006, 12, 15): datetime(2007, 1, 1)}))
    offset_cases.append((SemiMonthBegin(day_of_month=20), {
        datetime(2008, 1, 1): datetime(2008, 1, 20),
        datetime(2008, 1, 15): datetime(2008, 1, 20),
        datetime(2008, 1, 21): datetime(2008, 2, 1),
        datetime(2008, 1, 31): datetime(2008, 2, 1),
        datetime(2006, 12, 14): datetime(2006, 12, 20),
        datetime(2006, 12, 29): datetime(2007, 1, 1),
        datetime(2006, 12, 31): datetime(2007, 1, 1),
        datetime(2007, 1, 1): datetime(2007, 1, 20),
        datetime(2006, 12, 1): datetime(2006, 12, 20),
        datetime(2006, 12, 15): datetime(2006, 12, 20)}))
    # n=0: no-op on an anchor, otherwise roll forward to the next anchor.
    offset_cases.append((SemiMonthBegin(0), {
        datetime(2008, 1, 1): datetime(2008, 1, 1),
        datetime(2008, 1, 16): datetime(2008, 2, 1),
        datetime(2008, 1, 15): datetime(2008, 1, 15),
        datetime(2008, 1, 31): datetime(2008, 2, 1),
        datetime(2006, 12, 29): datetime(2007, 1, 1),
        datetime(2006, 12, 2): datetime(2006, 12, 15),
        datetime(2007, 1, 1): datetime(2007, 1, 1)}))
    offset_cases.append((SemiMonthBegin(0, day_of_month=16), {
        datetime(2008, 1, 1): datetime(2008, 1, 1),
        datetime(2008, 1, 16): datetime(2008, 1, 16),
        datetime(2008, 1, 15): datetime(2008, 1, 16),
        datetime(2008, 1, 31): datetime(2008, 2, 1),
        datetime(2006, 12, 29): datetime(2007, 1, 1),
        datetime(2006, 12, 31): datetime(2007, 1, 1),
        datetime(2007, 1, 5): datetime(2007, 1, 16),
        datetime(2007, 1, 1): datetime(2007, 1, 1)}))
    offset_cases.append((SemiMonthBegin(2), {
        datetime(2008, 1, 1): datetime(2008, 2, 1),
        datetime(2008, 1, 31): datetime(2008, 2, 15),
        datetime(2006, 12, 1): datetime(2007, 1, 1),
        datetime(2006, 12, 29): datetime(2007, 1, 15),
        datetime(2006, 12, 15): datetime(2007, 1, 15),
        datetime(2007, 1, 1): datetime(2007, 2, 1),
        datetime(2007, 1, 16): datetime(2007, 2, 15),
        datetime(2006, 11, 1): datetime(2006, 12, 1)}))
    # NOTE(review): the three negative-n dicts below repeat the key
    # datetime(2007, 1, 1); the duplicate maps to the same value, so it is
    # redundant (the last occurrence wins) but harmless.
    offset_cases.append((SemiMonthBegin(-1), {
        datetime(2007, 1, 1): datetime(2006, 12, 15),
        datetime(2008, 6, 30): datetime(2008, 6, 15),
        datetime(2008, 6, 14): datetime(2008, 6, 1),
        datetime(2008, 12, 31): datetime(2008, 12, 15),
        datetime(2006, 12, 29): datetime(2006, 12, 15),
        datetime(2006, 12, 15): datetime(2006, 12, 1),
        datetime(2007, 1, 1): datetime(2006, 12, 15)}))
    offset_cases.append((SemiMonthBegin(-1, day_of_month=4), {
        datetime(2007, 1, 1): datetime(2006, 12, 4),
        datetime(2007, 1, 4): datetime(2007, 1, 1),
        datetime(2008, 6, 30): datetime(2008, 6, 4),
        datetime(2008, 12, 31): datetime(2008, 12, 4),
        datetime(2006, 12, 5): datetime(2006, 12, 4),
        datetime(2006, 12, 30): datetime(2006, 12, 4),
        datetime(2006, 12, 2): datetime(2006, 12, 1),
        datetime(2007, 1, 1): datetime(2006, 12, 4)}))
    offset_cases.append((SemiMonthBegin(-2), {
        datetime(2007, 1, 1): datetime(2006, 12, 1),
        datetime(2008, 6, 30): datetime(2008, 6, 1),
        datetime(2008, 6, 14): datetime(2008, 5, 15),
        datetime(2008, 12, 31): datetime(2008, 12, 1),
        datetime(2006, 12, 29): datetime(2006, 12, 1),
        datetime(2006, 12, 15): datetime(2006, 11, 15),
        datetime(2007, 1, 1): datetime(2006, 12, 1)}))

    @pytest.mark.parametrize('case', offset_cases)
    def test_offset(self, case):
        offset, cases = case
        for base, expected in compat.iteritems(cases):
            assert_offset_equal(offset, base, expected)

    @pytest.mark.parametrize('case', offset_cases)
    def test_apply_index(self, case):
        # Vectorized application must agree with the scalar cases.
        offset, cases = case
        s = DatetimeIndex(cases.keys())
        with tm.assert_produces_warning(None):
            # GH#22535 check that we don't get a FutureWarning from adding
            # an integer array to PeriodIndex
            result = offset.apply_index(s)
        exp = DatetimeIndex(cases.values())
        tm.assert_index_equal(result, exp)

    on_offset_cases = [(datetime(2007, 12, 1), True),
                       (datetime(2007, 12, 15), True),
                       (datetime(2007, 12, 14), False),
                       (datetime(2007, 12, 31), False),
                       (datetime(2008, 2, 15), True)]

    @pytest.mark.parametrize('case', on_offset_cases)
    def test_onOffset(self, case):
        dt, expected = case
        assert_onOffset(SemiMonthBegin(), dt, expected)

    @pytest.mark.parametrize('klass', [Series, DatetimeIndex])
    def test_vectorized_offset_addition(self, klass):
        # Timezone-aware values: addition preserves the tz and time of day.
        s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
                   Timestamp('2000-02-15', tz='US/Central')], name='a')
        with tm.assert_produces_warning(None):
            # GH#22535 check that we don't get a FutureWarning from adding
            # an integer array to PeriodIndex
            result = s + SemiMonthBegin()
            result2 = SemiMonthBegin() + s
        exp = klass([Timestamp('2000-02-01 00:15:00', tz='US/Central'),
                     Timestamp('2000-03-01', tz='US/Central')], name='a')
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
        s = klass([Timestamp('2000-01-01 00:15:00', tz='US/Central'),
                   Timestamp('2000-02-01', tz='US/Central')], name='a')
        with tm.assert_produces_warning(None):
            # GH#22535 check that we don't get a FutureWarning from adding
            # an integer array to PeriodIndex
            result = s + SemiMonthBegin()
            result2 = SemiMonthBegin() + s
        exp = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
                     Timestamp('2000-02-15', tz='US/Central')], name='a')
        tm.assert_equal(result, exp)
        tm.assert_equal(result2, exp)
def test_Easter():
    """Easter offset moves to Easter Sunday; negated offsets move backwards."""
    cases = [
        (Easter(), datetime(2010, 1, 1), datetime(2010, 4, 4)),
        (Easter(), datetime(2010, 4, 5), datetime(2011, 4, 24)),
        (Easter(2), datetime(2010, 1, 1), datetime(2011, 4, 24)),
        # Anchored exactly on Easter Sunday: still advances to the next one.
        (Easter(), datetime(2010, 4, 4), datetime(2011, 4, 24)),
        (Easter(2), datetime(2010, 4, 4), datetime(2012, 4, 8)),
        (-Easter(), datetime(2011, 1, 1), datetime(2010, 4, 4)),
        (-Easter(), datetime(2010, 4, 5), datetime(2010, 4, 4)),
        (-Easter(2), datetime(2011, 1, 1), datetime(2009, 4, 12)),
        (-Easter(), datetime(2010, 4, 4), datetime(2009, 4, 12)),
        (-Easter(2), datetime(2010, 4, 4), datetime(2008, 3, 23)),
    ]
    for offset, start, expected in cases:
        assert_offset_equal(offset, start, expected)
class TestOffsetNames(object):
    """freqstr yields the canonical frequency alias for each offset."""

    def test_get_offset_name(self):
        cases = [
            (BDay(), 'B'),
            (BDay(2), '2B'),
            (BMonthEnd(), 'BM'),
            (Week(weekday=0), 'W-MON'),
            (Week(weekday=1), 'W-TUE'),
            (Week(weekday=2), 'W-WED'),
            (Week(weekday=3), 'W-THU'),
            (Week(weekday=4), 'W-FRI'),
            (LastWeekOfMonth(weekday=WeekDay.SUN), "LWOM-SUN"),
        ]
        for offset, expected in cases:
            assert offset.freqstr == expected
def test_get_offset():
    """get_offset maps alias strings to offsets; invalid aliases raise."""
    for bad_alias in ('gibberish', 'QS-JAN-B'):
        with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
            get_offset(bad_alias)
    pairs = [
        ('B', BDay()), ('b', BDay()), ('bm', BMonthEnd()),
        ('Bm', BMonthEnd()), ('W-MON', Week(weekday=0)),
        ('W-TUE', Week(weekday=1)), ('W-WED', Week(weekday=2)),
        ('W-THU', Week(weekday=3)), ('W-FRI', Week(weekday=4))]
    for name, expected in pairs:
        offset = get_offset(name)
        assert offset == expected, ("Expected %r to yield %r (actual: %r)" %
                                    (name, expected, offset))
def test_get_offset_legacy():
    """Legacy 'w@DAY' style aliases are rejected."""
    legacy_pairs = [('w@Sat', Week(weekday=5))]
    for name, expected in legacy_pairs:
        with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
            get_offset(name)
class TestOffsetAliases(object):
    """Frequency alias strings round-trip through get_offset().rule_code."""

    def setup_method(self, method):
        # Start each test with an empty alias cache.
        _offset_map.clear()

    def test_alias_equality(self):
        # Every cached offset compares equal to its copy.
        for k, v in compat.iteritems(_offset_map):
            if v is None:
                continue
            assert k == v.copy()

    def test_rule_code(self):
        lst = ['M', 'MS', 'BM', 'BMS', 'D', 'B', 'H', 'T', 'S', 'L', 'U']
        for k in lst:
            assert k == get_offset(k).rule_code
            # should be cached - this is kind of an internals test...
            assert k in _offset_map
            # Multiplying an offset must not change its rule_code.
            assert k == (get_offset(k) * 3).rule_code
        suffix_lst = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
        base = 'W'
        for v in suffix_lst:
            alias = '-'.join([base, v])
            assert alias == get_offset(alias).rule_code
            assert alias == (get_offset(alias) * 5).rule_code
        suffix_lst = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG',
                      'SEP', 'OCT', 'NOV', 'DEC']
        base_lst = ['A', 'AS', 'BA', 'BAS', 'Q', 'QS', 'BQ', 'BQS']
        for base in base_lst:
            for v in suffix_lst:
                alias = '-'.join([base, v])
                assert alias == get_offset(alias).rule_code
                assert alias == (get_offset(alias) * 5).rule_code
        # Frequency codes with a stride prefix round-trip via get_freq_code.
        lst = ['M', 'D', 'B', 'H', 'T', 'S', 'L', 'U']
        for k in lst:
            code, stride = get_freq_code('3' + k)
            assert isinstance(code, int)
            assert stride == 3
            assert k == get_freq_str(code)
def test_dateoffset_misc():
oset = offsets.DateOffset(months=2, days=4)
# it works
oset.freqstr
assert (not offsets.DateOffset(months=2) == 2)
def test_freq_offsets():
off = BDay(1, offset=timedelta(0, 1800))
assert (off.freqstr == 'B+30Min')
off = BDay(1, offset=timedelta(0, -1800))
assert (off.freqstr == 'B-30Min')
class TestReprNames(object):
def test_str_for_named_is_name(self):
# look at all the amazing combinations!
month_prefixes = ['A', 'AS', 'BA', 'BAS', 'Q', 'BQ', 'BQS', 'QS']
names = [prefix + '-' + month
for prefix in month_prefixes
for month in ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL',
'AUG', 'SEP', 'OCT', 'NOV', 'DEC']]
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
names += ['W-' + day for day in days]
names += ['WOM-' + week + day
for week in ('1', '2', '3', '4') for day in days]
_offset_map.clear()
for name in names:
offset = get_offset(name)
assert offset.freqstr == name
def get_utc_offset_hours(ts):
# take a Timestamp and compute total hours of utc offset
o = ts.utcoffset()
return (o.days * 24 * 3600 + o.seconds) / 3600.0
class TestDST(object):
"""
test DateOffset additions over Daylight Savings Time
"""
# one microsecond before the DST transition
ts_pre_fallback = "2013-11-03 01:59:59.999999"
ts_pre_springfwd = "2013-03-10 01:59:59.999999"
# test both basic names and dateutil timezones
timezone_utc_offsets = {
'US/Eastern': dict(utc_offset_daylight=-4,
utc_offset_standard=-5, ),
'dateutil/US/Pacific': dict(utc_offset_daylight=-7,
utc_offset_standard=-8, )
}
valid_date_offsets_singular = [
'weekday', 'day', 'hour', 'minute', 'second', 'microsecond'
]
valid_date_offsets_plural = [
'weeks', 'days',
'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds'
]
def _test_all_offsets(self, n, **kwds):
valid_offsets = self.valid_date_offsets_plural if n > 1 \
else self.valid_date_offsets_singular
for name in valid_offsets:
self._test_offset(offset_name=name, offset_n=n, **kwds)
def _test_offset(self, offset_name, offset_n, tstart, expected_utc_offset):
offset = DateOffset(**{offset_name: offset_n})
t = tstart + offset
if expected_utc_offset is not None:
assert get_utc_offset_hours(t) == expected_utc_offset
if offset_name == 'weeks':
# dates should match
assert t.date() == timedelta(days=7 * offset.kwds[
'weeks']) + tstart.date()
# expect the same day of week, hour of day, minute, second, ...
assert (t.dayofweek == tstart.dayofweek and
t.hour == tstart.hour and
t.minute == tstart.minute and
t.second == tstart.second)
elif offset_name == 'days':
# dates should match
assert timedelta(offset.kwds['days']) + tstart.date() == t.date()
# expect the same hour of day, minute, second, ...
assert (t.hour == tstart.hour and
t.minute == tstart.minute and
t.second == tstart.second)
elif offset_name in self.valid_date_offsets_singular:
# expect the singular offset value to match between tstart and t
datepart_offset = getattr(t, offset_name
if offset_name != 'weekday' else
'dayofweek')
assert datepart_offset == offset.kwds[offset_name]
else:
# the offset should be the same as if it was done in UTC
assert (t == (tstart.tz_convert('UTC') + offset)
.tz_convert('US/Pacific'))
def _make_timestamp(self, string, hrs_offset, tz):
if hrs_offset >= 0:
offset_string = '{hrs:02d}00'.format(hrs=hrs_offset)
else:
offset_string = '-{hrs:02d}00'.format(hrs=-1 * hrs_offset)
return Timestamp(string + offset_string).tz_convert(tz)
def test_fallback_plural(self):
# test moving from daylight savings to standard time
import dateutil
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_daylight']
hrs_post = utc_offsets['utc_offset_standard']
if LooseVersion(dateutil.__version__) < LooseVersion('2.6.0'):
# buggy ambiguous behavior in 2.6.0
# GH 14621
# https://github.com/dateutil/dateutil/issues/321
self._test_all_offsets(
n=3, tstart=self._make_timestamp(self.ts_pre_fallback,
hrs_pre, tz),
expected_utc_offset=hrs_post)
elif LooseVersion(dateutil.__version__) > LooseVersion('2.6.0'):
# fixed, but skip the test
continue
def test_springforward_plural(self):
# test moving from standard to daylight savings
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
hrs_post = utc_offsets['utc_offset_daylight']
self._test_all_offsets(
n=3, tstart=self._make_timestamp(self.ts_pre_springfwd,
hrs_pre, tz),
expected_utc_offset=hrs_post)
def test_fallback_singular(self):
# in the case of singular offsets, we don't necessarily know which utc
# offset the new Timestamp will wind up in (the tz for 1 month may be
# different from 1 second) so we don't specify an expected_utc_offset
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
self._test_all_offsets(n=1, tstart=self._make_timestamp(
self.ts_pre_fallback, hrs_pre, tz), expected_utc_offset=None)
def test_springforward_singular(self):
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
self._test_all_offsets(n=1, tstart=self._make_timestamp(
self.ts_pre_springfwd, hrs_pre, tz), expected_utc_offset=None)
offset_classes = {MonthBegin: ['11/2/2012', '12/1/2012'],
MonthEnd: ['11/2/2012', '11/30/2012'],
BMonthBegin: ['11/2/2012', '12/3/2012'],
BMonthEnd: ['11/2/2012', '11/30/2012'],
CBMonthBegin: ['11/2/2012', '12/3/2012'],
CBMonthEnd: ['11/2/2012', '11/30/2012'],
SemiMonthBegin: ['11/2/2012', '11/15/2012'],
SemiMonthEnd: ['11/2/2012', '11/15/2012'],
Week: ['11/2/2012', '11/9/2012'],
YearBegin: ['11/2/2012', '1/1/2013'],
YearEnd: ['11/2/2012', '12/31/2012'],
BYearBegin: ['11/2/2012', '1/1/2013'],
BYearEnd: ['11/2/2012', '12/31/2012'],
QuarterBegin: ['11/2/2012', '12/1/2012'],
QuarterEnd: ['11/2/2012', '12/31/2012'],
BQuarterBegin: ['11/2/2012', '12/3/2012'],
BQuarterEnd: ['11/2/2012', '12/31/2012'],
Day: ['11/4/2012', '11/4/2012 23:00']}.items()
@pytest.mark.parametrize('tup', offset_classes)
def test_all_offset_classes(self, tup):
offset, test_values = tup
first = Timestamp(test_values[0], tz='US/Eastern') + offset()
second = Timestamp(test_values[1], tz='US/Eastern')
assert first == second
# ---------------------------------------------------------------------
def test_get_offset_day_error():
# subclass of _BaseOffset must override _day_opt attribute, or we should
# get a NotImplementedError
with pytest.raises(NotImplementedError):
DateOffset()._get_offset_day(datetime.now())
def test_valid_default_arguments(offset_types):
# GH#19142 check that the calling the constructors without passing
# any keyword arguments produce valid offsets
cls = offset_types
cls()
@pytest.mark.parametrize('kwd', sorted(list(liboffsets.relativedelta_kwds)))
def test_valid_month_attributes(kwd, month_classes):
# GH#18226
cls = month_classes
# check that we cannot create e.g. MonthEnd(weeks=3)
with pytest.raises(TypeError):
cls(**{kwd: 3})
@pytest.mark.parametrize('kwd', sorted(list(liboffsets.relativedelta_kwds)))
def test_valid_relativedelta_kwargs(kwd):
# Check that all the arguments specified in liboffsets.relativedelta_kwds
# are in fact valid relativedelta keyword args
DateOffset(**{kwd: 1})
@pytest.mark.parametrize('kwd', sorted(list(liboffsets.relativedelta_kwds)))
def test_valid_tick_attributes(kwd, tick_classes):
# GH#18226
cls = tick_classes
# check that we cannot create e.g. Hour(weeks=3)
with pytest.raises(TypeError):
cls(**{kwd: 3})
def test_validate_n_error():
with pytest.raises(TypeError):
DateOffset(n='Doh!')
with pytest.raises(TypeError):
MonthBegin(n=timedelta(1))
with pytest.raises(TypeError):
BDay(n=np.array([1, 2], dtype=np.int64))
def test_require_integers(offset_types):
cls = offset_types
with pytest.raises(ValueError):
cls(n=1.5)
def test_tick_normalize_raises(tick_classes):
# check that trying to create a Tick object with normalize=True raises
# GH#21427
cls = tick_classes
with pytest.raises(ValueError):
cls(n=3, normalize=True)
def test_weeks_onoffset():
# GH#18510 Week with weekday = None, normalize = False should always
# be onOffset
offset = Week(n=2, weekday=None)
ts = Timestamp('1862-01-13 09:03:34.873477378+0210', tz='Africa/Lusaka')
fast = offset.onOffset(ts)
slow = (ts + offset) - offset == ts
assert fast == slow
# negative n
offset = Week(n=2, weekday=None)
ts = Timestamp('1856-10-24 16:18:36.556360110-0717', tz='Pacific/Easter')
fast = offset.onOffset(ts)
slow = (ts + offset) - offset == ts
assert fast == slow
def test_weekofmonth_onoffset():
# GH#18864
# Make sure that nanoseconds don't trip up onOffset (and with it apply)
offset = WeekOfMonth(n=2, week=2, weekday=0)
ts = Timestamp('1916-05-15 01:14:49.583410462+0422', tz='Asia/Qyzylorda')
fast = offset.onOffset(ts)
slow = (ts + offset) - offset == ts
assert fast == slow
# negative n
offset = WeekOfMonth(n=-3, week=1, weekday=0)
ts = Timestamp('1980-12-08 03:38:52.878321185+0500', tz='Asia/Oral')
fast = offset.onOffset(ts)
slow = (ts + offset) - offset == ts
assert fast == slow
def test_last_week_of_month_on_offset():
# GH#19036, GH#18977 _adjust_dst was incorrect for LastWeekOfMonth
offset = LastWeekOfMonth(n=4, weekday=6)
ts = Timestamp('1917-05-27 20:55:27.084284178+0200',
tz='Europe/Warsaw')
slow = (ts + offset) - offset == ts
fast = offset.onOffset(ts)
assert fast == slow
# negative n
offset = LastWeekOfMonth(n=-4, weekday=5)
ts = Timestamp('2005-08-27 05:01:42.799392561-0500',
tz='America/Rainy_River')
slow = (ts + offset) - offset == ts
fast = offset.onOffset(ts)
assert fast == slow
| bsd-3-clause |
adykstra/mne-python | mne/preprocessing/maxwell.py | 1 | 84078 | # -*- coding: utf-8 -*-
# Authors: Mark Wronkiewicz <wronk.mark@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
# Jussi Nurminen <jnu@iki.fi>
# License: BSD (3-clause)
from functools import partial
from math import factorial
from os import path as op
import numpy as np
from scipy import linalg
from .. import __version__
from ..annotations import _annotations_starts_stops
from ..bem import _check_origin
from ..chpi import quat_to_rot, rot_to_quat
from ..transforms import (_str_to_frame, _get_trans, Transform, apply_trans,
_find_vector_rotation, _cart_to_sph, _get_n_moments,
_sph_to_cart_partials, _deg_ord_idx, _average_quats,
_sh_complex_to_real, _sh_real_to_complex, _sh_negate)
from ..forward import _concatenate_coils, _prep_meg_channels, _create_meg_coils
from ..surface import _normalize_vectors
from ..io.constants import FIFF, FWD
from ..io.meas_info import _simplify_info
from ..io.proc_history import _read_ctc
from ..io.write import _generate_meas_id, DATE_NONE
from ..io import _loc_to_coil_trans, _coil_trans_to_loc, BaseRaw
from ..io.pick import pick_types, pick_info
from ..utils import (verbose, logger, _clean_names, warn, _time_mask, _pl,
_check_option)
from ..fixes import _get_args, _safe_svd, einsum
from ..channels.channels import _get_T1T2_mag_inds
# Note: MF uses single precision and some algorithms might use
# truncated versions of constants (e.g., μ0), which could lead to small
# differences between algorithms
@verbose
def maxwell_filter(raw, origin='auto', int_order=8, ext_order=3,
calibration=None, cross_talk=None, st_duration=None,
st_correlation=0.98, coord_frame='head', destination=None,
regularize='in', ignore_ref=False, bad_condition='error',
head_pos=None, st_fixed=True, st_only=False, mag_scale=100.,
skip_by_annotation=('edge', 'bad_acq_skip'), verbose=None):
u"""Maxwell filter data using multipole moments.
Parameters
----------
raw : instance of mne.io.Raw
Data to be filtered.
.. warning:: Automatic bad channel detection is not currently
implemented. It is critical to mark bad channels in
``raw.info['bads']`` prior to processing
in orider to prevent artifact spreading.
origin : array-like, shape (3,) | str
Origin of internal and external multipolar moment space in meters.
The default is ``'auto'``, which means ``(0., 0., 0.)`` when
``coord_frame='meg'``, and a head-digitization-based
origin fit using :func:`~mne.bem.fit_sphere_to_headshape`
when ``coord_frame='head'``. If automatic fitting fails (e.g., due
to having too few digitization points),
consider separately calling the fitting function with different
options or specifying the origin manually.
int_order : int
Order of internal component of spherical expansion.
ext_order : int
Order of external component of spherical expansion.
calibration : str | None
Path to the ``'.dat'`` file with fine calibration coefficients.
File can have 1D or 3D gradiometer imbalance correction.
This file is machine/site-specific.
cross_talk : str | None
Path to the FIF file with cross-talk correction information.
st_duration : float | None
If not None, apply spatiotemporal SSS with specified buffer duration
(in seconds). MaxFilter™'s default is 10.0 seconds in v2.2.
Spatiotemporal SSS acts as implicitly as a high-pass filter where the
cut-off frequency is 1/st_dur Hz. For this (and other) reasons, longer
buffers are generally better as long as your system can handle the
higher memory usage. To ensure that each window is processed
identically, choose a buffer length that divides evenly into your data.
Any data at the trailing edge that doesn't fit evenly into a whole
buffer window will be lumped into the previous buffer.
st_correlation : float
Correlation limit between inner and outer subspaces used to reject
ovwrlapping intersecting inner/outer signals during spatiotemporal SSS.
coord_frame : str
The coordinate frame that the ``origin`` is specified in, either
``'meg'`` or ``'head'``. For empty-room recordings that do not have
a head<->meg transform ``info['dev_head_t']``, the MEG coordinate
frame should be used.
destination : str | array-like, shape (3,) | None
The destination location for the head. Can be ``None``, which
will not change the head position, or a string path to a FIF file
containing a MEG device<->head transformation, or a 3-element array
giving the coordinates to translate to (with no rotations).
For example, ``destination=(0, 0, 0.04)`` would translate the bases
as ``--trans default`` would in MaxFilter™ (i.e., to the default
head location).
regularize : str | None
Basis regularization type, must be "in" or None.
"in" is the same algorithm as the "-regularize in" option in
MaxFilter™.
ignore_ref : bool
If True, do not include reference channels in compensation. This
option should be True for KIT files, since Maxwell filtering
with reference channels is not currently supported.
bad_condition : str
How to deal with ill-conditioned SSS matrices. Can be "error"
(default), "warning", "info", or "ignore".
head_pos : array | None
If array, movement compensation will be performed.
The array should be of shape (N, 10), holding the position
parameters as returned by e.g. `read_head_pos`.
.. versionadded:: 0.12
st_fixed : bool
If True (default), do tSSS using the median head position during the
``st_duration`` window. This is the default behavior of MaxFilter
and has been most extensively tested.
.. versionadded:: 0.12
st_only : bool
If True, only tSSS (temporal) projection of MEG data will be
performed on the output data. The non-tSSS parameters (e.g.,
``int_order``, ``calibration``, ``head_pos``, etc.) will still be
used to form the SSS bases used to calculate temporal projectors,
but the output MEG data will *only* have temporal projections
performed. Noise reduction from SSS basis multiplication,
cross-talk cancellation, movement compensation, and so forth
will not be applied to the data. This is useful, for example, when
evoked movement compensation will be performed with
:func:`~mne.epochs.average_movements`.
.. versionadded:: 0.12
mag_scale : float | str
The magenetometer scale-factor used to bring the magnetometers
to approximately the same order of magnitude as the gradiometers
(default 100.), as they have different units (T vs T/m).
Can be ``'auto'`` to use the reciprocal of the physical distance
between the gradiometer pickup loops (e.g., 0.0168 m yields
59.5 for VectorView).
.. versionadded:: 0.13
skip_by_annotation : str | list of str
If a string (or list of str), any annotation segment that begins
with the given string will not be included in filtering, and
segments on either side of the given excluded annotated segment
will be filtered separately (i.e., as independent signals).
The default ``('edge', 'bad_acq_skip')`` will separately filter
any segments that were concatenated by :func:`mne.concatenate_raws`
or :meth:`mne.io.Raw.append`, or separated during acquisition.
To disable, provide an empty list.
.. versionadded:: 0.17
%(verbose)s
Returns
-------
raw_sss : instance of mne.io.Raw
The raw data with Maxwell filtering applied.
See Also
--------
mne.preprocessing.mark_flat
mne.chpi.filter_chpi
mne.chpi.read_head_pos
mne.epochs.average_movements
Notes
-----
.. versionadded:: 0.11
Some of this code was adapted and relicensed (with BSD form) with
permission from Jussi Nurminen. These algorithms are based on work
from [1]_ and [2]_. It will likely use multiple CPU cores, see the
:ref:`FAQ <faq_cpu>` for more information.
.. warning:: Maxwell filtering in MNE is not designed or certified
for clinical use.
Compared to the MEGIN MaxFilter™ software, the MNE Maxwell filtering
routines currently provide the following features:
.. table::
:widths: auto
+-----------------------------------------------------------------------------+-----+-----------+
| Feature | MNE | MaxFilter |
+=============================================================================+=====+===========+
| Maxwell filtering software shielding | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Bad channel reconstruction | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Cross-talk cancellation | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Fine calibration correction (1D) | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Fine calibration correction (3D) | ✓ | |
+-----------------------------------------------------------------------------+-----+-----------+
| Spatio-temporal SSS (tSSS) | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Coordinate frame translation | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Regularization using information theory | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Movement compensation (raw) | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Movement compensation (:func:`epochs <mne.epochs.average_movements>`) | ✓ | |
+-----------------------------------------------------------------------------+-----+-----------+
| :func:`cHPI subtraction <mne.chpi.filter_chpi>` | ✓ | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Double floating point precision | ✓ | |
+-----------------------------------------------------------------------------+-----+-----------+
| Seamless processing of split (``-1.fif``) and concatenated files | ✓ | |
+-----------------------------------------------------------------------------+-----+-----------+
| Certified for clinical use | | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Automatic bad channel detection | | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
| Head position estimation | | ✓ |
+-----------------------------------------------------------------------------+-----+-----------+
Epoch-based movement compensation is described in [1]_.
Use of Maxwell filtering routines with non-Neuromag systems is currently
**experimental**. Worse results for non-Neuromag systems are expected due
to (at least):
* Missing fine-calibration and cross-talk cancellation data for
other systems.
* Processing with reference sensors has not been vetted.
* Regularization of components may not work well for all systems.
* Coil integration has not been optimized using Abramowitz/Stegun
definitions.
.. note:: Various Maxwell filtering algorithm components are covered by
patents owned by MEGIN. These patents include, but may not be
limited to:
- US2006031038 (Signal Space Separation)
- US6876196 (Head position determination)
- WO2005067789 (DC fields)
- WO2005078467 (MaxShield)
- WO2006114473 (Temporal Signal Space Separation)
These patents likely preclude the use of Maxwell filtering code
in commercial applications. Consult a lawyer if necessary.
Currently, in order to perform Maxwell filtering, the raw data must not
have any projectors applied. During Maxwell filtering, the spatial
structure of the data is modified, so projectors are discarded (unless
in ``st_only=True`` mode).
References
----------
.. [1] Taulu S. and Kajola M. "Presentation of electromagnetic
multichannel data: The signal space separation method,"
Journal of Applied Physics, vol. 97, pp. 124905 1-10, 2005.
http://lib.tkk.fi/Diss/2008/isbn9789512295654/article2.pdf
.. [2] Taulu S. and Simola J. "Spatiotemporal signal space separation
method for rejecting nearby interference in MEG measurements,"
Physics in Medicine and Biology, vol. 51, pp. 1759-1768, 2006.
http://lib.tkk.fi/Diss/2008/isbn9789512295654/article3.pdf
""" # noqa: E501
# There are an absurd number of different possible notations for spherical
# coordinates, which confounds the notation for spherical harmonics. Here,
# we purposefully stay away from shorthand notation in both and use
# explicit terms (like 'azimuth' and 'polar') to avoid confusion.
# See mathworld.wolfram.com/SphericalHarmonic.html for more discussion.
# Our code follows the same standard that ``scipy`` uses for ``sph_harm``.
# triage inputs ASAP to avoid late-thrown errors
if not isinstance(raw, BaseRaw):
raise TypeError('raw must be Raw, not %s' % type(raw))
_check_usable(raw)
_check_regularize(regularize)
st_correlation = float(st_correlation)
if st_correlation <= 0. or st_correlation > 1.:
raise ValueError('Need 0 < st_correlation <= 1., got %s'
% st_correlation)
_check_option('coord_frame', coord_frame, ['head', 'meg'])
head_frame = True if coord_frame == 'head' else False
recon_trans = _check_destination(destination, raw.info, head_frame)
onsets, ends = _annotations_starts_stops(
raw, skip_by_annotation, 'skip_by_annotation', invert=True)
max_samps = (ends - onsets).max()
if st_duration is not None:
st_duration = float(st_duration)
if not 0. < st_duration * raw.info['sfreq'] <= max_samps + 1.:
raise ValueError('st_duration (%0.1fs) must be between 0 and the '
'longest contiguous duration of the data '
'(%0.1fs).' % (st_duration,
max_samps / raw.info['sfreq']))
st_correlation = float(st_correlation)
st_duration = int(round(st_duration * raw.info['sfreq']))
if not 0. < st_correlation <= 1:
raise ValueError('st_correlation must be between 0. and 1.')
_check_option('bad_condition', bad_condition,
['error', 'warning', 'ignore', 'info'])
if raw.info['dev_head_t'] is None and coord_frame == 'head':
raise RuntimeError('coord_frame cannot be "head" because '
'info["dev_head_t"] is None; if this is an '
'empty room recording, consider using '
'coord_frame="meg"')
if st_only and st_duration is None:
raise ValueError('st_duration must not be None if st_only is True')
head_pos = _check_pos(head_pos, head_frame, raw, st_fixed,
raw.info['sfreq'])
_check_info(raw.info, sss=not st_only, tsss=st_duration is not None,
calibration=not st_only and calibration is not None,
ctc=not st_only and cross_talk is not None)
# Now we can actually get moving
logger.info('Maxwell filtering raw data')
add_channels = (head_pos[0] is not None) and not st_only
raw_sss, pos_picks = _copy_preload_add_channels(
raw, add_channels=add_channels)
del raw
if not st_only:
# remove MEG projectors, they won't apply now
_remove_meg_projs(raw_sss)
info = raw_sss.info
meg_picks, mag_picks, grad_picks, good_picks, mag_or_fine = \
_get_mf_picks(info, int_order, ext_order, ignore_ref)
# Magnetometers are scaled to improve numerical stability
coil_scale, mag_scale = _get_coil_scale(
meg_picks, mag_picks, grad_picks, mag_scale, info)
#
# Fine calibration processing (load fine cal and overwrite sensor geometry)
#
sss_cal = dict()
if calibration is not None:
calibration, sss_cal = _update_sensor_geometry(info, calibration,
ignore_ref)
mag_or_fine.fill(True) # all channels now have some mag-type data
# Determine/check the origin of the expansion
origin = _check_origin(origin, info, coord_frame, disp=True)
# Convert to the head frame
if coord_frame == 'meg' and info['dev_head_t'] is not None:
origin_head = apply_trans(info['dev_head_t'], origin)
else:
origin_head = origin
orig_origin, orig_coord_frame = origin, coord_frame
del origin, coord_frame
origin_head.setflags(write=False)
#
# Cross-talk processing
#
if cross_talk is not None:
sss_ctc = _read_ctc(cross_talk)
ctc_chs = sss_ctc['proj_items_chs']
meg_ch_names = [info['ch_names'][p] for p in meg_picks]
# checking for extra space ambiguity in channel names
# between old and new fif files
if meg_ch_names[0] not in ctc_chs:
ctc_chs = _clean_names(ctc_chs, remove_whitespace=True)
missing = sorted(list(set(meg_ch_names) - set(ctc_chs)))
if len(missing) != 0:
raise RuntimeError('Missing MEG channels in cross-talk matrix:\n%s'
% missing)
missing = sorted(list(set(ctc_chs) - set(meg_ch_names)))
if len(missing) > 0:
warn('Not all cross-talk channels in raw:\n%s' % missing)
ctc_picks = [ctc_chs.index(info['ch_names'][c])
for c in meg_picks[good_picks]]
assert len(ctc_picks) == len(good_picks) # otherwise we errored
ctc = sss_ctc['decoupler'][ctc_picks][:, ctc_picks]
# I have no idea why, but MF transposes this for storage..
sss_ctc['decoupler'] = sss_ctc['decoupler'].T.tocsc()
else:
sss_ctc = dict()
#
# Translate to destination frame (always use non-fine-cal bases)
#
exp = dict(origin=origin_head, int_order=int_order, ext_order=0)
all_coils = _prep_mf_coils(info, ignore_ref)
S_recon = _trans_sss_basis(exp, all_coils, recon_trans, coil_scale)
exp['ext_order'] = ext_order
# Reconstruct data from internal space only (Eq. 38), and rescale S_recon
S_recon /= coil_scale
if recon_trans is not None:
# warn if we have translated too far
diff = 1000 * (info['dev_head_t']['trans'][:3, 3] -
recon_trans['trans'][:3, 3])
dist = np.sqrt(np.sum(_sq(diff)))
if dist > 25.:
warn('Head position change is over 25 mm (%s) = %0.1f mm'
% (', '.join('%0.1f' % x for x in diff), dist))
# Reconstruct raw file object with spatiotemporal processed data
max_st = dict()
if st_duration is not None:
if st_only:
job = FIFF.FIFFV_SSS_JOB_TPROJ
else:
job = FIFF.FIFFV_SSS_JOB_ST
max_st.update(job=job, subspcorr=st_correlation,
buflen=st_duration / info['sfreq'])
logger.info(' Processing data using tSSS with st_duration=%s'
% max_st['buflen'])
st_when = 'before' if st_fixed else 'after' # relative to movecomp
else:
# st_duration from here on will act like the chunk size
st_duration = max(int(round(10. * info['sfreq'])), 1)
st_correlation = None
st_when = 'never'
st_duration = min(max_samps, st_duration)
del st_fixed
# Generate time points to break up data into equal-length windows
starts, stops = list(), list()
for onset, end in zip(onsets, ends):
read_lims = np.arange(onset, end + 1, st_duration)
if len(read_lims) == 1:
read_lims = np.concatenate([read_lims, [end]])
if read_lims[-1] != end:
read_lims[-1] = end
# fold it into the previous buffer
n_last_buf = read_lims[-1] - read_lims[-2]
if st_correlation is not None and len(read_lims) > 2:
if n_last_buf >= st_duration:
logger.info(
' Spatiotemporal window did not fit evenly into'
'contiguous data segment. %0.2f seconds were lumped '
'into the previous window.'
% ((n_last_buf - st_duration) / info['sfreq'],))
else:
logger.info(
' Contiguous data segment of duration %0.2f '
'seconds is too short to be processed with tSSS '
'using duration %0.2f'
% (n_last_buf / info['sfreq'],
st_duration / info['sfreq']))
assert len(read_lims) >= 2
assert read_lims[0] == onset and read_lims[-1] == end
starts.extend(read_lims[:-1])
stops.extend(read_lims[1:])
del read_lims
#
# Do the heavy lifting
#
# Figure out which transforms we need for each tSSS block
# (and transform pos[1] to times)
head_pos[1] = raw_sss.time_as_index(head_pos[1], use_rounding=True)
# Compute the first bit of pos_data for cHPI reporting
if info['dev_head_t'] is not None and head_pos[0] is not None:
this_pos_quat = np.concatenate([
rot_to_quat(info['dev_head_t']['trans'][:3, :3]),
info['dev_head_t']['trans'][:3, 3],
np.zeros(3)])
else:
this_pos_quat = None
_get_this_decomp_trans = partial(
_get_decomp, all_coils=all_coils,
cal=calibration, regularize=regularize,
exp=exp, ignore_ref=ignore_ref, coil_scale=coil_scale,
grad_picks=grad_picks, mag_picks=mag_picks, good_picks=good_picks,
mag_or_fine=mag_or_fine, bad_condition=bad_condition,
mag_scale=mag_scale)
S_decomp, pS_decomp, reg_moments, n_use_in = _get_this_decomp_trans(
info['dev_head_t'], t=0.)
reg_moments_0 = reg_moments.copy()
# Loop through buffer windows of data
n_sig = int(np.floor(np.log10(max(len(starts), 0)))) + 1
logger.info(' Processing %s data chunk%s' % (len(starts), _pl(starts)))
for ii, (start, stop) in enumerate(zip(starts, stops)):
tsss_valid = (stop - start) >= st_duration
rel_times = raw_sss.times[start:stop]
t_str = '%8.3f - %8.3f sec' % tuple(rel_times[[0, -1]])
t_str += ('(#%d/%d)' % (ii + 1, len(starts))).rjust(2 * n_sig + 5)
# Get original data
orig_data = raw_sss._data[meg_picks[good_picks], start:stop]
# This could just be np.empty if not st_only, but shouldn't be slow
# this way so might as well just always take the original data
out_meg_data = raw_sss._data[meg_picks, start:stop]
# Apply cross-talk correction
if cross_talk is not None:
orig_data = ctc.dot(orig_data)
out_pos_data = np.empty((len(pos_picks), stop - start))
# Figure out which positions to use
t_s_s_q_a = _trans_starts_stops_quats(head_pos, start, stop,
this_pos_quat)
n_positions = len(t_s_s_q_a[0])
# Set up post-tSSS or do pre-tSSS
if st_correlation is not None:
# If doing tSSS before movecomp...
resid = orig_data.copy() # to be safe let's operate on a copy
if st_when == 'after':
orig_in_data = np.empty((len(meg_picks), stop - start))
else: # 'before'
avg_trans = t_s_s_q_a[-1]
if avg_trans is not None:
# if doing movecomp
S_decomp_st, pS_decomp_st, _, n_use_in_st = \
_get_this_decomp_trans(avg_trans, t=rel_times[0])
else:
S_decomp_st, pS_decomp_st = S_decomp, pS_decomp
n_use_in_st = n_use_in
orig_in_data = np.dot(np.dot(S_decomp_st[:, :n_use_in_st],
pS_decomp_st[:n_use_in_st]),
resid)
resid -= np.dot(np.dot(S_decomp_st[:, n_use_in_st:],
pS_decomp_st[n_use_in_st:]), resid)
resid -= orig_in_data
# Here we operate on our actual data
proc = out_meg_data if st_only else orig_data
_do_tSSS(proc, orig_in_data, resid, st_correlation,
n_positions, t_str, tsss_valid)
if not st_only or st_when == 'after':
# Do movement compensation on the data
for trans, rel_start, rel_stop, this_pos_quat in \
zip(*t_s_s_q_a[:4]):
# Recalculate bases if necessary (trans will be None iff the
# first position in this interval is the same as last of the
# previous interval)
if trans is not None:
S_decomp, pS_decomp, reg_moments, n_use_in = \
_get_this_decomp_trans(trans, t=rel_times[rel_start])
# Determine multipole moments for this interval
mm_in = np.dot(pS_decomp[:n_use_in],
orig_data[:, rel_start:rel_stop])
# Our output data
if not st_only:
out_meg_data[:, rel_start:rel_stop] = \
np.dot(S_recon.take(reg_moments[:n_use_in], axis=1),
mm_in)
if len(pos_picks) > 0:
out_pos_data[:, rel_start:rel_stop] = \
this_pos_quat[:, np.newaxis]
# Transform orig_data to store just the residual
if st_when == 'after':
# Reconstruct data using original location from external
# and internal spaces and compute residual
rel_resid_data = resid[:, rel_start:rel_stop]
orig_in_data[:, rel_start:rel_stop] = \
np.dot(S_decomp[:, :n_use_in], mm_in)
rel_resid_data -= np.dot(np.dot(S_decomp[:, n_use_in:],
pS_decomp[n_use_in:]),
rel_resid_data)
rel_resid_data -= orig_in_data[:, rel_start:rel_stop]
# If doing tSSS at the end
if st_when == 'after':
_do_tSSS(out_meg_data, orig_in_data, resid, st_correlation,
n_positions, t_str, tsss_valid)
elif st_when == 'never' and head_pos[0] is not None:
logger.info(' Used % 2d head position%s for %s'
% (n_positions, _pl(n_positions), t_str))
raw_sss._data[meg_picks, start:stop] = out_meg_data
raw_sss._data[pos_picks, start:stop] = out_pos_data
# Update info
if not st_only:
info['dev_head_t'] = recon_trans # set the reconstruction transform
_update_sss_info(raw_sss, orig_origin, int_order, ext_order,
len(good_picks), orig_coord_frame, sss_ctc, sss_cal,
max_st, reg_moments_0, st_only)
logger.info('[done]')
return raw_sss
def _get_coil_scale(meg_picks, mag_picks, grad_picks, mag_scale, info):
"""Get the magnetometer scale factor."""
if isinstance(mag_scale, str):
if mag_scale != 'auto':
raise ValueError('mag_scale must be a float or "auto", got "%s"'
% mag_scale)
if len(mag_picks) in (0, len(meg_picks)):
mag_scale = 100. # only one coil type, doesn't matter
logger.info(' Setting mag_scale=%0.2f because only one '
'coil type is present' % mag_scale)
else:
# Find our physical distance between gradiometer pickup loops
# ("base line")
coils = _create_meg_coils([info['chs'][pick]
for pick in meg_picks], 'accurate')
grad_base = {coils[pick]['base'] for pick in grad_picks}
if len(grad_base) != 1 or list(grad_base)[0] <= 0:
raise RuntimeError('Could not automatically determine '
'mag_scale, could not find one '
'proper gradiometer distance from: %s'
% list(grad_base))
grad_base = list(grad_base)[0]
mag_scale = 1. / grad_base
logger.info(' Setting mag_scale=%0.2f based on gradiometer '
'distance %0.2f mm' % (mag_scale, 1000 * grad_base))
mag_scale = float(mag_scale)
coil_scale = np.ones((len(meg_picks), 1))
coil_scale[mag_picks] = mag_scale
return coil_scale, mag_scale
def _remove_meg_projs(inst):
    """Remove, in place, every projector that touches an MEG channel.

    Assumes the existing projectors are inactive.
    """
    meg_names = {inst.ch_names[pick]
                 for pick in pick_types(inst.info, meg=True, exclude=[])}
    keep = [proj for proj in inst.info['projs']
            if not any(name in meg_names
                       for name in proj['data']['col_names'])]
    inst.add_proj(keep, remove_existing=True, verbose=False)
def _check_destination(destination, info, head_frame):
"""Triage our reconstruction trans."""
if destination is None:
return info['dev_head_t']
if not head_frame:
raise RuntimeError('destination can only be set if using the '
'head coordinate frame')
if isinstance(destination, str):
recon_trans = _get_trans(destination, 'meg', 'head')[0]
elif isinstance(destination, Transform):
recon_trans = destination
else:
destination = np.array(destination, float)
if destination.shape != (3,):
raise ValueError('destination must be a 3-element vector, '
'str, or None')
recon_trans = np.eye(4)
recon_trans[:3, 3] = destination
recon_trans = Transform('meg', 'head', recon_trans)
if recon_trans.to_str != 'head' or recon_trans.from_str != 'MEG device':
raise RuntimeError('Destination transform is not MEG device -> head, '
'got %s -> %s' % (recon_trans.from_str,
recon_trans.to_str))
return recon_trans
def _prep_mf_coils(info, ignore_ref=True):
    """Get all coil integration information loaded and sorted.

    Parameters
    ----------
    info : instance of Info
        The measurement info.
    ignore_ref : bool
        If True, compensation (reference) coils are excluded.

    Returns
    -------
    rmags : ndarray
        Integration-point positions stacked over all coils.
    cosmags : ndarray
        Integration-point normals, pre-multiplied by their weights.
    bins : ndarray
        For each integration point, the index of the coil it belongs to.
    n_coils : int
        Total number of coils.
    mag_mask : ndarray of bool
        Which coils are magnetometers.
    slice_map : dict
        Maps coil index to the slice of its integration points in the
        stacked arrays.
    """
    coils, comp_coils = _prep_meg_channels(
        info, accurate=True, head_frame=False,
        ignore_ref=ignore_ref, do_picking=False, verbose=False)[:2]
    mag_mask = _get_mag_mask(coils)
    if len(comp_coils) > 0:
        meg_picks = pick_types(info, meg=True, ref_meg=False, exclude=[])
        ref_picks = pick_types(info, meg=False, ref_meg=True, exclude=[])
        inserts = np.searchsorted(meg_picks, ref_picks)
        # len(inserts) == len(comp_coils)
        # Insert in reverse so earlier insertions don't shift the target
        # indices of later ones
        for idx, comp_coil in zip(inserts[::-1], comp_coils[::-1]):
            coils.insert(idx, comp_coil)
        # Now we have:
        # [c['chname'] for c in coils] ==
        # [info['ch_names'][ii]
        #  for ii in pick_types(info, meg=True, ref_meg=True)]
    # Now coils is a sorted list of coils. Time to do some vectorization.
    n_coils = len(coils)
    rmags = np.concatenate([coil['rmag'] for coil in coils])
    cosmags = np.concatenate([coil['cosmag'] for coil in coils])
    ws = np.concatenate([coil['w'] for coil in coils])
    # Fold integration weights into the normals once, up front
    cosmags *= ws[:, np.newaxis]
    del ws
    n_int = np.array([len(coil['rmag']) for coil in coils])
    bins = np.repeat(np.arange(len(n_int)), n_int)
    bd = np.concatenate(([0], np.cumsum(n_int)))
    slice_map = {ii: slice(start, stop)
                 for ii, (start, stop) in enumerate(zip(bd[:-1], bd[1:]))}
    return rmags, cosmags, bins, n_coils, mag_mask, slice_map
def _trans_starts_stops_quats(pos, start, stop, this_pos_data):
    """Get all trans and limits we need.

    Slices the head-position stream ``pos`` to the sample window
    [start, stop) and returns, for each constant-position segment, the
    device->head transform (None meaning "reuse the previous one"), the
    segment limits relative to ``start``, the segment's quaternion data,
    and a duration-weighted average transform over the whole window.
    """
    pos_idx = np.arange(*np.searchsorted(pos[1], [start, stop]))
    used = np.zeros(stop - start, bool)
    trans = list()
    rel_starts = list()
    rel_stops = list()
    quats = list()
    weights = list()
    for ti in range(-1, len(pos_idx)):
        # first iteration for this block of data
        if ti < 0:
            rel_start = 0
            rel_stop = pos[1][pos_idx[0]] if len(pos_idx) > 0 else stop
            rel_stop = rel_stop - start
            if rel_start == rel_stop:
                continue  # our first pos occurs on first time sample
            # Don't calculate S_decomp here, use the last one
            trans.append(None)  # meaning: use previous
            quats.append(this_pos_data)
        else:
            rel_start = pos[1][pos_idx[ti]] - start
            if ti == len(pos_idx) - 1:
                rel_stop = stop - start
            else:
                rel_stop = pos[1][pos_idx[ti + 1]] - start
            trans.append(pos[0][pos_idx[ti]])
            quats.append(pos[2][pos_idx[ti]])
        # Sanity checks: segments must be non-empty, in range, and disjoint
        assert 0 <= rel_start
        assert rel_start < rel_stop
        assert rel_stop <= stop - start
        assert not used[rel_start:rel_stop].any()
        used[rel_start:rel_stop] = True
        rel_starts.append(rel_start)
        rel_stops.append(rel_stop)
        # Each segment's weight is its duration in samples
        weights.append(rel_stop - rel_start)
    assert used.all()
    # Use weighted average for average trans over the window
    if this_pos_data is None:
        avg_trans = None
    else:
        weights = np.array(weights)
        quats = np.array(quats)
        weights = weights / weights.sum().astype(float)  # int -> float
        avg_quat = _average_quats(quats[:, :3], weights)
        avg_t = np.dot(weights, quats[:, 3:6])
        # Assemble the 4x4 homogeneous transform from rotation + translation
        avg_trans = np.vstack([
            np.hstack([quat_to_rot(avg_quat), avg_t[:, np.newaxis]]),
            [[0., 0., 0., 1.]]])
    return trans, rel_starts, rel_stops, quats, avg_trans
def _do_tSSS(clean_data, orig_in_data, resid, st_correlation,
             n_positions, t_str, tsss_valid):
    """Project out the temporal subspace shared by internal and residual data.

    Modifies ``clean_data`` in place (SSP-like projection, Eq. 12 in [2]_).
    """
    if tsss_valid:
        np.asarray_chkfinite(resid)
        t_proj = _overlap_projector(orig_in_data, resid, st_correlation)
    else:
        # Nothing valid in this window: use an empty projector
        t_proj = np.empty((clean_data.shape[1], 0))
    n_proj = t_proj.shape[1]
    msg = (' Projecting %2d intersecting tSSS component%s '
           'for %s' % (n_proj, _pl(n_proj, ' '), t_str))
    if n_positions > 1:
        msg += ' (across %2d position%s)' % (n_positions,
                                             _pl(n_positions, ' '))
    logger.info(msg)
    clean_data -= np.dot(np.dot(clean_data, t_proj), t_proj.T)
def _copy_preload_add_channels(raw, add_channels):
    """Load data for processing and (maybe) add cHPI pos channels.

    Returns a copied, preloaded Raw plus the indices of any appended
    head-position channels (an empty int array when ``add_channels`` is
    False).
    """
    raw = raw.copy()
    if add_channels:
        # One appended channel per head-position parameter/metric
        kinds = [FIFF.FIFFV_QUAT_1, FIFF.FIFFV_QUAT_2, FIFF.FIFFV_QUAT_3,
                 FIFF.FIFFV_QUAT_4, FIFF.FIFFV_QUAT_5, FIFF.FIFFV_QUAT_6,
                 FIFF.FIFFV_HPI_G, FIFF.FIFFV_HPI_ERR, FIFF.FIFFV_HPI_MOV]
        out_shape = (len(raw.ch_names) + len(kinds), len(raw.times))
        out_data = np.zeros(out_shape, np.float64)
        msg = ' Appending head position result channels and '
        if raw.preload:
            logger.info(msg + 'copying original raw data')
            out_data[:len(raw.ch_names)] = raw._data
            raw._data = out_data
        else:
            logger.info(msg + 'loading raw data from disk')
            # Load directly into the pre-sized buffer, then swap it in
            raw._preload_data(out_data[:len(raw.ch_names)], verbose=False)
            raw._data = out_data
        assert raw.preload is True
        off = len(raw.ch_names)
        # Minimal channel-info dicts for the appended cHPI channels
        chpi_chs = [
            dict(ch_name='CHPI%03d' % (ii + 1), logno=ii + 1,
                 scanno=off + ii + 1, unit_mul=-1, range=1., unit=-1,
                 kind=kinds[ii], coord_frame=FIFF.FIFFV_COORD_UNKNOWN,
                 cal=1e-4, coil_type=FWD.COIL_UNKNOWN, loc=np.zeros(12))
            for ii in range(len(kinds))]
        raw.info['chs'].extend(chpi_chs)
        raw.info._update_redundant()
        raw.info._check_consistency()
        assert raw._data.shape == (raw.info['nchan'], len(raw.times))
        # Return the pos picks
        pos_picks = np.arange(len(raw.ch_names) - len(chpi_chs),
                              len(raw.ch_names))
        return raw, pos_picks
    else:
        if not raw.preload:
            logger.info(' Loading raw data from disk')
            raw.load_data(verbose=False)
        else:
            logger.info(' Using loaded raw data')
        return raw, np.array([], int)
def _check_pos(pos, head_frame, raw, st_fixed, sfreq):
"""Check for a valid pos array and transform it to a more usable form."""
if pos is None:
return [None, np.array([-1])]
if not head_frame:
raise ValueError('positions can only be used if coord_frame="head"')
if not st_fixed:
warn('st_fixed=False is untested, use with caution!')
if not isinstance(pos, np.ndarray):
raise TypeError('pos must be an ndarray')
if pos.ndim != 2 or pos.shape[1] != 10:
raise ValueError('pos must be an array of shape (N, 10)')
t = pos[:, 0]
if not np.array_equal(t, np.unique(t)):
raise ValueError('Time points must unique and in ascending order')
# We need an extra 1e-3 (1 ms) here because MaxFilter outputs values
# only out to 3 decimal places
if not _time_mask(t, tmin=raw._first_time - 1e-3, tmax=None,
sfreq=sfreq).all():
raise ValueError('Head position time points must be greater than '
'first sample offset, but found %0.4f < %0.4f'
% (t[0], raw._first_time))
max_dist = np.sqrt(np.sum(pos[:, 4:7] ** 2, axis=1)).max()
if max_dist > 1.:
warn('Found a distance greater than 1 m (%0.3g m) from the device '
'origin, positions may be invalid and Maxwell filtering could '
'fail' % (max_dist,))
dev_head_ts = np.zeros((len(t), 4, 4))
dev_head_ts[:, 3, 3] = 1.
dev_head_ts[:, :3, 3] = pos[:, 4:7]
dev_head_ts[:, :3, :3] = quat_to_rot(pos[:, 1:4])
pos = [dev_head_ts, t - raw._first_time, pos[:, 1:]]
return pos
def _get_decomp(trans, all_coils, cal, regularize, exp, ignore_ref,
                coil_scale, grad_picks, mag_picks, good_picks, mag_or_fine,
                bad_condition, t, mag_scale):
    """Build the regularized SSS decomposition and its pseudoinverse."""
    # Basis set with fine calibration applied (point-like mags + coeffs)
    S_decomp = _get_s_decomp(exp, all_coils, trans, coil_scale, cal,
                             ignore_ref, grad_picks, mag_picks, good_picks,
                             mag_scale)
    # Regularize away noisy moments, then pseudo-invert (part of Eq. 37)
    S_decomp, pS_decomp, sing, reg_moments, n_use_in = _regularize(
        regularize, exp, S_decomp, mag_or_fine, t=t)
    cond = sing[0] / sing[-1]
    logger.debug(' Decomposition matrix condition: %0.1f' % cond)
    if cond >= 1000. and bad_condition != 'ignore':
        msg = 'Matrix is badly conditioned: %0.0f >= 1000' % cond
        if bad_condition == 'error':
            raise RuntimeError(msg)
        elif bad_condition == 'warning':
            warn(msg)
        else:  # bad_condition == 'info'
            logger.info(msg)
    # Fold the per-channel data scaling into the matrix pair here
    pS_decomp *= coil_scale[good_picks].T
    S_decomp /= coil_scale[good_picks]
    return S_decomp, pS_decomp, reg_moments, n_use_in
def _get_s_decomp(exp, all_coils, trans, coil_scale, cal, ignore_ref,
                  grad_picks, mag_picks, good_picks, mag_scale):
    """Compute the SSS basis, optionally applying fine calibration."""
    S_decomp = _trans_sss_basis(exp, all_coils, trans, coil_scale)
    if cal is not None:
        # Point-like magnetometer terms model gradiometer imbalance
        S_decomp[grad_picks, :] += _sss_basis_point(exp, trans, cal,
                                                    ignore_ref, mag_scale)
        # Magnetometers are scaled by their calibration coefficients
        S_decomp[mag_picks, :] /= cal['mag_cals']
    # Restrict to good channels (we need to be careful about KIT grads)
    return S_decomp[good_picks]
@verbose
def _regularize(regularize, exp, S_decomp, mag_or_fine, t, verbose=None):
    """Regularize a decomposition matrix.

    The external (out) components are ALWAYS regularized according to
    norm, since gradiometer-only setups (e.g., KIT) can have zero
    first-order (homogeneous field) components.
    """
    int_order, ext_order = exp['int_order'], exp['ext_order']
    n_in, n_out = _get_n_moments([int_order, ext_order])
    t_str = '%8.3f' % t
    if regularize is None:
        in_removes = []
        out_removes = _regularize_out(int_order, ext_order, mag_or_fine)
    else:  # regularize == 'in'
        in_removes, out_removes = _regularize_in(
            int_order, ext_order, S_decomp, mag_or_fine)
    reg_in_moments = np.setdiff1d(np.arange(n_in), in_removes)
    reg_out_moments = np.setdiff1d(np.arange(n_in, n_in + n_out),
                                   out_removes)
    n_use_in, n_use_out = len(reg_in_moments), len(reg_out_moments)
    reg_moments = np.concatenate((reg_in_moments, reg_out_moments))
    # Drop the removed columns, then pseudo-invert what remains
    S_decomp = S_decomp.take(reg_moments, axis=1)
    pS_decomp, sing = _col_norm_pinv(S_decomp.copy())
    if regularize is not None or n_use_out != n_out:
        logger.info(' Using %s/%s harmonic components for %s '
                    '(%s/%s in, %s/%s out)'
                    % (n_use_in + n_use_out, n_in + n_out, t_str,
                       n_use_in, n_in, n_use_out, n_out))
    return S_decomp, pS_decomp, sing, reg_moments, n_use_in
def _get_mf_picks(info, int_order, ext_order, ignore_ref=False):
    """Pick types for Maxwell filtering.

    Returns
    -------
    meg_picks : ndarray
        Indices (in ``info``) of all MEG channels (including references
        unless ``ignore_ref``).
    mag_picks : ndarray
        Indices within the MEG-only info of magnetometers.
    grad_picks : ndarray
        Indices within the MEG-only info of gradiometers.
    good_picks : ndarray
        Indices within the MEG-only info of non-bad MEG channels.
    mag_or_fine : ndarray of bool
        Which channels should be treated like magnetometers for
        external-basis purposes (KIT/CTF gradiometers excluded).
    """
    # Check for T1/T2 mag types
    mag_inds_T1T2 = _get_T1T2_mag_inds(info)
    if len(mag_inds_T1T2) > 0:
        warn('%d T1/T2 magnetometer channel types found. If using SSS, it is '
             'advised to replace coil types using "fix_mag_coil_types".'
             % len(mag_inds_T1T2))
    # Get indices of channels to use in multipolar moment calculation
    ref = not ignore_ref
    meg_picks = pick_types(info, meg=True, ref_meg=ref, exclude=[])
    meg_info = pick_info(_simplify_info(info), meg_picks)
    del info
    good_picks = pick_types(meg_info, meg=True, ref_meg=ref, exclude='bads')
    # The basis cannot be larger than the number of usable sensors
    n_bases = _get_n_moments([int_order, ext_order]).sum()
    if n_bases > len(good_picks):
        raise ValueError('Number of requested bases (%s) exceeds number of '
                         'good sensors (%s)' % (str(n_bases), len(good_picks)))
    recons = [ch for ch in meg_info['bads']]
    if len(recons) > 0:
        logger.info(' Bad MEG channels being reconstructed: %s' % recons)
    else:
        logger.info(' No bad MEG channels')
    ref_meg = False if ignore_ref else 'mag'
    mag_picks = pick_types(meg_info, meg='mag', ref_meg=ref_meg, exclude=[])
    ref_meg = False if ignore_ref else 'grad'
    grad_picks = pick_types(meg_info, meg='grad', ref_meg=ref_meg, exclude=[])
    assert len(mag_picks) + len(grad_picks) == len(meg_info['ch_names'])
    # Determine which are magnetometers for external basis purposes
    mag_or_fine = np.zeros(len(meg_picks), bool)
    mag_or_fine[mag_picks] = True
    # KIT gradiometers are marked as having units T, not T/M (argh)
    # We need a separate variable for this because KIT grads should be
    # treated mostly like magnetometers (e.g., scaled by 100) for reg
    coil_types = np.array([ch['coil_type'] for ch in meg_info['chs']])
    mag_or_fine[(coil_types & 0xFFFF) == FIFF.FIFFV_COIL_KIT_GRAD] = False
    # The same thing goes for CTF gradiometers...
    ctf_grads = [FIFF.FIFFV_COIL_CTF_GRAD,
                 FIFF.FIFFV_COIL_CTF_REF_GRAD,
                 FIFF.FIFFV_COIL_CTF_OFFDIAG_REF_GRAD]
    mag_or_fine[np.in1d(coil_types, ctf_grads)] = False
    msg = (' Processing %s gradiometers and %s magnetometers'
           % (len(grad_picks), len(mag_picks)))
    n_kit = len(mag_picks) - mag_or_fine.sum()
    if n_kit > 0:
        msg += ' (of which %s are actually KIT gradiometers)' % n_kit
    logger.info(msg)
    return meg_picks, mag_picks, grad_picks, good_picks, mag_or_fine
def _check_regularize(regularize):
"""Ensure regularize is valid."""
if not (regularize is None or (isinstance(regularize, str) and
regularize in ('in',))):
raise ValueError('regularize must be None or "in"')
def _check_usable(inst):
"""Ensure our data are clean."""
if inst.proj:
raise RuntimeError('Projectors cannot be applied to data during '
'Maxwell filtering.')
current_comp = inst.compensation_grade
if current_comp not in (0, None):
raise RuntimeError('Maxwell filter cannot be done on compensated '
'channels, but data have been compensated with '
'grade %s.' % current_comp)
def _col_norm_pinv(x):
    """Pseudo-invert ``x`` with column normalization for stability.

    Note: ``x`` is modified in place.
    """
    col_norms = np.sqrt((x * x).sum(axis=0))
    x /= col_norms
    u, sing, v = _safe_svd(x, full_matrices=False, **check_disable)
    v /= col_norms
    pinv = np.dot(v.T * 1. / sing, u.T)
    return pinv, sing
def _sq(x):
"""Square quickly."""
return x * x
def _check_finite(data):
"""Ensure data is finite."""
if not np.isfinite(data).all():
raise RuntimeError('data contains non-finite numbers')
def _sph_harm_norm(order, degree):
"""Compute normalization factor for spherical harmonics."""
# we could use scipy.special.poch(degree + order + 1, -2 * order)
# here, but it's slower for our fairly small degree
norm = np.sqrt((2 * degree + 1.) / (4 * np.pi))
if order != 0:
norm *= np.sqrt(factorial(degree - order) /
float(factorial(degree + order)))
return norm
def _concatenate_sph_coils(coils):
"""Concatenate MEG coil parameters for spherical harmoncs."""
rs = np.concatenate([coil['r0_exey'] for coil in coils])
wcoils = np.concatenate([coil['w'] for coil in coils])
ezs = np.concatenate([np.tile(coil['ez'][np.newaxis, :],
(len(coil['rmag']), 1))
for coil in coils])
bins = np.repeat(np.arange(len(coils)),
[len(coil['rmag']) for coil in coils])
return rs, wcoils, ezs, bins
_mu_0 = 4e-7 * np.pi  # magnetic permeability of free space (T*m/A)
def _get_mag_mask(coils):
    """Return a boolean mask marking which coils are magnetometers."""
    is_mag = [coil['coil_class'] == FWD.COILC_MAG for coil in coils]
    return np.array(is_mag)
def _sss_basis_basic(exp, coils, mag_scale=100., method='standard'):
    """Compute SSS basis using non-optimized (but more readable) algorithms.

    Parameters
    ----------
    exp : dict
        Must contain keys ``origin``, ``int_order``, and ``ext_order``.
    coils : list
        List of MEG coil information dicts.
    mag_scale : float
        Scale factor applied to magnetometer rows of the output.
    method : str
        'standard' computes the real basis directly; any other value takes
        a complex path (used for testing equivalence) and converts to real
        at the end.

    Returns
    -------
    S_tot : ndarray, shape (len(coils), n_in + n_out)
        Internal and external basis sets as a single ndarray.
    """
    from scipy.special import sph_harm
    int_order, ext_order = exp['int_order'], exp['ext_order']
    origin = exp['origin']
    # Compute vector between origin and coil, convert to spherical coords
    if method == 'standard':
        # Get position, normal, weights, and number of integration pts.
        rmags, cosmags, ws, bins = _concatenate_coils(coils)
        rmags -= origin
        # Convert points to spherical coordinates
        rad, az, pol = _cart_to_sph(rmags).T
        # Fold integration weights into the normals
        cosmags *= ws[:, np.newaxis]
        del rmags, ws
        out_type = np.float64
    else:  # testing equivalence method
        rs, wcoils, ezs, bins = _concatenate_sph_coils(coils)
        rs -= origin
        rad, az, pol = _cart_to_sph(rs).T
        ezs *= wcoils[:, np.newaxis]
        del rs, wcoils
        out_type = np.complex128
    del origin
    # Set up output matrices (S_in/S_out are views into S_tot)
    n_in, n_out = _get_n_moments([int_order, ext_order])
    S_tot = np.empty((len(coils), n_in + n_out), out_type)
    S_in = S_tot[:, :n_in]
    S_out = S_tot[:, n_in:]
    coil_scale = np.ones((len(coils), 1))
    coil_scale[_get_mag_mask(coils)] = mag_scale
    # Compute internal/external basis vectors (exclude degree 0; L/RHS Eq. 5)
    for degree in range(1, max(int_order, ext_order) + 1):
        # Only loop over positive orders, negative orders are handled
        # for efficiency within
        for order in range(degree + 1):
            S_in_out = list()
            grads_in_out = list()
            # Same spherical harmonic is used for both internal and external
            sph = sph_harm(order, degree, az, pol)
            sph_norm = _sph_harm_norm(order, degree)
            # Compute complex gradient for all integration points
            # in spherical coordinates (Eq. 6). The gradient for rad, az, pol
            # is obtained by taking the partial derivative of Eq. 4 w.r.t. each
            # coordinate.
            az_factor = 1j * order * sph / np.sin(np.maximum(pol, 1e-16))
            pol_factor = (-sph_norm * np.sin(pol) * np.exp(1j * order * az) *
                          _alegendre_deriv(order, degree, np.cos(pol)))
            if degree <= int_order:
                S_in_out.append(S_in)
                in_norm = _mu_0 * rad ** -(degree + 2)
                g_rad = in_norm * (-(degree + 1.) * sph)
                g_az = in_norm * az_factor
                g_pol = in_norm * pol_factor
                grads_in_out.append(_sph_to_cart_partials(az, pol,
                                                          g_rad, g_az, g_pol))
            if degree <= ext_order:
                S_in_out.append(S_out)
                out_norm = _mu_0 * rad ** (degree - 1)
                g_rad = out_norm * degree * sph
                g_az = out_norm * az_factor
                g_pol = out_norm * pol_factor
                grads_in_out.append(_sph_to_cart_partials(az, pol,
                                                          g_rad, g_az, g_pol))
            for spc, grads in zip(S_in_out, grads_in_out):
                # We could convert to real at the end, but it's more efficient
                # to do it now
                if method == 'standard':
                    grads_pos_neg = [_sh_complex_to_real(grads, order)]
                    orders_pos_neg = [order]
                    # Deal with the negative orders
                    if order > 0:
                        # it's faster to use the conjugation property for
                        # our normalized spherical harmonics than recalculate
                        grads_pos_neg.append(_sh_complex_to_real(
                            _sh_negate(grads, order), -order))
                        orders_pos_neg.append(-order)
                    for gr, oo in zip(grads_pos_neg, orders_pos_neg):
                        # Gradients dotted w/integration point weighted normals
                        gr = einsum('ij,ij->i', gr, cosmags)
                        vals = np.bincount(bins, gr, len(coils))
                        spc[:, _deg_ord_idx(degree, oo)] = -vals
                else:
                    grads = einsum('ij,ij->i', grads, ezs)
                    v = (np.bincount(bins, grads.real, len(coils)) +
                         1j * np.bincount(bins, grads.imag, len(coils)))
                    spc[:, _deg_ord_idx(degree, order)] = -v
                    if order > 0:
                        spc[:, _deg_ord_idx(degree, -order)] = \
                            -_sh_negate(v, order)
    # Scale magnetometers
    S_tot *= coil_scale
    if method != 'standard':
        # Eventually we could probably refactor this for 2x mem (and maybe CPU)
        # savings by changing how spc/S_tot is assigned above (real only)
        S_tot = _bases_complex_to_real(S_tot, int_order, ext_order)
    return S_tot
def _sss_basis(exp, all_coils):
    """Compute SSS basis for given conditions.

    Parameters
    ----------
    exp : dict
        Must contain the following keys:

            origin : ndarray, shape (3,)
                Origin of the multipolar moment space in millimeters
            int_order : int
                Order of the internal multipolar moment space
            ext_order : int
                Order of the external multipolar moment space
    coils : list
        List of MEG coils. Each should contain coil information dict specifying
        position, normals, weights, number of integration points and channel
        type. All coil geometry must be in the same coordinate frame
        as ``origin`` (``head`` or ``meg``).

    Returns
    -------
    bases : ndarray, shape (n_coils, n_mult_moments)
        Internal and external basis sets as a single ndarray.

    Notes
    -----
    Does not incorporate magnetometer scaling factor or normalize spaces.

    Adapted from code provided by Jukka Nenonen.
    """
    rmags, cosmags, bins, n_coils = all_coils[:4]
    int_order, ext_order = exp['int_order'], exp['ext_order']
    n_in, n_out = _get_n_moments([int_order, ext_order])
    S_tot = np.empty((n_coils, n_in + n_out), np.float64)
    rmags = rmags - exp['origin']
    # S_in / S_out are views into S_tot, so filling them fills S_tot
    S_in = S_tot[:, :n_in]
    S_out = S_tot[:, n_in:]
    # do the heavy lifting
    max_order = max(int_order, ext_order)
    L = _tabular_legendre(rmags, max_order)
    phi = np.arctan2(rmags[:, 1], rmags[:, 0])
    r_n = np.sqrt(np.sum(rmags * rmags, axis=1))
    r_xy = np.sqrt(rmags[:, 0] * rmags[:, 0] + rmags[:, 1] * rmags[:, 1])
    cos_pol = rmags[:, 2] / r_n  # cos(theta); theta 0...pi
    sin_pol = np.sqrt(1. - cos_pol * cos_pol)  # sin(theta)
    # Guard points lying on the z-axis, where azimuth is undefined
    z_only = (r_xy <= 1e-16)
    r_xy[z_only] = 1.
    cos_az = rmags[:, 0] / r_xy  # cos(phi)
    cos_az[z_only] = 1.
    sin_az = rmags[:, 1] / r_xy  # sin(phi)
    sin_az[z_only] = 0.
    del rmags
    # Appropriate vector spherical harmonics terms
    # JNE 2012-02-08: modified alm -> 2*alm, blm -> -2*blm
    r_nn2 = r_n.copy()
    r_nn1 = 1.0 / (r_n * r_n)
    for degree in range(max_order + 1):
        # Radial power terms are updated incrementally per degree
        if degree <= ext_order:
            r_nn1 *= r_n  # r^(l-1)
        if degree <= int_order:
            r_nn2 *= r_n  # r^(l+2)
        # mu_0*sqrt((2l+1)/4pi (l-m)!/(l+m)!)
        mult = 2e-7 * np.sqrt((2 * degree + 1) * np.pi)
        if degree > 0:
            # Order-zero terms (no azimuthal component)
            idx = _deg_ord_idx(degree, 0)
            # alpha
            if degree <= int_order:
                b_r = mult * (degree + 1) * L[degree][0] / r_nn2
                b_pol = -mult * L[degree][1] / r_nn2
                S_in[:, idx] = _integrate_points(
                    cos_az, sin_az, cos_pol, sin_pol, b_r, 0., b_pol,
                    cosmags, bins, n_coils)
            # beta
            if degree <= ext_order:
                b_r = -mult * degree * L[degree][0] * r_nn1
                b_pol = -mult * L[degree][1] * r_nn1
                S_out[:, idx] = _integrate_points(
                    cos_az, sin_az, cos_pol, sin_pol, b_r, 0., b_pol,
                    cosmags, bins, n_coils)
        for order in range(1, degree + 1):
            ord_phi = order * phi
            sin_order = np.sin(ord_phi)
            cos_order = np.cos(ord_phi)
            mult /= np.sqrt((degree - order + 1) * (degree + order))
            factor = mult * np.sqrt(2)  # equivalence fix (MF uses 2.)
            # Real
            idx = _deg_ord_idx(degree, order)
            r_fact = factor * L[degree][order] * cos_order
            az_fact = factor * order * sin_order * L[degree][order]
            pol_fact = -factor * (L[degree][order + 1] -
                                  (degree + order) * (degree - order + 1) *
                                  L[degree][order - 1]) * cos_order
            # alpha
            if degree <= int_order:
                b_r = (degree + 1) * r_fact / r_nn2
                b_az = az_fact / (sin_pol * r_nn2)
                b_az[z_only] = 0.
                b_pol = pol_fact / (2 * r_nn2)
                S_in[:, idx] = _integrate_points(
                    cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol,
                    cosmags, bins, n_coils)
            # beta
            if degree <= ext_order:
                b_r = -degree * r_fact * r_nn1
                b_az = az_fact * r_nn1 / sin_pol
                b_az[z_only] = 0.
                b_pol = pol_fact * r_nn1 / 2.
                S_out[:, idx] = _integrate_points(
                    cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol,
                    cosmags, bins, n_coils)
            # Imaginary
            idx = _deg_ord_idx(degree, -order)
            r_fact = factor * L[degree][order] * sin_order
            az_fact = factor * order * cos_order * L[degree][order]
            pol_fact = factor * (L[degree][order + 1] -
                                 (degree + order) * (degree - order + 1) *
                                 L[degree][order - 1]) * sin_order
            # alpha
            if degree <= int_order:
                b_r = -(degree + 1) * r_fact / r_nn2
                b_az = az_fact / (sin_pol * r_nn2)
                b_az[z_only] = 0.
                b_pol = pol_fact / (2 * r_nn2)
                S_in[:, idx] = _integrate_points(
                    cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol,
                    cosmags, bins, n_coils)
            # beta
            if degree <= ext_order:
                b_r = degree * r_fact * r_nn1
                b_az = az_fact * r_nn1 / sin_pol
                b_az[z_only] = 0.
                b_pol = pol_fact * r_nn1 / 2.
                S_out[:, idx] = _integrate_points(
                    cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol,
                    cosmags, bins, n_coils)
    return S_tot
def _integrate_points(cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol,
                      cosmags, bins, n_coils):
    """Integrate a spherical-coordinate field over each coil's points."""
    cart = _sp_to_cart(cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol).T
    dotted = einsum('ij,ij->i', cart, cosmags)
    return np.bincount(bins, dotted, n_coils)
def _tabular_legendre(r, nind):
"""Compute associated Legendre polynomials."""
r_n = np.sqrt(np.sum(r * r, axis=1))
x = r[:, 2] / r_n # cos(theta)
L = list()
for degree in range(nind + 1):
L.append(np.zeros((degree + 2, len(r))))
L[0][0] = 1.
pnn = 1.
fact = 1.
sx2 = np.sqrt((1. - x) * (1. + x))
for degree in range(nind + 1):
L[degree][degree] = pnn
pnn *= (-fact * sx2)
fact += 2.
if degree < nind:
L[degree + 1][degree] = x * (2 * degree + 1) * L[degree][degree]
if degree >= 2:
for order in range(degree - 1):
L[degree][order] = (x * (2 * degree - 1) *
L[degree - 1][order] -
(degree + order - 1) *
L[degree - 2][order]) / (degree - order)
return L
def _sp_to_cart(cos_az, sin_az, cos_pol, sin_pol, b_r, b_az, b_pol):
"""Convert spherical coords to cartesian."""
return np.array([(sin_pol * cos_az * b_r +
cos_pol * cos_az * b_pol - sin_az * b_az),
(sin_pol * sin_az * b_r +
cos_pol * sin_az * b_pol + cos_az * b_az),
cos_pol * b_r - sin_pol * b_pol])
def _get_degrees_orders(order):
    """Get the degree and order of each basis function up to ``order``."""
    degrees = np.zeros(_get_n_moments(order), int)
    orders = np.zeros_like(degrees)
    for degree in range(1, order + 1):
        # Fill positive and negative orders together for each degree
        # (loop variable renamed so it does not shadow the parameter)
        for this_order in range(degree + 1):
            ii = _deg_ord_idx(degree, this_order)
            degrees[ii] = degree
            orders[ii] = this_order
            ii = _deg_ord_idx(degree, -this_order)
            degrees[ii] = degree
            orders[ii] = -this_order
    return degrees, orders
def _alegendre_deriv(order, degree, val):
"""Compute the derivative of the associated Legendre polynomial at a value.
Parameters
----------
order : int
Order of spherical harmonic. (Usually) corresponds to 'm'.
degree : int
Degree of spherical harmonic. (Usually) corresponds to 'l'.
val : float
Value to evaluate the derivative at.
Returns
-------
dPlm : float
Associated Legendre function derivative
"""
from scipy.special import lpmv
assert order >= 0
return (order * val * lpmv(order, degree, val) + (degree + order) *
(degree - order + 1.) * np.sqrt(1. - val * val) *
lpmv(order - 1, degree, val)) / (1. - val * val)
def _bases_complex_to_real(complex_tot, int_order, ext_order):
    """Convert complex spherical harmonic bases to real ones."""
    n_in, n_out = _get_n_moments([int_order, ext_order])
    real_tot = np.empty(complex_tot.shape, np.float64)
    # Process the internal and external halves with their own max order
    pairs = ((complex_tot[:, :n_in], real_tot[:, :n_in], int_order),
             (complex_tot[:, n_in:], real_tot[:, n_in:], ext_order))
    for comp, real, exp_order in pairs:
        for deg in range(1, exp_order + 1):
            for order in range(deg + 1):
                idx_pos = _deg_ord_idx(deg, order)
                idx_neg = _deg_ord_idx(deg, -order)
                real[:, idx_pos] = _sh_complex_to_real(comp[:, idx_pos],
                                                       order)
                if order != 0:
                    # The extra sign flip on even orders is needed for
                    # round-trip consistency (baffling but verified)
                    sign = 1 if order % 2 else -1
                    real[:, idx_neg] = sign * _sh_complex_to_real(
                        comp[:, idx_neg], -order)
    return real_tot
def _bases_real_to_complex(real_tot, int_order, ext_order):
    """Convert real spherical harmonic bases to complex ones."""
    n_in, n_out = _get_n_moments([int_order, ext_order])
    comp_tot = np.empty(real_tot.shape, np.complex128)
    # Process the internal and external halves with their own max order
    pairs = ((real_tot[:, :n_in], comp_tot[:, :n_in], int_order),
             (real_tot[:, n_in:], comp_tot[:, n_in:], ext_order))
    for real, comp, exp_order in pairs:
        for deg in range(1, exp_order + 1):
            # only loop over positive orders, figure out neg from pos
            for order in range(deg + 1):
                idx_pos = _deg_ord_idx(deg, order)
                idx_neg = _deg_ord_idx(deg, -order)
                pos_val = _sh_real_to_complex([real[:, idx_pos],
                                               real[:, idx_neg]], order)
                comp[:, idx_pos] = pos_val
                comp[:, idx_neg] = _sh_negate(pos_val, order)
    return comp_tot
def _check_info(info, sss=True, tsss=True, calibration=True, ctc=True):
"""Ensure that Maxwell filtering has not been applied yet."""
for ent in info['proc_history']:
for msg, key, doing in (('SSS', 'sss_info', sss),
('tSSS', 'max_st', tsss),
('fine calibration', 'sss_cal', calibration),
('cross-talk cancellation', 'sss_ctc', ctc)):
if not doing:
continue
if len(ent['max_info'][key]) > 0:
raise RuntimeError('Maxwell filtering %s step has already '
'been applied, cannot reapply' % msg)
def _update_sss_info(raw, origin, int_order, ext_order, nchan, coord_frame,
                     sss_ctc, sss_cal, max_st, reg_moments, st_only):
    """Update info inplace after Maxwell filtering.

    Parameters
    ----------
    raw : instance of mne.io.Raw
        Data that was filtered.
    origin : array-like, shape (3,)
        Origin of internal and external multipolar moment space in head
        coords and in millimeters.
    int_order : int
        Order of internal component of spherical expansion.
    ext_order : int
        Order of external component of spherical expansion.
    nchan : int
        Number of sensors.
    coord_frame : str
        The coordinate frame used during processing.
    sss_ctc : dict
        The cross talk information.
    sss_cal : dict
        The calibration information.
    max_st : dict
        The tSSS information.
    reg_moments : ndarray | slice
        The moments that were used.
    st_only : bool
        Whether only tSSS (no SSS reconstruction) was performed.
    """
    n_in, n_out = _get_n_moments([int_order, ext_order])
    raw.info['maxshield'] = False
    # Flag which multipolar components survived regularization
    components = np.zeros(n_in + n_out).astype('int32')
    components[reg_moments] = 1
    sss_info_dict = dict(in_order=int_order, out_order=ext_order,
                         nchan=nchan, origin=origin.astype('float32'),
                         job=FIFF.FIFFV_SSS_JOB_FILTER,
                         nfree=np.sum(components[:n_in]),
                         frame=_str_to_frame[coord_frame],
                         components=components)
    max_info_dict = dict(max_st=max_st)
    if st_only:
        # No SSS reconstruction took place: leave those records empty
        max_info_dict.update(sss_info=dict(), sss_cal=dict(),
                             sss_ctc=dict())
    else:
        max_info_dict.update(sss_info=sss_info_dict, sss_cal=sss_cal,
                             sss_ctc=sss_ctc)
    # Any bad MEG channels were reconstructed, so reset 'bads'
    _reset_meg_bads(raw.info)
    block_id = _generate_meas_id()
    raw.info['proc_history'].insert(0, dict(
        max_info=max_info_dict, block_id=block_id, date=DATE_NONE,
        creator='mne-python v%s' % __version__, experimenter=''))
def _reset_meg_bads(info):
    """Drop every MEG channel from info['bads'] in place."""
    meg_set = set(pick_types(info, meg=True, exclude=[]))
    names = info['ch_names']
    info['bads'] = [bad for bad in info['bads']
                    if names.index(bad) not in meg_set]
check_disable = dict()  # not available on really old versions of SciPy
# Skip finiteness checking in SVD/QR calls when SciPy supports it (faster)
if 'check_finite' in _get_args(linalg.svd):
    check_disable['check_finite'] = False
def _orth_overwrite(A):
    """Create a slightly more efficient 'orth' (may overwrite ``A``)."""
    # adapted from scipy/linalg/decomp_svd.py
    u, s = _safe_svd(A, full_matrices=False, **check_disable)[:2]
    n_rows, n_cols = A.shape
    # Rank determined by singular values above machine-precision tolerance
    tol = max(n_rows, n_cols) * np.amax(s) * np.finfo(float).eps
    rank = np.sum(s > tol, dtype=int)
    return u[:, :rank]
def _overlap_projector(data_int, data_res, corr):
    """Calculate projector for removal of subspace intersection in tSSS.

    Parameters
    ----------
    data_int : ndarray
        Internal-space data. NOTE(review): assumed shape
        (n_signals, n_times) since it is transposed before
        orthogonalization -- confirm with caller.
    data_res : ndarray
        Residual data (same layout as ``data_int``).
    corr : float
        Correlation limit; singular directions with correlation >= corr
        are considered part of the intersection and projected out.

    Returns
    -------
    V_principal : ndarray, shape (n_time_pts, n_retained_inds)
        Temporal projector onto the intersecting subspace.
    """
    # corr necessary to deal with noise when finding identical signal
    # directions in the subspace. See the end of the Results section in [2]_
    # Note that the procedure here is an updated version of [2]_ (and used in
    # MF's tSSS) that uses residuals instead of internal/external spaces
    # directly. This provides more degrees of freedom when analyzing for
    # intersections between internal and external spaces.
    # Normalize data, then compute orth to get temporal bases. Matrices
    # must have shape (n_samps x effective_rank) when passed into svd
    # computation
    # we use np.linalg.norm instead of sp.linalg.norm here: ~2x faster!
    n = np.linalg.norm(data_int)
    n = 1. if n == 0 else n  # all-zero data should gracefully continue
    data_int = _orth_overwrite((data_int / n).T)
    n = np.linalg.norm(data_res)
    n = 1. if n == 0 else n
    data_res = _orth_overwrite((data_res / n).T)
    # Either space empty after orthogonalization -> nothing to project out
    if data_int.shape[1] == 0 or data_res.shape[1] == 0:
        return np.empty((data_int.shape[0], 0))
    Q_int = linalg.qr(data_int,
                      overwrite_a=True, mode='economic', **check_disable)[0].T
    Q_res = linalg.qr(data_res,
                      overwrite_a=True, mode='economic', **check_disable)[0]
    # Cross-correlation matrix between the two orthonormal bases
    C_mat = np.dot(Q_int, Q_res)
    del Q_int
    # Compute angles between subspace and which bases to keep
    S_intersect, Vh_intersect = _safe_svd(C_mat, full_matrices=False,
                                          **check_disable)[1:]
    del C_mat
    intersect_mask = (S_intersect >= corr)
    del S_intersect
    # Compute projection operator as (I-LL_T) Eq. 12 in [2]_
    # V_principal should be shape (n_time_pts x n_retained_inds)
    Vh_intersect = Vh_intersect[intersect_mask].T
    V_principal = np.dot(Q_res, Vh_intersect)
    return V_principal
def _update_sensor_geometry(info, fine_cal, ignore_ref):
    """Replace sensor geometry information and reorder cal_chs.

    Reads a fine calibration file, matches its channels against ``info``,
    adjusts channel normal orientations in place, and builds the
    calibration structures used by Maxwell filtering.

    Parameters
    ----------
    info : instance of Info
        Measurement info; channel ``loc`` entries are updated in place.
    fine_cal : str
        Path to the fine calibration file.
    ignore_ref : bool
        Whether to ignore reference channels when building the
        point-magnetometer coil sets.

    Returns
    -------
    calibration : dict
        With keys ``grad_imbalances``, ``grad_coilsets`` and ``mag_cals``.
    sss_cal : dict
        With keys ``cal_corrs`` and ``cal_chans`` (written to Info later).
    """
    from ._fine_cal import read_fine_calibration
    logger.info(' Using fine calibration %s' % op.basename(fine_cal))
    fine_cal = read_fine_calibration(fine_cal)  # filename -> dict
    ch_names = _clean_names(info['ch_names'], remove_whitespace=True)
    # Map index in info -> index in the calibration file
    info_to_cal = dict()
    missing = list()
    for ci, name in enumerate(fine_cal['ch_names']):
        if name not in ch_names:
            missing.append(name)
        else:
            oi = ch_names.index(name)
            info_to_cal[oi] = ci
    meg_picks = pick_types(info, meg=True, exclude=[])
    if len(info_to_cal) != len(meg_picks):
        raise RuntimeError(
            'Not all MEG channels found in fine calibration file, missing:\n%s'
            % sorted(list({ch_names[pick] for pick in meg_picks} -
                          set(fine_cal['ch_names']))))
    if len(missing):
        warn('Found cal channel%s not in data: %s' % (_pl(missing), missing))
    grad_picks = pick_types(info, meg='grad', exclude=())
    mag_picks = pick_types(info, meg='mag', exclude=())

    # Determine gradiometer imbalances and magnetometer calibrations
    grad_imbalances = np.array([fine_cal['imb_cals'][info_to_cal[gi]]
                                for gi in grad_picks]).T
    if grad_imbalances.shape[0] not in [1, 3]:
        raise ValueError('Must have 1 (x) or 3 (x, y, z) point-like ' +
                         'magnetometers. Currently have %i' %
                         grad_imbalances.shape[0])
    mag_cals = np.array([fine_cal['imb_cals'][info_to_cal[mi]]
                         for mi in mag_picks])
    # Now let's actually construct our point-like adjustment coils for grads
    grad_coilsets = _get_grad_point_coilsets(
        info, n_types=len(grad_imbalances), ignore_ref=ignore_ref)
    calibration = dict(grad_imbalances=grad_imbalances,
                       grad_coilsets=grad_coilsets, mag_cals=mag_cals)

    # Replace sensor locations (and track differences) for fine calibration
    ang_shift = np.zeros((len(fine_cal['ch_names']), 3))
    used = np.zeros(len(info['chs']), bool)
    cal_corrs = list()
    cal_chans = list()
    adjust_logged = False
    for oi, ci in info_to_cal.items():
        assert ch_names[oi] == fine_cal['ch_names'][ci]
        assert not used[oi]
        used[oi] = True
        info_ch = info['chs'][oi]
        # Numeric channel id, e.g. 'MEG0113' -> 113
        ch_num = int(fine_cal['ch_names'][ci].lstrip('MEG').lstrip('0'))
        cal_chans.append([ch_num, info_ch['coil_type']])

        # Some .dat files might only rotate EZ, so we must check first that
        # EX and EY are orthogonal to EZ. If not, we find the rotation between
        # the original and fine-cal ez, and rotate EX and EY accordingly:
        ch_coil_rot = _loc_to_coil_trans(info_ch['loc'])[:3, :3]
        cal_loc = fine_cal['locs'][ci].copy()
        cal_coil_rot = _loc_to_coil_trans(cal_loc)[:3, :3]
        if np.max([np.abs(np.dot(cal_coil_rot[:, ii], cal_coil_rot[:, 2]))
                   for ii in range(2)]) > 1e-6:  # X or Y not orthogonal
            if not adjust_logged:
                logger.info(' Adjusting non-orthogonal EX and EY')
                adjust_logged = True
            # find the rotation matrix that goes from one to the other
            this_trans = _find_vector_rotation(ch_coil_rot[:, 2],
                                               cal_coil_rot[:, 2])
            cal_loc[3:] = np.dot(this_trans, ch_coil_rot).T.ravel()

        # calculate shift angle
        v1 = _loc_to_coil_trans(cal_loc)[:3, :3]
        _normalize_vectors(v1)
        v2 = _loc_to_coil_trans(info_ch['loc'])[:3, :3]
        _normalize_vectors(v2)
        # Cosine of the angle between old and new coil axes (per axis)
        ang_shift[ci] = np.sum(v1 * v2, axis=0)
        if oi in grad_picks:
            extra = [1., fine_cal['imb_cals'][ci][0]]
        else:
            extra = [fine_cal['imb_cals'][ci][0], 0.]
        cal_corrs.append(np.concatenate([extra, cal_loc]))
        # Adjust channel normal orientations with those from fine calibration
        # Channel positions are not changed
        info_ch['loc'][3:] = cal_loc[3:]
        assert (info_ch['coord_frame'] == FIFF.FIFFV_COORD_DEVICE)
    assert used[meg_picks].all()
    assert not used[np.setdiff1d(np.arange(len(used)), meg_picks)].any()
    ang_shift = ang_shift[list(info_to_cal.values())]  # subselect used ones
    # This gets written to the Info struct
    sss_cal = dict(cal_corrs=np.array(cal_corrs),
                   cal_chans=np.array(cal_chans))

    # Log quantification of sensor changes
    # Deal with numerical precision giving absolute vals slightly more than 1.
    np.clip(ang_shift, -1., 1., ang_shift)
    np.rad2deg(np.arccos(ang_shift), ang_shift)  # Convert to degrees
    logger.info(' Adjusted coil positions by (μ ± σ): '
                '%0.1f° ± %0.1f° (max: %0.1f°)' %
                (np.mean(ang_shift), np.std(ang_shift),
                 np.max(np.abs(ang_shift))))
    return calibration, sss_cal
def _get_grad_point_coilsets(info, n_types, ignore_ref):
    """Get point-type coilsets for gradiometers.

    Builds one coil set per correction direction (x, then y, then z, up to
    ``n_types``), each with the gradiometers replaced by point
    magnetometers whose orientation has been rotated into that direction.

    Parameters
    ----------
    info : instance of Info
        Measurement info to pick gradiometers from.
    n_types : int
        Number of correction directions (1 for x-only, 3 for x/y/z).
    ignore_ref : bool
        Passed through to ``_prep_mf_coils``.

    Returns
    -------
    grad_coilsets : list
        One prepared coil set per direction.
    """
    # Homogeneous transforms that rotate the coil Z axis onto x, y, or z
    _rotations = dict(
        x=np.array([[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1.]]),
        y=np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1.]]),
        z=np.eye(4))
    grad_coilsets = list()
    grad_info = pick_info(
        _simplify_info(info), pick_types(info, meg='grad', exclude=[]))
    # Coil_type values for x, y, z point magnetometers
    # Note: 1D correction files only have x-direction corrections
    for ch in grad_info['chs']:
        ch['coil_type'] = FIFF.FIFFV_COIL_POINT_MAGNETOMETER
    orig_locs = [ch['loc'].copy() for ch in grad_info['chs']]
    for rot in 'xyz'[:n_types]:
        # Rotate the Z magnetometer orientation to the destination orientation
        for ci, ch in enumerate(grad_info['chs']):
            ch['loc'][3:] = _coil_trans_to_loc(np.dot(
                _loc_to_coil_trans(orig_locs[ci]),
                _rotations[rot]))[3:]
        grad_coilsets.append(_prep_mf_coils(grad_info, ignore_ref))
    return grad_coilsets
def _sss_basis_point(exp, trans, cal, ignore_ref=False, mag_scale=100.):
    """Compute multipolar moments for point-like mags (fine calibration).

    Sums, over all imbalance directions, the SSS basis of the
    point-magnetometer coil sets, each scaled by its gradiometer
    imbalance. ``ignore_ref`` is accepted for signature compatibility.
    """
    # These are magnetometers, so use a uniform coil scale of ``mag_scale``.
    point_coil_scale = np.array([mag_scale], float)
    basis_total = 0.
    for imbalance, coil_set in zip(cal['grad_imbalances'],
                                   cal['grad_coilsets']):
        contribution = _trans_sss_basis(exp, coil_set, trans,
                                        point_coil_scale)
        # Scale this direction's space by its gradiometer imbalance
        contribution *= imbalance[:, np.newaxis]
        basis_total += contribution
    return basis_total
def _regularize_out(int_order, ext_order, mag_or_fine):
    """Choose external-basis components to regularize out.

    The first three external components are removed unless any
    magnetometers (or fine-calibration point mags) are present.
    ``ext_order`` is accepted for signature symmetry but unused here.
    """
    n_in = _get_n_moments(int_order)
    n_remove = 0 if mag_or_fine.any() else 3
    return list(np.arange(n_remove) + n_in)
def _regularize_in(int_order, ext_order, S_decomp, mag_or_fine):
    """Regularize basis set using idealized SNR measure.

    Iteratively removes the internal component with the lowest idealized
    SNR, tracking total information at each step, then keeps the subset
    giving at least 98% of the peak information.

    Parameters
    ----------
    int_order, ext_order : int
        Internal/external expansion orders.
    S_decomp : ndarray
        Decomposition basis (copied; not modified in place).
    mag_or_fine : ndarray of bool
        Whether each channel is a magnetometer or fine-cal point mag.

    Returns
    -------
    in_removes : list
        Internal component indices to remove.
    out_removes : list
        External component indices to remove.
    """
    n_in, n_out = _get_n_moments([int_order, ext_order])

    # The "signal" terms depend only on the inner expansion order
    # (i.e., not sensor geometry or head position / expansion origin)
    a_lm_sq, rho_i = _compute_sphere_activation_in(
        np.arange(int_order + 1))
    degrees, orders = _get_degrees_orders(int_order)
    a_lm_sq = a_lm_sq[degrees]

    I_tots = np.zeros(n_in)  # we might not traverse all, so use np.zeros
    in_keepers = list(range(n_in))
    out_removes = _regularize_out(int_order, ext_order, mag_or_fine)
    out_keepers = list(np.setdiff1d(np.arange(n_in, n_in + n_out),
                                    out_removes))
    remove_order = []
    S_decomp = S_decomp.copy()
    # Column-normalize the basis before conditioning analysis
    use_norm = np.sqrt(np.sum(S_decomp * S_decomp, axis=0))
    S_decomp /= use_norm
    eigs = np.zeros((n_in, 2))

    # plot = False  # for debugging
    # if plot:
    #     import matplotlib.pyplot as plt
    #     fig, axs = plt.subplots(3, figsize=[6, 12])
    #     plot_ord = np.empty(n_in, int)
    #     plot_ord.fill(-1)
    #     count = 0
    #     # Reorder plot to match MF
    #     for degree in range(1, int_order + 1):
    #         for order in range(0, degree + 1):
    #             assert plot_ord[count] == -1
    #             plot_ord[count] = _deg_ord_idx(degree, order)
    #             count += 1
    #             if order > 0:
    #                 assert plot_ord[count] == -1
    #                 plot_ord[count] = _deg_ord_idx(degree, -order)
    #                 count += 1
    #     assert count == n_in
    #     assert (plot_ord >= 0).all()
    #     assert len(np.unique(plot_ord)) == n_in
    noise_lev = 5e-13  # noise level in T/m
    noise_lev *= noise_lev  # effectively what would happen by earlier multiply
    for ii in range(n_in):
        # SVD of the currently-kept basis columns
        this_S = S_decomp.take(in_keepers + out_keepers, axis=1)
        u, s, v = _safe_svd(this_S, full_matrices=False, **check_disable)
        del this_S
        eigs[ii] = s[[0, -1]]
        v = v.T[:len(in_keepers)]
        v /= use_norm[in_keepers][:, np.newaxis]
        # Idealized noise power per internal component
        eta_lm_sq = np.dot(v * 1. / s, u.T)
        del u, s, v
        eta_lm_sq *= eta_lm_sq
        eta_lm_sq = eta_lm_sq.sum(axis=1)
        eta_lm_sq *= noise_lev

        # Mysterious scale factors to match MF, likely due to differences
        # in the basis normalizations...
        eta_lm_sq[orders[in_keepers] == 0] *= 2
        eta_lm_sq *= 0.0025
        snr = a_lm_sq[in_keepers] / eta_lm_sq
        I_tots[ii] = 0.5 * np.log2(snr + 1.).sum()
        remove_order.append(in_keepers[np.argmin(snr)])
        in_keepers.pop(in_keepers.index(remove_order[-1]))
        # heuristic to quit if we're past the peak to save cycles
        if ii > 10 and (I_tots[ii - 1:ii + 1] < 0.95 * I_tots.max()).all():
            break
        # if plot and ii == 0:
        #     axs[0].semilogy(snr[plot_ord[in_keepers]], color='k')
    # if plot:
    #     axs[0].set(ylabel='SNR', ylim=[0.1, 500], xlabel='Component')
    #     axs[1].plot(I_tots)
    #     axs[1].set(ylabel='Information', xlabel='Iteration')
    #     axs[2].plot(eigs[:, 0] / eigs[:, 1])
    #     axs[2].set(ylabel='Condition', xlabel='Iteration')
    # Pick the components that give at least 98% of max info
    # This is done because the curves can be quite flat, and we err on the
    # side of including rather than excluding components
    max_info = np.max(I_tots)
    lim_idx = np.where(I_tots >= 0.98 * max_info)[0][0]
    in_removes = remove_order[:lim_idx]
    for ii, ri in enumerate(in_removes):
        logger.debug(' Condition %0.3f/%0.3f = %03.1f, '
                     'Removing in component %s: l=%s, m=%+0.0f'
                     % (tuple(eigs[ii]) + (eigs[ii, 0] / eigs[ii, 1],
                        ri, degrees[ri], orders[ri])))
    logger.debug(' Resulting information: %0.1f bits/sample '
                 '(%0.1f%% of peak %0.1f)'
                 % (I_tots[lim_idx], 100 * I_tots[lim_idx] / max_info,
                    max_info))
    return in_removes, out_removes
def _compute_sphere_activation_in(degrees):
    u"""Compute the "in" power from random currents in a sphere.

    Parameters
    ----------
    degrees : ndarray
        The degrees to evaluate.

    Returns
    -------
    a_power : ndarray
        The a_lm associated for the associated degrees (see [1]_).
    rho_i : float
        The current density.

    References
    ----------
    .. [1] A 122-channel whole-cortex SQUID system for measuring the
           brain's magnetic fields. Knuutila et al. IEEE Transactions on
           Magnetics, Vol 29 No 6, Nov 1993.
    """
    # Radius of the randomly-activated sphere
    r_in = 0.080
    # The current density rho_i is deterministic given the model below, so
    # the precomputed value is stored directly. Derivation ("surface"
    # version of the equation), observing at r=r_s with az=el=0 so only the
    # m=0 term matters:
    #     b_r_in = 100e-15      # fixed radial field amplitude (100 fT)
    #     r_s = 0.13            # 5 cm from the surface
    #     rho_degrees = np.arange(1, 100)
    #     in_sum = (rho_degrees * (rho_degrees + 1.) /
    #               ((2. * rho_degrees + 1.)) *
    #               (r_in / r_s) ** (2 * rho_degrees + 2)).sum() * 4. * np.pi
    #     rho_i = b_r_in * 1e7 / np.sqrt(in_sum)
    # (rho_i = 5.21334885574e-07 would be the value for r_s = 0.125)
    rho_i = 5.91107375632e-07
    numerator = degrees * r_in ** (2 * degrees + 4)
    denominator = _sq(2. * degrees + 1.) * (degrees + 1.)
    a_power = _sq(rho_i) * (numerator / denominator)
    return a_power, rho_i
def _trans_sss_basis(exp, all_coils, trans=None, coil_scale=100.):
    """Compute SSS basis (optionally) using a dev<->head trans.

    Parameters
    ----------
    exp : dict
        Expansion parameters (origin, orders).
    all_coils : tuple
        Prepared coil data; elements 0/1 hold coil positions/orientations,
        element 3 the channel count and element 4 the magnetometer mask.
    trans : instance of Transform | array-like | None
        Device->head transform to apply to the coils (None to skip).
    coil_scale : float | ndarray
        Scalar magnetometer scale, or a per-channel scale array.

    Returns
    -------
    S_tot : ndarray
        The (scaled) SSS basis.
    """
    if trans is not None:
        if not isinstance(trans, Transform):
            trans = Transform('meg', 'head', trans)
        assert not np.isnan(trans['trans']).any()
        # Rotate+translate positions; rotate-only (move=False) orientations
        all_coils = (apply_trans(trans, all_coils[0]),
                     apply_trans(trans, all_coils[1], move=False),
                     ) + all_coils[2:]
    if not isinstance(coil_scale, np.ndarray):
        # Scale all magnetometers (with `coil_class` == 1.0) by `mag_scale`
        cs = coil_scale
        coil_scale = np.ones((all_coils[3], 1))
        coil_scale[all_coils[4]] = cs
    S_tot = _sss_basis(exp, all_coils)
    S_tot *= coil_scale
    return S_tot
| bsd-3-clause |
michaelchu/kaleidoscope | kaleidoscope/datafeeds/sqlite_data.py | 1 | 3289 | from .base import BaseDataFeed
import kaleidoscope.globals as gb
import os
import sqlite3
import pandas as pd
class SQLiteDataFeed(BaseDataFeed):
    """Data feed that reads option-chain data from a local SQLite database.

    Each symbol is expected to live in a table named ``<symbol>_option_chain``.
    """

    def __init__(self, path=None):
        """Initialize the feed.

        :param path: path to the SQLite database file; if None, a default
                     path is built from the kaleidoscope global settings.
        """
        self.path = path

        # Mapping of normalized column names to their index in the database
        # table; -1 marks columns that are not available in the data source.
        self.opt_params = (
            ('symbol', 0),
            ('underlying_symbol', 1),
            ('quote_date', 2),
            ('root', 1),
            ('expiration', 4),
            ('strike', 5),
            ('option_type', 6),
            ('open', -1),
            ('high', -1),
            ('low', -1),
            ('close', -1),
            ('trade_volume', 11),
            ('bid_size', -1),
            ('bid', 13),
            ('ask_size', -1),
            ('ask', 15),
            ('underlying_price', 16),
            ('iv', -1),
            ('delta', 18),
            ('gamma', 19),
            ('theta', 20),
            ('vega', 21),
            ('rho', 22),
            ('open_interest', -1)
        )

        if self.path is None:
            # use default path if no path given
            self.path = os.path.join(os.sep, gb.PROJECT_DIR, gb.DATA_SUB_DIR,
                                     gb.DB_NAME + ".db")

    def get(self, symbol, start=None, end=None,
            exclude_splits=True, option_type=None):
        """
        Data provider wrapper around pandas read_sql_query for sqlite database.

        :param symbol: symbol to download option data for
        :param start: start date to retrieve data from
        :param end: end date to retrieve data to
        :param exclude_splits: exclude options created from the underlying's stock splits
        :param option_type: If None, or not passed in, will retrieve both calls and puts of option chain
        :return: dataframe containing option chains
        """
        # TODO: allow for various start and end date configurations
        # Build the WHERE clauses with bound parameters instead of string
        # interpolation. This fixes two bugs in the original implementation:
        #   1) when neither start nor end was given, filter conditions were
        #      appended with " AND" but no "WHERE", producing invalid SQL;
        #   2) ``if params is not None`` was always true (params was a dict),
        #      so a stray " AND" was appended unconditionally.
        # Binding values also protects against SQL injection.
        clauses = []
        bindings = []
        if start is not None:
            clauses.append("expiration >= ?")
            bindings.append(start)
        if end is not None:
            clauses.append("expiration <= ?")
            bindings.append(end)
        if exclude_splits:
            # exclude option chains created from the underlying's stock split
            clauses.append("root = ?")
            bindings.append(symbol)
        if option_type in ('c', 'p'):
            clauses.append("option_type = ?")
            bindings.append(option_type)

        # The table name cannot be a bound parameter; it is derived from the
        # caller-supplied symbol, as before.
        query = "SELECT * FROM %s_option_chain" % symbol
        if clauses:
            query += " WHERE " + " AND ".join(clauses)

        try:
            data_conn = sqlite3.connect(self.path)
            try:
                # may need to apply chunk size if loading large option chain set
                data = pd.read_sql_query(query, data_conn, params=bindings)
            finally:
                data_conn.close()  # always release the connection
        except (IOError, sqlite3.Error) as err:
            # preserve the original contract of raising IOError on failure
            raise IOError(err)

        # normalize dataframe columns
        return self._normalize(data, self.opt_params)
| mit |
joshloyal/scikit-learn | sklearn/neural_network/rbm.py | 46 | 12291 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <dauphiya@iro.umontreal.ca>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
    """Bernoulli Restricted Boltzmann Machine (RBM).

    A Restricted Boltzmann Machine with binary visible units and
    binary hidden units. Parameters are estimated using Stochastic Maximum
    Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
    [2].

    The time complexity of this implementation is ``O(d ** 2)`` assuming
    d ~ n_features ~ n_components.

    Read more in the :ref:`User Guide <rbm>`.

    Parameters
    ----------
    n_components : int, optional
        Number of binary hidden units.

    learning_rate : float, optional
        The learning rate for weight updates. It is *highly* recommended
        to tune this hyper-parameter. Reasonable values are in the
        10**[0., -3.] range.

    batch_size : int, optional
        Number of examples per minibatch.

    n_iter : int, optional
        Number of iterations/sweeps over the training dataset to perform
        during training.

    verbose : int, optional
        The verbosity level. The default, zero, means silent mode.

    random_state : integer or numpy.RandomState, optional
        A random number generator instance to define the state of the
        random permutations generator. If an integer is given, it fixes the
        seed. Defaults to the global numpy random number generator.

    Attributes
    ----------
    intercept_hidden_ : array-like, shape (n_components,)
        Biases of the hidden units.

    intercept_visible_ : array-like, shape (n_features,)
        Biases of the visible units.

    components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
        visible units and n_components is the number of hidden units.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.neural_network import BernoulliRBM
    >>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
    >>> model = BernoulliRBM(n_components=2)
    >>> model.fit(X)
    BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
           random_state=None, verbose=0)

    References
    ----------
    [1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
        deep belief nets. Neural Computation 18, pp 1527-1554.
        http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf

    [2] Tieleman, T. Training Restricted Boltzmann Machines using
        Approximations to the Likelihood Gradient. International Conference
        on Machine Learning (ICML) 2008
    """

    def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
                 n_iter=10, verbose=0, random_state=None):
        self.n_components = n_components
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.n_iter = n_iter
        self.verbose = verbose
        self.random_state = random_state

    def transform(self, X):
        """Compute the hidden layer activation probabilities, P(h=1|v=X).

        Parameters
        ----------
        X : {array-like, sparse matrix} shape (n_samples, n_features)
            The data to be transformed.

        Returns
        -------
        h : array, shape (n_samples, n_components)
            Latent representations of the data.
        """
        check_is_fitted(self, "components_")

        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        return self._mean_hiddens(X)

    def _mean_hiddens(self, v):
        """Computes the probabilities P(h=1|v).

        Parameters
        ----------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer.

        Returns
        -------
        h : array-like, shape (n_samples, n_components)
            Corresponding mean field values for the hidden layer.
        """
        p = safe_sparse_dot(v, self.components_.T)
        p += self.intercept_hidden_
        # In-place logistic sigmoid to avoid an extra allocation
        return expit(p, out=p)

    def _sample_hiddens(self, v, rng):
        """Sample from the distribution P(h|v).

        Parameters
        ----------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer to sample from.

        rng : RandomState
            Random number generator to use.

        Returns
        -------
        h : array-like, shape (n_samples, n_components)
            Values of the hidden layer.
        """
        p = self._mean_hiddens(v)
        # Bernoulli sampling: returns a boolean array
        return (rng.random_sample(size=p.shape) < p)

    def _sample_visibles(self, h, rng):
        """Sample from the distribution P(v|h).

        Parameters
        ----------
        h : array-like, shape (n_samples, n_components)
            Values of the hidden layer to sample from.

        rng : RandomState
            Random number generator to use.

        Returns
        -------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer.
        """
        p = np.dot(h, self.components_)
        p += self.intercept_visible_
        expit(p, out=p)
        return (rng.random_sample(size=p.shape) < p)

    def _free_energy(self, v):
        """Computes the free energy F(v) = - log sum_h exp(-E(v,h)).

        Parameters
        ----------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer.

        Returns
        -------
        free_energy : array-like, shape (n_samples,)
            The value of the free energy.
        """
        return (- safe_sparse_dot(v, self.intercept_visible_)
                - np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
                               + self.intercept_hidden_).sum(axis=1))

    def gibbs(self, v):
        """Perform one Gibbs sampling step.

        Parameters
        ----------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer to start from.

        Returns
        -------
        v_new : array-like, shape (n_samples, n_features)
            Values of the visible layer after one Gibbs step.
        """
        check_is_fitted(self, "components_")
        if not hasattr(self, "random_state_"):
            self.random_state_ = check_random_state(self.random_state)
        h_ = self._sample_hiddens(v, self.random_state_)
        v_ = self._sample_visibles(h_, self.random_state_)

        return v_

    def partial_fit(self, X, y=None):
        """Fit the model to the data X which should contain a partial
        segment of the data.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training data.

        Returns
        -------
        self : BernoulliRBM
            The fitted model.

        NOTE(review): despite the docstring, this method currently returns
        None (the trailing ``self._fit`` call returns nothing) -- confirm
        whether ``return self`` was intended.
        """
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        if not hasattr(self, 'random_state_'):
            self.random_state_ = check_random_state(self.random_state)
        # Lazily initialize parameters on the first call only
        if not hasattr(self, 'components_'):
            self.components_ = np.asarray(
                self.random_state_.normal(
                    0,
                    0.01,
                    (self.n_components, X.shape[1])
                ),
                order='F')
        if not hasattr(self, 'intercept_hidden_'):
            self.intercept_hidden_ = np.zeros(self.n_components, )
        if not hasattr(self, 'intercept_visible_'):
            self.intercept_visible_ = np.zeros(X.shape[1], )
        if not hasattr(self, 'h_samples_'):
            self.h_samples_ = np.zeros((self.batch_size, self.n_components))

        self._fit(X, self.random_state_)

    def _fit(self, v_pos, rng):
        """Inner fit for one mini-batch.

        Adjust the parameters to maximize the likelihood of v using
        Stochastic Maximum Likelihood (SML).

        Parameters
        ----------
        v_pos : array-like, shape (n_samples, n_features)
            The data to use for training.

        rng : RandomState
            Random number generator to use for sampling.
        """
        # Positive phase from data; negative phase from the persistent chain
        h_pos = self._mean_hiddens(v_pos)
        v_neg = self._sample_visibles(self.h_samples_, rng)
        h_neg = self._mean_hiddens(v_neg)

        lr = float(self.learning_rate) / v_pos.shape[0]
        # Gradient of the log-likelihood: <vh>_data - <vh>_model
        update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
        update -= np.dot(h_neg.T, v_neg)
        self.components_ += lr * update
        self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
        self.intercept_visible_ += lr * (np.asarray(
                                         v_pos.sum(axis=0)).squeeze() -
                                         v_neg.sum(axis=0))

        h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0  # sample binomial
        # floor with out=h_neg: keeps 1.0 where sampled, zeros elsewhere,
        # advancing the persistent chain in place
        self.h_samples_ = np.floor(h_neg, h_neg)

    def score_samples(self, X):
        """Compute the pseudo-likelihood of X.

        Parameters
        ----------
        X : {array-like, sparse matrix} shape (n_samples, n_features)
            Values of the visible layer. Must be all-boolean (not checked).

        Returns
        -------
        pseudo_likelihood : array-like, shape (n_samples,)
            Value of the pseudo-likelihood (proxy for likelihood).

        Notes
        -----
        This method is not deterministic: it computes a quantity called the
        free energy on X, then on a randomly corrupted version of X, and
        returns the log of the logistic function of the difference.
        """
        check_is_fitted(self, "components_")

        v = check_array(X, accept_sparse='csr')
        rng = check_random_state(self.random_state)

        # Randomly corrupt one feature in each sample in v.
        ind = (np.arange(v.shape[0]),
               rng.randint(0, v.shape[1], v.shape[0]))
        if issparse(v):
            # Flip the chosen bits via an additive sparse correction
            data = -2 * v[ind] + 1
            v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
        else:
            v_ = v.copy()
            v_[ind] = 1 - v_[ind]

        fe = self._free_energy(v)
        fe_ = self._free_energy(v_)
        return v.shape[1] * log_logistic(fe_ - fe)

    def fit(self, X, y=None):
        """Fit the model to the data X.

        Parameters
        ----------
        X : {array-like, sparse matrix} shape (n_samples, n_features)
            Training data.

        Returns
        -------
        self : BernoulliRBM
            The fitted model.
        """
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        n_samples = X.shape[0]
        rng = check_random_state(self.random_state)

        self.components_ = np.asarray(
            rng.normal(0, 0.01, (self.n_components, X.shape[1])),
            order='F')
        self.intercept_hidden_ = np.zeros(self.n_components, )
        self.intercept_visible_ = np.zeros(X.shape[1], )
        self.h_samples_ = np.zeros((self.batch_size, self.n_components))

        n_batches = int(np.ceil(float(n_samples) / self.batch_size))
        batch_slices = list(gen_even_slices(n_batches * self.batch_size,
                                            n_batches, n_samples))
        verbose = self.verbose
        begin = time.time()
        for iteration in xrange(1, self.n_iter + 1):
            for batch_slice in batch_slices:
                self._fit(X[batch_slice], rng)

            if verbose:
                end = time.time()
                print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
                      " time = %.2fs"
                      % (type(self).__name__, iteration,
                         self.score_samples(X).mean(), end - begin))
                begin = end

        return self
| bsd-3-clause |
manashmndl/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 78 | 4510 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data

n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
# Random low-rank signal: project rank-dimensional latents through an
# orthonormal basis U
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)

# Adding homoscedastic noise (same variance for every feature)
X_homo = X + sigma * rng.randn(n_samples, n_features)

# Adding heteroscedastic noise (a different variance per feature)
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas

###############################################################################
# Fit the models

n_components = np.arange(0, n_features, 5)  # options for n_components
def compute_scores(X):
    """Cross-validated scores of PCA and FA for each candidate dimension."""
    pca_model = PCA()
    fa_model = FactorAnalysis()

    pca_ll = []
    fa_ll = []
    for n_comp in n_components:
        pca_model.n_components = n_comp
        fa_model.n_components = n_comp
        pca_ll.append(np.mean(cross_val_score(pca_model, X)))
        fa_ll.append(np.mean(cross_val_score(fa_model, X)))

    return pca_ll, fa_ll
def shrunk_cov_score(X):
    """CV score of a ShrunkCovariance model with grid-searched shrinkage."""
    shrinkage_grid = {'shrinkage': np.logspace(-2, 0, 30)}
    search = GridSearchCV(ShrunkCovariance(), shrinkage_grid)
    best_model = search.fit(X).best_estimator_
    return np.mean(cross_val_score(best_model, X))
def lw_score(X):
    """CV score of a Ledoit-Wolf shrinkage covariance estimator."""
    scores = cross_val_score(LedoitWolf(), X)
    return np.mean(scores)
# Run model selection on both noise scenarios and plot the results.
for X, title in [(X_homo, 'Homoscedastic Noise'),
                 (X_hetero, 'Heteroscedastic Noise')]:
    pca_scores, fa_scores = compute_scores(X)
    n_components_pca = n_components[np.argmax(pca_scores)]
    n_components_fa = n_components[np.argmax(fa_scores)]

    # Minka's MLE for automatic dimensionality selection
    pca = PCA(n_components='mle')
    pca.fit(X)
    n_components_pca_mle = pca.n_components_

    print("best n_components by PCA CV = %d" % n_components_pca)
    print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
    print("best n_components by PCA MLE = %d" % n_components_pca_mle)

    plt.figure()
    plt.plot(n_components, pca_scores, 'b', label='PCA scores')
    plt.plot(n_components, fa_scores, 'r', label='FA scores')
    plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
    plt.axvline(n_components_pca, color='b',
                label='PCA CV: %d' % n_components_pca, linestyle='--')
    plt.axvline(n_components_fa, color='r',
                label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
    plt.axvline(n_components_pca_mle, color='k',
                label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')

    # compare with other covariance estimators
    plt.axhline(shrunk_cov_score(X), color='violet',
                label='Shrunk Covariance MLE', linestyle='-.')
    # BUG FIX: the original label was "'LedoitWolf MLE' % n_components_pca_mle",
    # which raises TypeError ("not all arguments converted during string
    # formatting") because the string has no conversion specifier.
    plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')

    plt.xlabel('nb of components')
    plt.ylabel('CV scores')
    plt.legend(loc='lower right')
    plt.title(title)

plt.show()
| bsd-3-clause |
em-er-es/rollo | rollo/src/rollo_visualization.py | 1 | 15564 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
## @file rollo_visualization.py
# @author Rabbia Asghar
# @author Ernest Skrzypczyk
#
# @date 25/2/16
#
# @brief Visualize motion capture data and EKF estimates
#
# Command prototype: <b>rosrun rollo rollo_visualization _rate:=25 _plotrefreshperiod:=100</b>
# \param rate Running frequency of the node <!25 [Hz]>
# \param plotrefreshperiod Plot refresh period <!100 [1]>
# \param ms Marker scale reference value <20 [1]>
# \param saveim Save path for generated images <!.>
# \param savevid Save path for generated animation video <!.>
# \param imtype Type of saved images <!png>
# \param imformat Format of saved images (dim_x x dim_y) <!512>
# \param duration Duration of visualization <!0>
#
# \warning Not all parameters and functions are currently processed
#
# Project github repository
#
# @see https://github.com/em-er-es/rollo/
#
''' TODO
* Part of the Doxygen documentation still seems not to be respected
* Make marker gradual transition from previous position to current
*C Implement proper animation for pseudo realtime display of measurements
*
* TODO later
* Implement dynamic server reconfiguration, use class for main()
* Double check results for animation
*P Implement saving generated images to a video (code is there)
*P Implement saving generated images to a path
* Implement a duration parameter
* Add references from node to topics and console colors, parse them properly (pycparse)
'''
## Import
## Import basic Python libraries
from __future__ import print_function #Python2
## Import matplotlib
import matplotlib
## Set UI specifics
matplotlib.use('GtkAgg')
## Import plot
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
## Import animation for export to video
from matplotlib import animation
# from matplotlib import interactive
import numpy as np
import time
## Multiprocessing library for process and pipe
from multiprocessing import Process, Pipe
import gobject
## Import ROS libraries
import roslib
roslib.load_manifest('rollo')
import rospy
#from dynamic_reconfigure.server import Server as DynamicReconfigureServer # Not used right now
## Import messages for Rollo
## Import standard Pose2D
from geometry_msgs.msg import Pose2D
## Import custom messages for Rollo
# Confirm the PYTHONPATH environment variable content
## Import EKF message
from rollo.msg import EKF
# from rollo.msg import _EKF # Also works
## Import Pose2DStamped message
from rollo.msg import Pose2DStamped
# Global variables
## Node name using console codes (ANSI escape: colored foreground, then reset)
NodeName = "\033[38;5;160mVIS \033[0m"
# NodeName = "VIS "
## Visualize rate [Hz] == [fps]
# NOTE: overwritten from the ROS parameter '~rate' in RolloVisualization()
rate = 25 # 25 [Hz] = 25 [fps]
## Plot refresh period [1]
# NOTE: overwritten from the ROS parameter '~plotrefreshperiod' in RolloVisualization()
plotRefreshPeriod = 100
## Loop counter
LoopCounter = 1
## Marker scale
# NOTE: overwritten from the ROS parameter '~ms' in RolloVisualization()
markerScale = 20
# Plot components and parameters
## Maximal coordinates - symmetrical\n
# Negative value used to mirror the current calibration setup of motion capture system and keep sanity with adjustments to the plot\n
# Positive value used to represent the current calibration setup of motion capture system and keep sanity with adjustments to the plot
axl = 4
# axl = -4
## X axis limit
axlx = axl
## Y axis limit
axly = axl
# Global flags
## Global message flag 1 -- Motion capture data from preprocessor
flagSubscriber1 = False
## Global message flag 2 -- Extended Kalman filter estimates and odometry modeled data from EKF node
flagSubscriber2 = False
## Subscriber callback for measurement data
#
# \param msg Incoming pose message from the preprocessor topic
def subscriberCallbackMeasurement(msg):
    """Store the newest motion-capture message and mark it as unread."""
    global MessageMeasurement, flagSubscriber1
    # Keep the raw message around for the plotting side to consume.
    MessageMeasurement = msg
    # Tell the main loop that fresh measurement data arrived.
    flagSubscriber1 = True
    return 0
## Subscriber callback for EKF data
#
# \param msg Incoming EKF message (carries ekfpose2d and odompose2d)
def subscriberCallbackEKF(msg):
    """Store the newest EKF/odometry message and mark it as unread."""
    global MessageEKF, flagSubscriber2
    MessageEKF = msg
    # Tell the main loop that fresh EKF data arrived.
    flagSubscriber2 = True
    return 0
# FUNCTIONS UNUSED START
# Animation
## Initialization function for animation
#
# Clears the global position artist so each animation run starts from an
# empty frame background.
def initAnimation():
    global Pos
    # No data yet: the artist draws nothing until animatePlot() feeds it.
    Pos.set_data([], [])
    return Pos
## Animation callback function
#
# \param i Iteration step (required by FuncAnimation's interface, unused here)
#
# Called sequentially by matplotlib's animation machinery; pushes the most
# recent motion-capture position into the global plot artist.
def animatePlot(i):
    global Pos
    Pos.set_data(MessageMeasurement.pose2d.x, MessageMeasurement.pose2d.y)
    return Pos
## Initialize plot
#
# \param object Matplotlib axes to initialize (symmetric limits, grid on)
#
# BUG FIX: the original body referenced the undefined name `self`; the
# function now operates on the axes that are actually passed in (see the
# commented-out call `initPlot(self.ax)` in ProcessPlotter.poll_draw).
def initPlot(object):
    # Symmetric plot limits matching the module-level axis globals.
    object.axis([-axlx, axlx, -axly, axly])
    object.grid(1)
# def clearPlot():
# ax = plt.axes(xlim=(-axlx, axlx), ylim=(-axly, axly))
# plt.grid(1)
## Generate and update plot
#
# \param initcond 0 on the first call (plot skeleton still to be built)
#
# UNUSED: superseded by the ProcessPlotter/MultiProcessPlot pipeline below.
def generatePlot(initcond):
    rospy.loginfo("[Rollo][%s][generatePlot] Init", NodeName) # //DB
    ## - First loop
    if (initcond == 0):
        ## Initilize plot skeleton
        # figure, axis = plt.subplots()
        # plt.axis([-axlx, axlx, -axly, axly])
        figure = plt.figure()
        ax = plt.axes(xlim=(-axlx, axlx), ylim=(-axly, axly))
        plt.grid(1)
        # plt.ion()
        # Pos, = plt.plot(MessageMeasurement.pose2d.x, MessageMeasurement.pose2d.y)
        global Pos
        Pos, = ax.plot([], [], lw = 2)
        # plt.show(figure)
        # NOTE(review): this rebinds a function-local name only; the global
        # markerScale is NOT changed here.
        markerScale = 3
        ## Animation calls
        # anim = animation.FuncAnimation(figure, animatePlot, init_func=initAnimation, frames = 25, interval = 1, blit = True)
        # anim = animation.FuncAnimation(figure, animatePlot, init_func=initAnimation, frames = 200, interval = 20, blit = True)
        # anim = animation.FuncAnimation(figure, animatePlot, init_func=initAnimation, frames = 30, interval = 1, blit = True)
        ## After initilization send the condition variable
        # NOTE(review): rebinding the argument has no effect at the caller.
        initcond = 1
    ## Generate quiver plot
    # Pos = plt.plot(MessageMeasurement.x, MessageMeasurement.y, 'b', marker=(3, 0, of), markersize = markerScale / 3)
    # Pos, = plt.plot(MessageMeasurement.x, MessageMeasurement.y)
    Pos, = plt.plot(MessageMeasurement.pose2d.x, MessageMeasurement.pose2d.y)
    plt.show(block=False)
    # First set up the figure, the axis, and the plot element we want to animate
    # fig = plt.figure()
    # ax = plt.axes(xlim=(0, 2), ylim=(-2, 2))
    # line, = ax.plot([], [], lw=2)
    # call the animator. blit=True means only re-draw the parts that have changed.
    # anim = animation.FuncAnimation(figure, animate, init_func = init, frames = rate * 10, interval = rate, blit = True)
    # Save the animation as an mp4. This requires ffmpeg or mencoder to be
    # installed. The extra_args ensure that the x264 codec is used, so that
    # the video can be embedded in html5. You may need to adjust this for
    # your system: for more information, see
    # http://matplotlib.sourceforge.net/api/animation_api.html
    # anim.save('basic_animation.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
    # Save to pipe, read from pipe: $ mpv video
    # anim.save('video', fps=30, extra_args=['-vcodec', 'libx264'])
    # Save generated plot
    # plt.savefig('L%d.png' % LoopCounter)
    # plt.show(block=False)
    ## Reset subscriber flags
    global flagSubscriber1
    flagSubscriber1 = False
    global flagSubscriber2
    flagSubscriber2 = False
    return 0
# FUNCTIONS UNUSED END
class ProcessPlotter(object):
    """Plot-process side of the visualization.

    Receives pose tuples over a multiprocessing pipe and renders three
    trajectories (motion capture, odometry, EKF estimate) with matplotlib.
    Runs in a separate process -- see MultiProcessPlot; __call__ is the
    process entry point.
    """
    def __init__(self):
        # Global variables used from callback
        global samples
        samples = 2 # Sufficient to connect markers
        # Initialize samples with zeroes
        ## X data from motion capture
        self.x1 = np.zeros(samples)
        ## Y data from motion capture
        self.y1 = np.zeros(samples)
        ## X data from odometry model
        self.x2 = np.zeros(samples)
        ## Y data from odometry model
        self.y2 = np.zeros(samples)
        ## X data from EKF estimates
        self.x3 = np.zeros(samples)
        ## Y data from EKF estimates
        self.y3 = np.zeros(samples)
    def terminate(self):
        # Close every matplotlib figure; this lets the plot process exit.
        plt.close('all')
    def poll_draw(self):
        # Build and return the callback that the gobject timeout invokes
        # repeatedly (see __call__); it drains the pipe and redraws.
        def call_back():
            ## Local variables
            # global samples = 10
            # samples
            # ofd = - 90
            # ofd = - 135
            ## Orientation offset of the triangular marker [deg]
            ofd = 150
            ## Set plot limits
            self.ax.axis([-axlx, axlx, -axly, axly])
            ## Set plot legend
            red_marker = mpatches.Patch(color='red', label='Motion capture')
            green_marker = mpatches.Patch(color='green', label='Odometry')
            blue_marker = mpatches.Patch(color='blue', label='EKF')
            self.ax.legend(handles = [red_marker, green_marker, blue_marker], bbox_to_anchor = (0.0, 1.02, 1.0, 0.102), loc = 8, ncol = 3, mode = "expand", borderaxespad = 0.0)
            ## Set plot labels
            self.ax.set_xlabel('x [m]')
            self.ax.set_ylabel('y [m]')
            self.ax.yaxis.set_label('y [m]')
            ## Main loop: drain everything currently queued in the pipe
            while 1:
                ## Nothing (more) to read -> leave the loop and redraw
                if not self.pipe.poll():
                    break
                ## Read data from pipe
                data = self.pipe.recv()
                ## None is the shutdown sentinel sent by MultiProcessPlot.plot(finished=True)
                if data is None:
                    self.terminate()
                    return False
                # Proceed with processing
                else:
                    # Shift last samples
                    # NOTE(review): this forward copy is correct only for
                    # samples == 2; for a larger buffer it would smear
                    # element 0 over the whole array -- iterate in reverse
                    # if `samples` is ever increased.
                    for i in range(samples - 1):
                        self.x1[i + 1] = self.x1[i]
                        self.y1[i + 1] = self.y1[i]
                        self.x2[i + 1] = self.x2[i]
                        self.y2[i + 1] = self.y2[i]
                        self.x3[i + 1] = self.x3[i]
                        self.y3[i + 1] = self.y3[i]
                    # Assign data from pipe to local variables
                    # data layout (see MultiProcessPlot.plot):
                    # [reset, x/y/theta capture, x/y/theta odometry, x/y/theta EKF]
                    self.x1[0] = data[1]
                    self.y1[0] = data[2]
                    self.x2[0] = data[4]
                    self.y2[0] = data[5]
                    self.x3[0] = data[7]
                    self.y3[0] = data[8]
                    # Correct and assign orientation of marker/robot
                    of1 = np.rad2deg(data[3]) + ofd
                    of2 = np.rad2deg(data[6]) + ofd
                    of3 = np.rad2deg(data[9]) + ofd
                    # Plot n-samples
                    ## Red - Data from preprocessor
                    self.ax.plot(self.x1, self.y1, 'r', marker = (3, 0, of1), markersize = markerScale)
                    ## Green - Data from odometry sent by EKF
                    self.ax.plot(self.x2, self.y2, 'g', marker = (3, 0, of2), markersize = markerScale * 0.8)
                    ## Blue - Data from EKF
                    self.ax.plot(self.x3, self.y3, 'b', marker = (3, 0, of3), markersize = markerScale * 0.6)
                    if data[0]: # Clear every so often
                        # rospy.loginfo("[Rollo][%s][ProcessPlotter] Clear and reinitalize plot @ loop: %d", NodeName, LoopCounter) # //VB
                        rospy.loginfo("[Rollo][%s][ProcessPlotter] Clear and reinitalize plot", NodeName) # //VB
                        ## Clear plot
                        self.ax.cla()
                        ## Reinitialize plot
                        # initPlot(self.ax)
                        self.ax.grid(1)
                        ## Set plot labels
                        self.ax.set_xlabel('x [m]')
                        self.ax.set_ylabel('y [m]')
                        self.ax.axis([-axlx, axlx, -axly, axly])
                        # self.ax.yaxis.set_label('y [m]')
                        # self.ax.legend([red_marker, green_marker, blue_marker], ["Motion capture", "Odometry", "EKF"], bbox_to_anchor = (1.0, 1.02, 0.0, 0.102), loc = 3, ncol = 3, mode = "expand", borderaxespad = 0.0)
                        self.ax.legend(handles = [red_marker, green_marker, blue_marker], bbox_to_anchor = (0.0, 1.02, 1.0, 0.102), loc = 8, ncol = 3, mode = "expand", borderaxespad = 0.0)
            self.fig.canvas.draw()
            return True
        return call_back
    def __call__(self, pipe):
        # Process entry point: build the figure, then poll the pipe via a
        # zero-delay gobject timeout while the GTK/matplotlib loop runs.
        rospy.loginfo("[Rollo][%s][ProcessPlotter] Loop: %d", NodeName, LoopCounter) # //VB
        # Initialize plot skeleton
        ## Data transmission pipe between processes
        self.pipe = pipe
        self.fig, self.ax = plt.subplots()
        self.fig.canvas.set_window_title('Rollo visualization node')
        plt.grid(1)
        # poll_draw() is *called* here: its return value (call_back) is what
        # the timeout schedules.
        self.gid = gobject.timeout_add(0, self.poll_draw())
        rospy.loginfo("[Rollo][%s][ProcessPlotter] Initialized", NodeName) # //VB
        plt.ioff()
        plt.show()
class MultiProcessPlot(object):
    """Main-process handle for the plotting subprocess.

    Owns one end of a multiprocessing pipe; the other end is consumed by a
    ProcessPlotter instance running in a daemonized child process.
    """
    ## Initilization
    def __init__(self):
        self.plotpipe, PlotterPipe = Pipe()
        ## Called process for plotting
        self.plotter = ProcessPlotter()
        ## Process holder (daemonized so it dies with the node)
        self.plotprocess = Process(target = self.plotter, args = (PlotterPipe, ))
        self.plotprocess.daemon = True
        self.plotprocess.start()
    ## Plot function
    def plot(self, finished=False):
        """Forward the newest message pair to the plot process.

        \param finished When True, send the None sentinel so the plot
               process terminates instead of drawing.
        """
        # BUG FIX: declare the flags as globals. Without this the two
        # assignments at the end of the method created method-local
        # variables, so the module-level flags -- set by the subscriber
        # callbacks and checked in RolloVisualization's main loop -- were
        # never actually reset after a plot update.
        global flagSubscriber1, flagSubscriber2
        send = self.plotpipe.send
        if finished:
            send(None)
        else:
            ## Request a plot clear every plotRefreshPeriod-th loop
            if not LoopCounter % plotRefreshPeriod:
                reset = 1
            else:
                reset = 0
            ## Compose data for pipe:
            # [reset, x/y/theta from motion capture,
            #  x/y/theta from odometry, x/y/theta from the EKF estimate]
            data = [reset,
                    MessageMeasurement.pose2d.x, MessageMeasurement.pose2d.y, MessageMeasurement.pose2d.theta,
                    MessageEKF.odompose2d.x, MessageEKF.odompose2d.y, MessageEKF.odompose2d.theta,
                    MessageEKF.ekfpose2d.x, MessageEKF.ekfpose2d.y, MessageEKF.ekfpose2d.theta]
            # print(MessageEKF.ekfpose2d.x, MessageEKF.ekfpose2d.y, MessageEKF.ekfpose2d.theta) # //VB
            # print(MessageEKF.odompose2d.x, MessageEKF.odompose2d.y, MessageEKF.odompose2d.theta) # //VB
            ## Send data through pipe
            send(data)
        ## Reset global flags to receive new input
        flagSubscriber1 = False
        flagSubscriber2 = False
## Node class function
def RolloVisualization():
    """ Node main function
    Runs visualization based on provided position and orientation data:
    subscribes to the preprocessed motion-capture and EKF topics, spawns
    the plotting subprocess and forwards each complete message pair to it.
    """
    ##! Initiliaze:
    ## - Refer to global variable so that it can be changed
    global LoopCounter
    ## - Initialize rospy
    # roscpp_initialize(sys.argv)
    rospy.init_node('rollo_visualization', anonymous=True)
    # Get node parameters (override the module-level defaults)
    global rate
    rate = float(rospy.get_param('~rate', '25'))
    global plotRefreshPeriod
    plotRefreshPeriod = float(rospy.get_param('~plotrefreshperiod', '100'))
    global markerScale
    markerScale = float(rospy.get_param('~ms', '20'))
    # PARAMETERS UNUSED START
    # saveim = string(rospy.get_param('~saveim', '.'))
    # savevid = string(rospy.get_param('~savevid', '.'))
    # imtype = string(rospy.get_param('~imtype', 'png'))
    # imformat = int(rospy.get_param('~imformat', '512'))
    # duration = float(rospy.get_param('~d', '0'))
    # PARAMETERS UNUSED END
    ## Set frequency rate for visualization node
    rosrate = rospy.Rate(rate)
    ## Subscribe to EKF topic
    rospy.Subscriber('/Rollo/ekf', EKF, subscriberCallbackEKF, queue_size = 1024)
    ## Subscribe to motion capture topic
    # Can use both, but need another callback for that
    # rospy.Subscriber('/Optitrack_Rollo/ground_pose', Pose2D, subscriberCallbackMeasurement, queue_size = 1024)
    rospy.Subscriber('/Rollo/pose2dstamped', Pose2DStamped, subscriberCallbackMeasurement, queue_size = 1024)
    # NOTE(review): initcond is never read afterwards
    initcond = 0
    ## Multiprocessing
    ## Start another process for plotting
    mpp = MultiProcessPlot()
    # CODE UNUSED START
    # processPlotting = multiprocessing.Process(target = generatePlot(0), args=())
    # processPlotting = Process(target = generatePlot(0), args=())
    # processPlotting.daemon = True
    # processPlotting.start()
    # CODE UNUSED END
    while not rospy.is_shutdown():
        ## Main loop
        # rospy.loginfo("[Rollo][%s][Main] Loop: %d", NodeName, LoopCounter) # //DB
        # Heartbeat log on every 1000th loop
        if not LoopCounter % 1000:
            rospy.loginfo("[Rollo][%s][Main] Loop: %d", NodeName, LoopCounter) # //DB
        ## Plot new message set once both subscribers delivered fresh data
        if (flagSubscriber1 == True) and (flagSubscriber2 == True):
            # rospy.loginfo("[Rollo][%s][Main] Generate and update plot", NodeName) # //DB
            mpp.plot()
        # CODE UNUSED START
        ## Animation
        # Pos, = plt.plot(MessageMeasurement.x, MessageMeasurement.y)
        # global animatePlot
        # anim = animation.FuncAnimation(figure, animate, frames = 10, interval = 4, blit = True)
        # anim.save('basic_animation.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
        # CODE UNUSED END
        ## Sleep to conform node frequency rate
        rosrate.sleep()
        ## Update loop counter
        LoopCounter = LoopCounter + 1
    ## Main loop end
    ## Wait for plotprocess to finish
    mpp.plotprocess.join()
    # rospy.spin()
## Script run condition
if __name__ == '__main__':
    try:
        RolloVisualization()
    except rospy.ROSInterruptException:
        # Raised by rospy sleep/wait calls on node shutdown (e.g. Ctrl-C);
        # exit quietly instead of printing a traceback.
        pass
| gpl-2.0 |
r-rathi/error-control-coding | perf/error_probs.py | 1 | 2278 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
from scipy.misc import comb as C
# Error probability calculations, based on:
# "On the Performance of Concatenated Reed-Solomon, Convolutional and Parity
# Check Codes for BWA Applications"
# www.ieee802.org/16/tg1/phy/contrib/802161pc-00_37.pdf
# pbi - input BER
# psi - input SER
# pbo - output BER
# pso - output SER = prob. that a particular symbol is in error
# pw - WER (Word Error Rate) = prob. that the codeword is in error
# m - symbol size
# n - code length (usually 2^m - 1)
# t - error correction capability
# k - code dimension, n - k = 2t for RS
def psi(m, pbi):
    """Input symbol error rate for m-bit symbols, given input bit error rate pbi."""
    bit_ok = 1 - pbi            # probability that a single bit is correct
    return 1 - bit_ok ** m      # ~ m * pbi when pbi << 1/m
def pw(n, t, psi):
    """Word error rate: probability that more than t of the n symbols are in error."""
    return sum(C(n, i) * psi ** i * (1 - psi) ** (n - i)
               for i in range(t + 1, n + 1))
def pso(n, t, psi):
    """Output symbol error rate: probability that a particular symbol is in error."""
    return sum(C(n - 1, i - 1) * psi ** i * (1 - psi) ** (n - i)
               for i in range(t + 1, n + 1))
def pbo(n, t, pbi, psi, pso):
    """Output bit error rate derived from the output symbol error rate.

    n and t are unused but kept so the signature parallels pw()/pso().
    """
    return pbi * pso / psi
def pbi_vs_pbo(pbis, m, n, t):
    """Sweep input BERs and return the corresponding output BERs as an array."""
    out = []
    for bit_err_in in pbis:
        sym_err_in = psi(m, bit_err_in)
        sym_err_out = pso(n, t, sym_err_in)
        out.append(pbo(n, t, bit_err_in, sym_err_in, sym_err_out))
    return np.array(out)
def weight_dist1(q, t, i, exact=False):
    """Number of weight-i codewords of a t-error-correcting RS code over GF(q).

    Lin & Costello, Eq. 7.3, page 238.
    """
    if i == 0:
        return 1            # the all-zero codeword
    if i <= 2 * t:
        return 0            # no codewords below the minimum distance
    acc = 0
    for j in range(2 * t + 1):
        acc += (-1) ** (i + j) * C(i, j, exact=exact) * (q ** (2 * t) - q ** i)
    acc += (q - 1) ** i
    scale = C(q - 1, i, exact=exact) * q ** (-2 * t)
    return acc * scale
def weight_dist2(q, t, l, exact=False):
    """Number of weight-l codewords of an RS code over GF(q).

    Blahut, Corollary 12.1.3, page 378.
    """
    n = q - 1        # code length
    d = 2 * t + 1    # minimum distance
    if l == 0:
        return 1     # the all-zero codeword
    if l < d:
        return 0     # nothing below the minimum distance
    total = sum((-1) ** j * C(l, j, exact=exact) * (q ** (l - d + 1 - j) - 1)
                for j in range(l - d + 1))
    return total * C(n, l, exact=exact)
def weight_dist3(q, t, l, exact=False):
    """Number of weight-l codewords of an RS code over GF(q).

    Blahut, Theorem 12.1.2, page 376 (equivalent to weight_dist2).
    """
    n = q - 1        # code length
    d = 2 * t + 1    # minimum distance
    if l == 0:
        return 1     # the all-zero codeword
    if l < d:
        return 0     # nothing below the minimum distance
    total = sum((-1) ** j * C(l - 1, j, exact=exact) * q ** (l - d - j)
                for j in range(l - d + 1))
    return total * (C(n, l, exact=exact) * (q - 1))
| mit |
shenzebang/scikit-learn | sklearn/metrics/regression.py | 175 | 16953 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Michael Eickenberg <michael.eickenberg@gmail.com>
# Konstantin Shmelkov <konstantin.shmelkov@polytechnique.edu>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
# NOTE(review): this is misspelled -- the module-export convention is
# `__all__` (lowercase), so this list currently has no effect on
# `from ... import *`. Renaming it would change star-import behavior and
# should be coordinated with callers.
__ALL__ = [
    "mean_absolute_error",
    "mean_squared_error",
    "median_absolute_error",
    "r2_score",
    "explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
    """Check that y_true and y_pred belong to the same regression task
    Parameters
    ----------
    y_true : array-like,
    y_pred : array-like,
    multioutput : array-like or string in ['raw_values', uniform_average',
        'variance_weighted'] or None
        None is accepted due to backward compatibility of r2_score().
    Returns
    -------
    type_true : one of {'continuous', continuous-multioutput'}
        The type of the true target data, as output by
        'utils.multiclass.type_of_target'
    y_true : array-like of shape = (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape = (n_samples, n_outputs)
        Estimated target values.
    multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
        uniform_average', 'variance_weighted'] or None
        Custom output weights if ``multioutput`` is array-like or
        just the corresponding argument if ``multioutput`` is a
        correct keyword.
    """
    check_consistent_length(y_true, y_pred)
    y_true = check_array(y_true, ensure_2d=False)
    y_pred = check_array(y_pred, ensure_2d=False)
    # 1d targets are treated as a single output column.
    if y_true.ndim == 1:
        y_true = y_true.reshape((-1, 1))
    if y_pred.ndim == 1:
        y_pred = y_pred.reshape((-1, 1))
    if y_true.shape[1] != y_pred.shape[1]:
        raise ValueError("y_true and y_pred have different number of output "
                         "({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
    n_outputs = y_true.shape[1]
    multioutput_options = (None, 'raw_values', 'uniform_average',
                           'variance_weighted')
    if multioutput not in multioutput_options:
        # Anything that is not a known keyword must be an explicit
        # per-output weight vector, validated against n_outputs.
        multioutput = check_array(multioutput, ensure_2d=False)
        if n_outputs == 1:
            raise ValueError("Custom weights are useful only in "
                             "multi-output cases.")
        elif n_outputs != len(multioutput):
            raise ValueError(("There must be equally many custom weights "
                              "(%d) as outputs (%d).") %
                             (len(multioutput), n_outputs))
    y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
    return y_type, y_true, y_pred, multioutput
def mean_absolute_error(y_true, y_pred,
                        sample_weight=None,
                        multioutput='uniform_average'):
    """Mean absolute error regression loss
    Read more in the :ref:`User Guide <mean_absolute_error>`.
    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.
    sample_weight : array-like of shape = (n_samples), optional
        Sample weights.
    multioutput : string in ['raw_values', 'uniform_average']
        or array-like of shape (n_outputs)
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.
        'raw_values' :
            Returns a full set of errors in case of multioutput input.
        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.
    Returns
    -------
    loss : float or ndarray of floats
        If multioutput is 'raw_values', then mean absolute error is returned
        for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.
        MAE output is non-negative floating point. The best value is 0.0.
    Examples
    --------
    >>> from sklearn.metrics import mean_absolute_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_absolute_error(y_true, y_pred)
    0.5
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> mean_absolute_error(y_true, y_pred)
    0.75
    >>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    array([ 0.5, 1. ])
    >>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
    ... # doctest: +ELLIPSIS
    0.849...
    """
    _, y_true, y_pred, multioutput = _check_reg_targets(y_true, y_pred,
                                                        multioutput)
    # Per-output mean absolute deviation, optionally sample-weighted.
    per_output = np.average(np.abs(y_pred - y_true), axis=0,
                            weights=sample_weight)
    if multioutput == 'raw_values':
        return per_output
    # 'uniform_average' means a plain mean (weights=None); otherwise
    # multioutput already holds explicit per-output weights.
    weights = None if multioutput == 'uniform_average' else multioutput
    return np.average(per_output, weights=weights)
def mean_squared_error(y_true, y_pred,
                       sample_weight=None,
                       multioutput='uniform_average'):
    """Mean squared error regression loss
    Read more in the :ref:`User Guide <mean_squared_error>`.
    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.
    sample_weight : array-like of shape = (n_samples), optional
        Sample weights.
    multioutput : string in ['raw_values', 'uniform_average']
        or array-like of shape (n_outputs)
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.
        'raw_values' :
            Returns a full set of errors in case of multioutput input.
        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.
    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.
    Examples
    --------
    >>> from sklearn.metrics import mean_squared_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_squared_error(y_true, y_pred)
    0.375
    >>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
    >>> y_pred = [[0, 2],[-1, 2],[8, -5]]
    >>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
    0.708...
    >>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
    ... # doctest: +ELLIPSIS
    array([ 0.416..., 1. ])
    >>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
    ... # doctest: +ELLIPSIS
    0.824...
    """
    _, y_true, y_pred, multioutput = _check_reg_targets(y_true, y_pred,
                                                        multioutput)
    # Per-output mean of squared residuals, optionally sample-weighted.
    per_output = np.average((y_true - y_pred) ** 2, axis=0,
                            weights=sample_weight)
    if multioutput == 'raw_values':
        return per_output
    # 'uniform_average' means a plain mean (weights=None); otherwise
    # multioutput already holds explicit per-output weights.
    weights = None if multioutput == 'uniform_average' else multioutput
    return np.average(per_output, weights=weights)
def median_absolute_error(y_true, y_pred):
    """Median absolute error regression loss
    Read more in the :ref:`User Guide <median_absolute_error>`.
    Parameters
    ----------
    y_true : array-like of shape = (n_samples)
        Ground truth (correct) target values.
    y_pred : array-like of shape = (n_samples)
        Estimated target values.
    Returns
    -------
    loss : float
        A positive floating point value (the best value is 0.0).
    Examples
    --------
    >>> from sklearn.metrics import median_absolute_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> median_absolute_error(y_true, y_pred)
    0.5
    """
    y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
                                                   'uniform_average')
    # This metric is defined for single-output targets only.
    if y_type == 'continuous-multioutput':
        raise ValueError("Multioutput not supported in median_absolute_error")
    absolute_errors = np.abs(y_pred - y_true)
    return np.median(absolute_errors)
def explained_variance_score(y_true, y_pred,
                             sample_weight=None,
                             multioutput='uniform_average'):
    """Explained variance regression score function
    Best possible score is 1.0, lower values are worse.
    Read more in the :ref:`User Guide <explained_variance_score>`.
    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.
    sample_weight : array-like of shape = (n_samples), optional
        Sample weights.
    multioutput : string in ['raw_values', 'uniform_average', \
        'variance_weighted'] or array-like of shape (n_outputs)
        Defines aggregating of multiple output scores.
        Array-like value defines weights used to average scores.
        'raw_values' :
            Returns a full set of scores in case of multioutput input.
        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.
        'variance_weighted' :
            Scores of all outputs are averaged, weighted by the variances
            of each individual output.
    Returns
    -------
    score : float or ndarray of floats
        The explained variance or ndarray if 'multioutput' is 'raw_values'.
    Notes
    -----
    This is not a symmetric function.
    Examples
    --------
    >>> from sklearn.metrics import explained_variance_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
    0.957...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
    ... # doctest: +ELLIPSIS
    0.983...
    """
    _, y_true, y_pred, multioutput = _check_reg_targets(y_true, y_pred,
                                                        multioutput)
    # Variance of the residuals (numerator) ...
    residuals = y_true - y_pred
    residual_mean = np.average(residuals, weights=sample_weight, axis=0)
    numerator = np.average((residuals - residual_mean) ** 2,
                           weights=sample_weight, axis=0)
    # ... versus the variance of the targets themselves (denominator).
    y_mean = np.average(y_true, weights=sample_weight, axis=0)
    denominator = np.average((y_true - y_mean) ** 2,
                             weights=sample_weight, axis=0)
    nonzero_num = numerator != 0
    nonzero_den = denominator != 0
    well_defined = nonzero_num & nonzero_den
    output_scores = np.ones(y_true.shape[1])
    output_scores[well_defined] = 1 - (numerator[well_defined] /
                                       denominator[well_defined])
    # Constant y_true with nonzero residual variance: defined as 0 to
    # avoid -inf scores.
    output_scores[nonzero_num & ~nonzero_den] = 0.
    if multioutput == 'raw_values':
        # Return the per-output scores individually.
        return output_scores
    if multioutput == 'uniform_average':
        avg_weights = None          # plain mean
    elif multioutput == 'variance_weighted':
        avg_weights = denominator   # weight by each output's variance
    else:
        avg_weights = multioutput   # explicit per-output weights
    return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
             sample_weight=None,
             multioutput=None):
    """R^2 (coefficient of determination) regression score function.
    Best possible score is 1.0 and it can be negative (because the
    model can be arbitrarily worse). A constant model that always
    predicts the expected value of y, disregarding the input features,
    would get a R^2 score of 0.0.
    Read more in the :ref:`User Guide <r2_score>`.
    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.
    sample_weight : array-like of shape = (n_samples), optional
        Sample weights.
    multioutput : string in ['raw_values', 'uniform_average',
        'variance_weighted'] or None or array-like of shape (n_outputs)
        Defines aggregating of multiple output scores.
        Array-like value defines weights used to average scores.
        Default value correponds to 'variance_weighted', but
        will be changed to 'uniform_average' in next versions.
        'raw_values' :
            Returns a full set of scores in case of multioutput input.
        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.
        'variance_weighted' :
            Scores of all outputs are averaged, weighted by the variances
            of each individual output.
    Returns
    -------
    z : float or ndarray of floats
        The R^2 score or ndarray of scores if 'multioutput' is
        'raw_values'.
    Notes
    -----
    This is not a symmetric function.
    Unlike most other scores, R^2 score may be negative (it need not actually
    be the square of a quantity R).
    References
    ----------
    .. [1] `Wikipedia entry on the Coefficient of determination
            <http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
    Examples
    --------
    >>> from sklearn.metrics import r2_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
    0.948...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> r2_score(y_true, y_pred, multioutput='variance_weighted') # doctest: +ELLIPSIS
    0.938...
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
        # Broadcast the per-sample weights over the output columns.
        weight = sample_weight[:, np.newaxis]
    else:
        weight = 1.
    # Weighted residual sum of squares per output.
    numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
                                                      dtype=np.float64)
    # Weighted total sum of squares (about the weighted mean) per output.
    denominator = (weight * (y_true - np.average(
        y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
                                                          dtype=np.float64)
    nonzero_denominator = denominator != 0
    nonzero_numerator = numerator != 0
    valid_score = nonzero_denominator & nonzero_numerator
    output_scores = np.ones([y_true.shape[1]])
    output_scores[valid_score] = 1 - (numerator[valid_score] /
                                      denominator[valid_score])
    # arbitrary set to zero to avoid -inf scores, having a constant
    # y_true is not interesting for scoring a regression anyway
    output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
    if multioutput is None and y_true.shape[1] != 1:
        # @FIXME change in 0.18
        # The warning fires only for multioutput targets with the legacy
        # default, which is then mapped to 'variance_weighted'.
        warnings.warn("Default 'multioutput' behavior now corresponds to "
                      "'variance_weighted' value, it will be changed "
                      "to 'uniform_average' in 0.18.",
                      DeprecationWarning)
        multioutput = 'variance_weighted'
    if multioutput == 'raw_values':
        # return scores individually
        return output_scores
    elif multioutput == 'uniform_average':
        # passing None as weights results is uniform mean
        avg_weights = None
    elif multioutput == 'variance_weighted':
        avg_weights = denominator
        # avoid fail on constant y or one-element arrays
        if not np.any(nonzero_denominator):
            if not np.any(nonzero_numerator):
                return 1.0
            else:
                return 0.0
    else:
        avg_weights = multioutput
    return np.average(output_scores, weights=avg_weights)
| bsd-3-clause |
raghavrv/scikit-learn | sklearn/linear_model/tests/test_omp.py | 76 | 7752 | # Author: Vlad Niculae
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
# Shared fixtures for all tests: a sparse-coded signal with known code gamma.
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
                                       n_nonzero_coefs, random_state=0)
# Precomputed Gram matrix X^T X and correlation X^T y for the *_gram variants.
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
    # Coefficients are 1-D for a single target and 2-D for multiple targets.
    assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
                 (n_features,))
    assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
                 (n_features, 3))
def test_correct_shapes_gram():
    # Same shape contract as above, for the precomputed-Gram entry point.
    assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
                 (n_features,))
    assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
                 (n_features, 3))
def test_n_nonzero_coefs():
    # The sparsity constraint must hold with and without Gram precomputation.
    assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
                                               n_nonzero_coefs=5)) <= 5)
    assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
                                               precompute=True)) <= 5)
def test_tol():
    # Residual-targeted OMP must reach the requested squared-error budget,
    # with and without Gram precomputation.
    tol = 0.5
    target = y[:, 0]
    coef_plain = orthogonal_mp(X, target, tol=tol)
    coef_gram = orthogonal_mp(X, target, tol=tol, precompute=True)
    assert_true(np.sum((target - np.dot(X, coef_plain)) ** 2) <= tol)
    assert_true(np.sum((target - np.dot(X, coef_gram)) ** 2) <= tol)
def test_with_without_gram():
    # Precomputing the Gram matrix must not change the solution.
    assert_array_almost_equal(
        orthogonal_mp(X, y, n_nonzero_coefs=5),
        orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
    # Same equivalence as above, but under a residual tolerance stop rule.
    assert_array_almost_equal(
        orthogonal_mp(X, y, tol=1.),
        orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
    # tol=0 forces selection of the maximum number of atoms; the Gram
    # variant must warn about the unreachable accuracy.
    assert_array_almost_equal(
        orthogonal_mp(X, y, tol=0),
        orthogonal_mp(X, y, n_nonzero_coefs=n_features))
    assert_array_almost_equal(
        assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
                     precompute=True),
        orthogonal_mp(X, y, precompute=True,
                      n_nonzero_coefs=n_features))
def test_bad_input():
    # Negative tolerance, negative coefficient counts, and more coefficients
    # than features are all rejected, by both entry points.
    assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
    assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
    assert_raises(ValueError, orthogonal_mp, X, y,
                  n_nonzero_coefs=n_features + 1)
    assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
    assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
    assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
                  n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
    # True support of the sparse code for the first target.
    support, = gamma[:, 0].nonzero()
    coef = orthogonal_mp(X, y[:, 0], 5)
    coef_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
    # Both solvers must recover the exact support ...
    assert_array_equal(support, np.flatnonzero(coef))
    assert_array_equal(support, np.flatnonzero(coef_gram))
    # ... and coefficients close to the generating ones.
    assert_array_almost_equal(gamma[:, 0], coef, decimal=2)
    assert_array_almost_equal(gamma[:, 0], coef_gram, decimal=2)
def test_estimator():
    """OrthogonalMatchingPursuit: coef/intercept shapes and sparsity bounds."""
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
    # single-target fit
    omp.fit(X, y[:, 0])
    assert_equal(omp.coef_.shape, (n_features,))
    assert_equal(omp.intercept_.shape, ())
    assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
    # multi-target fit
    omp.fit(X, y)
    assert_equal(omp.coef_.shape, (n_targets, n_features))
    assert_equal(omp.intercept_.shape, (n_targets,))
    assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
    # without intercept fitting / normalization the intercept must be 0
    omp.set_params(fit_intercept=False, normalize=False)
    omp.fit(X, y[:, 0])
    assert_equal(omp.coef_.shape, (n_features,))
    assert_equal(omp.intercept_, 0)
    assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
    omp.fit(X, y)
    assert_equal(omp.coef_.shape, (n_targets, n_features))
    assert_equal(omp.intercept_, 0)
    assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
    """Duplicated columns in X make atom selection ambiguous: warn the user."""
    newX = X.copy()
    newX[:, 1] = newX[:, 0]
    gamma = np.zeros(n_features)
    gamma[0] = gamma[1] = 1.
    newy = np.dot(newX, gamma)
    assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
    """Internal column swapping must not corrupt the recovered support."""
    gamma = np.zeros(n_features)
    # X[:, 21] should be selected first, then X[:, 0] selected second,
    # which will take X[:, 21]'s place in case the algorithm does
    # column swapping for optimization (which is the case at the moment)
    gamma[21] = 1.0
    gamma[0] = 0.5
    new_y = np.dot(X, gamma)
    new_Xy = np.dot(X.T, new_y)
    gamma_hat = orthogonal_mp(X, new_y, 2)
    gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
    assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
    assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
    """An all-zero target must yield an all-zero solution on both paths."""
    y_empty = np.zeros_like(y)
    Xy_empty = np.dot(X.T, y_empty)
    # warnings about failing to select an atom are expected and silenced
    gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
    # BUGFIX: the Gram arguments (G, Xy) must go through
    # orthogonal_mp_gram; the original called orthogonal_mp here, so the
    # Gram code path was never exercised.
    gamma_empty_gram = ignore_warnings(orthogonal_mp_gram)(G, Xy_empty, 1)
    assert_equal(np.all(gamma_empty == 0), True)
    assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
    """return_path=True must end at the same solution as return_path=False."""
    path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
    last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
    assert_equal(path.shape, (n_features, n_targets, 5))
    # the final step of the path is the full 5-atom solution
    assert_array_almost_equal(path[:, :, -1], last)
    path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
    last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
    assert_equal(path.shape, (n_features, n_targets, 5))
    assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
    """return_path must also work when the Gram matrix is precomputed."""
    path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
                         precompute=True)
    last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
                         precompute=True)
    assert_equal(path.shape, (n_features, n_targets, 5))
    assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
    """Cross-validated OMP must find the true sparsity and match plain OMP."""
    y_ = y[:, 0]
    gamma_ = gamma[:, 0]
    ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
                                        max_iter=10, cv=5)
    ompcv.fit(X, y_)
    assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
    assert_array_almost_equal(ompcv.coef_, gamma_)
    # refitting a plain OMP with the CV-selected budget gives the same coefs
    omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
                                    n_nonzero_coefs=ompcv.n_nonzero_coefs_)
    omp.fit(X, y_)
    assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
    """With a full atom budget OMP must coincide with least squares."""
    # Use small simple data; it's a sanity check but OMP can stop early
    rng = check_random_state(0)
    n_samples, n_features = (10, 8)
    n_targets = 3
    X = rng.randn(n_samples, n_features)
    Y = rng.randn(n_samples, n_targets)
    omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
    lstsq = LinearRegression()
    omp.fit(X, Y)
    lstsq.fit(X, Y)
    assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
deisi/SFG2D | sfg2d/plot.py | 1 | 11656 | #!/usr/bin.env python
# coding: utf-8
"""Module for plot functions."""
import matplotlib.pyplot as plt
import sfg2d
from numpy import transpose, where, linspace, all, array
def fit_model(
    x,
    y,
    yerr,
    name,
    kwargs_model=None,
    fit=True,
    print_matrix=True,
    kwargs_plot=None,
    show_box=True,
    box_coords=None,
):
    """Fit Data in a configurable way.

    **Arguments:**
      - **x**: X data
      - **y**: Y data
      - **yerr**: Y error of the data
      - **name**: Name of the model, looked up on ``sfg2d.models``

    **Keywords:**
      - **kwargs_model**: Keywords for the model constructor
      - **fit**: Boolean to run fit
      - **print_matrix**: Boolean to print Correlation Matrix
      - **kwargs_plot**: Keywords for the fit-line plot
      - **show_box**: Show fit result box
      - **box_coords**: Coordinates of the fit result box

    :return: the (fitted) model instance
    """
    # BUGFIX: the defaults used to be mutable dicts ({}) that were
    # mutated below via setdefault, so options leaked between calls.
    # None sentinels restore a fresh dict per call.
    if kwargs_model is None:
        kwargs_model = {}
    if kwargs_plot is None:
        kwargs_plot = {}
    model = getattr(sfg2d.models, name)(x, y, yerr, **kwargs_model)
    if fit:
        sfg2d.fit_model(
            model, print_matrix=print_matrix
        )
    kwargs_plot.setdefault('color', 'red')
    kwargs_plot.setdefault('label', 'Fit')
    plt.plot(model.xsample, model.yfit_sample, **kwargs_plot)
    if show_box:
        model.draw_text_box(box_coords)
    return model
def model_plot(
    model,
    kwargs_errorbar=None,
    kwargs_line_plot=None,
    shift_x=None,
    shift_y=None,
    scale_y=None,
    normalize=False,
    xsample_slice=(0, 1),
    kwargs_textbox=None,
    show_roi=True,
    show_modeled_only=True,
):
    """Plot a fitted model: data points with error bars plus the fit line.

    **Kwargs:**
      - **kwargs_errorbar**: Kwargs passed to errorbar plot. Can be used to
        change e.g. color of the plot.
      - **kwargs_line_plot**: Kwargs passed to line plot of the fit line.
      - **shift_x**: Quick hack to shift the fit plot by x
      - **shift_y**: Quick hack to shift the fit by y
      - **scale_y**: Quick hack to scale fit by y
      - **normalize**: Normalize fit height to 1 - 0
      - **xsample_slice**: Tuple with x start and x stop for finding the
        normalization minimum
      - **kwargs_textbox**: If a dict is given, a textbox with fit results
        is drawn
      - **show_roi**: Mark the roi of the fit
      - **show_modeled_only**: Show only the fitted region, not all data.
    """
    if not kwargs_errorbar:
        kwargs_errorbar = {}
    if not kwargs_line_plot:
        kwargs_line_plot = {}
    kwargs_errorbar.setdefault('marker', 'o')
    kwargs_errorbar.setdefault('linestyle', 'None')
    # pick the (masked) fit data or the full underlying data of the model
    if show_modeled_only:
        xdata = model.xdata
        ydata = model.ydata
        yerr = model.sigma
    else:
        xdata = model._xdata
        ydata = model._ydata
        yerr = model._sigma
    # dense sampling of the fitted curve over the plotted x range
    xsample = linspace(xdata[0], xdata[-1], model._xsample_num)
    ysample = model.fit_res(xsample)
    if shift_x:
        xdata = xdata + shift_x
        xsample = xsample + shift_x
    if scale_y:
        ydata = ydata * scale_y
        ysample = ysample * scale_y
        yerr = yerr * scale_y
    if normalize:
        # rescale around 1 so the minimum within xsample_slice maps to 0
        x_mask = where((xsample > xsample_slice[0]) & (xsample < xsample_slice[1]))
        factor = 1 - ysample[x_mask].min()
        ydata = (ydata - 1) / factor + 1
        ysample = (ysample - 1) / factor + 1
        yerr = yerr / factor
    if shift_y:
        ydata = ydata + shift_y
        ysample = ysample + shift_y
    plotline, capline, barline = plt.errorbar(
        xdata, ydata, yerr, **kwargs_errorbar
    )
    # fit line inherits the data color unless explicitly overridden
    kwargs_line_plot.setdefault('color', plotline.get_color())
    plt.plot(xsample, ysample, **kwargs_line_plot)
    if show_roi:
        # mark first and last point of the fitted region with red crosses
        xx = xdata[model.roi]
        yy = ydata[model.roi]
        x = xx[0], xx[-1]
        y = yy[0], yy[-1]
        plt.scatter(x, y, marker='x', color='r')
    if isinstance(kwargs_textbox, dict):
        fig = plt.gcf()
        kwargs_textbox.setdefault('x', .6)
        kwargs_textbox.setdefault('y', .12)
        kwargs_textbox.setdefault('s', model.box_str)
        kwargs_textbox.setdefault('transform', fig.transFigure)
        plt.text(**kwargs_textbox)
def points_modeled(x, y, yerr=None, xline=None, yline=None, kwargs_point=None, kwargs_line=None):
    """Plot measured points (optionally with error bars) and a model line.

    :param x: x values of the points
    :param y: y values of the points
    :param yerr: optional y errors; switches to an errorbar plot
    :param xline: x values of the model line (plotted only with ``yline``)
    :param yline: y values of the model line
    :param kwargs_point: kwargs for the point/errorbar plot
    :param kwargs_line: kwargs for the line plot
    """
    # BUGFIX: the defaults used to be mutable dicts ({}) mutated via
    # setdefault/pop, leaking state between calls.
    if kwargs_point is None:
        kwargs_point = {}
    if kwargs_line is None:
        kwargs_line = {}
    kwargs_point.setdefault('marker', 'o')
    kwargs_point.setdefault('linestyle', 'None')
    if isinstance(yerr, type(None)):
        lines = plt.plot(x, y, **kwargs_point)
        point = lines[-1]
    else:
        point, capline, barline = plt.errorbar(
            x, y, yerr, **kwargs_point
        )
    if not isinstance(xline, type(None)) and not isinstance(yline, type(None)):
        # For some reason, color must be set explicitly here,
        # otherwise it is not respected. Its a workaround
        kwargs_line.setdefault('color', point.get_color())
        color = kwargs_line.pop('color')
        plt.plot(xline, yline, color=color, **kwargs_line)
def spectrum(
    xdata,
    ydata,
    *args,
    ax=None,
    xlabel=None,
    ylabel=None,
    yerr=None,
    **kwargs
):
    """
    Plot data with pixel axis of data as x-axis.

    xdata: 1d numpy array for x-axis values (None plots against the index).
    ydata: 4d numpy array with [delay, frame, spec, pixel]
    ax: axis obj to plot on (defaults to the current axis)
    xlabel: Label of the x-axis
    ylabel: Label of the y-axis
    yerr: optional error array with the same leading shape as ydata
    **kwargs are passed to ax.plot
    """
    if not ax:
        ax = plt.gca()
    num_delay, num_frames, num_spec, num_pixel = ydata.shape
    # draw one line per (delay, frame, spectrum) combination
    for i_delay in range(num_delay):
        for i_frame in range(num_frames):
            for i_spec in range(num_spec):
                spec = ydata[i_delay, i_frame, i_spec]
                # We need a scatter like plot if its just one point
                if all(array(spec.shape) == 1):
                    kwargs.setdefault('marker', 'o')
                if isinstance(xdata, type(None)):
                    ax.plot(spec.T, *args, **kwargs)
                else:
                    if isinstance(yerr, type(None)):
                        ax.plot(xdata, spec.T, *args, **kwargs)
                    else:
                        # with errors, delegate to errorline for shaded bounds
                        spec_yerr = yerr[i_delay, i_frame, i_spec]
                        errorline(
                            xdata, spec.T, spec_yerr.T,
                            *args, **kwargs
                        )
    if xlabel:
        ax.set_xlabel(xlabel)
    if ylabel:
        ax.set_ylabel(ylabel)
def track(
    xdata=None,
    ydata=None,
    ax=None,
    xlabel="RunNumber",
    ylabel='SFG Intensity',
    show_hlines=False,
    **kwargs
):
    """A Track is a Time wise plot of the data.

    **Arguments:**
      - **ydata**: 3d Numpy array (delays, frames, spectra) to create
        the plot from

    **Keywords:**
      - **xdata**: optional x values; defaults to the running index
      - **ax**: axis to draw on (defaults to the current axis)
      - **xlabel**, **ylabel**: NOTE(review) accepted but not used in
        the body — confirm whether labels should be set here
      - **show_hlines**: Boolean to show lines marking each scan start.
        NOTE(review): despite the name, plt.vlines draws *vertical*
        lines; the original docstring called this ``show_vlines``.
    """
    if not ax:
        ax = plt.gca()
    delays, frames, spectra = ydata.shape
    # order 'F' because pp_delays is the first index and changes fastest,
    # so frames are concatenated one after another along the track
    ydata = ydata.reshape(delays*frames, spectra, order='F')
    for ispectrum in range(spectra):
        data = ydata[:, ispectrum]
        if isinstance(xdata, type(None)):
            ax.plot(data, **kwargs)
        else:
            ax.plot(xdata, data, **kwargs)
    if show_hlines:
        # one marker at the first index of every scan
        plt.vlines([delays*frame for frame in range(frames)], ydata.min(), ydata.max())
def trace(
    xdata,
    ydata,
    ax=None,
    yerr=None,
    xlabel='Time in fs',
    ylabel='Bleach in a.u.',
    **kwargs
):
    """
    Plot a pump-probe trace: delays on the x-axis.

    ydata is the result of a subselection with the delay axis first.
    yerr: Error bar for the trace. Must have no frame dimension
    (if yerr is given the frame dimension must be 1).
    NOTE(review): xlabel/ylabel are accepted but not used in the body.
    """
    if not ax:
        ax = plt.gca()
    # Transpose because we want the delay axis to be the last axis
    # of the array.
    kwargs.setdefault('marker', 'o')
    y = ydata.T
    for i in range(len(y)):
        pixel = y[i]
        for j in range(len(pixel)):
            spec = pixel[j]
            for frame in spec:
                if isinstance(yerr, type(None)):
                    ax.plot(xdata, frame.T, **kwargs)
                else:
                    # NOTE(review): 'axes=' is a legacy matplotlib kwarg;
                    # confirm it still routes to the intended axis
                    plt.errorbar(xdata, frame, yerr[:, j, i], axes=ax, **kwargs)
def contour(
    xdata,
    ydata,
    zdata,
    ax=None,
    **kwargs
):
    """
    Makes a contour plot for an SfgRecord.select() return for xdata, ydata
    and zdata. This makes multiple contour plots on top of each other;
    normally more than one makes no sense, but it works this way.

    **Arguments:**
      - **xdata**: Usually pp_delays
      - **ydata**: usually wavenumbers
      - **zdata**: usually bleach, 4d [delay, frame, spectrum, pixel]

    NOTE(review): the ``ax`` parameter is accepted but never used;
    plt.contourf always draws on the current axis.
    """
    kwargs.setdefault('extend', 'both')
    num_pp_delays, num_frames, num_spectra, num_pixel = zdata.shape
    # one (overlapping) contour per frame/spectrum combination
    for index_spectrum in range(num_spectra):
        for index_frame in range(num_frames):
            zzdata = zdata[:, index_frame, index_spectrum].T
            plt.contourf(xdata, ydata, zzdata, **kwargs)
def multifig(xdata, ydata, fig_axis=0, kwargs_plot=None, titles=None):
    """Create multiple figures for ydata, by using the axis of fig_axis.

    **Argument**:
      - **xdata**: 1D array usually wavenumbers
      - **ydata**: 4D array as usual.

    **kwargs**:
      - **fig_axis**: 0-3 and defines the axis of ydata that will be looped
        over during creation of the figures. Data is then taken from this
        axis per figure. 0 means 1 figure per pp_delay, 1 means 1 figure per
        frame and so on.
      - **kwargs_plot**: kwargs passed to the spectrum plot.
      - **titles**: list of titles; must be at least as long as the number
        of figures (a None/short value is silently ignored).

    **Returns**:
      list of created figures.
    """
    if not kwargs_plot:
        kwargs_plot = {}
    fig_numbers = ydata.shape[fig_axis]
    figs, axs = [], []
    for i in range(fig_numbers):
        fig, ax = plt.subplots()
        figs.append(fig)
        axs.append(ax)
        try:
            plt.title(titles[i])
        except TypeError:
            # titles is None (or not subscriptable): no title for this figure
            pass
        # slice of ydata for this figure, keeping 4 dimensions
        yd = ydata.take([i], fig_axis)
        spectrum(xdata, yd, **kwargs_plot)
    return figs
def errorline(xdata, ydata, yerr, kwargs_plot=None, kwargs_filllines=None):
    """Function to plot lines with a shaded error band around them.

    **Arguments**
      - **xdata**: array of x data
      - **ydata**: array of ydata
      - **yerr**: array of yerr. Used as band boundaries (ydata +- yerr)

    **kwargs**:
      - **kwargs_plot**: keywords passed to plot of the data
      - **kwargs_filllines**: keywords passed to the fill_between call
        that draws the error band.

    :return: the plotted line objects, with the band artist appended.
    """
    if not kwargs_plot:
        kwargs_plot = {}
    if not kwargs_filllines:
        kwargs_filllines = {}
    lines = plt.plot(xdata, ydata, **kwargs_plot)
    ymin, ymax = ydata - yerr, ydata + yerr
    # band inherits the line color(s) unless overridden
    # NOTE(review): a *list* of colors is passed to fill_between when
    # several lines were plotted — confirm matplotlib accepts this here
    kwargs_filllines.setdefault('color', [line.get_color() for line in lines])
    kwargs_filllines.setdefault('alpha', 0.3)
    lines.append(plt.fill_between(xdata, ymin, ymax, **kwargs_filllines))
    return lines
def multifig_errorline(
    xdata,
    ydata,
    yerr,
    kwargs_errorline=None,
    xlim=None,
    ylim=None,
    titles=None,
    xlabel='Frequency/cm$^{-1}$',
    ylabel='Ratio/a.u.',
    ioff=True,
):
    """Make one errorline figure per entry along the first axis of ydata.

    :param xdata: shared x values for every figure
    :param ydata: array; ydata[i] is plotted in figure i
    :param yerr: matching error array; yerr[i] belongs to figure i
    :param kwargs_errorline: kwargs forwarded to errorline()
    :param xlim: x limits applied to every figure
    :param ylim: y limits applied to every figure
    :param titles: optional list of per-figure titles
    :param xlabel: x-axis label
    :param ylabel: y-axis label
    :param ioff: suspend interactive drawing while the figures are built
    :return: list of the created figures
    """
    if not kwargs_errorline:
        kwargs_errorline = {}
    if ioff:
        plt.ioff()
    figs = []
    num_figs = ydata.shape[0]
    for i_fig in range(num_figs):
        fig, ax = plt.subplots()
        figs.append(fig)
        errorline(xdata, ydata[i_fig], yerr[i_fig], **kwargs_errorline)
        plt.xlabel(xlabel)
        # BUGFIX: ylabel was accepted but never applied to the figures
        plt.ylabel(ylabel)
        plt.xlim(xlim)
        plt.ylim(ylim)
        if titles:
            plt.title(titles[i_fig])
    if ioff:
        plt.ion()
    return figs
| mit |
biokit/biokit | biokit/viz/volcano.py | 1 | 3251 | """Volcano plot"""
import numpy as np
import pylab
import pandas as pd
__all__ = ['Volcano']
class Volcano(object):
    """Volcano plot

    In essence, just a scatter plot with annotations.

    .. plot::
        :width: 80%
        :include-source:

        import numpy as np
        fc = np.random.randn(1000)
        pvalue = np.random.randn(1000)
        from biokit import Volcano
        v = Volcano(fc, -np.log10(pvalue**2))
        v.plot(pvalue_threshold=3)

    """
    def __init__(self, fold_changes=None, pvalues=None, color=None):
        """.. rubric:: constructor

        :param list fold_changes: 1D array or list
        :param list pvalues: 1D array or list
        :param color: optional 1D array/list of per-point colors; when
            omitted every point is blue
        """
        # try to compute the FC now
        #if self.fold_change is None:
        #    self.fold_change = pylab.log2(X1/X0)
        #if pvalue is None:
        #    # assume a normal distribution mean 0 and sigma 1
        #    import scipy.stats
        #    self.pvalue = - pylab.log10(scipy.stats.norm.pdf(abs(self.fold_change), 0,1)),
        self.fold_changes = np.array(fold_changes)
        self.pvalues = np.array(pvalues)
        # fold changes and p-values must be paired one-to-one
        assert len(self.fold_changes) == len(self.pvalues)
        if color is None:
            self.color = ['blue'] * len(self.pvalues)
        else:
            self.color = np.array(color)
        # TODO: check that the 3 columns have same length
        assert len(self.fold_changes) == len(self.color)
        # tidy view of the three columns for downstream use
        self.df = pd.DataFrame({"fold_change": self.fold_changes,
                                "pvalue": self.pvalues, 'color': self.color})

    def plot(self, size=100, alpha=0.5, marker='o', fontsize=16,
             xlabel='fold change',
             ylabel='p-value', pvalue_threshold=1.5, fold_change_threshold=1):
        """Draw the volcano scatter plot on the current pylab figure.

        :param size: size of the markers
        :param alpha: transparency of the marker
        :param marker: marker style for the below-threshold points
        :param fontsize: font size of the axis labels
        :param xlabel: x-axis label
        :param ylabel: y-axis label
        :param pvalue_threshold: adds an horizontal dashed line at
            the threshold provided.
        :param fold_change_threshold: colors in grey the absolute fold
            changes below a given threshold.
        """
        pylab.clf()
        # split points into "uninteresting" (grey) and "interesting" (colored)
        mask1 = abs(self.fold_changes) < fold_change_threshold
        mask2 = abs(self.fold_changes) >= fold_change_threshold
        colors = self.df.color
        pylab.scatter(self.fold_changes[mask1],
                      self.pvalues[mask1],
                      s=size,
                      alpha=alpha,
                      c='grey', marker=marker)
        pylab.scatter(self.fold_changes[mask2],
                      self.pvalues[mask2],
                      s=size,
                      alpha=alpha,
                      c=colors[mask2])
        pylab.grid()
        #pylab.ylim([0, pylab.ylim()[1]])
        #M = max(abs(self.fold_change)) * 1.1
        #pylab.xlim([-M, M])
        pylab.xlabel(xlabel, fontsize=fontsize)
        pylab.ylabel(ylabel, fontsize=fontsize)
        # threshold guides: one horizontal (p-value), two vertical (+-FC)
        pylab.axhline(pvalue_threshold, color='red', linestyle='--')
        pylab.axvline(fold_change_threshold, color='red', linestyle='--')
        pylab.axvline(-1*fold_change_threshold, color='red', linestyle='--')
| bsd-2-clause |
dsbrown/FreeCAD | src/Mod/Plot/InitGui.py | 18 | 2920 | #***************************************************************************
#* *
#* Copyright (c) 2011, 2012 *
#* Jose Luis Cercos Pita <jlcercos@gmail.com> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
class PlotWorkbench(Workbench):
    """Workbench of Plot module.

    Registers the Plot editing commands as a FreeCAD workbench
    (toolbar + menu). ``Workbench``/``Gui``/``FreeCAD`` are injected by
    the FreeCAD runtime when this InitGui file is executed.
    """
    from plotUtils import Paths
    import PlotGui
    # workbench metadata shown in the FreeCAD workbench selector
    Icon = 'Icon.svg'
    MenuText = "Plot"
    ToolTip = ("The Plot module is used to edit/save output plots performed "
               "by other tools")

    def Initialize(self):
        """Called once by FreeCAD to populate the toolbar and menu."""
        from PySide import QtCore, QtGui
        # command names registered by PlotGui
        cmdlst = ["Plot_SaveFig",
                  "Plot_Axes",
                  "Plot_Series",
                  "Plot_Grid",
                  "Plot_Legend",
                  "Plot_Labels",
                  "Plot_Positions"]
        self.appendToolbar(str(QtCore.QT_TRANSLATE_NOOP(
            "Plot",
            "Plot edition tools")), cmdlst)
        self.appendMenu(str(QtCore.QT_TRANSLATE_NOOP(
            "Plot",
            "Plot")), cmdlst)
        # warn (but do not fail) when matplotlib is unavailable
        try:
            import matplotlib
        except ImportError:
            from PySide import QtCore, QtGui
            msg = QtGui.QApplication.translate(
                "plot_console",
                "matplotlib not found, Plot module will be disabled",
                None,
                QtGui.QApplication.UnicodeUTF8)
            FreeCAD.Console.PrintMessage(msg + '\n')


# register the workbench with the FreeCAD GUI at import time
Gui.addWorkbench(PlotWorkbench())
| lgpl-2.1 |
chrysante87/pyterpol | synthetic/auxiliary.py | 1 | 10363 | import numpy as np
import matplotlib.pyplot as plt
from astropy.constants import c
from scipy.interpolate import splrep
from scipy.interpolate import splev
from scipy.interpolate import bisplrep
from scipy.interpolate import bisplev
from scipy.interpolate import RectBivariateSpline
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.interpolate import spline
from scipy.signal import fftconvolve
ZERO_TOLERANCE = 1e-6
def flatten_2d(arr):
    """Flatten a possibly ragged two-level sequence by one level.

    :param arr: list whose items may themselves be lists or tuples
    :return: a new flat list when at least one item is a list/tuple,
        otherwise ``arr`` itself unchanged
    """
    contains_nested = any(isinstance(item, (list, tuple)) for item in arr)
    if not contains_nested:
        return arr
    flat = []
    for item in arr:
        if isinstance(item, (list, tuple)):
            flat.extend(item)
        else:
            flat.append(item)
    return flat
def instrumental_broadening(wave, flux, width=0.25, width_type='fwhm', interpolate_back=True):
    """
    A convolution of a spectrum with a normal distribution.

    :param wave: wavelength array
    :param flux: intensity array (assumed normalized around 1)
    :param width: width of the Gaussian kernel
    :param width_type: 'fwhm' or 'sigma'
    :param interpolate_back: resample the result onto the input wavelengths
    :return: the broadened flux
    """
    # print "Computing instr. broadening."
    # If there is no broadening to apply, don't bother
    if width < ZERO_TOLERANCE:
        return flux
    # Convert user input width type to sigma (standard devation)
    width_type = width_type.lower()
    if width_type == 'fwhm':
        # FWHM = 2*sqrt(2*ln 2)*sigma ~ 2.3548*sigma
        sigma = width / 2.3548
    elif width_type == 'sigma':
        sigma = width
    else:
        raise ValueError(("Unrecognised width_type='{}' (must be one of 'fwhm'"
                          "or 'sigma')").format(width_type))
    # Make sure the wavelength range is equidistant before applying the
    # convolution
    delta_wave = np.diff(wave).min()
    range_wave = wave.ptp()
    n_wave = int(range_wave / delta_wave) + 1
    wave_ = np.linspace(wave[0], wave[-1], n_wave)
    # flux_ = np.interp(wave_, wave, flux)
    flux_ = interpolate_spec(wave, flux, wave_)
    dwave = wave_[1] - wave_[0]
    # kernel support: +-4 sigma around the center
    n_kernel = int(2 * 4 * sigma / dwave)
    # The kernel might be of too low resolution, or the the wavelength range
    # might be too narrow. In both cases, raise an appropriate error
    if n_kernel == 0:
        raise ValueError(("Spectrum resolution too low for "
                          "instrumental broadening (delta_wave={}, "
                          "width={}").format(delta_wave, width))
    elif n_kernel > n_wave:
        raise ValueError(("Spectrum range too narrow for "
                          "instrumental broadening"))
    # Construct the broadening kernel (unit-area Gaussian, centered)
    wave_k = np.arange(n_kernel) * dwave
    wave_k -= wave_k[-1] / 2.
    kernel = np.exp(- (wave_k) ** 2 / (2 * sigma ** 2))
    kernel /= sum(kernel)
    # Convolve the flux with the kernel; 1-flux converts to absorption depth
    flux_conv = fftconvolve(1 - flux_, kernel, mode='same')
    # And interpolate the results back on to the original wavelength array,
    # taking care of even vs. odd-length kernels
    if n_kernel % 2 == 1:
        offset = 0.0
    else:
        offset = dwave / 2.0
    if interpolate_back:
        flux = np.interp(wave + offset, wave_, 1 - flux_conv, left=1, right=1)
        # flux = interpolate_spec(wave_, 1-flux_conv, wave+offset)
    # Return the results.
    # NOTE(review): when interpolate_back=False the *input* flux is
    # returned unchanged — confirm whether (1-flux_conv, wave_) was meant
    return flux
def interpolate_block(x, block, xnew):
"""
Interpolates in each line of a 2d array.
:param x: independent variable
:type x: numpy.float64
:param block: 2d array for each column f(x)= block[i]
:type block: numpy.float64
:param xnew: point at which it is interpolated
:type xnew: float
:return:
"""
intens = np.zeros(len(block[0]))
n = len(block[:, 0])
# set up the order of interpolation
if n > 4:
k = 3
else:
k = n - 1
# k=3
# TODO Can thius be done faster with bisplrep and bisplev
# do the interpolation
for i in range(0, len(block[0])):
y = block[:, i]
tck = splrep(x, y, k=k)
intens[i] = splev(xnew, tck, der=0)
return intens
def interpolate_block_faster(x, block, xnew):
"""
Interpolation of teh spectra... hopefully faster?
:param x:
:param block:
:param xnew:
:return:
"""
# length of the datablock
nx = len(block[0])
ny = len(x)
# print x
if (ny > 3) & (ny < 6):
ky = 3
elif ny > 5:
ky = 5
else:
ky = ny - 1
# print ky
f = RectBivariateSpline(x, np.arange(nx), block, kx=ky, ky=1)
intens = f(xnew, np.arange(nx))[0]
return intens
def interpolate_spec(wave0, intens0, wave1):
"""
Defines a function intens0 = f(wave0) and
than interpolates in it at wave1.
:param wave0: initial wavelength array
:type wave0: numpy.float64
:param intens0: initial intensity array
:type intens0: numpy.float64
:param wave1: wavelength array at which we interpolate
:type wave1: numpy.float64
:return intens1: final intensity array
:rtype intens1: numpy.float64
"""
tck = splrep(wave0, intens0, k=3)
intens1 = splev(wave1, tck)
return intens1
def is_within_interval(v, arr):
"""
Tests whether value v lies within interval [min(arr); max(arr)]
:param v: tested values
:type v: numpy.float64
:param arr: tested array
:type v: numpy.float64
:return:
:param:
:type: bool
"""
# print v, max(arr), min(arr)
if (v - max(arr) > ZERO_TOLERANCE) | (min(arr) - v > ZERO_TOLERANCE):
return False
else:
return True
def generate_least_number(l):
"""
Goes over integer in list and finds the
smallest integer not in the list.
:param l: the list
:return: int the smallest integer
"""
num = 0
while num in l:
num += 1
return num
def keys_to_lowercase(d):
"""
Converts dictionary keys to lowercase
:param d the converted dictionary
:return: dnew
"""
dnew = {}
for key in d.keys():
keynew = key.lower()
dnew[keynew] = d[key]
return dnew
def parlist_to_list(l, property='value'):
"""
Converts a list of Parameter class to a
regular list - only the property is returned
:param l:
:param prop:
:return:
"""
ol = []
for par in l:
ol.append(par[property])
return ol
def sum_dict_keys(d):
"""
Sums dictionary key records.
:param d: the dictionary
:return: s the sum
"""
s = 0.0
for key in d.keys():
s += d[key]
return s
def read_text_file(f):
"""
Reads ascii file f.
:param f: the file
:type f: str
:return lines: list of all lines within file f
:rtype: list
"""
ifile = open(f, 'r')
lines = ifile.readlines()
ifile.close()
return lines
def renew_file(f):
"""
Deletes an existing file.
:param f:
:return:
"""
ofile = open(f, 'w')
ofile.close()
def rotate_spectrum(wave, intens, vrot, epsilon=0.6, interpolate_back=True):
    """
    Rotates a spectrum represented by arrays wave and intens to the projected
    rotational velocity vrot.

    :param wave: wavelength array
    :type wave: numpy.float64
    :param intens: intensity array
    :type intens: numpy.float64
    :param vrot: projected rotational velocity in km/s
    :type vrot: float
    :param epsilon: Coefficient of linear limb-darkening.
    :type epsilon: float
    :param interpolate_back: interpolate the spectrum back to the original
        wavelength sampling
    :type interpolate_back: bool
    :return intens: the rotated spectrum in the original wavelength sampling
    :rtype intens: numpy.float64
    :return intens_conv: the rotated spectrum equidistant in rv
        (only when interpolate_back is False)
    :rtype intens_conv: numpy.float64
    :return wave_conv: the wavelength array equidistant in rv
        (only when interpolate_back is False)
    :rtype wave_conv: numpy.float64

    NOTE(review): for vrot <= ZERO_TOLERANCE the function falls through
    and implicitly returns None — confirm callers guard against this.
    """
    if vrot > ZERO_TOLERANCE:
        # we need it equidistant in RV (log-wavelength is linear in velocity)
        wave_log = np.log(wave)
        rv = np.linspace(wave_log[0], wave_log[-1], len(wave))
        step = rv[1] - rv[0]
        # interpolate
        intens_rv = interpolate_spec(wave_log, intens, rv)
        # scale rotational velocity with light speed
        vrot = 1000 * vrot / c.value
        # get the kernel
        # velocity vector
        n = int(np.ceil(2 * vrot / step))
        rv_ker = np.arange(n) * step
        rv_ker = rv_ker - rv_ker[-1] / 2.
        y = 1 - (rv_ker / vrot) ** 2
        # the kernel: classical rotational broadening profile with
        # linear limb darkening (Gray's formulation)
        kernel = (2 * (1 - epsilon) * np.sqrt(y) + np.pi * epsilon / 2. * y) / (np.pi * vrot * (1 - epsilon / 3.0))
        kernel = kernel / kernel.sum()
        # convolve the flux (1-intens converts to absorption depth)
        intens_conv = fftconvolve(1 - intens_rv, kernel, mode='same')
        # rebuild the rv grid, compensating for even-length kernels
        if n % 2 == 1:
            rv = np.arange(len(intens_conv)) * step + rv[0]
        else:
            rv = np.arange(len(intens_conv)) * step + rv[0] - step / 2.
        wave_conv = np.exp(rv)
        # interpolate back
        if interpolate_back:
            intens = interpolate_spec(wave_conv, 1 - intens_conv, wave)
            return intens
        else:
            return 1 - intens_conv, wave_conv
def shift_spectrum(wave, RV):
"""
Doppler-shifts spectrum.
:param wave: original wavelength array
:type wave: numpy.float64
:param RV: radial velocity in km/s
:type RV: float
:return new_wave: shifted wavelength array
:rtype new_wave: numpy.float64
"""
# shifts the wavelengths
new_wave = wave * (1 + RV * 1000 / c.value)
return new_wave
def select_index_for_multiple_keywords(d, **kwargs):
"""
From a dictionary of lists selects
one index meeting all requirements.
:param kwargs:
:return:
"""
keys = d.keys()
length = len(d[keys[0]])
for i in range(0, length):
for k in keys:
if d[k] == kwargs[k] and k == keys[-1]:
return i
return -1
def string2bool(s):
"""
Converts string to boolean.
:param s:
:return:
"""
if s.lower() in ['true', '1']:
return True
else:
return False
def write_numpy(f, cols, fmt):
"""
An example of lack of brain of the main developer of this "code".
:param f: outputfile or handler
:param cols: block of data to be writte
:param fmt: format of the blocs
:return: None
"""
np.savetxt(f, cols, fmt=fmt)
| gpl-2.0 |
sbnlp/mTOR-evaluation | networkx_analysis.py | 1 | 125507 | import collections
import csv
import datetime
import fuzzywuzzy.fuzz
import fuzzywuzzy.process
import itertools
import joblib
import libsbml
import lxml
import lxml.etree
import networkx
import numpy
import os
import operator
import pickle
import re
import simstring
import sys
########################################################################
########################################################################
# Globals
# gene_map: lazily populated gene-name lookup table
GENE_MAP = None
# simstring: handle to the approximate string-matching database
SIMSTRING_DB = None
# SBO ontology nodes; filled by load_sbo() / convert_xml_to_sbonodes()
SBO_NODES = None
#SBO_NODES = convert_xml_to_sbonodes()
########################################################################
########################################################################
def now():
    """Return the current local time as a 'YYYY-MM-DD HH:MM:SS' string."""
    return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
########################################################################
########################################################################
def exists(x, elements, test=lambda x, y: x == y):
    """Return True when some element of ``elements`` matches ``x``.

    :param x: probe value
    :param elements: iterable of candidates
    :param test: binary predicate; defaults to equality
    """
    return any(test(x, candidate) for candidate in elements)
########################################################################
########################################################################
# remove_prefixes
# State/process qualifiers stripped from entity names so that e.g.
# "phosphorylated AKT" and "AKT" compare equal.
PREFIXES = ["acetylated ", "activated ", "associated ",
            "bound ",
            "catabolized ", "catalyzed ", "converted ",
            "deacetylated ", "degradated ", "demethylated ", "dephosporylated ", "deubiquinated ", "dissociated ", "deactivated ",
            "expressed ",
            "methylated ",
            "positively ",
            "negatively ",
            "regulated ",
            "phosphorylated ",
            "regulated ",
            "transcribed ", "translated ",
            "ubiquitinated "]


def remove_prefixes(name):
    """Strip every known state/process prefix from ``name``.

    :param name: entity name, e.g. "phosphorylated AKT"
    :return: the name with all occurrences of the prefixes removed and
        surrounding whitespace trimmed
    """
    new_name = name
    for prefix in PREFIXES:
        # PREFIXES holds only non-empty strings, so the original
        # ``if prefix != None`` guard was dead code and is dropped;
        # the read-only ``global`` declaration was likewise unnecessary.
        new_name = new_name.replace(prefix, "")
    return new_name.strip()
########################################################################
########################################################################
def compute_all_is_a(node, nodes):
    """Collect the transitive closure of the is_a relation for ``node``.

    :param node: node dict with an "is_a" list of parent ids
    :param nodes: mapping from id to node dict
    :return: set of all (direct and indirect) ancestor ids
    """
    ancestors = set(node["is_a"])
    for parent_id in node["is_a"]:
        ancestors |= compute_all_is_a(nodes[parent_id], nodes)
    return ancestors
def convert_xml_to_sbonodes(file_name="sbo.xml", output_file_name="sbo.pickle"):
    """Parse the SBO OBO-XML export into a dict of nodes and pickle it.

    Each node is {"id": ..., "name": ..., "is_a": set of ALL ancestor ids}
    (the is_a sets are expanded to the transitive closure for fast lookup).

    :param file_name: path of the SBO XML export
    :param output_file_name: pickle target; None skips writing
    :return: the nodes dict

    NOTE: uses dict.itervalues(), i.e. this module targets Python 2.
    """
    # load nodes
    nodes = {}
    sbo_xml = lxml.etree.fromstring(open(file_name, "rt").read())
    for term in sbo_xml.xpath("/*[local-name()='sbo']/*[local-name()='Term']"):
        id = term.find("{http://www.biomodels.net/sbo}id").text
        name = term.find("{http://www.biomodels.net/sbo}name").text
        is_a = [];
        if term.find("{http://www.biomodels.net/sbo}is_a") is not None:
            is_a = [el.text for el in term.findall("{http://www.biomodels.net/sbo}is_a")]
        nodes[id] = {"id": id, "name": name, "is_a": is_a}
    # compute all is_a for fast lookup
    is_a_all = {}
    for node in nodes.itervalues():
        is_a_all[node["id"]] = compute_all_is_a(node, nodes)
    for node in nodes.itervalues():
        node["is_a"] = is_a_all[node["id"]]
    if output_file_name is not None:
        pickle.dump(nodes, open(output_file_name, "wb"))
    return nodes;
def sbo_is_a(sbo_1, sbo_2):
    """Return True if sbo_1 is_a sbo_2 (if either is None, return True).

    Relies on the module-global SBO_NODES whose "is_a" sets already hold
    the full transitive closure of ancestors.
    """
    global SBO_NODES
    if sbo_1 == sbo_2 or sbo_1 == None or sbo_2 == None:
        return True
    elif sbo_1 in SBO_NODES:
        # ancestor sets are precomputed, so this is a single lookup
        return sbo_2 in SBO_NODES[sbo_1]["is_a"];
    else:
        return False
def sbo_is_a2(sbo_1, sbo_2):
    """Return True if the is_a relation holds in either direction."""
    return sbo_is_a(sbo_1, sbo_2) or sbo_is_a(sbo_2, sbo_1)
def sbo_name(sbo_1):
    """Return the human-readable name of SBO term ``sbo_1``.

    Requires SBO_NODES to be loaded (see load_sbo); raises KeyError for
    unknown ids.
    """
    global SBO_NODES
    return SBO_NODES[sbo_1]["name"]
def load_sbo(file_name="sbo.pickle"):
    """Load the pickled SBO ontology into the module-global SBO_NODES.

    :param file_name: path to the pickle written by convert_xml_to_sbonodes()
    :return: None
    """
    global SBO_NODES
    # context manager closes the handle deterministically; the previous
    # bare open(...) call leaked the file object
    with open(file_name, "rb") as handle:
        SBO_NODES = pickle.load(handle)
def sbo_export_graph():
    """Export the complete SBO is_a hierarchy as a directed graph.

    Builds a networkx DiGraph with one node per SBO id and one edge from
    every term to each of its (transitive) ancestors, then hands it to
    export_all_graph for writing.
    """
    global SBO_NODES
    sbo_graph = networkx.DiGraph()
    # iterating the dict yields the term ids
    for node in SBO_NODES:
        sbo_graph.add_node(node)
    for node in SBO_NODES.values():
        for parent in node["is_a"]:
            sbo_graph.add_edge(node["id"], parent)
    export_all_graph(sbo_graph, "sbo_graph")
def sbo_export_graph_nodes(nodes, file_prefix="test"):
    """Export the is_a hierarchy restricted to the given SBO nodes."""
    global SBO_NODES
    sbo_graph = networkx.DiGraph()
    # NOTE(review): compute_all_is_a() is defined with two parameters
    # (node, nodes); this one-argument call looks like it would raise a
    # TypeError, and it also mixes node dicts with ancestor *id* strings
    # in all_nodes — confirm before relying on this function.
    all_nodes = nodes + [parent for n in nodes for parent in compute_all_is_a(n)]
    for node in all_nodes:
        sbo_graph.add_node(node)
    for node in all_nodes:
        for parent in node["is_a"]:
            sbo_graph.add_edge(node["id"], parent)
    export_all_graph(sbo_graph, file_prefix)
def get_terms(miriam_urns):
    """Strip the 'urn:miriam:' prefix from a list of MIRIAM URNs.

    E.g. ['urn:miriam:GO:0016579', 'urn:miriam:SBO:0000330'] becomes
    ['GO:0016579', 'SBO:0000330'].
    """
    prefix_length = len("urn:miriam:")
    return [urn[prefix_length:] for urn in miriam_urns]
def get_sbo_terms(miriam_urns):
    """Extract the SBO identifiers from a list of MIRIAM URNs.

    E.g. ['urn:miriam:GO:0016579', 'urn:miriam:SBO:0000330'] becomes
    ['SBO:0000330'].
    """
    sbo_prefix = "urn:miriam:SBO:"
    return [urn[11:] for urn in miriam_urns if urn.startswith(sbo_prefix)]
def get_sbo_int(miriam_urns):
    """Extract the SBO identifiers from MIRIAM URNs as plain integers.

    E.g. ['urn:miriam:GO:0016579', 'urn:miriam:SBO:0000330'] becomes [330].
    """
    sbo_prefix = "urn:miriam:SBO:"
    return [int(urn[len(sbo_prefix):]) for urn in miriam_urns
            if urn.startswith(sbo_prefix)]
########################################################################
########################################################################
# Mapping from event-trigger names (as produced by the event-extraction
# pipeline) to SBO or GO identifiers.
ST_SBO_GO_MAP = { # degradation
    'acetylation': 'SBO:0000215',
    'activation': 'SBO:0000170',
    'association': 'SBO:0000297',
    'binding': 'SBO:0000297',
    'catabolism': 'GO:0009056',
    'catalysis': 'SBO:0000172',
    'conversion': 'SBO:0000182',
    'deacetylation': 'GO:0006476',
    'degradation': 'SBO:0000179',
    'demethylation': 'GO:0006482',
    'dephosphorylation': 'SBO:0000330',
    'deubiquitination': 'GO:0016579',
    'dissociation': 'SBO:0000180',
    'gene_expression': 'SBO:0000205',
    'inactivation': 'SBO:0000169',
    'localization': 'GO:0051179',
    'methylation': 'SBO:0000214',
    'negative_regulation': 'SBO:0000169',
    'pathway': 'SBO:0000375',
    'phosphorylation': 'SBO:0000216',
    'positive_regulation': 'SBO:0000170',
    'protein_catabolism': 'SBO:0000179',
    'regulation': 'SBO:0000168',
    'transcription': 'SBO:0000183',
    'translation': 'SBO:0000184',
    'transport': 'SBO:0000185',
    'ubiquitination': 'SBO:0000224'}
# Inverse lookup: SBO/GO identifier -> event name. Duplicated values
# above (e.g. SBO:0000170) collapse to whichever key is seen last.
# NOTE: uses dict.iteritems(), i.e. this module targets Python 2.
SBO_GO_ST_MAP = { v : k for k, v in ST_SBO_GO_MAP.iteritems()}
def sbo_go_name( urn_miriam):
    """Resolve a (possibly urn-encoded) SBO/GO identifier to a readable name.

    Known identifiers come from SBO_GO_ST_MAP; unknown SBO terms are looked
    up via sbo_name(); anything else is returned unchanged (prefix-stripped).
    """
    term = urn_miriam[11:] if urn_miriam.startswith( "urn:miriam:") else urn_miriam
    if term in SBO_GO_ST_MAP:
        return SBO_GO_ST_MAP[term]
    if term.startswith( "SBO:"):
        return sbo_name( term)
    return term
def sbo_go_name_known( urn_miriam):
    """True when sbo_go_name() can resolve urn_miriam (a known mapped term
    or any SBO term), False otherwise."""
    term = urn_miriam[11:] if urn_miriam.startswith( "urn:miriam:") else urn_miriam
    return term in SBO_GO_ST_MAP or term.startswith( "SBO:")
########################################################################
########################################################################
def clean_name( name):
    """Lower-case a name, strip known prefixes, and trim whitespace."""
    lowered = name.lower()
    return remove_prefixes( lowered).strip()
def clean_name2( name):
    """Like clean_name(), additionally replacing every character other than
    letters, digits and '-' with a space."""
    without_prefixes = remove_prefixes( name.lower())
    return re.sub('[^a-zA-Z0-9-]', ' ', without_prefixes).strip()
def names( graph):
    """Return all non-empty node names of the graph."""
    labels = []
    for node_id in graph.nodes():
        label = graph.node[node_id].get("name")
        if label:
            labels.append( label)
    return labels
def names_clean( graph):
    """Lower-cased, prefix-stripped names of all named nodes."""
    cleaned = []
    for node_id in graph.nodes():
        label = graph.node[node_id].get("name")
        if label:
            cleaned.append( remove_prefixes( label.lower()))
    return cleaned
def names_clean2( graph):
    """Like names_clean(), but non-alphanumeric characters (except '-')
    are additionally replaced with spaces."""
    cleaned = []
    for node_id in graph.nodes():
        label = graph.node[node_id].get("name")
        if label:
            cleaned.append( re.sub('[^a-zA-Z0-9-]', ' ', remove_prefixes( label.lower())))
    return cleaned
########################################################################
########################################################################
def sort_edge_signature( signature, graph):
    """Canonicalize an edge signature tuple (node, node, edge_type).

    For "reactant" and "modifier" edges the species node comes first; for
    "product" edges the reaction node comes first.  Example:
    (species122, reaction122, "product") -> (reaction122, species122, "product").
    """
    first, second, edge_type = signature
    if edge_type in ("reactant", "modifier") and graph.node[first]["type"] != "species":
        return (second, first, edge_type)
    if edge_type == "product" and graph.node[second]["type"] != "species":
        return (second, first, edge_type)
    return signature
def edge_signatures( graph):
    """Canonicalized (node, node, type) signatures for every edge.

    Asserts that canonicalization did not collapse two distinct edges.
    """
    signatures = set()
    for source, target, data in graph.edges( data = True):
        signatures.add( sort_edge_signature( (source, target, data["type"]), graph))
    assert( len(signatures) == len( graph.edges()))
    return signatures
########################################################################
########################################################################
def create_gene_map( chilibot = True, hugo = True, human_entrez = False):
    """Build a {synonym: canonical name} gene map from local synonym files.

    Synonym sets are read from gene_list_chilibot.txt, gene_list_hugo.txt
    and/or gene_list_human_entrez.txt (selected by the boolean flags),
    transitively merged (any two sets sharing a synonym are unioned), then
    normalized and pickled to gene_map.pickle.  Returns the gene map dict.
    """
    lists = []  # list of synonym sets, one set per input record
    print( "create_gene_map")
    print("Loading data")
    if chilibot:
        # chilibot line format: "name |syn1;syn2;...;" — name before '|',
        # ';'-separated synonyms after it
        with open( "gene_list_chilibot.txt", "rt") as f:
            txt = f.read()
            for line in txt.strip().split("\n"):
                line = line.strip(";")
                synonyms = [ line.split( "|")[0].strip()] + line.split( "|")[1].split( ";")
                lists.append( set( [s.lower() for s in synonyms]))
    if hugo:
        # HUGO export: tab-separated synonym columns, empty cells skipped
        with open('gene_list_hugo.txt', 'rU') as f:
            csv_list = csv.reader( f, delimiter = '\t')
            for row in csv_list:
                lists.append( set( [ s.lower() for s in filter( bool, row) if s != ""]))
    if human_entrez:
        # Entrez dump: tab-separated; first line is a header, first column
        # of each row is dropped
        with open('gene_list_human_entrez.txt', 'r') as f:
            lines = f.read().split("\n")
            lines.pop(0) # remove first line
            for line in lines:
                synonyms = [s.lower() for s in line.strip().split("\t")]
                synonyms.pop(0)
                lists.append( set(synonyms))
    print("Merging lists")
    # Union-find-like merge: dict_forward maps each synonym to a group id,
    # dict_backward maps a group id back to its set of synonyms.
    dict_forward = {} # maps el : value
    dict_backward = {} # maps val : list of elements
    new_value_counter = 0
    for idx, l in enumerate(lists):
        if idx % 10000 == 0:
            print( "Processed %i" % idx)
        new_value_counter += 1
        new_value = new_value_counter
        # compute overlap_values - the group ids this set overlaps with
        overlap_values = set()
        for e in l:
            if e in dict_forward:
                overlap_values.add( dict_forward[e])
        elements = set(l) # initialize elements with known values
        if overlap_values != set():
            # overlapping groups found: fuse them all under a fresh id
            new_value = new_value_counter
            new_value_counter += 1
            # update elements with known values
            for val in overlap_values:
                elements.update( dict_backward[val])
            # update dict_forward
            for e in elements:
                dict_forward[e] = new_value
            # update dict_backward
            for val in overlap_values:
                del dict_backward[val]
            dict_backward[new_value] = elements
        else: # no overlap found, just add elements to dicts
            for e in elements:
                dict_forward[e] = new_value
            dict_backward[new_value] = elements
    lists = list(dict_backward.values())
    print("Merging lists finished (%i total sets)" % len( lists))
    print("Computing gene map")
    gene_map = {}
    for l in lists:
        # normalize synonyms: lower-case, non-alphanumerics (except '-') -> space
        listt = [ re.sub('[^a-zA-Z0-9-]', ' ', e.lower()) for e in l if e != ""]
        if listt != []:
            # first normalized synonym becomes the canonical name of the set
            val = listt[0]
            # NOTE(review): the inner loop variable `l` shadows the outer
            # set variable `l`; harmless here, but confusing.
            for l in listt:
                gene_map[l] = val
    print("Computing gene map (%i total names/genes)" % len( gene_map))
    print("Exporting gene map")
    pickle.dump( gene_map, open( "gene_map.pickle", "wb"))
    return gene_map
def create_simstring_txt( gene_map):
    """ Write gene_list.txt (every synonym plus every canonical name, one
    per line) for building the simstring db.
    Afterwards run: simstring -b -d gene_list.simstring < gene_list.txt"""
    print( "create_simstring_txt")
    entries = list( gene_map.keys()) + list( set( gene_map.values()))
    with open( "gene_list.txt", "wt") as f:
        f.write( "\n".join( entries))
def create_simstring_db():
    """ Creates simstring database
    use: simstring -b -d gene_list.simstring < gene_list.txt"""
    # `commands` is Python-2 only (removed in Python 3; use subprocess there).
    import commands
    print( "create_simstring_db")
    # Build the simstring index from the file written by create_simstring_txt().
    ret = commands.getstatusoutput('simstring -b -d gene_list.simstring < gene_list.txt')
    # (status, output) tuple — printed rather than checked.
    print( ret)
    print( "create_simstring_db finished")
def create_gene_map_AND_simstring_db():
    """Convenience driver: rebuild the gene map, write gene_list.txt from it,
    then build the simstring database."""
    gene_map = create_gene_map()
    # gene_map = pickle.load( open( "gene_map.pickle", "rb"))  # reuse a previous run instead
    create_simstring_txt( gene_map)
    create_simstring_db()
#######################
def map_gene_fuzzywuzzy( name, threshold = 90):
    """Map a raw gene name to canonical names via GENE_MAP.

    Tries an exact lookup of the cleaned name first; otherwise collects all
    GENE_MAP keys whose fuzzywuzzy ratio against the cleaned name exceeds
    `threshold`.  Returns a set of canonical names, or None when nothing
    matches.  Requires the module global GENE_MAP to be initialized.

    Fix: the local variable was named `clean_name`, shadowing the
    module-level clean_name() helper; renamed to `cleaned`.
    """
    global GENE_MAP
    assert(GENE_MAP)
    cleaned = clean_name2( name)
    if GENE_MAP.get( cleaned):
        return set( [GENE_MAP[cleaned]])
    results = set()
    for k in GENE_MAP.keys():
        if fuzzywuzzy.fuzz.ratio( cleaned, k) > threshold:
            results.add( GENE_MAP[k])
    if results:
        return results
    return None
def map_gene_simstring( name):
    """Retrieve canonical gene names by exact GENE_MAP lookup, falling back
    to approximate simstring matching.

    Returns a set of canonical names, or None when nothing matches.
    Requires the module globals GENE_MAP and SIMSTRING_DB to be initialized.

    Fix: the local variable was named `clean_name`, shadowing the
    module-level clean_name() helper; renamed to `cleaned`.
    """
    global GENE_MAP, SIMSTRING_DB
    assert( GENE_MAP and SIMSTRING_DB)
    cleaned = clean_name2( name)
    if GENE_MAP.get( cleaned):
        return set( [GENE_MAP[cleaned]])
    matches = SIMSTRING_DB.retrieve( cleaned)
    if matches:
        return set( [GENE_MAP[r] for r in matches])
    return None
def export_mapping( mapping, file_name):
    """Write one "key : v1,v2,..." line per mapping entry to file_name.

    Bug fix: the original iterated mapping.itervalues() while unpacking
    (k, values) pairs; it needs the items ( .items() also works on both
    Python 2 and 3).
    """
    with open( file_name, "wt") as f:
        f.write( "\n".join( [ "{} : {}".format( k, ",".join( [str(v) for v in values])) for k, values in mapping.items()]))
def compute_simstring_coverage( names, thresholds = [ i/10.0 for i in range(1, 10)], measure = simstring.cosine):
    # Fraction of `names` resolvable by map_gene_simstring(), evaluated at
    # each similarity threshold in `thresholds` (default 0.1 .. 0.9).
    # NOTE(review): map_gene_simstring() as defined above takes a single
    # argument and reads the global SIMSTRING_DB — the two-argument call
    # below looks out of date; confirm which signature is current.
    results = []
    for t in thresholds:
        # fresh reader per threshold so measure/threshold apply cleanly
        db = simstring.reader( 'gene_list.simstring')
        db.measure = measure
        db.threshold = t
        results.append( [ True for n in names if map_gene_simstring(n, db)].count( True) / float( len( names)))
    return results
########################################################################
########################################################################
def export_graph( graph, graph_name, prog = "dot"):
    """Render the graph to '<graph_name>_<prog>.pdf' using the given
    graphviz layout engine ("neato"|"dot"|"twopi"|"circo"|"fdp"|"nop")."""
    agraph = networkx.to_agraph( graph)
    agraph.layout( prog = prog)
    out_file = "{}_{}.pdf".format( graph_name, prog)
    agraph.draw( out_file)
    print( "Exported {}".format( out_file))
def export_all_graph( graph, graph_name):
    """Export the graph once per supported graphviz layout engine."""
    layout_engines = ("neato", "dot", "twopi", "circo", "fdp")
    for engine in layout_engines:
        export_graph( graph, graph_name, prog = engine)
########################################################################
########################################################################
def load_sbml( file_name):
    """Read an SBML file and return the libsbml document.

    Parse errors are reported on stdout, not raised.
    """
    document = libsbml.SBMLReader().readSBML( file_name)
    print( "Loaded {} ({} errors)".format( file_name, document.getNumErrors()))
    return document
def get_participants_species( species, prefix, model):
    """ Takes an SBML species and returns the ids of its participants
    (e.g. the members of a complex such as mTOR), read from the species'
    custom annotation subtree <annotation>/<RDF>/<Participants>."""
    annotation = species.getAnnotation()
    if annotation == None:
        return []
    # retrieve path: walk annotation -> RDF -> Participants, taking the last
    # matching child at each level; stop early if a level is missing
    annotation_path_names = [ 'RDF', 'Participants']
    current_state = annotation
    for name in annotation_path_names:
        last_state = current_state
        current_state = None
        for i in xrange( last_state.getNumChildren()):
            if last_state.getChild(i).getName() == name:
                current_state = last_state.getChild(i)
        if current_state == None:
            break
    # retrieve participants: each <Participant participant="speciesId"/> child
    # must reference an existing species; malformed entries are reported on
    # stderr and skipped
    participants = []
    if current_state != None:
        for idx in range( current_state.getNumChildren()):
            child = current_state.getChild( idx)
            if child.getName() != 'Participant':
                sys.stderr.write( "\nERROR: unexpected participant xml name {}".format( prefix + species.getId()))
                sys.stderr.flush()
            elif child.getAttrValue("participant") == "":
                sys.stderr.write( "\nERROR: unexpected participant attribute value {}".format( prefix + species.getId()))
                sys.stderr.flush()
            elif model.getSpecies( child.getAttrValue("participant")) == None:
                sys.stderr.write( "\nERROR: participant {} does not exist in model (species: {})".format( child.getAttrValue("participant"), prefix + species.getId()))
                sys.stderr.flush()
            else:
                participants.append( child.getAttrValue("participant"))
    return participants
def create_graph( model, prefix = "", ignore_participant_graph = False,
                  skip_uris = ["urn:miriam:reactome", "urn:miriam:pubmed", "urn:miriam:ec"]):
    """Build a networkx graph from an SBML model.

    Species and reactions become nodes (node ids are `prefix` + SBML id);
    reaction edges carry type "reactant"/"product"/"modifier".  Annotation
    uris starting with any entry of skip_uris are ignored.

    Returns the graph alone when ignore_participant_graph is True, otherwise
    the tuple (graph, participant_graph, graph_w_participant_edges) where
    participant_graph holds directed complex -> participant edges and
    graph_w_participant_edges is the main graph plus those edges.

    Bug fix: the prefix-count loop used `prefix` as its loop variable,
    clobbering the `prefix` parameter for all node ids built afterwards;
    the loop variable is now `name_prefix`.
    """
    graph = networkx.Graph()
    # add species
    for species in model.getListOfSpecies():
        bqbiol_is = []
        bqbiol_has_part = []
        bqbiol_has_version = []
        if species.getCVTerms() != None:
            for term in species.getCVTerms():
                uris = [ term.getResourceURI( idx) for idx in xrange( term.getNumResources()) if not any( term.getResourceURI( idx).startswith(s) for s in skip_uris)]
                if term.getBiologicalQualifierType() in [libsbml.BQB_IS, libsbml.BQB_IS_HOMOLOG_TO]:
                    bqbiol_is.extend( uris)
                elif term.getBiologicalQualifierType() == libsbml.BQB_HAS_PART:
                    bqbiol_has_part.extend( uris)
                elif term.getBiologicalQualifierType() == libsbml.BQB_HAS_VERSION:
                    bqbiol_has_version.extend( uris)
        sbo = species.getSBOTerm()
        if sbo == -1:
            # -1 means "no SBO term set"
            sbo = None
            sbo_str = None
        else:
            sbo_str = "SBO:{0:07d}".format( sbo)
        # count occurrences of each known name prefix in the species name
        annotation = {}
        for name_prefix in PREFIXES:
            annotation[ name_prefix.strip()] = species.getName().count( name_prefix)
        if species.getCompartment() == "default":
            compartment = None
            compartment_id = None
        else:
            compartment = model.getCompartment( species.getCompartment()).getName().lower().strip()
            compartment_id = species.getCompartment()
        node_data = { "type" : "species",
                      "id" : prefix + species.getId(),
                      "name" : species.getName(),
                      "compartment" : compartment,
                      "compartment_id" : compartment_id,
                      "bqbiol_is" : tuple( sorted( set( bqbiol_is))),
                      "bqbiol_has_part" : tuple( sorted( set( bqbiol_has_part))),
                      "bqbiol_has_version" : tuple( sorted( set( bqbiol_has_version))),
                      "sbo" : sbo,
                      "sbo_str" : sbo_str,
                      "participants" : [],
                      "participant_ids" : [],
                      "annotation" : annotation}
        graph.add_node( prefix + species.getId(), node_data)
    # add reactions
    for reaction in model.getListOfReactions():
        bqbiol_is = []
        bqbiol_has_part = []
        bqbiol_has_version = []
        if reaction.getCVTerms() != None:
            for term in reaction.getCVTerms():
                uris = [ term.getResourceURI( idx) for idx in xrange( term.getNumResources()) if not any( term.getResourceURI( idx).startswith(s) for s in skip_uris)]
                if term.getBiologicalQualifierType() in [libsbml.BQB_IS, libsbml.BQB_IS_HOMOLOG_TO]:
                    bqbiol_is.extend( uris)
                elif term.getBiologicalQualifierType() == libsbml.BQB_HAS_PART:
                    bqbiol_has_part.extend( uris)
                elif term.getBiologicalQualifierType() == libsbml.BQB_HAS_VERSION:
                    bqbiol_has_version.extend( uris)
        sbo = reaction.getSBOTerm()
        if sbo == -1:
            sbo = None
            sbo_str = None
        else:
            sbo_str = "SBO:{0:07d}".format( sbo)
            # reactions additionally expose their SBO term as a bqbiol_is urn
            bqbiol_is.append( "urn:miriam:SBO:{0:07d}".format( sbo))
        graph.add_node( prefix + reaction.getId(),
                        { "type" : "reaction",
                          "id" : prefix + reaction.getId(),
                          "local_id" : reaction.getId(),
                          "name" : reaction.getName(),
                          "compartment" : reaction.getCompartment(),
                          "bqbiol_is" : tuple( sorted( set( bqbiol_is))),
                          "bqbiol_has_part" : tuple( sorted( set( bqbiol_has_part))),
                          "bqbiol_has_version" : tuple( sorted( set( bqbiol_has_version))),
                          "sbo" : sbo,
                          "sbo_str" : sbo_str} )
    # add edges
    for i in xrange( model.getNumReactions()):
        reaction = model.getReaction(i)
        for r in xrange( reaction.getNumReactants()):
            graph.add_edge( prefix + reaction.getId(), prefix + reaction.getReactant(r).getSpecies(), type = "reactant")
        for p in xrange( reaction.getNumProducts()):
            graph.add_edge( prefix + reaction.getId(), prefix + reaction.getProduct(p).getSpecies(), type = "product")
        for m in xrange( reaction.getNumModifiers()):
            graph.add_edge( prefix + reaction.getId(), prefix + reaction.getModifier(m).getSpecies(), type = "modifier")
    if ignore_participant_graph:
        return graph
    else:
        # participant graph: directed complex -> participant edges
        participant_graph = networkx.DiGraph()
        graph_w_participant_edges = graph.copy()
        # add participant links
        for i in xrange( model.getNumSpecies()):
            species = model.getSpecies(i)
            graph_node = graph.node[ prefix + species.getId()]
            for participant in get_participants_species( species, prefix, model):
                # add participant graph edge
                participant_graph.add_edge( prefix + species.getId(), prefix + participant, type = "participant")
                graph_w_participant_edges.add_edge( prefix + species.getId(), prefix + participant, type = "participant")
                # record participant on the complex node and inherit its
                # bqbiol_has_part annotations
                graph_node["participant_ids"].append( prefix + participant)
                graph_node["participants"].append( graph.node[prefix + participant])
                graph_node["bqbiol_has_part"] = tuple( sorted( set( list( graph.node[prefix + participant]["bqbiol_has_part"]) + list( graph_node["bqbiol_has_part"]))))
        return graph, participant_graph, graph_w_participant_edges
########################################################################
def bqbiol_is_map( graph):
    """Group node ids by their bqbiol_is signature tuple:
    {signature: [node ids]}."""
    signature_map = {}
    for node_id in graph.nodes():
        signature = graph.node[node_id]["bqbiol_is"]
        signature_map.setdefault( signature, []).append( node_id)
    return signature_map
def get_all_bqbiol_is_uris( graph):
    """ Returns the set of all bqbiol_is uris used anywhere in the graph """
    unique_ids = set()
    for _, data in graph.nodes( data = True):
        uris = data.get("bqbiol_is")
        if uris:
            unique_ids.update( uris)
    return unique_ids
########################################################################
########################################################################
def find_nodes( graph, attribute, value):
    """All node-data dicts whose `attribute` is present (non-None) and equals `value`."""
    hits = []
    for _, data in graph.nodes( data = True):
        if data.get( attribute) != None and data[attribute] == value:
            hits.append( data)
    return hits
########################################################################
########################################################################
def filter_graph_remove_species_wo_bqbiol_is( graph):
    """Copy of the graph with every non-reaction node lacking bqbiol_is
    annotations removed."""
    result = graph.copy()
    doomed = [ node_id for node_id, data in result.nodes( data = True)
               if data["type"] != "reaction" and data["bqbiol_is"] == ()]
    result.remove_nodes_from( doomed)
    result.name = graph.name + "-REMOVED-SPECIES-WO-BQBIOL-IS"
    result.file_name = None
    return result
def filter_graph_remove_isolated_nodes( graph):
    """Copy of the graph without degree-zero (isolated) nodes."""
    result = graph.copy()
    result.remove_nodes_from( networkx.isolates( graph))
    result.name = graph.name + "-NO-ISOLATED-NODES"
    result.file_name = None
    return result
def filter_graph_remove_isolated_participants( graph):
    """Copy of the graph without isolated nodes that are complex
    participants; isolated non-participant nodes are kept."""
    result = graph.copy()
    isolated = set( networkx.isolates( graph))
    participant_ids = set()
    for _, data in graph.nodes( data = True):
        if data.get("participants"):
            participant_ids.update( p["id"] for p in data.get("participants"))
    result.remove_nodes_from( isolated.intersection( participant_ids))
    # NOTE(review): suffix matches filter_graph_remove_isolated_nodes() —
    # looks copy-pasted; confirm whether a distinct suffix was intended.
    result.name = graph.name + "-NO-ISOLATED-NODES"
    result.file_name = None
    return result
def filter_graph_remove_reactions_wo_sbo( graph):
    """Copy of the graph with every non-species node lacking an SBO term removed."""
    result = graph.copy()
    doomed = [ node_id for node_id, data in result.nodes( data = True)
               if data["type"] != "species" and data["sbo"] == None]
    result.remove_nodes_from( doomed)
    result.name = graph.name + "-REMOVED-REACTIONS-WO-SBO"
    result.file_name = None
    return result
def filter_reactions( graph):
    """Copy of the graph containing only nodes of type "reaction"."""
    result = graph.copy()
    doomed = [ node_id for node_id, data in result.nodes( data = True)
               if data["type"] != "reaction"]
    result.remove_nodes_from( doomed)
    result.name = graph.name + "-REACTIONS"
    result.file_name = None
    return result
def filter_reactions_sbo( graph):
    """Copy of the graph containing only reaction nodes that carry an SBO term."""
    result = graph.copy()
    doomed = [ node_id for node_id, data in result.nodes( data = True)
               if data["type"] != "reaction" or data["sbo"] == None]
    result.remove_nodes_from( doomed)
    result.name = graph.name + "-SBO-REACTIONS"
    result.file_name = None
    return result
def filter_species( graph):
    """Copy of the graph containing only nodes of type "species"."""
    result = graph.copy()
    doomed = [ node_id for node_id, data in result.nodes( data = True)
               if data["type"] != "species"]
    result.remove_nodes_from( doomed)
    result.name = graph.name + "-SPECIES"
    result.file_name = None
    return result
def filter_species_bqbiol_is( graph):
    """Copy of the graph containing only species nodes that have at least
    one bqbiol_is annotation."""
    result = graph.copy()
    doomed = [ node_id for node_id, data in result.nodes( data = True)
               if data["type"] != "species" or data["bqbiol_is"] == ()]
    result.remove_nodes_from( doomed)
    result.name = graph.name + "-BQBIOL-IS-SPECIES"
    result.file_name = None
    return result
def filter_species_complex( graph):
    """Copy of the graph keeping only nodes with a non-empty participants
    list (i.e. complexes)."""
    result = graph.copy()
    doomed = [ node_id for node_id, data in graph.nodes( data = True)
               if not data.get("participants")]
    result.remove_nodes_from( doomed)
    result.name = graph.name + "-COMPLEXES"
    result.file_name = None
    return result
def filter_species_complex2( graph):
    """Copy of the graph keeping only nodes whose SBO term is 253
    (non-covalent complex)."""
    result = graph.copy()
    doomed = [ node_id for node_id, data in graph.nodes( data = True)
               if not data.get("sbo") or data["sbo"] != 253]
    result.remove_nodes_from( doomed)
    result.name = graph.name + "-COMPLEXES"
    result.file_name = None
    return result
########################################################################
########################################################################
def run_analysis( graph, export_file = None):
    """ Collects simple statistics about the graph (node/edge counts,
    compartments, isolated nodes, connected components), prints them and
    returns them as a pandas.Series.

    When export_file is given, the Series is pickled there — once after the
    cheap statistics and again after the (slower) subgraph statistics.

    Bug fix: the per-subgraph edge statistics (min/mean/median/max) were
    computed from the node counts (nr_nodes) instead of the edge counts.
    """
    import pandas
    print("%s:%s: run_analysis" % (now(), graph.name))
    species = filter_species( graph)
    reactions = filter_reactions( graph)
    edges = [n[2] for n in graph.edges( data = True)]
    isolated_nodes = networkx.isolates( graph)
    print("%s:%s: Computing statistics" % (now(), graph.name))
    d = {"name" : graph.name,
         "# nodes" : len( graph.nodes()),
         "# species" : len( species.nodes()),
         "# reactions" : len( reactions.nodes()),
         "# edges" : len( edges),
         "# edges reactant" : len( [ e for e in edges if e["type"] == "reactant"]),
         "# edges product" : len( [ e for e in edges if e["type"] == "product"]),
         "# edges modifier" : len( [ e for e in edges if e["type"] == "modifier"]),
         "# compartments" : len(set( [species.node[s]["compartment_id"] for s in species.nodes() if species.node[s]["compartment_id"]])),
         "# unique compartment names" : len(set( [species.node[s]["compartment"] for s in species.nodes() if species.node[s]["compartment"]])),
         "# isolated nodes" : len(isolated_nodes),
         "# isolated subgraphs" : len( list( networkx.connected_component_subgraphs( graph)))}
    data = pandas.Series(d)
    print("%s:%s: Results" % (now(), graph.name))
    print( data)
    if export_file:
        print("%s:%s: Exporting %s" % (now(), graph.name, export_file))
        data.to_pickle( export_file)
    print("%s:%s: Computing isolated nodes" % (now(), graph.name))
    isolates = set( networkx.isolates( graph))
    # complex participants do not count as isolated
    participants = set( [p for parts in [ [p["id"] for p in n[1].get("participants")] for n in graph.nodes( data = True) if n[1].get("participants")] for p in parts ])
    real_isolates = isolates.difference( participants)
    d["isolates # nodes"] = len( real_isolates)
    d["isolates # species"] = len( [n for n in real_isolates if graph.node[n]["type"] == "species"])
    d["isolates # reactions"] = len( [n for n in real_isolates if graph.node[n]["type"] == "reaction"])
    print("%s:%s: Computing subgraphs" % (now(), graph.name))
    # link complexes to their participants so they land in the same component
    participant_edges = []
    subgraphs = None
    for n1 in graph.nodes(data=True):
        if "participants" in n1[1] and n1[1]["participants"] != []:
            participant_edges.extend( [(n1[1]["id"], n2["id"]) for n2 in n1[1]["participants"]])
    if participant_edges != []:
        graph = graph.copy()
        for e in participant_edges:
            graph.add_edge( e[0], e[1], type = "participant")
        subgraphs = list( networkx.connected_component_subgraphs( graph))
    elif subgraphs == None:
        subgraphs = list( networkx.connected_component_subgraphs( graph))
    nr_nodes = [ len( s.nodes()) for s in subgraphs]
    nr_edges = [ len( s.edges()) for s in subgraphs]
    d["subgraphs # subgraphs"] = len( subgraphs)
    d["subgraphs # nodes min"] = min(nr_nodes)
    d["subgraphs # nodes mean"] = numpy.mean( nr_nodes)
    d["subgraphs # nodes median"] = numpy.median( nr_nodes)
    d["subgraphs # nodes max"] = max( nr_nodes)
    d["subgraphs nodes histogram"] = collections.Counter( nr_nodes)
    # BUG FIX: these four statistics previously used nr_nodes
    d["subgraphs # edges min"] = min( nr_edges)
    d["subgraphs # edges mean"] = numpy.mean( nr_edges)
    d["subgraphs # edges median"] = numpy.median( nr_edges)
    d["subgraphs # edges max"] = max( nr_edges)
    d["subgraphs edges histogram"] = collections.Counter( nr_edges)
    data = pandas.Series(d)
    print("%s:%s: Results" % (now(), graph.name))
    print( data)
    if export_file:
        print("%s:%s: Exporting %s" % (now(), graph.name, export_file))
        data.to_pickle( export_file)
    return data
########################################################################
########################################################################
def run_analysis_signatures( graph, export_file = None, d = None):
    """ Collects statistics about node names and bqbiol_is / bqbiol_has_part
    signatures.  Slow (pairwise fuzzy name comparison); use carefully.

    Results accumulate into `d` (a fresh dict per call unless one is
    passed in), are printed as a pandas.Series, optionally pickled to
    export_file, and returned.

    Fixes: the mutable default argument d = {} was shared (and mutated)
    across calls; the "known SBO/GO terms" count used the full term set
    instead of the known subset; the py2-only filter()-returns-list idiom
    was replaced by a list comprehension.
    """
    if d == None:
        d = {}
    print("%s:%s: run_analysis_signatures" % (now(), graph.name))
    import pandas
    species = filter_species( graph)
    reactions = filter_reactions( graph)
    if not "name" in d.keys():
        d["name"] = graph.name
    ## names
    print("%s:%s: Computing name statistics" % (now(), graph.name))
    species_names = [ species.node[n]["name"].lower() for n in species if species.node[n]["name"] != ""]
    d["species % have name"] = 100. * len(species_names) / len( species.nodes())
    d["species # unique names"] = len(set(species_names))
    species_clean_names = set([ clean_name(species.node[n]["name"]) for n in species if species.node[n]["name"] != ""])
    d["species # unique clean names"] = len(species_clean_names)
    species_clean_names2 = set([ clean_name2(species.node[n]["name"]) for n in species if species.node[n]["name"] != ""])
    d["species # unique clean names2"] = len(species_clean_names2)
    # group names whose fuzzy similarity exceeds 90, then merge the groups
    similar_names = []
    for name in species_clean_names2:
        similar = [ other for other in species_clean_names2 if fuzzywuzzy.fuzz.ratio( name, other) > 90]
        similar_names.append( set( [name] + similar))
    similar_names = merge( similar_names)
    d["species # similar unique clean names2"] = len( similar_names)
    print("%s:%s: Computing bqbiol_is statistics species" % (now(), graph.name))
    species_bqbiolis = [ species.node[n]["bqbiol_is"] for n in species if species.node[n]["bqbiol_is"]]
    species_bqbiolis_signature_unique = set( species_bqbiolis)
    species_bqbiolis_terms = set( [ b for n in species for b in species.node[n]["bqbiol_is"]])
    d["species % have bqbiol_is"] = 100. * len( species_bqbiolis) / float( len(species))
    d["species # unique bqbiol_is signatures"] = len( species_bqbiolis_signature_unique)
    d["species # unique bqbiol_is terms"] = len( species_bqbiolis_terms)
    species_bqbiol_has_part = [ species.node[n]["bqbiol_has_part"] for n in species if species.node[n]["bqbiol_has_part"]]
    species_bqbiol_has_part_signature_unique = set( species_bqbiol_has_part)
    species_bqbiol_has_part_terms = set( [ b for n in species for b in species.node[n]["bqbiol_has_part"]])
    d["species % have bqbiol_has_part"] = 100* len( species_bqbiol_has_part) / float( len(species))
    d["species # unique bqbiol_has_part signatures"] = len( species_bqbiol_has_part_signature_unique)
    d["species # unique bqbiol_has_part terms"] = len( species_bqbiol_has_part_terms)
    print("%s:%s: Computing bqbiol_is statistics reactions" % (now(), graph.name))
    reactions_uri = [ reactions.node[n]["bqbiol_is"] for n in reactions if reactions.node[n]["bqbiol_is"]]
    reactions_uri_signature_unique = set( reactions_uri)
    reactions_bqbiol_terms = [ b for n in reactions for b in reactions.node[n]["bqbiol_is"]]
    reactions_bqbiol_terms_known = [ t for t in reactions_bqbiol_terms if sbo_go_name_known(t)]
    reactions_bqbiol_terms_set = set( reactions_bqbiol_terms)
    reactions_bqbiol_terms_known_set = set(reactions_bqbiol_terms_known)
    unknown_terms = reactions_bqbiol_terms_set.difference( reactions_bqbiol_terms_known_set)
    d["reactions % have bqbiol_is"] = 100* len( reactions_uri) / float( len(reactions))
    d["reactions # unique bqbiol_is signatures"] = len( reactions_uri_signature_unique)
    d["reactions # unique bqbiol_is terms"] = len(reactions_bqbiol_terms_set)
    # BUG FIX: previously counted the full term set instead of the known subset
    d["reactions # unique bqbiol_is terms known SBO/GO terms"] = len( reactions_bqbiol_terms_known_set)
    d["reactions # unique bqbiol_is terms unknown SBO/GO terms"] = len( unknown_terms)
    d["reactions bqbiol_is terms histogram"] = collections.Counter( reactions_bqbiol_terms_known)
    data = pandas.Series(d)
    print("%s:%s: Results" % (now(), graph.name))
    print( data)
    if export_file:
        print("%s:%s: Exporting %s" % (now(), graph.name, export_file))
        data.to_pickle( export_file)
    return data
########################################################################
########################################################################
def run_analysis_isolated_nodes( graph):
    """Print counts of isolated nodes, both excluding and including nodes
    that only appear as complex participants."""
    print( "\n\nrun_analysis_isolated_nodes(%s)" % graph.name)
    isolates = set( networkx.isolates( graph))
    participant_ids = set()
    for _, data in graph.nodes( data = True):
        if data.get("participants"):
            participant_ids.update( p["id"] for p in data.get("participants"))
    # participants of a complex are not genuinely isolated
    real_isolates = isolates.difference( participant_ids)
    def _counts( node_ids):
        # (total, species, reactions) breakdown for a set of node ids
        n_species = len( [n for n in node_ids if graph.node[n]["type"] == "species"])
        n_reactions = len( [n for n in node_ids if graph.node[n]["type"] == "reaction"])
        return len( node_ids), n_species, n_reactions
    print( "{} isolated nodes (ignoring participant nodes) ({} isolated species, {} isolated reactions)".format( *_counts( real_isolates)))
    print( "{} isolated nodes (including isolated participant nodes) ({} isolated species, {} isolated reactions)".format( *_counts( isolates)))
########################################################################
########################################################################
def run_analysis_subgraphs( graph, subgraphs = None):
    """ Compute and print min/mean/median/max node and edge counts per
    connected component, with complexes linked to their participants.

    A precomputed `subgraphs` list may be passed in; it is recomputed
    whenever participant links exist.

    Bug fix: the edge-statistics line previously printed
    numpy.median(nr_nodes) instead of the median of the edge counts.
    """
    print( "run_analysis_subgraphs( %s)" % graph.name)
    # compute new graph with participant links so complexes join their members
    participant_edges = []
    for n1 in graph.nodes(data=True):
        if "participants" in n1[1] and n1[1]["participants"] != []:
            participant_edges.extend( [(n1[1]["id"], n2["id"]) for n2 in n1[1]["participants"]])
    if participant_edges != []:
        graph = graph.copy()
        for e in participant_edges:
            graph.add_edge( e[0], e[1], type = "participant")
        subgraphs = list( networkx.connected_component_subgraphs( graph))
    elif subgraphs == None:
        subgraphs = list( networkx.connected_component_subgraphs( graph))
    nr_nodes = [ len( s.nodes()) for s in subgraphs]
    nr_edges = [ len( s.edges()) for s in subgraphs]
    print( "{} # subgraphs".format( len( subgraphs)))
    print( "{}/{}/{}/{} min/mean/median/max # nodes per subgraph".format( min(nr_nodes), numpy.mean( nr_nodes), numpy.median( nr_nodes), max( nr_nodes)))
    # BUG FIX: median previously computed from nr_nodes
    print( "{}/{}/{}/{} min/mean/median/max # edges per subgraph".format( min(nr_edges), numpy.mean( nr_edges), numpy.median( nr_edges), max( nr_edges)))
    print()
    print( "# nodes per subgraph statistics: {}".format( collections.Counter( nr_nodes)))
    print( "# edges per subgraph statistics: {}".format( collections.Counter( nr_edges)))
    # same statistics again, ignoring single-node components
    subgraphs_no_isolates = [ s for s in subgraphs if len(s.nodes()) > 1]
    nr_nodes_subgraphs_no_isolates = [ len( s.nodes()) for s in subgraphs_no_isolates]
    nr_edges_subgraphs_no_isolates = [ len( s.edges()) for s in subgraphs_no_isolates]
    print( "\n--\n")
    print( "{} # subgraphs no isolated nodes".format( len( subgraphs_no_isolates)))
    print( "{}/{}/{}/{} min/mean/median/max # nodes per subgraphs no isolated nodes".format( min( nr_nodes_subgraphs_no_isolates), numpy.mean( nr_nodes_subgraphs_no_isolates), numpy.median( nr_nodes_subgraphs_no_isolates), max( nr_nodes_subgraphs_no_isolates)))
    print( "{}/{}/{}/{} min/mean/median/max # edges per subgraphs no isolated nodes".format( min( nr_edges_subgraphs_no_isolates), numpy.mean( nr_edges_subgraphs_no_isolates), numpy.median( nr_edges_subgraphs_no_isolates), max( nr_edges_subgraphs_no_isolates)))
    print()
    print( "# nodes per subgraph (no isolated nodes) statistics: {}".format( collections.Counter( nr_nodes_subgraphs_no_isolates)))
    print( "# edges per subgraph (no isolated nodes) statistics: {}".format( collections.Counter( nr_edges_subgraphs_no_isolates)))
########################################################################
########################################################################
def run_analysis_complex_participants( graph, participant_graph):
    """Print statistics about complexes (SBO:0000253 species) and their
    participants: reaction membership, nesting, multi-complex participation
    and missing bqbiol_is annotations.

    Bug fixes / cleanup: complexes were collected from graph.nodes() without
    data=True, so n[1] indexed into the node-id string instead of the
    attribute dict; the unused node_dict/edges_dict precomputation was
    removed.
    """
    # every node id touched by at least one reaction edge
    reaction_participants = set( [ p for e in graph.edges() for p in e])
    # sbo complexes (BUG FIX: needs the data dicts, hence data = True)
    complexes = [n[1] for n in graph.nodes( data = True) if n[1]["type"] == "species" and n[1]["sbo"] == 253]
    complexes_ids = set( [ c["id"] for c in complexes])
    assert( len( complexes) == len( complexes_ids))
    print( "{} total # of complexes (sbo == 253)".format( len( complexes)))
    # complexes based on Participant edge
    complexes2 = set( [ e[0] for e in participant_graph.edges()])
    complexes2_participant = set( [ e[1] for e in participant_graph.edges()]) # participants of complexes
    print( "{} total # of complexes (complex in a complex relationship with some participant)".format( len( complexes2)))
    print( "{} total # of unique participants".format( len( complexes2_participant)))
    # complexes part of reaction
    complexes_in_reaction = complexes_ids.intersection( reaction_participants)
    complexes_not_in_reaction = complexes_ids.difference( reaction_participants)
    print( "{}/{} of complexes are part of a reaction ({}/{} are not)".format(
        len( complexes_in_reaction),
        len( complexes_ids),
        len( complexes_not_in_reaction),
        len( complexes_ids)))
    # participants part of reaction
    complexes_participant_in_reaction = complexes2_participant.intersection( reaction_participants)
    complexes_participant_not_in_reaction = complexes2_participant.difference( reaction_participants)
    print( "{}/{} of participants are part of a reaction ({}/{} are not)".format(
        len( complexes_participant_in_reaction),
        len( complexes2_participant),
        len( complexes_participant_not_in_reaction),
        len( complexes2_participant)))
    complexes_participants_in_other_complexes = complexes_ids.intersection( complexes2_participant)
    print( "{} complexes participate in other complexes".format( len( complexes_participants_in_other_complexes)))
    multiple_complex_edge_participant = [n for n, c in collections.Counter( [ e[1] for e in participant_graph.edges()]).items() if c > 1]
    print( "{} participants participate in multiple complexes".format( len(multiple_complex_edge_participant)))
    ## some annotation information
    complexes_wo_bqbiol_is = [ c for c in complexes_ids if graph.node[c]["bqbiol_is"] == ()]
    print( "{}/{} complexes w/o bqbiol_is".format( len( complexes_wo_bqbiol_is), len( complexes_ids)))
    participants_wo_bqbiol_is = [ p for p in complexes2_participant if graph.node[p]["bqbiol_is"] == ()]
    print( "{}/{} participants w/o bqbiol_is".format( len( participants_wo_bqbiol_is), len( complexes2_participant)))
########################################################################
########################################################################
def precision_recall_f_score( tp, fp, fn):
    """Compute precision, recall and F1 from collections of true positives,
    false positives and false negatives.  Empty denominators yield 0 rather
    than raising."""
    n_tp, n_fp, n_fn = len( tp), len( fp), len( fn)
    precision = 0 if n_tp == 0 and n_fp == 0 else n_tp / float( n_tp + n_fp)
    recall = 0 if n_tp == 0 and n_fn == 0 else n_tp / float( n_tp + n_fn)
    if precision == 0 and recall == 0:
        return precision, recall, 0.0
    return precision, recall, 2.0 * (precision * recall) / (precision + recall)
########################################################################
########################################################################
def set_overlap( set_1, set_2, equal_fn):
    """For each element of set_1, collect the elements of set_2 it matches
    under equal_fn.

    Returns (matched elements of set_1, matched elements of set_2).
    Fix: the original used `filter(...)` whose result object is always truthy
    under Python 3, so the `if e2s:` guard misfired; matches are now
    materialized in a list, which behaves identically on Python 2 and 3.
    """
    r_1 = set()
    r_2 = set()
    for e1 in set_1:
        e2s = [e2 for e2 in set_2 if equal_fn( e1, e2)]
        if e2s:
            r_2 = r_2.union( e2s)
            r_1.add( e1)
    return r_1, r_2
def list_overlap( list_1, list_2, equal_fn):
    """ Returns indices of overlapping elements"""
    indices_1 = set()
    indices_2 = set()
    for i_1, e1 in enumerate( list_1):
        matched = {i for i, e2 in enumerate( list_2) if equal_fn( e1, e2)}
        if matched:
            indices_2 |= matched
            indices_1.add( i_1)
    return indices_1, indices_2
def list_intersect( list_1, list_2):
    """Multiset intersection of two lists.

    Each element of list_2 can account for at most one occurrence in the
    result.  Result order follows pops from the end of list_1 (see asserts).
    Fix: replaced the bare `try/except` around `l_2.index(e1)` (which would
    also swallow unrelated errors) with a plain membership test.
    """
    l_1 = list_1[:]
    l_2 = list_2[:]
    result = []
    while len(l_1) > 0:
        e1 = l_1.pop()
        if e1 in l_2:
            l_2.remove( e1)
            result.append( e1)
    return result
assert( list_intersect([1,2,3],[4,5]) == [])
assert( list_intersect([1,2,3],[1,5]) == [1])
assert( list_intersect([1,2,3,1],[1,5]) == [1])
assert( list_intersect([1,2,3,1],[1,1]) == [1,1])
def list_difference( list_1, list_2):
    """Multiset difference: remove one occurrence from list_1 for each
    element of list_2 (missing elements are ignored).

    Fix: the bare `except:` is narrowed to `except ValueError:` — the only
    exception `list.remove` raises for a missing value — so unrelated errors
    are no longer silently swallowed.
    """
    l_1 = list_1[:]
    for e2 in list_2:
        try:
            l_1.remove(e2)
        except ValueError:
            pass
    return l_1
assert( list_difference([1,2,3,1],[5,6]) == [1,2,3,1])
assert( list_difference([1,2,3,1],[1,6]) == [2,3,1])
assert( list_difference([1,2,3,1],[1,1,6]) == [2,3])
def list_find( el, listt, equal_fn):
    """Return the first element of listt equal to el under equal_fn, or None
    when there is no match."""
    return next((candidate for candidate in listt if equal_fn( el, candidate)), None)
def list_difference2( list_1, list_2, equal_fn):
    "returns those elements of list_1 which are not in list_2 according to equal_fn"
    # Fix: the original tested `if not list_find(...)`, so a *found* element
    # that is itself falsy (0, "", None) was wrongly treated as absent and
    # kept.  `any()` tests for existence of a match, not the match's value.
    return [e for e in list_1 if not any( equal_fn( e, e2) for e2 in list_2)]
def list_reduce2( list_1, equal_fn):
    """Deduplicate list_1 under equal_fn, keeping one representative per
    equivalence class.  Elements are processed from the end of the list, so
    the last occurrence wins (see assert below)."""
    result = []
    remaining = list_1[:]
    while remaining:
        representative = remaining.pop()
        result.append( representative)
        remaining = [other for other in remaining if not equal_fn( representative, other)]
    return result
assert( list_reduce2([1,"1",2,"2"], lambda e1, e2: str( e1) == str( e2)) == ['2','1'])
def merge( sets):
    """Merge overlapping sets until all remaining sets are pairwise disjoint.

    NOTE: merging happens in place via `|=`, so the caller's first-of-group
    set objects are mutated (same behavior as before)."""
    changed = True
    while changed:
        changed = False
        merged_sets = []
        while sets:
            current, pending = sets[0], sets[1:]
            sets = []
            for candidate in pending:
                if candidate.isdisjoint(current):
                    sets.append(candidate)
                else:
                    changed = True
                    current |= candidate
            merged_sets.append(current)
        sets = merged_sets
    return sets
def analyse_set_overlap( set_1, set_2, equal_fn = operator.eq):
    """Run set_overlap and derive percentage precision/recall/f-score.

    Precision is the fraction of set_2 matched; recall the fraction of set_1
    matched; empty inputs yield 0 instead of dividing by zero."""
    res_1, res_2 = set_overlap( set_1, set_2, equal_fn)
    precision = 100.0 * len( res_2) / float( len( set_2)) if len( set_2) else 0
    recall = 100.0 * len( res_1) / float( len( set_1)) if len( set_1) else 0
    if precision == 0 and recall == 0:
        f_score = 0.0
    else:
        f_score = 2.0 * (precision * recall) / (precision + recall)
    return res_1, res_2, precision, recall, f_score
def analyse_list_overlap( list_1, list_2, equal_fn = operator.eq):
    """Run list_overlap and derive percentage precision/recall/f-score over
    the matched index sets; empty inputs yield 0 instead of dividing by
    zero."""
    res_1, res_2 = list_overlap( list_1, list_2, equal_fn)
    precision = 100.0 * len( res_2) / float( len( list_2)) if len( list_2) else 0
    recall = 100.0 * len( res_1) / float( len( list_1)) if len( list_1) else 0
    if precision == 0 and recall == 0:
        f_score = 0.0
    else:
        f_score = 2.0 * (precision * recall) / (precision + recall)
    return res_1, res_2, precision, recall, f_score
def tuple_eq_empty_not_eq( t_1, t_2):
    """Tuple equality that treats empty tuples as never equal (two empty
    annotation tuples carry no evidence of a match)."""
    if not t_1:
        return False
    return t_1 == t_2
def tuple_overlaps( t_1, t_2):
    """True when the two tuples share at least one element."""
    return not set(t_1).isdisjoint( t_2)
def tuple_overlaps_sbo_is_a( t_1, t_2):
    """True when the tuples overlap directly, or when any SBO term extracted
    from t_1 is related to one from t_2 via the SBO is-a hierarchy.

    Fix: the original fell off the end and implicitly returned None when no
    SBO relation was found; an explicit `return False` makes the predicate
    consistently boolean (None was falsy, so callers are unaffected).
    """
    if tuple_overlaps( t_1, t_2):
        return True
    # compare via the SBO ontology only when direct overlap fails
    for s1 in get_sbo_terms( t_1):
        for s2 in get_sbo_terms( t_2):
            if sbo_is_a2( s1, s2):
                return True
    return False
def name_approx_equal( n1, n2):
    """Fuzzy string match: True when the fuzzywuzzy ratio exceeds 90."""
    similarity = fuzzywuzzy.fuzz.ratio( n1, n2)
    return similarity > 90
########################################################################
########################################################################
def nm_name_equal( n1, n2):
    """Node match: case-insensitive comparison of the "name" attributes."""
    name_1 = n1["name"].lower()
    name_2 = n2["name"].lower()
    return name_1 == name_2
def nm_name_equal_w_participants( n1, n2):
    """Node match: True when the lower-cased names of the nodes or of any of
    their participants overlap."""
    names_1 = {n1["name"].lower()} | {p["name"].lower() for p in n1["participants"]}
    names_2 = {n2["name"].lower()} | {p["name"].lower() for p in n2["participants"]}
    return not names_1.isdisjoint( names_2)
def nm_name_clean_equal( n1, n2):
    """Node match: equality of the prefix-stripped, lower-cased names."""
    clean_1 = remove_prefixes( n1["name"].lower())
    clean_2 = remove_prefixes( n2["name"].lower())
    return clean_1 == clean_2
def nm_name_clean_equal_w_participants( n1, n2):
    """Node match: True when the prefix-stripped, lower-cased names of the
    nodes or of any of their participants overlap."""
    clean_1 = {remove_prefixes( n1["name"].lower())} | {remove_prefixes( p["name"].lower()) for p in n1["participants"]}
    clean_2 = {remove_prefixes( n2["name"].lower())} | {remove_prefixes( p["name"].lower()) for p in n2["participants"]}
    return not clean_1.isdisjoint( clean_2)
def nm_name_clean2_equal( n1, n2):
    """Node match: equality of the clean_name2-normalized names."""
    normalized_1 = clean_name2( n1["name"])
    normalized_2 = clean_name2( n2["name"])
    return normalized_1 == normalized_2
def nm_name_clean_approx( n1, n2):
    """Node match: fuzzy comparison (>90 ratio) of clean_name2-normalized
    names."""
    similarity = fuzzywuzzy.fuzz.ratio( clean_name2( n1["name"]), clean_name2( n2["name"]))
    return similarity > 90
def nm_name_clean_approx_w_participants( n1, n2):
    """Node match: fuzzy comparison (>90 ratio) between any cleaned name of
    n1 (node or participants) and any cleaned name of n2."""
    def _clean( name):
        # strip prefixes, lower-case, then squash non-alphanumerics to spaces
        return re.sub('[^a-zA-Z0-9-]', ' ', remove_prefixes( name.lower()))
    clean_names_1 = [_clean( n1["name"])] + [_clean( p["name"]) for p in n1["participants"]]
    clean_names_2 = [_clean( n2["name"])] + [_clean( p["name"]) for p in n2["participants"]]
    for name_1 in clean_names_1:
        if list_find( name_1, clean_names_2, lambda a, b: fuzzywuzzy.fuzz.ratio( a, b) > 90):
            return True
    return False
def nm_gene_id_intersect( n1, n2):
    """Node match: True when the mapped gene-id sets of both names overlap.
    Preserves the original short-circuit: a falsy mapping result is returned
    as-is (callers only use the truthiness)."""
    set_1 = map_gene_simstring( n1["name"])
    set_2 = map_gene_simstring( n2["name"])
    if not set_1:
        return set_1
    if not set_2:
        return set_2
    return len( set_1.intersection( set_2)) > 0
def nm_gene_id_intersect_w_participants( n1, n2):
    """Node match: True when any mapped gene-id set of n1 (node or
    participants) intersects any mapped gene-id set of n2.

    Fix: the original built `sets_2` with Python-2 `filter`; under Python 3
    that is a one-shot iterator which is exhausted after the first pass of
    the nested loop, silently dropping comparisons.  List comprehensions
    behave identically on both versions.
    """
    names_1 = [ n1["name"]] + [ p["name"] for p in n1["participants"]]
    names_2 = [ n2["name"]] + [ p["name"] for p in n2["participants"]]
    # drop falsy (empty/None) mapping results, as filter(bool, ...) did
    sets_1 = [s for s in (map_gene_simstring( n) for n in names_1) if s]
    sets_2 = [s for s in (map_gene_simstring( n) for n in names_2) if s]
    for s1 in sets_1:
        for s2 in sets_2:
            if len( s1.intersection( s2)) > 0:
                return True
    return False
def nm_name_clean_approx_OR_gene_id_intersect( n1, n2):
    """Node match: fuzzy clean-name match, falling back to gene-id overlap."""
    if nm_name_clean_approx( n1, n2):
        return True
    return nm_gene_id_intersect( n1, n2)
def nm_name_clean_approx_OR_gene_id_intersect_w_participants( n1, n2):
    """Node match: participant-aware fuzzy clean-name match, falling back to
    participant-aware gene-id overlap."""
    if nm_name_clean_approx_w_participants( n1, n2):
        return True
    return nm_gene_id_intersect_w_participants( n1, n2)
def nm_bqbiol_is_equal( n1, n2):
    """Node match: both nodes have non-empty bqbiol_is annotations and they
    are identical.  A falsy annotation is returned as-is (callers only use
    the truthiness)."""
    annotations_1 = n1["bqbiol_is"]
    annotations_2 = n2["bqbiol_is"]
    return annotations_1 and annotations_2 and annotations_1 == annotations_2
def nm_bqbiol_is_equal_w_participants( n1, n2):
    """Node match: True when any (non-empty) bqbiol_is annotation set of n1
    or its participants intersects any of n2 or its participants.

    Fix: the original built both sides with Python-2 `filter`; under
    Python 3 `sets_2` is a one-shot iterator exhausted after the first outer
    iteration, silently dropping comparisons.  List comprehensions behave
    identically on both versions.
    """
    sets_1 = [s for s in [set(n1["bqbiol_is"])] + [ set(p["bqbiol_is"]) for p in n1["participants"]] if s]
    # n2's entries stay in their original (tuple) form, as before —
    # set.intersection accepts any iterable
    sets_2 = [s for s in [n2["bqbiol_is"]] + [ p["bqbiol_is"] for p in n2["participants"]] if s]
    for s1 in sets_1:
        for s2 in sets_2:
            if len( s1.intersection( s2)) > 0:
                return True
    return False
def nm_bqbiol_is_overlaps( n1, n2):
    """Node match: both nodes have non-empty bqbiol_is annotations and the
    annotation sets intersect.  A falsy annotation is returned as-is
    (callers only use the truthiness)."""
    annotations_1 = n1["bqbiol_is"]
    annotations_2 = n2["bqbiol_is"]
    return annotations_1 and annotations_2 and not set( annotations_1).isdisjoint( annotations_2)
def nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
    """Node match: bqbiol_is annotations overlap directly, or their SBO terms
    are related through the SBO is-a hierarchy."""
    if nm_bqbiol_is_overlaps( n1, n2):
        return True
    if n1["bqbiol_is"] and n2["bqbiol_is"]:
        # fall back to ontology-aware comparison of extracted SBO terms
        for s1 in get_sbo_terms( n1["bqbiol_is"]):
            for s2 in get_sbo_terms( n2["bqbiol_is"]):
                if sbo_is_a2( s1, s2):
                    return True
    return False
def nm_bqbiol_is_overlaps_w_participants( n1, n2):
    """Node match: pooled bqbiol_is annotations of each node and its
    participants (if any) intersect."""
    uris_1 = set( n1["bqbiol_is"])
    for p in n1.get("participants") or ():
        uris_1.update( p["bqbiol_is"])
    uris_2 = set( n2["bqbiol_is"])
    for p in n2.get("participants") or ():
        uris_2.update( p["bqbiol_is"])
    return not uris_1.isdisjoint( uris_2)
def nm_bqbiol_is_has_part_overlaps( n1, n2):
    """Node match: True when the pooled bqbiol_is + bqbiol_has_part
    annotations of the two nodes intersect.

    Fix: the original guarded n2's has_part update with
    `if n1["bqbiol_has_part"]:` (copy-paste bug), so n2's has_part
    annotations were dropped whenever n1 had none.
    """
    uris_1 = set()
    if n1["bqbiol_is"]:
        uris_1.update( n1["bqbiol_is"])
    if n1["bqbiol_has_part"]:
        uris_1.update( n1["bqbiol_has_part"])
    uris_2 = set()
    if n2["bqbiol_is"]:
        uris_2.update( n2["bqbiol_is"])
    if n2["bqbiol_has_part"]:  # was: n1["bqbiol_has_part"]
        uris_2.update( n2["bqbiol_has_part"])
    return len( uris_1.intersection( uris_2)) > 0
def nm_sbo_equal( n1, n2):
    """Reaction match: both have a truthy "sbo" attribute and the values are
    equal.  A falsy sbo is returned as-is (callers only use truthiness)."""
    sbo_1 = n1["sbo"]
    sbo_2 = n2["sbo"]
    return sbo_1 and sbo_2 and sbo_1 == sbo_2
def nm_sbo_is_a( n1, n2):
    """Reaction match: both have a truthy "sbo_str" and n1's term is related
    to n2's via the SBO is-a hierarchy."""
    sbo_1 = n1["sbo_str"]
    sbo_2 = n2["sbo_str"]
    return sbo_1 and sbo_2 and sbo_is_a2( sbo_1, sbo_2)
################### name_clean + various reactions matches
def nm_name_clean_AND_nm_bqbiol_is_equal( n1, n2):
    """Type-gated match: reactions need identical bqbiol_is; species need
    equal cleaned names; differing/other types never match."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_equal( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_equal( n1, n2))
def nm_name_clean_w_participants_AND_nm_bqbiol_is_equal( n1, n2):
    """Type-gated match: reactions need identical bqbiol_is; species need
    overlapping cleaned names (participants included)."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_equal( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_equal_w_participants( n1, n2))
def nm_name_clean_AND_nm_bqbiol_is_overlaps( n1, n2):
    """Type-gated match: reactions need overlapping bqbiol_is; species need
    equal cleaned names."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_overlaps( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_equal( n1, n2))
def nm_name_clean_w_participants_AND_nm_bqbiol_is_overlaps( n1, n2):
    """Type-gated match: reactions need overlapping bqbiol_is; species need
    overlapping cleaned names (participants included)."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_overlaps( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_equal_w_participants( n1, n2))
def nm_name_clean_AND_nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
    """Type-gated match: reactions need bqbiol_is overlap (SBO is-a aware);
    species need equal cleaned names."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_overlaps_sbo_is_a( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_equal( n1, n2))
def nm_name_clean_w_participants_AND_nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
    """Type-gated match: reactions need bqbiol_is overlap (SBO is-a aware);
    species need overlapping cleaned names (participants included)."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_overlaps_sbo_is_a( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_equal_w_participants( n1, n2))
################### name_clean_approx + various reactions matches
def nm_name_clean_approx_AND_nm_bqbiol_is_equal( n1, n2):
    """Type-gated match: reactions need identical bqbiol_is; species need a
    fuzzy cleaned-name match."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_equal( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx( n1, n2))
def nm_name_clean_approx_w_participants_AND_nm_bqbiol_is_equal( n1, n2):
    """Type-gated match: reactions need identical bqbiol_is; species need a
    fuzzy cleaned-name match (participants included)."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_equal( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx_w_participants( n1, n2))
def nm_name_clean_approx_AND_nm_bqbiol_is_overlaps( n1, n2):
    """Type-gated match: reactions need overlapping bqbiol_is; species need a
    fuzzy cleaned-name match."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_overlaps( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx( n1, n2))
def nm_name_clean_approx_w_participants_AND_nm_bqbiol_is_overlaps( n1, n2):
    """Type-gated match: reactions need overlapping bqbiol_is; species need a
    fuzzy cleaned-name match (participants included)."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_overlaps( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx_w_participants( n1, n2))
def nm_name_clean_approx_AND_nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
    """Type-gated match: reactions need bqbiol_is overlap (SBO is-a aware);
    species need a fuzzy cleaned-name match."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_overlaps_sbo_is_a( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx( n1, n2))
def nm_name_clean_approx_w_participants_AND_nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
    """Type-gated match: reactions need bqbiol_is overlap (SBO is-a aware);
    species need a fuzzy cleaned-name match (participants included)."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_overlaps_sbo_is_a( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx_w_participants( n1, n2))
################### name_clean_approx or bqbiol_is_equal various reactions matches
def nm_name_clean_approx_OR_bqbiol_is_equal_AND_nm_bqbiol_is_equal( n1, n2):
    """Type-gated match: reactions need identical bqbiol_is; species need a
    fuzzy cleaned-name match OR identical bqbiol_is."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_equal( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx( n1, n2) or nm_bqbiol_is_equal( n1, n2))
def nm_name_clean_approx_OR_bqbiol_is_equal_w_participants_AND_nm_bqbiol_is_equal( n1, n2):
    """Type-gated match: reactions need identical bqbiol_is; species need a
    participant-aware fuzzy cleaned-name match OR participant-aware
    bqbiol_is equality."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_equal( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx_w_participants( n1, n2) or nm_bqbiol_is_equal_w_participants( n1, n2))
def nm_name_clean_approx_OR_bqbiol_is_equal_AND_nm_bqbiol_is_overlaps( n1, n2):
    """Type-gated match: reactions need overlapping bqbiol_is; species need a
    fuzzy cleaned-name match OR identical bqbiol_is."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_overlaps( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx( n1, n2) or nm_bqbiol_is_equal( n1, n2))
def nm_name_clean_approx_OR_bqbiol_is_equal_w_participants_AND_nm_bqbiol_is_overlaps( n1, n2):
    """Type-gated match: reactions need overlapping bqbiol_is; species need a
    participant-aware fuzzy cleaned-name match OR participant-aware
    bqbiol_is equality."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_overlaps( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx_w_participants( n1, n2) or nm_bqbiol_is_equal_w_participants( n1, n2))
def nm_name_clean_approx_OR_bqbiol_is_equal_AND_nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
    """Type-gated match: reactions need bqbiol_is overlap (SBO is-a aware);
    species need a fuzzy cleaned-name match OR identical bqbiol_is."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_overlaps_sbo_is_a( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx( n1, n2) or nm_bqbiol_is_equal( n1, n2))
def nm_name_clean_approx_OR_bqbiol_is_equal_w_participants_AND_nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
    """Type-gated match: reactions need bqbiol_is overlap (SBO is-a aware);
    species need a participant-aware fuzzy cleaned-name match OR
    participant-aware bqbiol_is equality."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_overlaps_sbo_is_a( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx_w_participants( n1, n2) or nm_bqbiol_is_equal_w_participants( n1, n2))
################### name_clean_approx or bqbiol_is_overlaps various reactions matches
def nm_name_clean_approx_OR_bqbiol_is_overlaps_AND_nm_bqbiol_is_equal( n1, n2):
    """Type-gated match: reactions need identical bqbiol_is; species need a
    fuzzy cleaned-name match OR overlapping bqbiol_is."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_equal( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx( n1, n2) or nm_bqbiol_is_overlaps( n1, n2))
def nm_name_clean_approx_OR_bqbiol_is_overlaps_w_participants_AND_nm_bqbiol_is_equal( n1, n2):
    """Type-gated match: reactions need identical bqbiol_is; species need a
    participant-aware fuzzy cleaned-name match OR participant-aware
    bqbiol_is overlap."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_equal( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx_w_participants( n1, n2) or nm_bqbiol_is_overlaps_w_participants( n1, n2))
def nm_name_clean_approx_OR_bqbiol_is_overlaps_AND_nm_bqbiol_is_overlaps( n1, n2):
    """Type-gated match: reactions need overlapping bqbiol_is; species need a
    fuzzy cleaned-name match OR overlapping bqbiol_is."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_overlaps( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx( n1, n2) or nm_bqbiol_is_overlaps( n1, n2))
def nm_name_clean_approx_OR_bqbiol_is_overlaps_w_participants_AND_nm_bqbiol_is_overlaps( n1, n2):
    """Type-gated match: reactions need overlapping bqbiol_is; species need a
    participant-aware fuzzy cleaned-name match OR participant-aware
    bqbiol_is overlap."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_overlaps( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx_w_participants( n1, n2) or nm_bqbiol_is_overlaps_w_participants( n1, n2))
def nm_name_clean_approx_OR_bqbiol_is_overlaps_AND_nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
    """Type-gated match: reactions need bqbiol_is overlap (SBO is-a aware);
    species need a fuzzy cleaned-name match OR overlapping bqbiol_is."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_overlaps_sbo_is_a( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx( n1, n2) or nm_bqbiol_is_overlaps( n1, n2))
def nm_name_clean_approx_OR_bqbiol_is_overlaps_w_participants_AND_nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
    """Type-gated match: reactions need bqbiol_is overlap (SBO is-a aware);
    species need a participant-aware fuzzy cleaned-name match OR
    participant-aware bqbiol_is overlap."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_overlaps_sbo_is_a( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx_w_participants( n1, n2) or nm_bqbiol_is_overlaps_w_participants( n1, n2))
################### name_clean_approx or bqbiol_is_overlaps various reactions matches
def nm_name_clean_approx_OR_bqbiol_is_bqbiol_is_has_parts_overlaps_AND_nm_bqbiol_is_equal( n1, n2):
    """Type-gated match: reactions need identical bqbiol_is; species need a
    fuzzy cleaned-name match OR overlap of pooled bqbiol_is/has_part
    annotations."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_equal( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx( n1, n2) or nm_bqbiol_is_has_part_overlaps( n1, n2))
def nm_name_clean_approx_OR_bqbiol_is_bqbiol_is_has_parts_overlaps_AND_nm_bqbiol_is_overlaps( n1, n2):
    """Type-gated match: reactions need overlapping bqbiol_is; species need a
    fuzzy cleaned-name match OR overlap of pooled bqbiol_is/has_part
    annotations."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_overlaps( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx( n1, n2) or nm_bqbiol_is_has_part_overlaps( n1, n2))
def nm_name_clean_approx_OR_bqbiol_is_bqbiol_is_has_parts_overlaps_AND_nm_bqbiol_is_overlaps_sbo_is_a( n1, n2):
    """Type-gated match: reactions need bqbiol_is overlap (SBO is-a aware);
    species need a fuzzy cleaned-name match OR overlap of pooled
    bqbiol_is/has_part annotations."""
    if n1["type"] != n2["type"]:
        return False
    if n1["type"] == "reaction":
        return bool( nm_bqbiol_is_overlaps_sbo_is_a( n1, n2))
    return n1["type"] == "species" and bool( nm_name_clean_approx( n1, n2) or nm_bqbiol_is_has_part_overlaps( n1, n2))
################### edge match exact
def edge_match_exact( e1, e2):
    """Edge match: two edges are equivalent iff their "type" attributes are
    equal."""
    type_1 = e1["type"]
    type_2 = e2["type"]
    return type_1 == type_2
########################################################################
########################################################################
# nodes overlap max
def compute_nodes_overlap_max( graph_1, graph_2, node_match):
    """ computes a nodes in graph_2 matching with nodes in graph_1 according
    to node_match
    Returns - a dictionary mapping each graph_1 node to the list of matching
    graph_2 nodes; nodes of graph_1 without matches are omitted.

    Fix: the original built per-node `filter(...)` objects, which are always
    truthy under Python 3, so every graph_1 node was kept regardless of
    matches.  List comprehensions behave identically on Python 2 and 3.
    """
    matches = {}
    for n1 in graph_1.nodes():
        attrs_1 = graph_1.node[n1]  # hoisted: one attribute lookup per n1
        matched = [n2 for n2 in graph_2.nodes() if node_match( attrs_1, graph_2.node[n2])]
        if matched:
            matches[n1] = matched
    return matches
def get_nodes_overlap_max_result_precision_recall_f_score( graph_1, graph_2, matches):
    """Percentage precision/recall/f-score for a node-overlap result.

    matches maps graph_1 nodes to lists of matched graph_2 nodes.  Precision
    counts distinct matched graph_2 nodes over len(graph_2); recall counts
    matched graph_1 nodes over len(graph_1); empty graphs yield 0."""
    distinct_matched_2 = set( itertools.chain(*matches.values()))
    precision = len( distinct_matched_2) / float( len( graph_2)) if len( graph_2) else 0
    recall = len( matches) / float( len( graph_1)) if len( graph_1) else 0
    if precision == 0 and recall == 0:
        f_score = 0.0
    else:
        f_score = 2.0 * (precision * recall) / (precision + recall)
    return 100.0 * precision, 100.0 * recall, 100.0 * f_score
def print_node_match_result( graph_1, graph_2, matches, node_match_name = "", export_matches = None):
    """Print precision/recall/f-score for a node-match result and optionally
    export a human-readable listing of the matches.

    matches maps graph_1 nodes to lists of matching graph_2 nodes (the shape
    produced by compute_nodes_overlap_max).  When export_matches is a path,
    a text file is written with one section per matched graph_1 node
    (sections ordered by cleaned node name), listing the de-duplicated
    cleaned names of its graph_2 matches.
    """
    # print results
    precision, recall, f_score = get_nodes_overlap_max_result_precision_recall_f_score( graph_1, graph_2, matches)
    print( "{}: {:.2f} & {:.2f} & {:.2f} node overlap (precision/recall/f-score)".format(
        node_match_name, precision, recall, f_score))
    # export text matches files
    if export_matches:
        with open( export_matches, "wt") as f:
            # cleaned display name -> node id, used to emit sections in name order
            # NOTE(review): if two nodes share a cleaned name only one survives
            # in this dict — confirm this loss is intended
            clean_names_map = { clean_name2( graph_1.node[k]["name"]) : k for k in matches.keys()}
            for n in sorted( clean_names_map.keys()):
                k = clean_names_map[n]
                if matches[k]:
                    f.write( "\n-------------------------------------------------------------\n")
                    f.write( n)
                    f.write( "\n--\n" )
                    # de-duplicate matched names before writing
                    names = set( [clean_name2( graph_2.node[v]["name"]) for v in matches[k]])
                    f.write( "\n".join(names))
def run_analysis_nodes_overlap_max( graph_1, graph_2, node_match,
                                    export_results = False,
                                    export_results_prefix = "results-nodes-overlap-max",
                                    ignore_existing = False):
    """ computes nodes overlap and prints statistics

    With ignore_existing=True an existing result pickle is loaded instead of
    recomputing (it also restores the graphs that produced it).  With
    export_results=True the (graph_1, graph_2, matches) triple is pickled —
    unless it was just loaded from that same file.
    """
    # cache file name encodes both graph names and the node-match function
    export_file = "%s__%s__%s__%s.pickle" % (export_results_prefix, graph_1.name, graph_2.name, node_match.__name__)
    if ignore_existing and os.path.exists( export_file):
        print("%s:%s/%s:run_analysis_nodes_overlap_max:%s exists. using that one." % (now(),graph_1.name, graph_2.name, export_file))
        data = pickle.load( open( export_file, "rb"))
        graph_1, graph_2, matches = data[0], data[1], data[2]
    else:
        matches = compute_nodes_overlap_max( graph_1, graph_2, node_match)
    print_node_match_result( graph_1, graph_2, matches, node_match_name = node_match.__name__)
    # only write the cache when we did not just load it above
    if export_results and not( ignore_existing and os.path.exists( export_file)):
        print("%s:%s/%s:run_analysis_nodes_overlap_max:Exporting %s" % (now(),graph_1.name, graph_2.name, export_file))
        pickle.dump( [graph_1, graph_2, matches], open( export_file, "wb"))
def run_analyses_nodes_overlap_max( graph_1,
                                    graph_2,
                                    node_match_fns,
                                    prefix = None,
                                    n_jobs = None,
                                    export_results = False,
                                    export_results_prefix = "results-nodes-overlap-max"):
    """ computes nodes overlaps according to multiple node_match_fns and prints statistics

    Runs run_analysis_nodes_overlap_max once per match function, in parallel
    via joblib when n_jobs is set, sequentially otherwise.
    NOTE(review): the `prefix` parameter is unused here — confirm before removing.
    """
    print( "-----------")
    print( "run_analyses_nodes_overlap_max %s/%s n_jobs=%s -- %s" % (graph_1.name, graph_2.name, n_jobs, node_match_fns))
    # compute the nodes of 2 that exist in 1 (ignoring edges)
    if n_jobs:
        # one joblib task per node-match function
        with joblib.Parallel( n_jobs = n_jobs) as parallel:
            parallel( joblib.delayed( run_analysis_nodes_overlap_max) ( graph_1, graph_2, fn, export_results = export_results, export_results_prefix = export_results_prefix)
                      for fn in node_match_fns)
    else:
        # sequential fallback
        for nm in node_match_fns:
            run_analysis_nodes_overlap_max( graph_1, graph_2, nm, export_results = export_results, export_results_prefix = export_results_prefix)
########################################################################
########################################################################
# subgraph overlap max
def match_subgraph_max( graph, subgraph, node_match, edge_match = edge_match_exact, file_name = None):
    """ computes overlap for single subgraph

    The subgraph is taken either from the `subgraph` argument or unpickled
    from `file_name` (at least one must be provided).  Returns
    (list of subgraph-isomorphism mappings, the subgraph used).
    """
    assert( subgraph or file_name)
    if subgraph == None:
        subgraph = pickle.load( open( file_name, "rb"))
    graph_matcher = networkx.algorithms.isomorphism.GraphMatcher( graph,
                                                                  subgraph,
                                                                  node_match = node_match,
                                                                  edge_match = edge_match)
    # all mappings of node-induced subgraphs of `graph` onto `subgraph`
    result = list( graph_matcher.subgraph_isomorphisms_iter())
    return result, subgraph
def match_graph_max( graph, file_name, node_match, edge_match = edge_match_exact):
    """ computes overlap for graph loaded from a file

    The pickled graph is split into connected components, and each component
    is matched against `graph` independently.  Returns a list of
    (isomorphism mappings, component) pairs, one per component.
    """
    graph_2 = pickle.load( open( file_name, "rb"))
    subgraphs = list( networkx.connected_component_subgraphs( graph_2))
    # one GraphMatcher per connected component
    graph_matchers = [networkx.algorithms.isomorphism.GraphMatcher( graph,
                                                                    subgraph,
                                                                    node_match = node_match,
                                                                    edge_match = edge_match)
                      for subgraph in subgraphs]
    results = [ (list( m.subgraph_isomorphisms_iter()), s) for m, s in zip( graph_matchers, subgraphs)]
    return results
def match_subgraphs_max( graph,
                         subgraphs,
                         node_match,
                         edge_match = edge_match_exact,
                         n_jobs = None,
                         file_names = None):
    """ computes overlap for subgraphs

    Two input modes: in-memory `subgraphs`, or `file_names` of pickled graphs
    (each expanded into its connected components by match_graph_max).  With
    n_jobs set the per-subgraph/per-file work is parallelized via joblib.
    Returns (list of match-mapping lists, list of the matched subgraphs),
    index-aligned.
    """
    # compute the nodes of 2 that exist in 1 (ignoring edges)
    assert( subgraphs or file_names)
    if file_names: # use the files instead of subgraphs if possible
        print( "Running match_subgraphs_max using file_names (individual graph files) n_jobs=%s" %(n_jobs))
        if n_jobs:
            with joblib.Parallel( n_jobs = n_jobs) as parallel:
                results = parallel( joblib.delayed( match_graph_max) ( graph, file_name, node_match = node_match, edge_match = edge_match) for file_name in file_names)
        else:
            results = [ match_graph_max( graph, file_name, node_match = node_match, edge_match = edge_match) for file_name in file_names]
        # match_graph_max returns one list per file; flatten to one flat list
        results = [r for result in results for r in result]
    else:
        print( "Running match_subgraphs_max using subgraphs n_jobs=%s" %(n_jobs))
        if n_jobs:
            with joblib.Parallel( n_jobs = n_jobs) as parallel:
                results = parallel( joblib.delayed( match_subgraph_max) ( graph, subgraph, node_match, edge_match) for subgraph in subgraphs)
        else:
            results = [ match_subgraph_max( graph, subgraph, node_match = node_match, edge_match = edge_match) for subgraph in subgraphs]
    # split the (matches, subgraph) pairs into two parallel lists
    results_matches = [r[0] for r in results]
    results_subgraphs = [r[1] for r in results]
    return results_matches, results_subgraphs
def subgraph_match_get_edges( subgraph, match, reverse_match, edge_signatures_1, edge_signatures_2):
    """Map each edge of the matched subgraph to its graph_1 edge signature.

    Because the graphs are undirected, each edge may be stored in either
    orientation; both orientations are tested against the signature sets and
    exactly one must be present on each side (asserted).  Returns a dict
    graph_1 signature -> graph_2 signature, where a signature is
    (node_a, node_b, edge_type)."""
    m_edges = {}
    for node_a, node_b, attrs in subgraph.edges( data = True):
        edge_type = attrs["type"]
        fwd_1 = (reverse_match[node_a], reverse_match[node_b], edge_type)
        rev_1 = (reverse_match[node_b], reverse_match[node_a], edge_type)
        fwd_2 = (node_a, node_b, edge_type)
        rev_2 = (node_b, node_a, edge_type)
        # exactly one orientation of each signature must be registered
        assert fwd_1 in edge_signatures_1 or rev_1 in edge_signatures_1
        assert not( fwd_1 in edge_signatures_1 and rev_1 in edge_signatures_1)
        assert fwd_2 in edge_signatures_2 or rev_2 in edge_signatures_2
        assert not( fwd_2 in edge_signatures_2 and rev_2 in edge_signatures_2)
        signature_1 = fwd_1 if fwd_1 in edge_signatures_1 else rev_1
        signature_2 = fwd_2 if fwd_2 in edge_signatures_2 else rev_2
        m_edges[signature_1] = signature_2
        assert signature_1 in edge_signatures_1
        assert signature_2 in edge_signatures_2
    return m_edges
def compute_subgraphs_overlap_max( graph_1, graph_2,
                                   node_match,
                                   edge_match = edge_match_exact,
                                   subgraphs_2 = None,
                                   n_jobs = None,
                                   export_results = False,
                                   export_results_prefix = "results-subgraphs-overlap-max",
                                   file_names = None,
                                   ignore_existing = False):
    """ compute the subgraphs in graph_1 isomorph to nodes in subgraphs of 2

    Subgraphs come from (in priority order) `file_names`, `subgraphs_2`, or
    the connected components of graph_2.  With export_results=True results
    are pickled; with ignore_existing=True an existing pickle is loaded and
    returned instead of recomputing.  Returns (results_matches,
    results_subgraphs) as produced by match_subgraphs_max.
    """
    if export_results:
        # cache name encodes graphs and both match functions
        export_file = "%s__%s__%s__%s__%s.pickle" % (export_results_prefix, graph_1.name, graph_2.name, node_match.__name__, edge_match.__name__)
    if export_results and ignore_existing and os.path.exists( export_file):
        print( "%s:%s/%s:compute_subgraphs_overlap_max:results exist %s, loading" % (now(), graph_1.name, graph_2.name, export_file))
        data = pickle.load( open( export_file, "rb"))
        graph_1, graph_2, results_subgraphs, results_matches = data[0], data[1], data[2], data[3]
        return results_matches, results_subgraphs
    # fall back to graph_2's connected components when no explicit subgraphs/files given
    if graph_2 and file_names == None and subgraphs_2 == None:
        subgraphs_2 = list( networkx.connected_component_subgraphs( graph_2))
    # Run!
    results_matches, results_subgraphs = match_subgraphs_max( graph_1, subgraphs_2, node_match = node_match, edge_match = edge_match, n_jobs = n_jobs, file_names = file_names)
    # export data
    if export_results:
        pickle.dump( [graph_1, graph_2, results_subgraphs, results_matches],
                     open( "%s__%s__%s__%s__%s.pickle" % (export_results_prefix, graph_1.name, graph_2.name, node_match.__name__, edge_match.__name__), "wb"))
    return results_matches, results_subgraphs
def get_subgraphs_overlap_max_results( graph_1, graph_2, results_subgraphs, results_matches,
                                       species_1 = None, species_2 = None, reactions_1 = None, reactions_2 = None):
    """ takes results from matching and computes matches for nodes, edges, species, and reactions

    results_subgraphs/results_matches are the output of
    compute_subgraphs_overlap_max; each mapping m maps graph_1 nodes to
    subgraph_2 nodes.  Returns an 8-tuple of matched node/edge/species/
    reaction sets for both sides.

    Fix: replaced Python-2-only `m.iteritems()` with `m.items()`, which
    behaves the same on Python 2 and works on Python 3; `== None` checks
    tightened to `is None`.
    """
    # default the species / reaction node sets from the graphs when not given
    if species_1 is None:
        species_1 = set(filter_species( graph_1).nodes())
    if species_2 is None:
        species_2 = set(filter_species( graph_2).nodes())
    if reactions_1 is None:
        reactions_1 = set( filter_reactions( graph_1).nodes())
    if reactions_2 is None:
        reactions_2 = set( filter_reactions( graph_2).nodes())
    # collect results for analysis
    matches_nodes_1 = set()
    matches_nodes_2 = set()
    matches_edges_1 = set()
    matches_edges_2 = set()
    edge_signatures_1 = edge_signatures( graph_1)
    edge_signatures_2 = edge_signatures( graph_2)
    for subgraph_2, matches in zip( results_subgraphs, results_matches):
        for m in matches:
            matches_nodes_1 = matches_nodes_1.union( m.keys())
            matches_nodes_2 = matches_nodes_2.union( m.values())
            reverse_m = { v: k for k, v in m.items()}
            m_edges = subgraph_match_get_edges( subgraph_2, m, reverse_m, edge_signatures_1, edge_signatures_2)
            matches_edges_1 = matches_edges_1.union( m_edges.keys())
            matches_edges_2 = matches_edges_2.union( m_edges.values())
    species_1_matches = species_1.intersection( matches_nodes_1)
    species_2_matches = species_2.intersection( matches_nodes_2)
    reactions_1_matches = reactions_1.intersection( matches_nodes_1)
    reactions_2_matches = reactions_2.intersection( matches_nodes_2)
    return matches_nodes_1, matches_nodes_2, matches_edges_1, matches_edges_2, species_1_matches, species_2_matches, reactions_1_matches, reactions_2_matches
def get_subgraphs_overlap_max_results_precision_recall_f_score(graph_1, graph_2, results_subgraphs, results_matches,
                                                               species_1 = None, species_2 = None, reactions_1 = None, reactions_2 = None):
    """ Returns precision recall for nodes, species, reactions, edges as a dict

    Keys are "<label> precision", "<label> recall", "<label> f-score" for the
    labels node/species/reaction/edge; values are percentages.
    Improvement: the four identical precision/recall/f-score computations are
    factored into one inner helper (same keys, same values).
    """
    if species_1 is None:
        species_1 = set(filter_species( graph_1).nodes())
    if species_2 is None:
        species_2 = set(filter_species( graph_2).nodes())
    if reactions_1 is None:
        reactions_1 = set( filter_reactions( graph_1).nodes())
    if reactions_2 is None:
        reactions_2 = set( filter_reactions( graph_2).nodes())
    (matches_nodes_1, matches_nodes_2, matches_edges_1, matches_edges_2,
     species_1_matches, species_2_matches, reactions_1_matches, reactions_2_matches) = \
        get_subgraphs_overlap_max_results( graph_1, graph_2, results_subgraphs, results_matches,
                                           species_1, species_2, reactions_1, reactions_2)
    result = {}
    def _add_scores( label, matched_2, total_2, matched_1, total_1):
        # precision/recall in percent; f-score is 0 when both are 0
        precision = 100. * len( matched_2) / float( len( total_2))
        recall = 100. * len( matched_1) / float( len( total_1))
        result[label + " precision"] = precision
        result[label + " recall"] = recall
        if precision + recall == 0:
            result[label + " f-score"] = 0
        else:
            result[label + " f-score"] = 2.0 * (precision * recall) / (precision + recall)
    _add_scores( "node", matches_nodes_2, graph_2.nodes(), matches_nodes_1, graph_1.nodes())
    _add_scores( "species", species_2_matches, species_2, species_1_matches, species_1)
    _add_scores( "reaction", reactions_2_matches, reactions_2, reactions_1_matches, reactions_1)
    _add_scores( "edge", matches_edges_2, graph_2.edges(), matches_edges_1, graph_1.edges())
    return result
def print_analysis_subgraphs_overlap_results( graph_1, graph_2, node_match,
                            matches_nodes_1, matches_nodes_2, matches_edges_1, matches_edges_2,
                            species_1_matches, species_2_matches, reactions_1_matches, reactions_2_matches,
                            species_1 = None, species_2 = None,
                            reactions_1 = None, reactions_2 = None):
    """ Prints node / species / reaction / edge precision, recall and f-score
    for a subgraph-overlap comparison of graph_1 vs graph_2.

    The *_matches arguments are the matched node/edge collections produced by
    get_subgraphs_overlap_max_results; species_*/reactions_* default to the
    species/reaction node sets of the respective graph. One
    "precision & recall & f-score <category>" line is printed per category
    (LaTeX-table friendly formatting).
    """
    # compute the species/reaction partitions on demand if not supplied
    if not species_1:
        species_1 = set( filter_species( graph_1).nodes())
    if not species_2:
        species_2 = set( filter_species( graph_2).nodes())
    if not reactions_1:
        reactions_1 = set( filter_reactions( graph_1).nodes())
    if not reactions_2:
        reactions_2 = set( filter_reactions( graph_2).nodes())
    print( "{} {}/{}".format( node_match.__name__, graph_1.name, graph_2.name))
    # (matched-in-2, all-of-2, matched-in-1, all-of-1, category label)
    categories = [ (matches_nodes_2, graph_2.nodes(), matches_nodes_1, graph_1.nodes(), "node"),
                   (species_2_matches, species_2, species_1_matches, species_1, "species"),
                   (reactions_2_matches, reactions_2, reactions_1_matches, reactions_1, "reaction"),
                   (matches_edges_2, graph_2.edges(), matches_edges_1, graph_1.edges(), "edge")]
    for matched_2, all_2, matched_1, all_1, label in categories:
        precision = 100. * len( matched_2) / float( len( all_2))
        recall = 100. * len( matched_1) / float( len( all_1))
        f_score = 0.0
        if precision + recall > 0:
            f_score = 2. * precision * recall / (precision + recall)
        print( "%.2f & %.2f & %.2f %s" % (precision, recall, f_score, label))
def print_analysis_subgraphs_overlap_results_from_file( graph_1_name,
                                graph_2_name,
                                node_match,
                                edge_match = edge_match_exact,
                                prefix = "results/results-subgraphs-overlap-max"):
    """ Loads pickled subgraph-overlap results for the given graph pair and
    match-function combination and prints the precision/recall/f-score
    summary via print_analysis_subgraphs_overlap_results. """
    # reconstruct the pickle file name used by the export step
    pickle_file = "%s__%s__%s__%s__%s.pickle" % ( prefix, graph_1_name, graph_2_name, node_match.__name__, edge_match.__name__)
    graph_1, graph_2, results_subgraphs, results_matches = pickle.load( open( pickle_file, "rb"))
    # species / reaction partitions of both graphs
    species_1 = set( filter_species( graph_1).nodes())
    species_2 = set( filter_species( graph_2).nodes())
    reactions_1 = set( filter_reactions( graph_1).nodes())
    reactions_2 = set( filter_reactions( graph_2).nodes())
    (matches_nodes_1, matches_nodes_2,
     matches_edges_1, matches_edges_2,
     species_1_matches, species_2_matches,
     reactions_1_matches, reactions_2_matches) = get_subgraphs_overlap_max_results(
         graph_1, graph_2, results_subgraphs, results_matches,
         species_1 = species_1, species_2 = species_2, reactions_1 = reactions_1, reactions_2 = reactions_2)
    print_analysis_subgraphs_overlap_results( graph_1, graph_2, node_match,
        matches_nodes_1, matches_nodes_2, matches_edges_1, matches_edges_2,
        species_1_matches, species_2_matches, reactions_1_matches, reactions_2_matches,
        species_1, species_2, reactions_1, reactions_2)
def run_analysis_subgraphs_overlap( graph_1,
                                    graph_2,
                                    node_match,
                                    edge_match = edge_match_exact,
                                    subgraphs_2 = None,
                                    species_1 = None,
                                    species_2 = None,
                                    reactions_1 = None,
                                    reactions_2 = None,
                                    n_jobs = None,
                                    export_results = False,
                                    export_results_prefix = "results-subgraphs-overlap-max",
                                    file_names = None,
                                    print_results = True,
                                    ignore_existing = False):
    """ runs analysis for subgraphs

    Computes the maximal overlap of graph_2's connected components in
    graph_1 using the given node/edge match functions, optionally prints the
    precision/recall/f-score summary, and returns
    (results_matches, results_subgraphs).
    """
    print( "-----------")
    print( "%s: run_analysis_subgraphs_overlap %s/%s -- %s" % (now(), graph_1.name, graph_2.name, node_match.__name__))
    if subgraphs_2 is None:
        subgraphs_2 = list( networkx.connected_component_subgraphs( graph_2))
    # compute species/reaction partitions on demand if not supplied
    if not species_1:
        species_1 = set( filter_species( graph_1).nodes())
    if not species_2:
        species_2 = set( filter_species( graph_2).nodes())
    if not reactions_1:
        reactions_1 = set( filter_reactions( graph_1).nodes())
    if not reactions_2:
        reactions_2 = set( filter_reactions( graph_2).nodes())
    results_matches, results_subgraphs \
        = compute_subgraphs_overlap_max( graph_1, graph_2,
                                         node_match = node_match,
                                         # BUG FIX: forward the edge_match argument
                                         # (was hard-coded to edge_match_exact,
                                         # silently ignoring the parameter)
                                         edge_match = edge_match,
                                         subgraphs_2 = subgraphs_2,
                                         n_jobs = n_jobs,
                                         export_results = export_results,
                                         export_results_prefix = export_results_prefix,
                                         file_names = file_names,
                                         ignore_existing = ignore_existing)
    if print_results:
        # process results
        matches_nodes_1, matches_nodes_2, \
        matches_edges_1, matches_edges_2, \
        species_1_matches, species_2_matches, \
        reactions_1_matches, reactions_2_matches = \
            get_subgraphs_overlap_max_results( graph_1, graph_2, results_subgraphs, results_matches,
                                               species_1 = species_1, species_2 = species_2, reactions_1 = reactions_1, reactions_2 = reactions_2)
        # print results
        print_analysis_subgraphs_overlap_results( graph_1, graph_2, node_match,
                                                  matches_nodes_1, matches_nodes_2, matches_edges_1, matches_edges_2,
                                                  species_1_matches, species_2_matches, reactions_1_matches, reactions_2_matches,
                                                  species_1, species_2, reactions_1, reactions_2)
    return results_matches, results_subgraphs
def run_analyses_subgraphs_overlap( graph_1,
                                    graph_2,
                                    node_match_fns,
                                    subgraphs_2 = None,
                                    export_results = False,
                                    export_results_prefix = "results-subgraphs-overlap-max",
                                    print_results = True):
    """ runs analysis for subgraphs for multiple node_match_fns

    Pre-computes graph_2's connected components and both graphs'
    species/reaction partitions once, then runs
    run_analysis_subgraphs_overlap for every node match function.
    """
    print( "-----------")
    print( "run_analyses_subgraphs_overlap {}/{} -- {}".format( graph_1.name, graph_2.name, node_match_fns))
    if subgraphs_2 is None:
        subgraphs_2 = list( networkx.connected_component_subgraphs( graph_2))
    species_1 = set( filter_species( graph_1).nodes())
    species_2 = set( filter_species( graph_2).nodes())
    reactions_1 = set( filter_reactions( graph_1).nodes())
    reactions_2 = set( filter_reactions( graph_2).nodes())
    for node_match in node_match_fns:
        print("\n---")
        run_analysis_subgraphs_overlap( graph_1, graph_2, node_match, edge_match = edge_match_exact,
                                        subgraphs_2 = subgraphs_2,
                                        species_1 = species_1,
                                        species_2 = species_2,
                                        reactions_1 = reactions_1,
                                        reactions_2 = reactions_2,
                                        export_results = export_results,
                                        export_results_prefix = export_results_prefix,
                                        # BUG FIX: forward print_results
                                        # (parameter was accepted but ignored)
                                        print_results = print_results)
########################################################################
########################################################################
## side-by-side graphviz
def _graphviz_label( n, graph, n_id_map = {}, id_prefix = "", participant = False, show_identifier = False):
if graph.node[n].get("participants"):
n_id_map[n] = id_prefix + n
label = graph.node[n]["name"]
if show_identifier:
label += "\n" + n
label = "<table>%s%s</table>" % ("<tr><td port=\"%s\"><b>%s</b></td></tr>" % (n, label),
"".join([ _graphviz_label( p, graph, n_id_map, id_prefix = id_prefix, participant = True) for p in graph.node[n]["participant_ids"]]))
if participant:
return "<tr><td>%s</td></tr>" % label
else:
return "<%s>" % label
elif graph.node[n]["type"] == "species":
n_id_map[n] = id_prefix + n
label = graph.node[n]["name"]
if show_identifier:
label += "\n" + n
if participant:
return "<tr><td port=\"%s\">%s</td></tr>" % (n, label)
else:
return label
else:
n_id_map[n] = n
label = ", ".join( sbo_go_name(b) for b in graph.node[n]["bqbiol_is"])
if show_identifier:
label += "\n" + n
return label
def _graphviz_add_node( n, graph, graphviz_graph, n_id_map = {}, label = None, show_identifier = False, **kwargs):
""" adds a node top level (should not be participant of a complex) """
if label == None and graph.node[n].get("participants"):
label = _graphviz_label( n, graph, n_id_map, id_prefix = n + ":", show_identifier = show_identifier)
else:
label = _graphviz_label( n, graph, n_id_map, show_identifier = show_identifier)
if graph.node[n].get("participants"): # has participants
graphviz_graph.node( n, label = label, shape = "none", **kwargs)
elif graph.node[n]["type"] == "species":
graphviz_graph.node( n, label = label, shape = "rectangle", **kwargs)
else:
graphviz_graph.node( n, label = label, shape = "ellipse", **kwargs)
def _graphviz_add_edge( e, graph, graphviz_graph, n_id_map = {}, **kwargs):
""" adds an edge to the graphviz graph """
if (e[2] == "product" and not graph.node[e[0]]["type"] == "reaction") \
or (e[2] != "product" and not graph.node[e[1]]["type"] == "reaction"):
e = (e[1],e[0],e[2])
e0 = e[0]
e1 = e[1]
if e0 in n_id_map:
e0 = n_id_map[e0]
if e1 in n_id_map:
e1 = n_id_map[e1]
if e[2] == "modifier":
graphviz_graph.edge( e0, e1, arrowhead = "diamond", **kwargs)
else:
graphviz_graph.edge( e0, e1, **kwargs)
def graphviz_graph( graph, file_name = "test.dot", view = True, show_identifier = False):
    """ renders a graph using dot"""
    import graphviz
    n_id_map = {}
    # complex participants are rendered inside their complex's table, so
    # only nodes that are not a participant of another node are drawn
    participant_complex_map = {}
    for node in graph.nodes():
        for pid in graph.node[node].get("participant_ids") or []:
            participant_complex_map[pid] = node
    top_nodes = set( graph.nodes()).difference( participant_complex_map.keys())
    dot = graphviz.Digraph()
    for node in top_nodes:
        _graphviz_add_node( node, graph, dot, n_id_map, show_identifier = show_identifier)
    for src, dst, data in graph.edges( data = True):
        _graphviz_add_edge( (src, dst, data["type"]), graph, dot, n_id_map)
    dot.render( file_name, view = view)
def get_top_complex( n, graph, participant_complex_map = None):
    """ Returns the outermost complex containing node n, or n itself if it
    is not a participant of any complex.

    participant_complex_map maps participant id -> enclosing complex id and
    is built from graph when not supplied. Nested complexes are followed
    iteratively (BUG FIX: the original used recursion -- which can hit the
    recursion limit on deeply nested data -- and a mutable default arg).
    """
    if not participant_complex_map:
        participant_complex_map = { pid : node
                                    for node in graph.nodes() if graph.node[node].get("participant_ids")
                                    for pid in graph.node[node].get("participant_ids")}
    # climb to the outermost enclosing complex
    while n in participant_complex_map:
        n = participant_complex_map[n]
    return n
def graphviz_comparison_graph( graph_1, graph_2, m_nodes, m_edges, file_name = "test.dot", view = True, include_context_graph_1 = True, show_identifier = False):
    """ Creates a graph visualization visualizing a match (left, right)

    Renders graph_1 and graph_2 side by side as two graphviz clusters, draws
    the matched nodes/edges of each graph and connects every matched node
    pair with a dotted, constraint-free edge. m_nodes maps graph_1 node ->
    graph_2 node; m_edges maps graph_1 edge signature -> graph_2 edge
    signature. With include_context_graph_1, the unmatched neighborhood of
    the matched graph_1 nodes is added in grey for context.
    """
    import graphviz
    g = graphviz.Digraph()
    # left cluster: graph_1
    s1 = graphviz.Digraph( "cluster_1")
    s1.body.append( "\tlabel=\"%s\"" % graph_1.name)
    participant_complex_map_1 = { id : n for n in graph_1.nodes() if graph_1.node[n].get("participant_ids") for id in graph_1.node[n].get("participant_ids")}
    top_complexes_1 = [ get_top_complex( n, graph_1, participant_complex_map_1) for n in m_nodes.keys()]
    n_id_map_1 = {}
    for n in top_complexes_1:
        _graphviz_add_node( n, graph_1, s1, n_id_map_1, show_identifier = show_identifier)
    for e in m_edges.keys():
        _graphviz_add_edge( e, graph_1, s1, n_id_map_1)
    if include_context_graph_1:
        # unmatched edges incident to matched nodes, drawn in grey
        context_edges = set([ sort_edge_signature((edge[0], edge[1], edge[2]["type"]), graph_1) for edge in graph_1.edges( m_nodes.keys(), data = True)]).difference( m_edges.keys())
        context_nodes = set( [ c for e in context_edges for c in e[:2]]).difference( top_complexes_1)
        # add nodes
        for n in context_nodes:
            _graphviz_add_node( get_top_complex( n, graph_1, participant_complex_map_1), graph_1, s1, n_id_map_1, color = "grey", show_identifier = show_identifier)
        # add edges
        for e in context_edges:
            _graphviz_add_edge( e, graph_1, s1, n_id_map_1, color = "grey")
    g.subgraph( s1)
    # right cluster: graph_2
    s2 = graphviz.Digraph( "cluster_2")
    s2.body.append( "\tlabel=\"%s\"" % graph_2.name)
    participant_complex_map_2 = { id : n for n in graph_2.nodes() if graph_2.node[n].get("participant_ids") for id in graph_2.node[n].get("participant_ids")}
    top_complexes_2 = [ get_top_complex( n, graph_2, participant_complex_map_2) for n in m_nodes.values()]
    n_id_map_2 = {}
    for n in top_complexes_2:
        _graphviz_add_node( n, graph_2, s2, n_id_map_2, show_identifier = show_identifier)
    for e in m_edges.values():
        _graphviz_add_edge( e, graph_2, s2, n_id_map_2)
    g.subgraph( s2)
    # dotted connectors between matched node pairs
    # BUG FIX: .items() instead of Python-2-only .iteritems()
    for n1, n2 in m_nodes.items():
        g.edge( n_id_map_1[n1], n_id_map_2[n2], dir = "none", style = "dotted", constraint = "false")
    g.render( file_name, view = view)
#graphviz_comparison_graph( graph_1, graph_2, m_nodes, m_edges)
#graphviz_comparison_graph( graph_1, graph_2, m_nodes, m_edges, include_context_graph_1 = False)
########################################################################
########################################################################
## graphviz color overlap
def _graphviz_label2( n, graph, n_id_map = {}, id_prefix = "", matched_nodes = set(), participant = False, **kwargs):
    """ Generates colored labels condition on whether a node matched

    Like _graphviz_label, but colors the label: nodes in matched_nodes use
    kwargs["matched_color"/"matched_fontcolor"/"matched_fillcolor"], all
    others the plain "color"/"fontcolor"/"fillcolor". kwargs must also
    contain "show_identifier". Records graphviz node/port ids in n_id_map.
    NOTE(review): n_id_map and matched_nodes are mutable default arguments --
    callers should always pass their own.
    """
    # choose color
    color = kwargs["color"]
    fontcolor = kwargs["fontcolor"]
    fillcolor = kwargs["fillcolor"]
    show_identifier = kwargs["show_identifier"]
    if n in matched_nodes:
        color = kwargs["matched_color"]
        fontcolor = kwargs["matched_fontcolor"]
        fillcolor = kwargs["matched_fillcolor"]
    # handle different node types (with or without participants etc)
    if graph.node[n].get("participants"):
        # complex: HTML table with a bold, colored header cell and one
        # (recursively built) row per participant
        n_id_map[n] = id_prefix + n
        label = "<table>%s%s</table>" \
            % ("<tr><td port=\"%s\" color=\"%s\" bgcolor=\"%s\"><font color=\"%s\"><b>%s</b></font></td></tr>" % (n, color, fillcolor, fontcolor, graph.node[n]["name"]),
              "".join([ _graphviz_label2( p, graph, n_id_map, id_prefix = id_prefix, participant = True, **kwargs) for p in graph.node[n]["participant_ids"]]))
        if participant:
            return "<tr><td>%s</td></tr>" % label
        else:
            return "<%s>" % label
    elif graph.node[n]["type"] == "species":
        n_id_map[n] = id_prefix + n
        if participant:
            # NOTE(review): show_identifier is not applied to participant
            # species cells, unlike the top-level branch below -- verify
            return "<tr><td port=\"%s\" color=\"%s\" bgcolor=\"%s\"><font color=\"%s\">%s</font></td></tr>" % (n, color, fillcolor, fontcolor, graph.node[n]["name"])
        else:
            label = graph.node[n]["name"]
            if show_identifier:
                label += "\n" + n
            return label
    else:
        # reaction: labeled by its SBO/GO annotation names; colors are
        # applied by the caller's node() call, not embedded in the label
        n_id_map[n] = n
        label = ", ".join( sbo_go_name(b) for b in graph.node[n]["bqbiol_is"])
        if show_identifier:
            label += "\n" + n
        return label
def _graphviz_add_node2( n, graph, graphviz_graph, n_id_map = {}, matched_nodes = set(),
                         **kwargs):
    """ Adds node n to graphviz_graph, colored according to whether it is in
    matched_nodes (uses the matched_* colors from kwargs) or not (the plain
    colors). Complexes are rendered as HTML tables (shape "none"), species
    as filled rectangles, reactions as filled ellipses. """
    # pick matched vs. plain colors
    matched = n in matched_nodes
    color = kwargs["matched_color"] if matched else kwargs["color"]
    fontcolor = kwargs["matched_fontcolor"] if matched else kwargs["fontcolor"]
    fillcolor = kwargs["matched_fillcolor"] if matched else kwargs["fillcolor"]
    # complexes prefix their participant ports with the complex id
    has_participants = bool( graph.node[n].get("participants"))
    prefix = n + ":" if has_participants else ""
    label = _graphviz_label2( n, graph, n_id_map, id_prefix = prefix, matched_nodes = matched_nodes, **kwargs)
    if has_participants:
        graphviz_graph.node( n, label = label, color = color, fontcolor = fontcolor, fillcolor = fillcolor, shape = "none")
    elif graph.node[n]["type"] == "species":
        graphviz_graph.node( n, label = label, shape = "rectangle", color = color, fontcolor = fontcolor, fillcolor = fillcolor, style = "filled")
    else:
        graphviz_graph.node( n, label = label, shape = "ellipse", color = color, fontcolor = fontcolor, fillcolor = fillcolor, style = "filled")
def graphviz_comparison_graph2( graph_1,
                                matched_nodes = set(),
                                matched_edges = set(),
                                file_name = "test.dot",
                                view = True,
                                mode = "only_match", # can be only_match, context, all
                                matched_color = "red",
                                matched_fontcolor = "red",
                                matched_fillcolor = "white",
                                fontcolor = "grey",
                                color = "grey",
                                fillcolor = "white",
                                show_identifier = False):
    """ Visualization of matched nodes and edges using different color (single graph)

    Draws graph_1 with matched nodes/edges highlighted in the matched_*
    colors. mode selects how much of the unmatched remainder is drawn in the
    plain colors: "only_match" (nothing), "context" (unmatched edges/nodes
    adjacent to matched nodes) or "all" (the whole graph).
    """
    import graphviz
    g = graphviz.Digraph()
    participant_complex_map_1 = { id : n for n in graph_1.nodes() if graph_1.node[n].get("participant_ids") for id in graph_1.node[n].get("participant_ids")}
    top_complexes_1 = [ get_top_complex( n, graph_1, participant_complex_map_1) for n in matched_nodes]
    n_id_map_1 = {}
    for n in top_complexes_1:
        _graphviz_add_node2( n, graph_1, g, n_id_map_1, matched_nodes,
                             matched_color = matched_color,
                             matched_fontcolor = matched_fontcolor,
                             matched_fillcolor = matched_fillcolor,
                             fontcolor = fontcolor,
                             color = color,
                             fillcolor = fillcolor,
                             show_identifier = show_identifier)
    for e in matched_edges:
        _graphviz_add_edge( e, graph_1, g, n_id_map_1, color = matched_color)
    if mode == "context":
        context_edges = set([ sort_edge_signature((edge[0], edge[1], edge[2]["type"]), graph_1) for edge in graph_1.edges( matched_nodes, data = True)]).difference( matched_edges)
        context_nodes_complexes = set([get_top_complex( n, graph_1, participant_complex_map_1) for n in set( [ c for e in context_edges for c in e[:2]]).difference( matched_nodes)]).difference(top_complexes_1)
        # add context nodes
        for n in context_nodes_complexes:
            _graphviz_add_node2( n, graph_1, g, n_id_map_1, color = color, fontcolor = fontcolor, fillcolor = fillcolor, show_identifier = show_identifier)
        # add context edges; BUG FIX: pass n_id_map_1 so that edges touching
        # complex participants are routed to their table ports (was omitted)
        # -- context_edges already excludes matched_edges, so the original's
        # extra .difference(matched_edges) was redundant
        for e in context_edges:
            _graphviz_add_edge( e, graph_1, g, n_id_map_1, color = color)
    elif mode == "all":
        all_top_complexes = set( [ get_top_complex( n, graph_1, participant_complex_map_1) for n in set(graph_1.nodes()).difference( matched_nodes)])
        all_edges = set([ (edge[0], edge[1], edge[2]["type"]) for edge in graph_1.edges( data = True)]).difference( matched_edges)
        # add remaining nodes
        for n in all_top_complexes:
            _graphviz_add_node2( n, graph_1, g, n_id_map_1, color = color, fontcolor = fontcolor, fillcolor = fillcolor, show_identifier = show_identifier)
        # add remaining edges; BUG FIX: pass n_id_map_1 (was omitted)
        for e in all_edges:
            _graphviz_add_edge( e, graph_1, g, n_id_map_1, color = color)
    g.render( file_name, view = view)
#graphviz_comparison_graph2( graph_1, set(m_nodes.keys()), set(m_edges.keys()))
#graphviz_comparison_graph2( graph_1, set(m_nodes.keys()), set(m_edges.keys()), mode = "all")
########################################################################
########################################################################
def subgraph_overlap_graphviz( file_name = "TARGET__NLP-ANN__nm_name_clean_approx_OR_gene_id_intersect_AND_sbo_is_a__edge_match_exact--MAX.pickle"):
    """ Creates a single overlap graph from subgraph match results (color based)

    Loads the pickled (graph_1, graph_2, subgraphs_2, matches_list) tuple,
    collects all graph_1 nodes/edges covered by any match, and renders
    graph_1 with the matched parts highlighted (mode "all").
    """
    import pickle
    [graph_1, graph_2, subgraphs_2, matches_list] = pickle.load( open( file_name, "rb"))
    edge_signatures_1 = edge_signatures( graph_1)
    edge_signatures_2 = edge_signatures( graph_2)
    all_matched_nodes_1 = set()
    all_matched_edges_1 = set()
    for subgraph_2, matches in zip( subgraphs_2, matches_list):
        for m in matches:
            all_matched_nodes_1.update( m.keys())
            # invert the match to look up edges by graph_1 endpoints
            # BUG FIX: .items() instead of Python-2-only .iteritems()
            reverse_m = { v: k for k, v in m.items()}
            m_edges = subgraph_match_get_edges( subgraph_2, m , reverse_m, edge_signatures_1, edge_signatures_2)
            all_matched_edges_1.update( m_edges.keys())
    graphviz_comparison_graph2( graph_1, all_matched_nodes_1, all_matched_edges_1, mode = "all", file_name = file_name + ".dot")
def subgraph_overlaps_graphviz( input_file = "results/results-subgraphs-overlap-max__TARGET__NLP-ANN__nm_name_clean_approx_OR_bqbiol_is_equal_AND_nm_bqbiol_is_overlaps_sbo_is_a__edge_match_exact.pickle",
                                output_file_prefix = "results-subgraphs-overlap-max__TARGET__NLP-ANN__nm_name_clean_approx_OR_bqbiol_is_equal_AND_nm_bqbiol_is_overlaps_sbo_is_a__edge_match_exact",
                                include_context_graph_1 = True,
                                ignore_isolated_nodes = True,
                                graph_1 = None,
                                graph_2 = None,
                                show_identifier = False,
                                reactions_1 = None,
                                reactions_2 = None,
                                graph_1_reaction_txt_mapping = None,
                                graph_2_reaction_txt_mapping = None):
    """ Creates many overlap graph from subgraph match results (comparison graph left/right)

    For every connected subgraph of graph_2 and every of its matches into
    graph_1, exports a side-by-side comparison .dot file; with the optional
    *_reaction_txt_mapping dicts, the matched reactions' source text is
    exported to accompanying -target.txt / -nlp.txt files.
    """
    import pickle
    graph_1_f, graph_2_f, subgraphs_2, matches_list = pickle.load( open( input_file, "rb"))
    # the graphs stored in the pickle can be overridden by the caller
    if graph_1 is None:
        graph_1 = graph_1_f
    if graph_2 is None:
        graph_2 = graph_2_f
    if reactions_1 is None:
        reactions_1 = set( filter_reactions( graph_1))
    if reactions_2 is None:
        reactions_2 = set( filter_reactions( graph_2))
    edge_signatures_1 = edge_signatures( graph_1)
    edge_signatures_2 = edge_signatures( graph_2)
    # enumerate instead of zip(range(len(...)), ...)
    for i, (subgraph_2, matches) in enumerate( zip( subgraphs_2, matches_list)):
        print( "Processing %i of %i" % (i, len(subgraphs_2)))
        if ignore_isolated_nodes and len(subgraph_2.nodes()) < 2:
            print( "Ignoring %i of %i" % (i, len(subgraphs_2)))
        else:
            for j, m_nodes in enumerate( matches):
                print( "Processing matches %i of %i" % (j, len(matches)))
                # invert the node match and recover the matched edges
                # BUG FIX: .items() instead of Python-2-only .iteritems()
                reverse_m_nodes = { v: k for k, v in m_nodes.items()}
                m_edges = subgraph_match_get_edges( subgraph_2, m_nodes, reverse_m_nodes, edge_signatures_1, edge_signatures_2)
                output_file = "%s-%i-%i.dot" % ( output_file_prefix, i, j)
                print( "Exporting %s" % output_file)
                graphviz_comparison_graph( graph_1,
                                           graph_2,
                                           m_nodes,
                                           m_edges,
                                           file_name = output_file,
                                           view = False,
                                           show_identifier = show_identifier,
                                           include_context_graph_1 = include_context_graph_1)
                if graph_1_reaction_txt_mapping:
                    output_file = "%s-%i-%i-target.txt" % ( output_file_prefix, i, j)
                    print( "Exporting %s" % output_file)
                    m_reactions_1 = reactions_1.intersection( m_nodes.keys())
                    # close the file handle deterministically (was left to the GC)
                    with open( output_file, "wt") as txt_file:
                        txt_file.write( "\n".join( [graph_1_reaction_txt_mapping[r] for r in m_reactions_1 if r in graph_1_reaction_txt_mapping]))
                if graph_2_reaction_txt_mapping:
                    output_file = "%s-%i-%i-nlp.txt" % ( output_file_prefix, i, j)
                    print( "Exporting %s" % output_file)
                    m_reactions_2 = reactions_2.intersection( m_nodes.values())
                    with open( output_file, "wt") as txt_file:
                        txt_file.write( "\n".join( [graph_2_reaction_txt_mapping[r] for r in m_reactions_2 if r in graph_2_reaction_txt_mapping]))
########################################################################
########################################################################
## overlap SBML
def _sbml_color_all( root, color_lines = "90000000", color_bounds = "00000000"):
namespaces = {"cd" : "http://www.sbml.org/2001/ns/celldesigner", "sbml" : "http://www.sbml.org/sbml/level2"}
for line in root.xpath("//cd:line", namespaces = namespaces):
line.set( "color", color_lines)
for paint in root.xpath("//cd:paint", namespaces = namespaces):
paint.set( "color", color_bounds)
def _sbml_color_reaction( root, reaction_id, color = "ffff0000", width = "1.0"):
""" colors the reaction links to reactant and product"""
namespaces = {"cd" : "http://www.sbml.org/2001/ns/celldesigner", "sbml" : "http://www.sbml.org/sbml/level2"}
lines = root.xpath("//sbml:reaction[@id='%s']/sbml:annotation/cd:line" % reaction_id, namespaces = namespaces)
assert( len(lines) == 1)
lines[0].set( "color", color)
lines[0].set( "width", width)
def _sbml_color_reaction_modifier( root, reaction_id, modifier_id, color = "ffff0000", width = "1.0"):
namespaces = {"cd" : "http://www.sbml.org/2001/ns/celldesigner", "sbml" : "http://www.sbml.org/sbml/level2"}
lines = root.xpath("//sbml:reaction[@id='%s']/sbml:annotation/cd:listOfModification/cd:modification[@aliases='%s']/cd:line" % (reaction_id, modifier_id), namespaces = namespaces)
if len(lines) == 1:
lines[0].set( "color", color)
lines[0].set( "width", width)
else:
print( "_sbml_color_reaction_modifier:Ignoring %s/%s" % (reaction_id, modifier_id))
def _sbml_color_species( root, species_id, color = "ffff0000"):
namespaces = {"cd" : "http://www.sbml.org/2001/ns/celldesigner", "sbml" : "http://www.sbml.org/sbml/level2"}
paints = root.xpath( "//cd:speciesAlias[@id='%s']//cd:paint" % species_id, namespaces = namespaces) \
or root.xpath( "//cd:complexSpeciesAlias[@id='%s']//cd:paint" % species_id, namespaces = namespaces)
assert( len(paints) > 0)
[ p.set( "color", color) for p in paints]
def subgraph_overlaps_sbml( graph_1,
                            matches_nodes_1 = set(),
                            matches_edges_1 = set(),
                            inn = 'mTORPathway-celldesigner.xml',
                            out = 'mTORPathway-celldesigner-color.xml',
                            background_color_bounds = "00000000",
                            background_color_lines = "000000",
                            matched_color = "FF00FF00",
                            matched_line_width = "2.0"):
    """ Visualization of matched species and reactions using different color (single graph)

    Loads the CellDesigner SBML file inn, fades everything into the
    background colors, re-colors the matched species and reactions (plus
    their modifier links) with matched_color, and writes the result to out.
    """
    print( "sbml_color_matched:Loading %s" % inn)
    tree = lxml.etree.parse( inn)
    root = tree.getroot()
    print( "subgraph_overlaps_sbml:Coloring background")
    _sbml_color_all( root, color_bounds = background_color_bounds, color_lines = background_color_lines)
    # color species
    print( "subgraph_overlaps_sbml:Coloring matched species")
    for n in set( filter_species( graph_1).nodes()).intersection( matches_nodes_1):
        _sbml_color_species( root, n, color = matched_color)
    print( "subgraph_overlaps_sbml:Coloring matched reactions")
    matched_reactions = set( filter_reactions( graph_1).nodes()).intersection( matches_nodes_1)
    # BUG FIX: filter() is a one-shot iterator on Python 3 -- the dict
    # comprehension below would exhaust it on the first reaction, leaving all
    # later reactions without modifiers; materialize it as a list instead
    modifier_edges = [ e for e in matches_edges_1 if e[2] == "modifier"]
    matched_modifiers = { r : [e[0] for e in modifier_edges if e[1] == r] for r in matched_reactions}
    for r in matched_reactions:
        _sbml_color_reaction( root, r, color = matched_color, width = matched_line_width)
        for m in matched_modifiers[r]:
            _sbml_color_reaction_modifier( root, r, m, color = matched_color, width = matched_line_width)
    print( "subgraph_overlaps_sbml:Outputting %s" % out)
    tree.write( out, encoding='utf-8', xml_declaration = True)
########################################################################
########################################################################
# initialize
def initialize():
    """ Loads the module-level resources used by the matching functions: the
    SBO ontology nodes, the gene name map and the simstring
    approximate-string-matching index. Must be called once before running
    the analyses. """
    global SBO_NODES, GENE_MAP, SIMSTRING_DB
    print( "Initializing networkx_analysis.py")
    print( "Loading SBO")
    # Systems Biology Ontology nodes, pickled by a separate preprocessing step
    SBO_NODES = pickle.load( open( "sbo.pickle", "rb"))
    print( "Loading GENE_MAP")
    GENE_MAP = pickle.load( open( "gene_map.pickle", "rb"))
    print( "Loading SIMSTRING_DB")
    # approximate gene-name lookup: cosine similarity at threshold 0.9
    SIMSTRING_DB = simstring.reader( 'gene_list.simstring')
    SIMSTRING_DB.measure = simstring.cosine
    SIMSTRING_DB.threshold = 0.9
def load_pathway( name,
                  input_file,
                  output_file,
                  output_file_participant_graph = None,
                  output_file_w_participant_edges = None,
                  ending = ".xml",
                  pickle_graph = True,
                  prefix = ""):
    """ Loads an SBML pathway file and converts it to networkx graphs.

    Returns (graph, participant_graph, graph_w_participant_edges) on
    success, or None when the SBML model cannot be loaded. When pickle_graph
    is true, the main graph is pickled to output_file; the two auxiliary
    graphs are pickled only when their output paths are given.
    """
    # load data
    print( "Loading %s" % (input_file))
    sbml = load_sbml( input_file)
    model = sbml.getModel()
    if model is None:
        print( "Error loading %s" % (input_file))
        return
    graph, participant_graph, graph_w_participant_edges = create_graph( model, prefix = prefix)
    # record provenance on the graph itself
    graph.name = name
    graph.source_file_name = input_file
    graph.file_name = output_file
    if pickle_graph:  # truthiness instead of "== True"
        print( "Saving networkx as " + output_file)
        networkx.write_gpickle( graph, output_file)
    if output_file_participant_graph:
        print( "Saving participant_graph networkx as " + output_file_participant_graph)
        networkx.write_gpickle( participant_graph, output_file_participant_graph)
    if output_file_w_participant_edges:
        print( "Saving graph with participant edges networkx as " + output_file_w_participant_edges)
        networkx.write_gpickle( graph_w_participant_edges, output_file_w_participant_edges)
    return graph, participant_graph, graph_w_participant_edges
########################################################################
########################################################################
# PROCESSING SIGNATURES
def run_analysis_bqbiol_is_signatures( bqbiol_is_1, bqbiol_is_2,
                name_1 = "name_1", name_2 = "name_2", type = "species",
                equal_fns = [operator.eq]):
    """ Compares two collections of bqbiol_is annotation signatures and
    reports term-level and signature-level overlap (precision, recall,
    f-score), once on unique values and once on the full lists. Returns the
    result rows as a list of dicts. """
    terms_1 = set( b for t in bqbiol_is_1 for b in t)
    terms_2 = set( b for t in bqbiol_is_2 for b in t)
    signatures_1 = set( bqbiol_is_1)
    signatures_2 = set( bqbiol_is_2)
    data = []
    def record( unique, reduction, eq, precision, recall, f_score):
        # one row in the common result-table schema
        data.append({ "graph_1" : name_1, "graph_2" : name_2, "unique" : unique, "type" : type,
                      "reduction" : reduction, "eq" : eq,
                      "precision" : precision, "recall" : recall, "f-score" : f_score})
    # individual annotation terms, unique, exact equality
    _, _, precision, recall, f_score = analyse_set_overlap( terms_1, terms_2)
    print("%s:%s/%s:%s unique bqbiol_is terms equal: %.2f & %.2f & %.2f precision/recall/fscore" % (now(), name_1, name_2, type,
                                                                                                    precision, recall, f_score))
    record( True, "bqbiol_is terms", "eq", precision, recall, f_score)
    # whole signatures under each supplied equality function
    for eq_fun in equal_fns:
        _, _, precision, recall, f_score = analyse_set_overlap( signatures_1, signatures_2, eq_fun)
        print("%s:%s/%s:%s unique bqbiol_is signatures %s: %.2f & %.2f & %.2f precision/recall/fscore" % (now(), name_1, name_2, type, eq_fun.__name__,
                                                                                                          precision, recall, f_score))
        record( True, "bqbiol_is signatures", eq_fun.__name__, precision, recall, f_score)
        _, _, precision, recall, f_score = analyse_list_overlap( bqbiol_is_1, bqbiol_is_2, eq_fun)
        print("%s:%s/%s:%s bqbiol_is signatures %s: %.2f & %.2f & %.2f precision/recall/fscore" % (now(), name_1, name_2, type, eq_fun.__name__,
                                                                                                   precision, recall, f_score))
        record( False, "bqbiol_is signatures", eq_fun.__name__, precision, recall, f_score)
    return data
def run_analysis_species_signatures( graph_1, graph_2, species_1 = None, species_2 = None):
    """ Compares the species of two graphs by name (under several name
    reductions / equality functions) and by bqbiol_is annotation signatures,
    printing precision/recall/f-score for each combination and returning the
    result rows as a pandas DataFrame. species_1/species_2 default to the
    species subgraphs of graph_1/graph_2. """
    import pandas
    print("%s:%s/%s:run_analysis_species_signatures" % (now(), graph_1.name, graph_2.name))
    if species_1 == None:
        print("%s:%s/%s:run_analysis_species_signatures:filtering species graph_1" % (now(), graph_1.name, graph_2.name))
        species_1 = filter_species( graph_1)
    if species_2 == None:
        print("%s:%s/%s:run_analysis_species_signatures:filtering species graph_2" % (now(), graph_1.name, graph_2.name))
        species_2 = filter_species( graph_2)
    data = []
    print("%s:%s/%s:run_analysis_species_signatures:names" % (now(), graph_1.name, graph_2.name))
    # name-based comparison: clean_name with exact equality, then clean_name2
    # with exact equality, then clean_name2 again with approximate equality
    for reduction_fun, equality_fn in zip( [clean_name, clean_name2, clean_name2], [operator.eq, operator.eq, name_approx_equal]):
        # reduced species names of graph_1 (source) and graph_2 (target)
        source_target = ([ reduction_fun( graph_1.node[n]["name"]) for n in species_1],
                         [ reduction_fun( graph_2.node[n]["name"]) for n in species_2])
        # unique names
        res_1, res_2, precision, recall, f_score = analyse_set_overlap( set(source_target[0]), set(source_target[1]), equality_fn)
        print("%s:%s/%s: species unique overlap %s/%s: %.2f & %.2f & %.2f precision/recall/fscore" % (now(), graph_1.name, graph_2.name, reduction_fun.__name__, equality_fn.__name__,
                                                                                                      precision, recall, f_score))
        data.append({ "graph_1" : graph_1.name, "graph_2" : graph_2.name, "unique" : True, "type" : "species",
                      "reduction" : reduction_fun.__name__, "eq" : equality_fn.__name__,
                      "precision" : precision, "recall" : recall, "f-score" : f_score})
        # full name lists (with duplicates)
        res_1, res_2, precision, recall, f_score = analyse_list_overlap( source_target[0], source_target[1], equality_fn)
        print("%s:%s/%s: species overlap %s/%s: %.2f & %.2f & %.2f precision/recall/fscore" % (now(), graph_1.name, graph_2.name, reduction_fun.__name__, equality_fn.__name__,
                                                                                               precision, recall, f_score))
        data.append({ "graph_1" : graph_1.name, "graph_2" : graph_2.name, "unique" : False, "type" : "species",
                      "reduction" : reduction_fun.__name__, "eq" : equality_fn.__name__,
                      "precision" : precision, "recall" : recall, "f-score" : f_score})
    # BQBIOL_IS
    print("%s:%s/%s:run_analysis_species_signatures:running bqbiol_is" % (now(), graph_1.name, graph_2.name))
    data.extend( run_analysis_bqbiol_is_signatures( bqbiol_is_1 = [ graph_1.node[n]["bqbiol_is"] for n in species_1],
                                                    bqbiol_is_2 = [ graph_2.node[n]["bqbiol_is"] for n in species_2],
                                                    name_1 = graph_1.name, name_2 = graph_2.name, type = "species",
                                                    equal_fns = [ tuple_eq_empty_not_eq, tuple_overlaps]))
    data_p = pandas.DataFrame(data)
    return data_p
def run_analysis_reactions_signatures( graph_1, graph_2, reactions_1 = None, reactions_2 = None):
    """ Compares the bqbiol_is annotation signatures of the reactions of two
    graphs (only reactions that carry annotations) and returns the result
    rows as a pandas DataFrame. """
    import pandas
    print("%s:%s/%s:run_analysis_reactions_signatures" % (now(), graph_1.name, graph_2.name))
    if reactions_1 is None:
        print("%s:%s/%s:run_analysis_reactions_signatures:filtering reactions graph_1" % (now(), graph_1.name, graph_2.name))
        reactions_1 = filter_reactions( graph_1)
    if reactions_2 is None:
        print("%s:%s/%s:run_analysis_reactions_signatures:filtering reactions graph_2" % (now(), graph_1.name, graph_2.name))
        reactions_2 = filter_reactions( graph_2)
    # only reactions with at least one bqbiol_is annotation are comparable
    annotated_1 = [ n for n in reactions_1 if graph_1.node[n]["bqbiol_is"]]
    annotated_2 = [ n for n in reactions_2 if graph_2.node[n]["bqbiol_is"]]
    rows = run_analysis_bqbiol_is_signatures( bqbiol_is_1 = [ graph_1.node[n]["bqbiol_is"] for n in annotated_1],
                                              bqbiol_is_2 = [ graph_2.node[n]["bqbiol_is"] for n in annotated_2],
                                              name_1 = graph_1.name, name_2 = graph_2.name, type = "reactions",
                                              equal_fns = [ tuple_eq_empty_not_eq, tuple_overlaps, tuple_overlaps_sbo_is_a])
    return pandas.DataFrame( rows)
def run_analysis_compartments_signatures( graph_1, graph_2, species_1 = None, species_2 = None):
    """ Compares the compartment names referenced by the species of the two
    graphs (unique overlap, exact equality) and returns the single
    precision/recall/f-score row as a pandas DataFrame. """
    import pandas
    print("%s:%s/%s:run_analysis_compartments_signatures" % (now(), graph_1.name, graph_2.name))
    if species_1 is None:
        print("%s:%s/%s:run_analysis_compartments_signatures:filtering species graph_1" % (now(), graph_1.name, graph_2.name))
        species_1 = filter_species( graph_1)
    if species_2 is None:
        print("%s:%s/%s:run_analysis_compartments_signatures:filtering species graph_2" % (now(), graph_1.name, graph_2.name))
        species_2 = filter_species( graph_2)
    print("%s:%s/%s:run_analysis_species_signatures names" % (now(), graph_1.name, graph_2.name))
    # unique, non-empty compartment names per graph
    compartments_1 = { species_1.node[s]["compartment"] for s in species_1.nodes() if species_1.node[s]["compartment"]}
    compartments_2 = { species_2.node[s]["compartment"] for s in species_2.nodes() if species_2.node[s]["compartment"]}
    res_1, res_2, precision, recall, f_score = analyse_set_overlap( compartments_1, compartments_2)
    print("%s:%s/%s:compartment unique overlap eq: %.2f & %.2f & %.2f precision/recall/fscore" % (now(), graph_1.name, graph_2.name, precision, recall, f_score))
    row = { "graph_1" : graph_1.name, "graph_2" : graph_2.name, "unique" : True, "type" : "compartment",
            "reduction" : "name", "eq" : "eq",
            "precision" : precision, "recall" : recall, "f-score" : f_score}
    return pandas.DataFrame([row])
def run_analysis_signatures(graph_1, graph_2, export_file=None):
    """Run the species, reactions and compartments signature analyses and
    return the concatenated results as a single pandas.DataFrame.

    When `export_file` is given, the combined frame is also pickled there.
    """
    import pandas
    print("%s:%s/%s:run_analysis_signatures:started" % (now(), graph_1.name, graph_2.name))
    species_frame = run_analysis_species_signatures(graph_1, graph_2)
    print("---")
    reactions_frame = run_analysis_reactions_signatures(graph_1, graph_2)
    print("---")
    compartments_frame = run_analysis_compartments_signatures(graph_1, graph_2)
    combined = species_frame.append(reactions_frame).append(compartments_frame).reset_index()
    if export_file:
        print("%s:%s/%s:run_analysis_signatures:exporting %s" % (now(), graph_1.name, graph_2.name, export_file))
        combined.to_pickle(export_file)
    return combined
########################################################################
########################################################################
# PROCESSING NODE OVERLAP
def run_analyses_overlap(graph_1, graph_2,
                         species_nm=None,
                         reactions_nm=None,
                         n_jobs=None,
                         export_results=True,
                         export_results_prefix="results-nodes-overlap-max"):
    """Run the maximum node-overlap analyses for the species and the
    reactions of two graphs.

    Parameters
    ----------
    species_nm, reactions_nm : optional lists of node-match functions. The
        previous defaults are preserved, but they are now built per call:
        mutable lists as default arguments are shared across calls and can
        be mutated by one caller for all subsequent callers.
    n_jobs : parallelism, passed through to run_analyses_nodes_overlap_max.
    export_results, export_results_prefix : export controls, passed through.
    """
    if species_nm is None:
        species_nm = [nm_name_equal,
                      nm_name_clean_equal,
                      nm_name_clean_approx,
                      # nm_gene_id_intersect,
                      nm_bqbiol_is_equal,
                      nm_bqbiol_is_overlaps]
    if reactions_nm is None:
        reactions_nm = [nm_bqbiol_is_equal,
                        nm_bqbiol_is_overlaps,
                        nm_bqbiol_is_overlaps_sbo_is_a]
    #### SPECIES OVERLAP
    run_analyses_nodes_overlap_max(filter_species(graph_1),
                                   filter_species(graph_2),
                                   node_match_fns=species_nm,
                                   n_jobs=n_jobs,
                                   export_results=export_results,
                                   export_results_prefix=export_results_prefix)
    #### REACTIONS OVERLAP
    run_analyses_nodes_overlap_max(filter_reactions(graph_1),
                                   filter_reactions(graph_2),
                                   node_match_fns=reactions_nm,
                                   n_jobs=n_jobs,
                                   export_results=export_results,
                                   export_results_prefix=export_results_prefix)
| apache-2.0 |
bendudson/BOUT | tools/pylib/post_bout/pb_nonlinear.py | 2 | 3020 | #some function to plot nonlinear stuff
from pb_corral import LinRes
from ListDict import ListDictKey, ListDictFilt
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import matplotlib.artist as artist
import matplotlib.ticker as ticker
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.backends.backend_pdf import PdfPages
from reportlab.platypus import *
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.rl_config import defaultPageSize
from reportlab.lib.units import inch
from reportlab.graphics.charts.linecharts import HorizontalLineChart
from reportlab.graphics.shapes import Drawing
from reportlab.graphics.charts.lineplots import LinePlot
from reportlab.graphics.widgets.markers import makeMarker
from reportlab.lib import colors
from replab_x_vs_y import RL_Plot
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter, MultipleLocator
class NLinResDraw(LinRes):
    """LinRes subclass that adds plotting of nonlinear (RHS) contributions."""

    def __init__(self,alldb):
        # All bookkeeping is delegated to the LinRes base class.
        LinRes.__init__(self,alldb)

    def plotnlrhs(self,pp,field='Ni',yscale='linear',clip=0,
                  xaxis='t',xscale='linear',xrange=1):
        """Plot the nonlinear contribution of `field` for every run folder
        into the PdfPages object `pp` (one subplot per run).

        NOTE(review): clip, xaxis, xscale and xrange are accepted but unused
        in this body -- presumably kept for interface symmetry; confirm.
        """
        colors = ['b','g','r','c','m','y','k','b','g','r','c','m','y','k']
        Modes = subset(self.db,'field',[field]) #pick field
        comp ='ave'
        fig1 = plt.figure()
        adj = fig1.subplots_adjust(hspace=0.4,wspace=0.4)
        fig1.suptitle('Nonlinear contribution for ' + field)
        props = dict( alpha=0.8, edgecolors='none')
        Nplots = self.nrun
        k=0
        # One subplot per run folder present in the filtered database.
        for j in list(set(Modes.path).union()):
            s = subset(Modes.db,'path',[j]) #pick a run folder - many modes
            dz = s.dz[0]
            data = s.ave[0]['nl']
            x = np.array(range(data.size))
            ax =fig1.add_subplot(round(Nplots/2.0 + 1.0),2,k+1)
            ax.set_ylabel(r'$\frac{ddt_N}{ddt}$',fontsize=12,rotation='horizontal')
            k+=1
            ax.grid(True,linestyle='-',color='.75')
            try:
                # Symlog-style scaling can fail for some values/backends;
                # fall back to a plain linear axis in that case.
                ax.set_yscale(yscale,linthreshy=1e-13)
            except:
                ax.set_yscale('linear')
            i=1
            ax.plot(x,data.flatten(),
                    c=cm.jet(.2*i),linestyle='-')
            #data = np.array(ListDictKey(s.db,comp)) #pick component should be ok for a fixed dz key
            # we are not interested in looping over all modes
        fig1.savefig(pp,format='pdf')
        plt.close(fig1)
        #return 0
class subset(NLinResDraw):
    """NLinResDraw restricted to database entries whose `key` matches one of
    `valuelist`; falls back to the full database when nothing matches."""

    def __init__(self,alldb,key,valuelist,model=False):
        selection = ListDictFilt(alldb,key,valuelist)
        if len(selection) !=0:
            LinRes.__init__(self,selection)
            self.skey = key  # remember which key the subset was taken on
            if model==True:
                self.model()
        else:
            # Empty selection: keep the whole database instead of failing.
            LinRes.__init__(self,alldb)
            if model==True:
                self.model()
| gpl-3.0 |
joshloyal/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
    """Re-raise an import failure as a readable ImportError that also lists
    the package directory contents (to help debugging on the mailing list)."""
    package_dir = os.path.split(__file__)[0]
    # A local source checkout gets the "inplace build" hint instead of the
    # generic installer hint.
    hint = INPLACE_MSG if package_dir == "sklearn/__check_build" else STANDARD_MSG
    listing = []
    # Lay the directory listing out three entries per line.
    for index, entry in enumerate(os.listdir(package_dir)):
        if (index + 1) % 3:
            listing.append(entry.ljust(26))
        else:
            listing.append(entry + '\n')
    raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, package_dir, ''.join(listing).strip(), hint))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
nikitasingh981/scikit-learn | sklearn/externals/joblib/__init__.py | 54 | 5087 | """Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
========================= ================================================
**User documentation:** http://pythonhosted.org/joblib
**Download packages:** http://pypi.python.org/pypi/joblib#downloads
**Source code:** http://github.com/joblib/joblib
**Report issues:** http://github.com/joblib/joblib/issues
========================= ================================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing twice the same thing**: code is rerun over and
over, for instance when prototyping computational-heavy jobs (as in
scientific development), but hand-crafted solutions to alleviate this
issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanism to help track what
has been ran, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.11'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
from .parallel import register_parallel_backend
from .parallel import parallel_backend
from .parallel import effective_n_jobs
__all__ = ['Memory', 'MemorizedResult', 'PrintTime', 'Logger', 'hash', 'dump',
'load', 'Parallel', 'delayed', 'cpu_count', 'effective_n_jobs',
'register_parallel_backend', 'parallel_backend']
| bsd-3-clause |
pfnet/chainercv | chainercv/extensions/vis_report/detection_vis_report.py | 3 | 5090 | import copy
import os
import warnings
import chainer
from chainercv.visualizations.vis_bbox import vis_bbox
try:
import matplotlib # NOQA
_available = True
except (ImportError, TypeError):
_available = False
def _check_available():
    # Warn (rather than fail) when matplotlib is missing so the rest of the
    # module stays importable; plotting is simply skipped in that case.
    if not _available:
        warnings.warn('matplotlib is not installed on your environment, '
                      'so nothing will be plotted at this time. '
                      'Please install matplotlib to plot figures.\n\n'
                      ' $ pip install matplotlib\n')
class DetectionVisReport(chainer.training.extension.Extension):

    """An extension that visualizes output of a detection model.

    This extension visualizes the predicted bounding boxes together with the
    ground truth bounding boxes.

    Internally, this extension takes examples from an iterator,
    predict bounding boxes from the images in the examples,
    and visualizes them using :meth:`chainercv.visualizations.vis_bbox`.
    The process can be illustrated in the following code.

    .. code:: python

        batch = next(iterator)
        # Convert batch -> imgs, gt_bboxes, gt_labels
        pred_bboxes, pred_labels, pred_scores = target.predict(imgs)
        # Visualization code
        for img, gt_bbox, gt_label, pred_bbox, pred_label, pred_score \\
                in zip(imgs, gt_boxes, gt_labels,
                       pred_bboxes, pred_labels, pred_scores):
            # the ground truth
            vis_bbox(img, gt_bbox, gt_label)
            # the prediction
            vis_bbox(img, pred_bbox, pred_label, pred_score)

    .. note::
        :obj:`gt_bbox` and :obj:`pred_bbox` are float arrays
        of shape :math:`(R, 4)`, where :math:`R` is the number of
        bounding boxes in the image. Each bounding box is organized
        by :math:`(y_{min}, x_{min}, y_{max}, x_{max})` in the second axis.

        :obj:`gt_label` and :obj:`pred_label` are intenger arrays
        of shape :math:`(R,)`. Each label indicates the class of
        the bounding box.

        :obj:`pred_score` is a float array of shape :math:`(R,)`.
        Each score indicates how confident the prediction is.

    Args:
        iterator: Iterator object that produces images and ground truth.
        target: Link object used for detection.
        label_names (iterable of strings): Name of labels ordered according
            to label ids. If this is :obj:`None`, labels will be skipped.
        filename (str): Basename for the saved image. It can contain two
            keywords, :obj:`'{iteration}'` and :obj:`'{index}'`. They are
            replaced with the iteration of the trainer and the index of
            the sample when this extension save an image. The default value is
            :obj:`'detection_iter={iteration}_idx={index}.jpg'`.

    """

    def __init__(
            self, iterator, target, label_names=None,
            filename='detection_iter={iteration}_idx={index}.jpg'):
        # Warn early if matplotlib is unavailable (plotting will be skipped).
        _check_available()

        self.iterator = iterator
        self.target = target
        self.label_names = label_names
        self.filename = filename

    @staticmethod
    def available():
        # Returns whether matplotlib could be imported (warns when it could not).
        _check_available()
        return _available

    def __call__(self, trainer):
        if _available:
            # Dynamically import pyplot so that the backend of matplotlib
            # can be configured after importing chainercv.
            import matplotlib.pyplot as plt
        else:
            # Without matplotlib there is nothing to visualize.
            return

        if hasattr(self.iterator, 'reset'):
            self.iterator.reset()
            it = self.iterator
        else:
            # Iterators without reset() are copied so the original position
            # is not consumed by this extension.
            it = copy.copy(self.iterator)

        idx = 0
        while True:
            try:
                batch = next(it)
            except StopIteration:
                break

            imgs = [img for img, _, _ in batch]
            pred_bboxes, pred_labels, pred_scores = self.target.predict(imgs)

            for (img, gt_bbox, gt_label), pred_bbox, pred_label, pred_score \
                    in zip(batch, pred_bboxes, pred_labels, pred_scores):
                # Move predictions to host memory before plotting.
                pred_bbox = chainer.backends.cuda.to_cpu(pred_bbox)
                pred_label = chainer.backends.cuda.to_cpu(pred_label)
                pred_score = chainer.backends.cuda.to_cpu(pred_score)

                out_file = self.filename.format(
                    index=idx, iteration=trainer.updater.iteration)
                out_file = os.path.join(trainer.out, out_file)

                # Ground truth on top, prediction below, in one figure.
                fig = plt.figure()

                ax_gt = fig.add_subplot(2, 1, 1)
                ax_gt.set_title('ground truth')
                vis_bbox(
                    img, gt_bbox, gt_label,
                    label_names=self.label_names, ax=ax_gt)

                ax_pred = fig.add_subplot(2, 1, 2)
                ax_pred.set_title('prediction')
                vis_bbox(
                    img, pred_bbox, pred_label, pred_score,
                    label_names=self.label_names, ax=ax_pred)

                plt.savefig(out_file, bbox_inches='tight')
                plt.close()

                idx += 1
| mit |
apache/airflow | tests/providers/vertica/hooks/test_vertica.py | 3 | 3800 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from unittest.mock import patch
from airflow.models import Connection
from airflow.providers.vertica.hooks.vertica import VerticaHook
class TestVerticaHookConn(unittest.TestCase):
    """Tests for VerticaHook connection creation (get_conn)."""

    def setUp(self):
        super().setUp()
        self.connection = Connection(
            login='login',
            password='password',
            host='host',
            schema='vertica',
        )

        # Subclass only to pin the connection-id attribute name used by the hook.
        class UnitTestVerticaHook(VerticaHook):
            conn_name_attr = 'vertica_conn_id'

        self.db_hook = UnitTestVerticaHook()
        # Bypass the metastore: return the in-memory Connection above.
        self.db_hook.get_connection = mock.Mock()
        self.db_hook.get_connection.return_value = self.connection

    @patch('airflow.providers.vertica.hooks.vertica.connect')
    def test_get_conn(self, mock_connect):
        # get_conn should pass the Connection fields (with the default
        # Vertica port 5433) straight through to vertica_python.connect.
        self.db_hook.get_conn()
        mock_connect.assert_called_once_with(
            host='host', port=5433, database='vertica', user='login', password="password"
        )
class TestVerticaHook(unittest.TestCase):
    """Tests for the DbApiHook behaviour exposed through VerticaHook,
    using a mocked connection/cursor pair instead of a real database."""

    def setUp(self):
        super().setUp()
        self.cur = mock.MagicMock(rowcount=0)
        self.conn = mock.MagicMock()
        self.conn.cursor.return_value = self.cur
        conn = self.conn

        # Hook whose get_conn returns the mocked connection above.
        class UnitTestVerticaHook(VerticaHook):
            conn_name_attr = 'test_conn_id'

            def get_conn(self):
                return conn

        self.db_hook = UnitTestVerticaHook()

    @patch('airflow.hooks.dbapi.DbApiHook.insert_rows')
    def test_insert_rows(self, mock_insert_rows):
        # insert_rows should delegate unchanged to the DbApiHook implementation.
        table = "table"
        rows = [("hello",), ("world",)]
        target_fields = None
        commit_every = 10
        self.db_hook.insert_rows(table, rows, target_fields, commit_every)
        mock_insert_rows.assert_called_once_with(table, rows, None, 10)

    def test_get_first_record(self):
        statement = 'SQL'
        result_sets = [('row1',), ('row2',)]
        self.cur.fetchone.return_value = result_sets[0]

        assert result_sets[0] == self.db_hook.get_first(statement)
        # Connection and cursor must be closed even on the happy path.
        self.conn.close.assert_called_once_with()
        self.cur.close.assert_called_once_with()
        self.cur.execute.assert_called_once_with(statement)

    def test_get_records(self):
        statement = 'SQL'
        result_sets = [('row1',), ('row2',)]
        self.cur.fetchall.return_value = result_sets

        assert result_sets == self.db_hook.get_records(statement)
        self.conn.close.assert_called_once_with()
        self.cur.close.assert_called_once_with()
        self.cur.execute.assert_called_once_with(statement)

    def test_get_pandas_df(self):
        statement = 'SQL'
        column = 'col'
        result_sets = [('row1',), ('row2',)]
        # Cursor description provides the DataFrame column names.
        self.cur.description = [(column,)]
        self.cur.fetchall.return_value = result_sets
        df = self.db_hook.get_pandas_df(statement)

        assert column == df.columns[0]
        assert result_sets[0][0] == df.values.tolist()[0][0]
        assert result_sets[1][0] == df.values.tolist()[1][0]
| apache-2.0 |
Huyuwei/tvm | tutorials/frontend/from_mxnet.py | 2 | 5372 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
.. _tutorial-from-mxnet:
Compile MXNet Models
====================
**Author**: `Joshua Z. Zhang <https://zhreshold.github.io/>`_, \
`Kazutaka Morita <https://github.com/kazum>`_
This article is an introductory tutorial to deploy mxnet models with Relay.
For us to begin with, mxnet module is required to be installed.
A quick solution is
.. code-block:: bash
pip install mxnet --user
or please refer to offical installation guide.
https://mxnet.incubator.apache.org/versions/master/install/index.html
"""
# some standard imports
import mxnet as mx
import tvm
import tvm.relay as relay
import numpy as np
######################################################################
# Download Resnet18 model from Gluon Model Zoo
# ---------------------------------------------
# In this section, we download a pretrained imagenet model and classify an image.
from tvm.contrib.download import download_testdata
from mxnet.gluon.model_zoo.vision import get_model
from PIL import Image
from matplotlib import pyplot as plt
block = get_model('resnet18_v1', pretrained=True)
img_url = 'https://github.com/dmlc/mxnet.js/blob/master/data/cat.png?raw=true'
img_name = 'cat.png'
synset_url = ''.join(['https://gist.githubusercontent.com/zhreshold/',
'4d0b62f3d01426887599d4f7ede23ee5/raw/',
'596b27d23537e5a1b5751d2b0481ef172f58b539/',
'imagenet1000_clsid_to_human.txt'])
synset_name = 'imagenet1000_clsid_to_human.txt'
img_path = download_testdata(img_url, 'cat.png', module='data')
synset_path = download_testdata(synset_url, synset_name, module='data')
with open(synset_path) as f:
synset = eval(f.read())
image = Image.open(img_path).resize((224, 224))
plt.imshow(image)
plt.show()
def transform_image(image):
    """Normalize an HWC RGB image with the per-channel mean/std used here
    and return it as a (1, C, H, W) float array."""
    mean = np.array([123., 117., 104.])
    std = np.array([58.395, 57.12, 57.375])
    normalized = (np.array(image) - mean) / std
    # HWC -> CHW, then prepend the batch axis.
    chw = normalized.transpose((2, 0, 1))
    return chw[np.newaxis, :]
x = transform_image(image)
print('x', x.shape)
######################################################################
# Compile the Graph
# -----------------
# Now we would like to port the Gluon model to a portable computational graph.
# It's as easy as several lines.
# We support MXNet static graph(symbol) and HybridBlock in mxnet.gluon
shape_dict = {'data': x.shape}
mod, params = relay.frontend.from_mxnet(block, shape_dict)
## we want a probability so add a softmax operator
func = mod["main"]
func = relay.Function(func.params, relay.nn.softmax(func.body), None, func.type_params, func.attrs)
######################################################################
# now compile the graph
target = 'cuda'
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(func, target, params=params)
######################################################################
# Execute the portable graph on TVM
# ---------------------------------
# Now, we would like to reproduce the same forward computation using TVM.
from tvm.contrib import graph_runtime
ctx = tvm.gpu(0)
dtype = 'float32'
m = graph_runtime.create(graph, lib, ctx)
# set inputs
m.set_input('data', tvm.nd.array(x.astype(dtype)))
m.set_input(**params)
# execute
m.run()
# get outputs
tvm_output = m.get_output(0)
top1 = np.argmax(tvm_output.asnumpy()[0])
print('TVM prediction top-1:', top1, synset[top1])
######################################################################
# Use MXNet symbol with pretrained weights
# ----------------------------------------
# MXNet often use `arg_params` and `aux_params` to store network parameters
# separately, here we show how to use these weights with existing API
def block2symbol(block):
    """Convert a Gluon block into an MXNet symbol plus parameter dicts.

    Returns (sym, args, auxs). NOTE(review): auxs is returned empty -- every
    collected parameter is copied into args; confirm whether auxiliary states
    (e.g. BatchNorm running statistics) should be split out into auxs.
    """
    data = mx.sym.Variable('data')
    sym = block(data)
    args = {}
    auxs = {}
    # Snapshot each parameter's current values as a fresh NDArray.
    for k, v in block.collect_params().items():
        args[k] = mx.nd.array(v.data().asnumpy())
    return sym, args, auxs
mx_sym, args, auxs = block2symbol(block)
# usually we would save/load it as checkpoint
mx.model.save_checkpoint('resnet18_v1', 0, mx_sym, args, auxs)
# there are 'resnet18_v1-0000.params' and 'resnet18_v1-symbol.json' on disk
######################################################################
# for a normal mxnet model, we start from here
mx_sym, args, auxs = mx.model.load_checkpoint('resnet18_v1', 0)
# now we use the same API to get Relay computation graph
mod, relay_params = relay.frontend.from_mxnet(mx_sym, shape_dict,
arg_params=args, aux_params=auxs)
# repeat the same steps to run this model using TVM
| apache-2.0 |
markfuge/creativitymetrics | paper_experiments.py | 1 | 9107 | """
paper_experiments.py
Uses other modules to re-produce the results and paper graphs contained
in the paper. Authors wanting to reproduce or compare to our algorithm can
run the experiments by executing:
python paper_experiments.py
from the command line
Paper:
Mark Fuge, Josh Stroud, Alice Agogino. "Automatically Inferring Metrics for Design Creativity," in Proceedings of ASME 2013 International Design Engineering Technical Conferences & Computers and Information in Engineering Conference, August 4-2, 2013, Portland, USA
http://www.markfuge.com/papers/Fuge_DETC2013-12620.pdf
Authors: Josh Stroud and Mark Fuge
"""
import os
import numpy as np
import pylab as pl
from variety_model import *
# Where to save plots
plot_path = "plots/"
# For output plots
def setfont():
    """Configure matplotlib to render all plot text in a 20pt serif font."""
    pl.matplotlib.rc('font', family='serif', weight='normal', size=20)
def genConvergencePlots(metric="shah", numLevels = 4, cover_type = None):
    ''' Generates the sensitivity/convergence plots presented in the paper.

    Parameters:
        metric     -- variety metric to simulate: "shah" or "verhaegen"
        numLevels  -- number of levels in the simulated genealogy trees
        cover_type -- "set" or "prob"; when None, a metric-specific default
                      is chosen below ("set" for shah, "prob" for verhaegen)

    Returns (xmat, ymat): the training-set sizes and the corresponding
    noisy-label prediction accuracies (one column per noise coefficient).
    Side effect: saves training- and ground-truth-convergence PDFs.
    '''
    x = []
    y = []
    xplots = []
    yplots = []
    ### SCRIPT PARAMETERS
    errorCoeffs = [0, 1, 2, 3, 5]
    plotFlag = 1
    numRepeat = 3   # Amount of resampling - increasing this will improve the
                    # statistical reliability of the resulting accuracy estimates,
                    # at the cost of additional computation time.
    numSamples = 50 # Sets the x-scale fidelity of the convergence plots (Figs. 3-5)
                    # Increasing this will increase the number of experiments conducted
                    # thus increasing run time
    # Init some array storage
    xmat = np.zeros([numSamples])
    ymat = np.zeros([numSamples,len(errorCoeffs)])
    yerr = np.zeros([numSamples,len(errorCoeffs)])
    ytmat = np.zeros([numSamples,len(errorCoeffs)])
    yterr = np.zeros([numSamples,len(errorCoeffs)])
    # close any open plot windows
    pl.close('all')
    # Metric-specific default cover type when none was requested.
    if not cover_type:
        if metric == 'shah':
            cover_type = 'set'
        elif metric == 'verhaegen':
            cover_type = 'prob'
    print "generating convergence plots"
    print "using",metric,"variety metric, numLevels =",numLevels
    if(cover_type):
        print "Cover Type:",cover_type
    else:
        print "Cover Type: Default"
    # One-time generation of all the random tree samples.
    # We'll then partion up the dataset such that we use only the required
    # fraction of training samples for the model.
    max_comparisons = 10000
    numConceptsPerTree = 10
    # Generates the data
    print "Generating Data Samples..."
    X,Y,Ytrue = generate_comparison_data(numConceptsPerTree = numConceptsPerTree,
                                         numComparisons = max_comparisons,
                                         metric = metric,
                                         cover_type = cover_type,
                                         E = errorCoeffs,
                                         numLevels = numLevels)
    # Now we have generated all of the simulated concept sets, as well as
    # All of the noisy ratings and true ratings. We can now run the experiments
    print "Running Experiments..."
    # This will determine the range of comparisons we will test over.
    xmat = np.round(np.linspace(0 , 1500, numSamples+1))
    xmat = xmat[1:]  # drop the zero-training-sample point
    # Runs the model Training and Evaluation
    for j, numTraining in enumerate(xmat):
        numTraining = int(numTraining)
        if(j % 10 == 0):
            print "Processing sample",j,"/",numSamples
        # Run the model
        errScores,gterrScores = runSklearn(X,Y,Ytrue, numTraining,numRetest=numRepeat)
        # errScores now contains an array of tuples (mean, std) of the scores across
        # numRetest runs of the data
        if(plotFlag == 1):
            for i,e in enumerate(errorCoeffs):
                ymat[j,i] = errScores[i][0]
                yerr[j,i] = errScores[i][1]
                ytmat[j,i] = gterrScores[i][0]
                yterr[j,i] = gterrScores[i][1]
    # Print out a sample of accuracy point estimates
    print "Final accuracy for metric: "+metric
    for i in range(0,numSamples,numSamples/10)[1:]:
        print "n: %d\tacc: %.1f"%(xmat[i],100*ymat[i,0])
    # Now do the plotting
    if(plotFlag == 1):
        method_fig = pl.figure(metric+' Training Convergence')
        pl.hold(True)
        x = xmat
        for i,e in enumerate(errorCoeffs):
            #pl.plot(x,ymat[:,i],'-',label='E = ' + str(e))
            # uncomment for 95% confidence interval
            pl.errorbar(x,ymat[:,i],yerr[:,i]*1.96,label=r'$\sigma$'+': ' + str(e))
        pl.hold(False)
        pl.xlabel("Number of A/B Comparisons used in training")
        pl.ylabel("Noisy Label Prediction accuracy")
        pl.title(metric+" Training, levels:"+ str(numLevels)+", cover:"+cover_type)
        pl.ylim((.5,1.0))
        pl.xlim((0,x[-1]))
        pl.legend(loc=4,prop={'size':14})
        # uncomment below if you want interactive plotting
        #pl.show()
        pl.savefig(plot_path +
                   "metric=" + metric +
                   "_numLevels=" + str(numLevels) +
                   "_cover=" + cover_type +
                   "_training_convergence.pdf")
        method_fig = pl.figure(metric+' Ground Truth Convergence')
        pl.hold(True)
        for i,e in enumerate(errorCoeffs):
            pl.plot(x,ytmat[:,i],label=r'$\sigma$'+': ' + str(e))
            # uncomment for 95% confidence interval
            #pl.errorbar(x,ytmat[:,i],yterr[:,i]*1.96,label=r'$\sigma$'+': ' + str(e))
        pl.hold(False)
        pl.xlabel("Number of A/B Comparisons used in training")
        pl.ylabel("Ground Truth Prediction accuracy")
        pl.title(metric+" Truth, levels:"+ str(numLevels)+", cover:"+cover_type)
        pl.ylim((.5,1.0))
        pl.xlim((0,x[-1]))
        pl.legend(loc=4,prop={'size':14})
        # uncomment below if you want interactive plotting
        #pl.show()
        pl.savefig(plot_path +
                   "metric=" + metric +
                   "_numLevels=" + str(numLevels) +
                   "_cover=" + cover_type +
                   "_groundtruth_convergence.pdf")
    print "Completed Convergence Experiment!\n"
    return xmat,ymat
def genExperimentalResults():
    ''' Generates the main experimental results and figures used in the paper:
        runs both metrics under both cover types and overlays their
        convergence curves in a single comparison figure (saved as PDF).
    '''
    shahx,shahy = genConvergencePlots("shah",cover_type="set")
    shah_prob_x,shah_prob_y = genConvergencePlots("shah",cover_type="prob")
    verhx,verhy = genConvergencePlots("verhaegen",cover_type="prob")
    verh_set_x,verh_set_y = genConvergencePlots("verhaegen",cover_type="set")
    x = shahx
    compare_fig = pl.figure('Convergence of different metrics')
    pl.hold(True)
    # Column 0 corresponds to the zero-noise coefficient.
    pshah = pl.plot(shahx,shahy[:,0],'k-',label="Shah (Set)",linewidth=3)
    pshah_prob = pl.plot(shah_prob_x,shah_prob_y[:,0],'k-',label="Shah (Prob)",linewidth=1)
    pverh_set = pl.plot(verh_set_x,verh_set_y[:,0],'b--',label="Verhaegen (Set)",linewidth=3)
    pverh = pl.plot(verhx,verhy[:,0],'b--',label="Verhaegen (Prob)",linewidth=1)
    pl.hold(False)
    pl.xlabel("Number of A/B Comparisons used in training")
    pl.ylabel("Prediction accuracy")
    pl.title("Comparison of various metrics")
    pl.ylim((.5,1.0))
    pl.xlim((0,shahx[-1]))
    pl.legend(loc=4)
    # Uncomment if you want interactive plotting
    #pl.show()
    pl.savefig(plot_path+"metric_convergence_comparison.pdf")
def genSensitivityResults():
    ''' Generates sensitivity results regarding number of tree levels and how
        increasing the number of estimation parameters affects convergence.
        Didn't have space to include these figures in the conference paper.
    '''
    shahx_a,shahy_a = genConvergencePlots("shah", numLevels=4)
    shahx_b,shahy_b = genConvergencePlots("shah", numLevels=10)
    shahx_c,shahy_c = genConvergencePlots("shah", numLevels=25)
    shahx_d,shahy_d = genConvergencePlots("shah", numLevels=50)
    compare_fig = pl.figure('Convergence of different metrics')
    pl.hold(True)
    # Legend labels fixed to match the numLevels actually plotted above
    # (4/10/25/50); they previously read 2/4/10/50.
    shaha = pl.plot(shahx_a,shahy_a[:,0],'k-',label="# Shah Levels = 4",linewidth=3)
    shahb = pl.plot(shahx_b,shahy_b[:,0],'k-',label="# Shah Levels = 10",linewidth=1)
    shahc = pl.plot(shahx_c,shahy_c[:,0],'b--',label="# Shah Levels = 25",linewidth=3)
    shahd = pl.plot(shahx_d,shahy_d[:,0],'b--',label="# Shah Levels = 50",linewidth=1)
    pl.hold(False)
    pl.xlabel("Number of A/B Comparisons used in training")
    pl.ylabel("Prediction accuracy")
    pl.title("Comparison of various metrics")
    pl.ylim((.5,1.0))
    pl.xlim((0,shahx_a[-1]))
    pl.legend(loc=4)
    # Uncomment if you want interactive plotting
    #pl.show()
    pl.savefig(plot_path+"sensitivity_convergence_comparison.pdf")
# script below to generate plots
if __name__ == "__main__":
    # Configure matplotlib fonts before any figures are created.
    setfont()
    # Make sure the output directory for the saved PDF figures exists.
    if not os.path.exists(plot_path):
        os.makedirs(plot_path)
    genExperimentalResults()
    genSensitivityResults()
| apache-2.0 |
mhogg/scipy | scipy/signal/ltisys.py | 38 | 76123 | """
ltisys -- a collection of classes and functions for modeling linear
time invariant systems.
"""
from __future__ import division, print_function, absolute_import
#
# Author: Travis Oliphant 2001
#
# Feb 2010: Warren Weckesser
# Rewrote lsim2 and added impulse2.
# Aug 2013: Juan Luis Cano
# Rewrote abcd_normalize.
# Jan 2015: Irvin Probst irvin DOT probst AT ensta-bretagne DOT fr
# Added pole placement
# Mar 2015: Clancy Rowley
# Rewrote lsim
# May 2015: Felix Berkenkamp
# Split lti class into subclasses
#
import warnings
import numpy as np
#np.linalg.qr fails on some tests with LinAlgError: zgeqrf returns -7
#use scipy's qr until this is solved
from scipy.linalg import qr as s_qr
import numpy
from numpy import (r_, eye, real, atleast_1d, atleast_2d, poly,
squeeze, asarray, product, zeros, array,
dot, transpose, ones, zeros_like, linspace, nan_to_num)
import copy
from scipy import integrate, interpolate, linalg
from scipy._lib.six import xrange
from .filter_design import tf2zpk, zpk2tf, normalize, freqs
__all__ = ['tf2ss', 'ss2tf', 'abcd_normalize', 'zpk2ss', 'ss2zpk', 'lti',
'TransferFunction', 'ZerosPolesGain', 'StateSpace', 'lsim',
'lsim2', 'impulse', 'impulse2', 'step', 'step2', 'bode',
'freqresp', 'place_poles']
def tf2ss(num, den):
    """Transfer function to state-space representation.

    Parameters
    ----------
    num, den : array_like
        Sequences representing the numerator and denominator polynomials.
        The denominator needs to be at least as long as the numerator.

    Returns
    -------
    A, B, C, D : ndarray
        State space representation of the system, in controller canonical
        form.
    """
    # Controller canonical state-space representation.
    # if M+1 = len(num) and K+1 = len(den) then we must have M <= K
    # states are found by asserting that X(s) = U(s) / D(s)
    # then Y(s) = N(s) * X(s)
    #
    # A, B, C, and D follow quite naturally.
    #
    num, den = normalize(num, den)   # Strips zeros, checks arrays
    nn = len(num.shape)
    if nn == 1:
        # Promote a SISO numerator to a single-row 2-D array.
        num = asarray([num], num.dtype)
    M = num.shape[1]
    K = len(den)
    if M > K:
        msg = "Improper transfer function. `num` is longer than `den`."
        raise ValueError(msg)
    if M == 0 or K == 0:  # Null system
        return (array([], float), array([], float), array([], float),
                array([], float))

    # pad numerator to have same number of columns has denominator
    num = r_['-1', zeros((num.shape[0], K - M), num.dtype), num]

    if num.shape[-1] > 0:
        D = num[:, 0]
    else:
        D = array([], float)

    if K == 1:
        # Static (zeroth-order) system: only a feedthrough term.
        return array([], float), array([], float), array([], float), D

    frow = -array([den[1:]])
    A = r_[frow, eye(K - 2, K - 1)]
    B = eye(K - 1, 1)
    # BUG FIX: use an outer product so numerators with more than one row
    # (multi-output systems) are handled correctly.  The previous
    # elementwise form ``num[:, 0] * den[1:]`` only broadcast correctly
    # for single-output systems.
    C = num[:, 1:] - np.outer(num[:, 0], den[1:])
    return A, B, C, D
def _none_to_empty_2d(arg):
if arg is None:
return zeros((0, 0))
else:
return arg
def _atleast_2d_or_none(arg):
if arg is not None:
return atleast_2d(arg)
def _shape_or_none(M):
if M is not None:
return M.shape
else:
return (None,) * 2
def _choice_not_none(*args):
for arg in args:
if arg is not None:
return arg
def _restore(M, shape):
if M.shape == (0, 0):
return zeros(shape)
else:
if M.shape != shape:
raise ValueError("The input arrays have incompatible shapes.")
return M
def abcd_normalize(A=None, B=None, C=None, D=None):
    """Check state-space matrices and ensure they are two-dimensional.

    If enough information on the system is provided, that is, enough
    properly-shaped arrays are passed to the function, the missing ones
    are built from this information, ensuring the correct number of
    rows and columns. Otherwise a ValueError is raised.

    Parameters
    ----------
    A, B, C, D : array_like, optional
        State-space matrices. All of them are None (missing) by default.

    Returns
    -------
    A, B, C, D : array
        Properly shaped state-space matrices.

    Raises
    ------
    ValueError
        If not enough information on the system was provided.
    """
    A, B, C, D = (_atleast_2d_or_none(M) for M in (A, B, C, D))

    MA, NA = _shape_or_none(A)
    MB, NB = _shape_or_none(B)
    MC, NC = _shape_or_none(C)
    MD, ND = _shape_or_none(D)

    # Infer the number of states (p), inputs (q) and outputs (r) from
    # whichever matrices were actually supplied.
    p = _choice_not_none(MA, MB, NC)
    q = _choice_not_none(NB, ND)
    r = _choice_not_none(MC, MD)
    if p is None or q is None or r is None:
        raise ValueError("Not enough information on the system.")

    A, B, C, D = (_none_to_empty_2d(M) for M in (A, B, C, D))
    A = _restore(A, (p, p))
    B = _restore(B, (p, q))
    C = _restore(C, (r, p))
    D = _restore(D, (r, q))

    return A, B, C, D
def ss2tf(A, B, C, D, input=0):
    """State-space to transfer function.

    Parameters
    ----------
    A, B, C, D : ndarray
        State-space representation of linear system.
    input : int, optional
        For multiple-input systems, the input to use.

    Returns
    -------
    num : 2-D ndarray
        Numerator(s) of the resulting transfer function(s). `num` has one row
        for each of the system's outputs. Each row is a sequence representation
        of the numerator polynomial.
    den : 1-D ndarray
        Denominator of the resulting transfer function(s). `den` is a sequence
        representation of the denominator polynomial.
    """
    # The transfer function is C (sI - A)**(-1) B + D.
    A, B, C, D = map(asarray, (A, B, C, D))
    # Check consistency and make them all rank-2 arrays.
    A, B, C, D = abcd_normalize(A, B, C, D)

    nout, nin = D.shape
    if input >= nin:
        raise ValueError("System does not have the input specified.")

    # Reduce a possibly multi-input system to the single selected input.
    B = B[:, input:input + 1]
    D = D[:, input:input + 1]

    try:
        den = poly(A)
    except ValueError:
        den = 1

    if B.size == 0 and C.size == 0:
        # Purely static (or empty) system.
        num = numpy.ravel(D)
        if D.size == 0 and A.size == 0:
            den = []
        return num, den

    num_states = A.shape[0]
    # Combine one element of each matrix to determine the result dtype.
    type_test = A[:, 0] + B[:, 0] + C[0, :] + D
    num = numpy.zeros((nout, num_states + 1), type_test.dtype)
    for k in range(nout):
        Ck = atleast_2d(C[k, :])
        num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den

    return num, den
def zpk2ss(z, p, k):
    """Zero-pole-gain representation to state-space representation

    Parameters
    ----------
    z, p : sequence
        Zeros and poles.
    k : float
        System gain.

    Returns
    -------
    A, B, C, D : ndarray
        State space representation of the system, in controller canonical
        form.
    """
    # Go through the polynomial (transfer function) form.
    num, den = zpk2tf(z, p, k)
    return tf2ss(num, den)
def ss2zpk(A, B, C, D, input=0):
    """State-space representation to zero-pole-gain representation.

    Parameters
    ----------
    A, B, C, D : ndarray
        State-space representation of linear system.
    input : int, optional
        For multiple-input systems, the input to use.

    Returns
    -------
    z, p : sequence
        Zeros and poles.
    k : float
        System gain.
    """
    # Convert to a transfer function first, then factor it.
    num, den = ss2tf(A, B, C, D, input=input)
    return tf2zpk(num, den)
class lti(object):
    """
    Linear Time Invariant system base class.

    Parameters
    ----------
    *system : arguments
        The `lti` class can be instantiated with either 2, 3 or 4 arguments.
        The following gives the number of arguments and the corresponding
        subclass that is created:

            * 2: `TransferFunction`: (numerator, denominator)
            * 3: `ZerosPolesGain`: (zeros, poles, gain)
            * 4: `StateSpace`: (A, B, C, D)

        Each argument can be an array or a sequence.

    Notes
    -----
    `lti` instances do not exist directly. Instead, `lti` creates an instance
    of one of its subclasses: `StateSpace`, `TransferFunction` or
    `ZerosPolesGain`.

    Changing the value of properties that are not directly part of the current
    system representation (such as the `zeros` of a `StateSpace` system) is
    very inefficient and may lead to numerical inaccuracies.
    """

    def __new__(cls, *system):
        """Create an instance of the appropriate subclass."""
        if cls is lti:
            # Dispatch on the number of arguments:
            # 2 -> (num, den), 3 -> (z, p, k), 4 -> (A, B, C, D).
            N = len(system)
            if N == 2:
                return super(lti, cls).__new__(TransferFunction)
            elif N == 3:
                return super(lti, cls).__new__(ZerosPolesGain)
            elif N == 4:
                return super(lti, cls).__new__(StateSpace)
            else:
                raise ValueError('Needs 2, 3 or 4 arguments.')
        # __new__ was called from a subclass, let it call its own functions
        return super(lti, cls).__new__(cls)

    def __init__(self, *system):
        """
        Initialize the `lti` baseclass.

        The heavy lifting is done by the subclasses.
        """
        # Number of system inputs/outputs; filled in by the subclasses'
        # property setters.
        self.inputs = None
        self.outputs = None

    # The properties below convert the system to the representation that
    # owns the attribute, read/write it there, and (for setters) convert
    # back to the current representation via ``_copy``.  This keeps every
    # attribute available on every subclass, at the cost of potentially
    # lossy round-trip conversions (see the class docstring).

    @property
    def num(self):
        """Numerator of the `TransferFunction` system."""
        return self.to_tf().num

    @num.setter
    def num(self, num):
        obj = self.to_tf()
        obj.num = num
        source_class = type(self)
        self._copy(source_class(obj))

    @property
    def den(self):
        """Denominator of the `TransferFunction` system."""
        return self.to_tf().den

    @den.setter
    def den(self, den):
        obj = self.to_tf()
        obj.den = den
        source_class = type(self)
        self._copy(source_class(obj))

    @property
    def zeros(self):
        """Zeros of the `ZerosPolesGain` system."""
        return self.to_zpk().zeros

    @zeros.setter
    def zeros(self, zeros):
        obj = self.to_zpk()
        obj.zeros = zeros
        source_class = type(self)
        self._copy(source_class(obj))

    @property
    def poles(self):
        """Poles of the `ZerosPolesGain` system."""
        return self.to_zpk().poles

    @poles.setter
    def poles(self, poles):
        obj = self.to_zpk()
        obj.poles = poles
        source_class = type(self)
        self._copy(source_class(obj))

    @property
    def gain(self):
        """Gain of the `ZerosPolesGain` system."""
        return self.to_zpk().gain

    @gain.setter
    def gain(self, gain):
        obj = self.to_zpk()
        obj.gain = gain
        source_class = type(self)
        self._copy(source_class(obj))

    @property
    def A(self):
        """A matrix of the `StateSpace` system."""
        return self.to_ss().A

    @A.setter
    def A(self, A):
        obj = self.to_ss()
        obj.A = A
        source_class = type(self)
        self._copy(source_class(obj))

    @property
    def B(self):
        """B matrix of the `StateSpace` system."""
        return self.to_ss().B

    @B.setter
    def B(self, B):
        obj = self.to_ss()
        obj.B = B
        source_class = type(self)
        self._copy(source_class(obj))

    @property
    def C(self):
        """C matrix of the `StateSpace` system."""
        return self.to_ss().C

    @C.setter
    def C(self, C):
        obj = self.to_ss()
        obj.C = C
        source_class = type(self)
        self._copy(source_class(obj))

    @property
    def D(self):
        """D matrix of the `StateSpace` system."""
        return self.to_ss().D

    @D.setter
    def D(self, D):
        obj = self.to_ss()
        obj.D = D
        source_class = type(self)
        self._copy(source_class(obj))

    # Convenience wrappers around the module-level analysis functions.

    def impulse(self, X0=None, T=None, N=None):
        """
        Return the impulse response of a continuous-time system.
        See `scipy.signal.impulse` for details.
        """
        return impulse(self, X0=X0, T=T, N=N)

    def step(self, X0=None, T=None, N=None):
        """
        Return the step response of a continuous-time system.
        See `scipy.signal.step` for details.
        """
        return step(self, X0=X0, T=T, N=N)

    def output(self, U, T, X0=None):
        """
        Return the response of a continuous-time system to input `U`.
        See `scipy.signal.lsim` for details.
        """
        return lsim(self, U, T, X0=X0)

    def bode(self, w=None, n=100):
        """
        Calculate Bode magnitude and phase data of a continuous-time system.

        Returns a 3-tuple containing arrays of frequencies [rad/s], magnitude
        [dB] and phase [deg]. See `scipy.signal.bode` for details.

        Notes
        -----
        .. versionadded:: 0.11.0

        Examples
        --------
        >>> from scipy import signal
        >>> import matplotlib.pyplot as plt

        >>> s1 = signal.lti([1], [1, 1])
        >>> w, mag, phase = s1.bode()

        >>> plt.figure()
        >>> plt.semilogx(w, mag)    # Bode magnitude plot
        >>> plt.figure()
        >>> plt.semilogx(w, phase)  # Bode phase plot
        >>> plt.show()
        """
        return bode(self, w=w, n=n)

    def freqresp(self, w=None, n=10000):
        """
        Calculate the frequency response of a continuous-time system.

        Returns a 2-tuple containing arrays of frequencies [rad/s] and
        complex magnitude.
        See `scipy.signal.freqresp` for details.
        """
        return freqresp(self, w=w, n=n)
class TransferFunction(lti):
    r"""Linear Time Invariant system class in transfer function form.

    Represents the system as the transfer function
    :math:`H(s)=\sum_i b[i] s^i / \sum_j a[j] s^j`, where :math:`b` are
    elements of the numerator `num` and :math:`a` are the elements of the
    denominator `den`.

    Parameters
    ----------
    *system : arguments
        The `TransferFunction` class can be instantiated with 1 or 2 arguments.
        The following gives the number of input arguments and their
        interpretation:

            * 1: `lti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 2: array_like: (numerator, denominator)

    Notes
    -----
    Changing the value of properties that are not part of the
    `TransferFunction` system representation (such as the `A`, `B`, `C`, `D`
    state-space matrices) is very inefficient and may lead to numerical
    inaccuracies.
    """
    # NOTE: the docstring is now a raw string (``\sum`` is not a valid
    # escape sequence) and correctly attributes `b` to the numerator and
    # `a` to the denominator; the previous text had them swapped.

    def __new__(cls, *system):
        """Handle object conversion if input is an instance of lti."""
        if len(system) == 1 and isinstance(system[0], lti):
            return system[0].to_tf()

        # No special conversion needed
        return super(TransferFunction, cls).__new__(cls)

    def __init__(self, *system):
        """Initialize the transfer function LTI system."""
        # Conversion of lti instances is handled in __new__
        if isinstance(system[0], lti):
            return

        # BUG FIX: was ``__init__(self, *system)``, passing `self` to the
        # base initializer a second time; `lti.__init__` silently ignored
        # the extra argument, so behavior is unchanged.
        super(TransferFunction, self).__init__(*system)

        self._num = None
        self._den = None

        self.num, self.den = normalize(*system)

    def __repr__(self):
        """Return representation of the system's transfer function"""
        return '{0}(\n{1},\n{2}\n)'.format(
            self.__class__.__name__,
            repr(self.num),
            repr(self.den),
            )

    @property
    def num(self):
        """Numerator polynomial coefficients, highest order first."""
        return self._num

    @num.setter
    def num(self, num):
        self._num = atleast_1d(num)

        # Update dimensions: a 2-D numerator has one row per output.
        if len(self.num.shape) > 1:
            self.outputs, self.inputs = self.num.shape
        else:
            self.outputs = 1
            self.inputs = 1

    @property
    def den(self):
        """Denominator polynomial coefficients, highest order first."""
        return self._den

    @den.setter
    def den(self, den):
        self._den = atleast_1d(den)

    def _copy(self, system):
        """
        Copy the parameters of another `TransferFunction` object

        Parameters
        ----------
        system : `TransferFunction`
            The `TransferFunction` system that is to be copied
        """
        self.num = system.num
        self.den = system.den

    def to_tf(self):
        """
        Return a copy of the current `TransferFunction` system.

        Returns
        -------
        sys : instance of `TransferFunction`
            The current system (copy)
        """
        return copy.deepcopy(self)

    def to_zpk(self):
        """
        Convert system representation to `ZerosPolesGain`.

        Returns
        -------
        sys : instance of `ZerosPolesGain`
            Zeros, poles, gain representation of the current system
        """
        return ZerosPolesGain(*tf2zpk(self.num, self.den))

    def to_ss(self):
        """
        Convert system representation to `StateSpace`.

        Returns
        -------
        sys : instance of `StateSpace`
            State space model of the current system
        """
        return StateSpace(*tf2ss(self.num, self.den))
class ZerosPolesGain(lti):
    r"""
    Linear Time Invariant system class in zeros, poles, gain form.

    Represents the system as the transfer function
    :math:`H(s)=k \prod_i (s - z[i]) / \prod_j (s - p[j])`, where :math:`k` is
    the `gain`, :math:`z` are the `zeros` and :math:`p` are the `poles`.

    Parameters
    ----------
    *system : arguments
        The `ZerosPolesGain` class can be instantiated with 1 or 3 arguments.
        The following gives the number of input arguments and their
        interpretation:

            * 1: `lti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 3: array_like: (zeros, poles, gain)

    Notes
    -----
    Changing the value of properties that are not part of the
    `ZerosPolesGain` system representation (such as the `A`, `B`, `C`, `D`
    state-space matrices) is very inefficient and may lead to numerical
    inaccuracies.
    """
    # NOTE: the docstring is now a raw string -- ``\prod`` is not a valid
    # string escape sequence.

    def __new__(cls, *system):
        """Handle object conversion if input is an instance of `lti`"""
        if len(system) == 1 and isinstance(system[0], lti):
            return system[0].to_zpk()

        # No special conversion needed
        return super(ZerosPolesGain, cls).__new__(cls)

    def __init__(self, *system):
        """Initialize the zeros, poles, gain LTI system."""
        # Conversion of lti instances is handled in __new__
        if isinstance(system[0], lti):
            return

        # BUG FIX: was ``__init__(self, *system)``, passing `self` to the
        # base initializer a second time; `lti.__init__` silently ignored
        # the extra argument, so behavior is unchanged.
        super(ZerosPolesGain, self).__init__(*system)

        self._zeros = None
        self._poles = None
        self._gain = None

        self.zeros, self.poles, self.gain = system

    def __repr__(self):
        """Return representation of the `ZerosPolesGain` system"""
        return '{0}(\n{1},\n{2},\n{3}\n)'.format(
            self.__class__.__name__,
            repr(self.zeros),
            repr(self.poles),
            repr(self.gain),
            )

    @property
    def zeros(self):
        """Zeros of the system."""
        return self._zeros

    @zeros.setter
    def zeros(self, zeros):
        self._zeros = atleast_1d(zeros)

        # Update dimensions: a 2-D zeros array has one row per output.
        if len(self.zeros.shape) > 1:
            self.outputs, self.inputs = self.zeros.shape
        else:
            self.outputs = 1
            self.inputs = 1

    @property
    def poles(self):
        """Poles of the system."""
        return self._poles

    @poles.setter
    def poles(self, poles):
        self._poles = atleast_1d(poles)

    @property
    def gain(self):
        """Gain of the system."""
        return self._gain

    @gain.setter
    def gain(self, gain):
        self._gain = gain

    def _copy(self, system):
        """
        Copy the parameters of another `ZerosPolesGain` system.

        Parameters
        ----------
        system : instance of `ZerosPolesGain`
            The zeros, poles gain system that is to be copied
        """
        self.poles = system.poles
        self.zeros = system.zeros
        self.gain = system.gain

    def to_tf(self):
        """
        Convert system representation to `TransferFunction`.

        Returns
        -------
        sys : instance of `TransferFunction`
            Transfer function of the current system
        """
        return TransferFunction(*zpk2tf(self.zeros, self.poles, self.gain))

    def to_zpk(self):
        """
        Return a copy of the current 'ZerosPolesGain' system.

        Returns
        -------
        sys : instance of `ZerosPolesGain`
            The current system (copy)
        """
        return copy.deepcopy(self)

    def to_ss(self):
        """
        Convert system representation to `StateSpace`.

        Returns
        -------
        sys : instance of `StateSpace`
            State space model of the current system
        """
        return StateSpace(*zpk2ss(self.zeros, self.poles, self.gain))
class StateSpace(lti):
    r"""
    Linear Time Invariant system class in state-space form.

    Represents the system as the first order differential equation
    :math:`\dot{x} = A x + B u`.

    Parameters
    ----------
    *system : arguments
        The `StateSpace` class can be instantiated with 1 or 4 arguments.
        The following gives the number of input arguments and their
        interpretation:

            * 1: `lti` system: (`StateSpace`, `TransferFunction` or
              `ZerosPolesGain`)
            * 4: array_like: (A, B, C, D)

    Notes
    -----
    Changing the value of properties that are not part of the
    `StateSpace` system representation (such as `zeros` or `poles`) is very
    inefficient and may lead to numerical inaccuracies.
    """
    # NOTE: the docstring is now a raw string -- ``\dot`` is not a valid
    # string escape sequence.

    def __new__(cls, *system):
        """Handle object conversion if input is an instance of `lti`"""
        if len(system) == 1 and isinstance(system[0], lti):
            return system[0].to_ss()

        # No special conversion needed
        return super(StateSpace, cls).__new__(cls)

    def __init__(self, *system):
        """Initialize the state space LTI system."""
        # Conversion of lti instances is handled in __new__
        if isinstance(system[0], lti):
            return

        # BUG FIX: was ``__init__(self, *system)``, passing `self` to the
        # base initializer a second time; `lti.__init__` silently ignored
        # the extra argument, so behavior is unchanged.
        super(StateSpace, self).__init__(*system)

        self._A = None
        self._B = None
        self._C = None
        self._D = None

        self.A, self.B, self.C, self.D = abcd_normalize(*system)

    def __repr__(self):
        """Return representation of the `StateSpace` system."""
        return '{0}(\n{1},\n{2},\n{3},\n{4}\n)'.format(
            self.__class__.__name__,
            repr(self.A),
            repr(self.B),
            repr(self.C),
            repr(self.D),
            )

    @property
    def A(self):
        """State matrix."""
        return self._A

    @A.setter
    def A(self, A):
        self._A = _atleast_2d_or_none(A)

    @property
    def B(self):
        """Input matrix."""
        return self._B

    @B.setter
    def B(self, B):
        self._B = _atleast_2d_or_none(B)
        # The number of columns of B is the number of system inputs.
        self.inputs = self.B.shape[-1]

    @property
    def C(self):
        """Output matrix."""
        return self._C

    @C.setter
    def C(self, C):
        self._C = _atleast_2d_or_none(C)
        # The number of rows of C is the number of system outputs.
        self.outputs = self.C.shape[0]

    @property
    def D(self):
        """Feedthrough matrix."""
        return self._D

    @D.setter
    def D(self, D):
        self._D = _atleast_2d_or_none(D)

    def _copy(self, system):
        """
        Copy the parameters of another `StateSpace` system.

        Parameters
        ----------
        system : instance of `StateSpace`
            The state-space system that is to be copied
        """
        self.A = system.A
        self.B = system.B
        self.C = system.C
        self.D = system.D

    def to_tf(self, **kwargs):
        """
        Convert system representation to `TransferFunction`.

        Parameters
        ----------
        kwargs : dict, optional
            Additional keywords passed to `ss2tf`

        Returns
        -------
        sys : instance of `TransferFunction`
            Transfer function of the current system
        """
        return TransferFunction(*ss2tf(self._A, self._B, self._C, self._D,
                                       **kwargs))

    def to_zpk(self, **kwargs):
        """
        Convert system representation to `ZerosPolesGain`.

        Parameters
        ----------
        kwargs : dict, optional
            Additional keywords passed to `ss2zpk`

        Returns
        -------
        sys : instance of `ZerosPolesGain`
            Zeros, poles, gain representation of the current system
        """
        return ZerosPolesGain(*ss2zpk(self._A, self._B, self._C, self._D,
                                      **kwargs))

    def to_ss(self):
        """
        Return a copy of the current `StateSpace` system.

        Returns
        -------
        sys : instance of `StateSpace`
            The current system (copy)
        """
        return copy.deepcopy(self)
def lsim2(system, U=None, T=None, X0=None, **kwargs):
    """
    Simulate output of a continuous-time linear system, by using
    the ODE solver `scipy.integrate.odeint`.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

        * 2: (num, den)
        * 3: (zeros, poles, gain)
        * 4: (A, B, C, D)

    U : array_like (1D or 2D), optional
        An input array describing the input at each time T.  Linear
        interpolation is used between given times.  If there are
        multiple inputs, then each column of the rank-2 array
        represents an input.  If U is not given, the input is assumed
        to be zero.
    T : array_like (1D or 2D), optional
        The time steps at which the input is defined and at which the
        output is desired.  The default is 101 evenly spaced points on
        the interval [0,10.0].
    X0 : array_like (1D), optional
        The initial condition of the state vector.  If `X0` is not
        given, the initial conditions are assumed to be 0.
    kwargs : dict
        Additional keyword arguments are passed on to the function
        `odeint`.  See the notes below for more details.

    Returns
    -------
    T : 1D ndarray
        The time values for the output.
    yout : ndarray
        The response of the system.
    xout : ndarray
        The time-evolution of the state-vector.

    Notes
    -----
    This function uses `scipy.integrate.odeint` to solve the
    system's differential equations.  Additional keyword arguments
    given to `lsim2` are passed on to `odeint`.  See the documentation
    for `scipy.integrate.odeint` for the full list of arguments.
    """
    if isinstance(system, lti):
        sys = system.to_ss()
    else:
        sys = lti(*system).to_ss()

    if X0 is None:
        X0 = zeros(sys.B.shape[0], sys.A.dtype)

    if T is None:
        # XXX T should really be a required argument, but U was
        # changed from a required positional argument to a keyword,
        # and T is after U in the argument list.  So we either: change
        # the API and move T in front of U; check here for T being
        # None and raise an exception; or assign a default value to T
        # here.  This code implements the latter.
        T = linspace(0, 10.0, 101)

    T = atleast_1d(T)
    if len(T.shape) != 1:
        raise ValueError("T must be a rank-1 array.")

    if U is None:
        # Autonomous system: xdot = A x, y = C x.
        def rhs(x, t, sys):
            """The vector field of the unforced linear system."""
            return dot(sys.A, x)

        xout = integrate.odeint(rhs, X0, T, args=(sys,), **kwargs)
        yout = dot(sys.C, transpose(xout))
    else:
        U = atleast_1d(U)
        if len(U.shape) == 1:
            U = U.reshape(-1, 1)
        sU = U.shape
        if sU[0] != len(T):
            raise ValueError("U must have the same number of rows "
                             "as elements in T.")
        if sU[1] != sys.inputs:
            raise ValueError("The number of inputs in U (%d) is not "
                             "compatible with the number of system "
                             "inputs (%d)" % (sU[1], sys.inputs))
        # Create a callable that uses linear interpolation to
        # calculate the input at any time.
        ufunc = interpolate.interp1d(T, U, kind='linear',
                                     axis=0, bounds_error=False)

        def rhs(x, t, sys, ufunc):
            """The vector field of the forced linear system."""
            return dot(sys.A, x) + squeeze(dot(sys.B, nan_to_num(ufunc([t]))))

        xout = integrate.odeint(rhs, X0, T, args=(sys, ufunc), **kwargs)
        yout = dot(sys.C, transpose(xout)) + dot(sys.D, transpose(U))

    return T, squeeze(transpose(yout)), xout
def _cast_to_array_dtype(in1, in2):
"""Cast array to dtype of other array, while avoiding ComplexWarning.
Those can be raised when casting complex to real.
"""
if numpy.issubdtype(in2.dtype, numpy.float):
# dtype to cast to is not complex, so use .real
in1 = in1.real.astype(in2.dtype)
else:
in1 = in1.astype(in2.dtype)
return in1
def lsim(system, U, T, X0=None, interp=True):
    """
    Simulate output of a continuous-time linear system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

        * 2: (num, den)
        * 3: (zeros, poles, gain)
        * 4: (A, B, C, D)

    U : array_like
        An input array describing the input at each time `T`
        (interpolation is assumed between given times).  If there are
        multiple inputs, then each column of the rank-2 array
        represents an input.  If U = 0 or None, a zero input is used.
    T : array_like
        The time steps at which the input is defined and at which the
        output is desired.  Must be nonnegative, increasing, and equally spaced.
    X0 : array_like, optional
        The initial conditions on the state vector (zero by default).
    interp : bool, optional
        Whether to use linear (True, the default) or zero-order-hold (False)
        interpolation for the input array.

    Returns
    -------
    T : 1D ndarray
        Time values for the output.
    yout : 1D ndarray
        System response.
    xout : ndarray
        Time evolution of the state vector.

    Examples
    --------
    Simulate a double integrator y'' = u, with a constant input u = 1

    >>> from scipy import signal
    >>> system = signal.lti([[0., 1.], [0., 0.]], [[0.], [1.]], [[1., 0.]], 0.)
    >>> t = np.linspace(0, 5)
    >>> u = np.ones_like(t)
    >>> tout, y, x = signal.lsim(system, u, t)
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(t, y)
    """
    # Work in the state-space representation regardless of the input form.
    if isinstance(system, lti):
        sys = system.to_ss()
    else:
        sys = lti(*system).to_ss()
    T = atleast_1d(T)
    if len(T.shape) != 1:
        raise ValueError("T must be a rank-1 array.")

    A, B, C, D = map(np.asarray, (sys.A, sys.B, sys.C, sys.D))
    n_states = A.shape[0]
    n_inputs = B.shape[1]

    n_steps = T.size
    if X0 is None:
        X0 = zeros(n_states, sys.A.dtype)
    # The state is stored as a row vector per time step.
    xout = zeros((n_steps, n_states), sys.A.dtype)

    if T[0] == 0:
        xout[0] = X0
    elif T[0] > 0:
        # step forward to initial time, with zero input
        xout[0] = dot(X0, linalg.expm(transpose(A) * T[0]))
    else:
        raise ValueError("Initial time must be nonnegative")

    # Treat U == 0 (scalar), None, or an all-zero array as "no input",
    # which allows a much cheaper pure matrix-exponential propagation.
    no_input = (U is None
                or (isinstance(U, (int, float)) and U == 0.)
                or not np.any(U))

    if n_steps == 1:
        # Single time point: no propagation needed, just compute the output.
        yout = squeeze(dot(xout, transpose(C)))
        if not no_input:
            yout += squeeze(dot(U, transpose(D)))
        return T, squeeze(yout), squeeze(xout)

    dt = T[1] - T[0]
    if not np.allclose((T[1:] - T[:-1]) / dt, 1.0):
        # Fall back to the ODE-solver implementation for non-uniform grids.
        warnings.warn("Non-uniform timesteps are deprecated. Results may be "
                      "slow and/or inaccurate.", DeprecationWarning)
        return lsim2(system, U, T, X0)

    if no_input:
        # Zero input: just use matrix exponential
        # take transpose because state is a row vector
        expAT_dt = linalg.expm(transpose(A) * dt)
        for i in xrange(1, n_steps):
            xout[i] = dot(xout[i-1], expAT_dt)
        yout = squeeze(dot(xout, transpose(C)))
        return T, squeeze(yout), squeeze(xout)

    # Nonzero input
    U = atleast_1d(U)
    if U.ndim == 1:
        U = U[:, np.newaxis]

    if U.shape[0] != n_steps:
        raise ValueError("U must have the same number of rows "
                         "as elements in T.")

    if U.shape[1] != n_inputs:
        raise ValueError("System does not define that many inputs.")

    if not interp:
        # Zero-order hold
        # Algorithm: to integrate from time 0 to time dt, we solve
        #   xdot = A x + B u,  x(0) = x0
        #   udot = 0,          u(0) = u0.
        #
        # Solution is
        #   [ x(dt) ]       [ A*dt  B*dt ] [ x0 ]
        #   [ u(dt) ] = exp [  0     0   ] [ u0 ]
        M = np.vstack([np.hstack([A * dt, B * dt]),
                       np.zeros((n_inputs, n_states + n_inputs))])
        # transpose everything because the state and input are row vectors
        expMT = linalg.expm(transpose(M))
        Ad = expMT[:n_states, :n_states]
        Bd = expMT[n_states:, :n_states]
        for i in xrange(1, n_steps):
            xout[i] = dot(xout[i-1], Ad) + dot(U[i-1], Bd)
    else:
        # Linear interpolation between steps
        # Algorithm: to integrate from time 0 to time dt, with linear
        # interpolation between inputs u(0) = u0 and u(dt) = u1, we solve
        #   xdot = A x + B u,        x(0) = x0
        #   udot = (u1 - u0) / dt,   u(0) = u0.
        #
        # Solution is
        #   [ x(dt) ]       [ A*dt  B*dt  0 ] [  x0   ]
        #   [ u(dt) ] = exp [  0     0    I ] [  u0   ]
        #   [u1 - u0]       [  0     0    0 ] [u1 - u0]
        M = np.vstack([np.hstack([A * dt, B * dt,
                                  np.zeros((n_states, n_inputs))]),
                       np.hstack([np.zeros((n_inputs, n_states + n_inputs)),
                                  np.identity(n_inputs)]),
                       np.zeros((n_inputs, n_states + 2 * n_inputs))])
        expMT = linalg.expm(transpose(M))
        Ad = expMT[:n_states, :n_states]
        # Bd1 weights the input at the end of the step, Bd0 at the start.
        Bd1 = expMT[n_states+n_inputs:, :n_states]
        Bd0 = expMT[n_states:n_states + n_inputs, :n_states] - Bd1
        for i in xrange(1, n_steps):
            xout[i] = (dot(xout[i-1], Ad) + dot(U[i-1], Bd0) + dot(U[i], Bd1))
    yout = (squeeze(dot(xout, transpose(C))) + squeeze(dot(U, transpose(D))))
    return T, squeeze(yout), squeeze(xout)
def _default_response_times(A, n):
"""Compute a reasonable set of time samples for the response time.
This function is used by `impulse`, `impulse2`, `step` and `step2`
to compute the response time when the `T` argument to the function
is None.
Parameters
----------
A : ndarray
The system matrix, which is square.
n : int
The number of time samples to generate.
Returns
-------
t : ndarray
The 1-D array of length `n` of time samples at which the response
is to be computed.
"""
# Create a reasonable time interval.
# TODO: This could use some more work.
# For example, what is expected when the system is unstable?
vals = linalg.eigvals(A)
r = min(abs(real(vals)))
if r == 0.0:
r = 1.0
tc = 1.0 / r
t = linspace(0.0, 7 * tc, n)
return t
def impulse(system, X0=None, T=None, N=None):
    """Impulse response of continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

        * 2 (num, den)
        * 3 (zeros, poles, gain)
        * 4 (A, B, C, D)

    X0 : array_like, optional
        Initial state-vector.  Defaults to zero.
    T : array_like, optional
        Time points.  Computed if not given.
    N : int, optional
        The number of time points to compute (if `T` is not given).

    Returns
    -------
    T : ndarray
        A 1-D array of time points.
    yout : ndarray
        A 1-D array containing the impulse response of the system (except for
        singularities at zero).
    """
    if isinstance(system, lti):
        sys = system.to_ss()
    else:
        sys = lti(*system).to_ss()

    # An impulse at t=0 is equivalent to starting from state B (plus any
    # user-supplied initial condition) and simulating with zero input.
    initial_state = sys.B if X0 is None else sys.B + X0
    X = squeeze(initial_state)

    if N is None:
        N = 100
    T = _default_response_times(sys.A, N) if T is None else asarray(T)

    _, h, _ = lsim(sys, 0., T, X, interp=False)
    return T, h
def impulse2(system, X0=None, T=None, N=None, **kwargs):
    """
    Impulse response of a single-input, continuous-time linear system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        describing the system.
        The following gives the number of elements in the tuple and
        the interpretation:

        * 2 (num, den)
        * 3 (zeros, poles, gain)
        * 4 (A, B, C, D)

    X0 : 1-D array_like, optional
        The initial condition of the state vector.  Default: 0 (the
        zero vector).
    T : 1-D array_like, optional
        The time steps at which the input is defined and at which the
        output is desired.  If `T` is not given, the function will
        generate a set of time samples automatically.
    N : int, optional
        Number of time points to compute.  Default: 100.
    kwargs : various types
        Additional keyword arguments are passed on to the function
        `scipy.signal.lsim2`, which in turn passes them on to
        `scipy.integrate.odeint`; see the latter's documentation for
        information about these arguments.

    Returns
    -------
    T : ndarray
        The time values for the output.
    yout : ndarray
        The output response of the system.

    See Also
    --------
    impulse, lsim2, integrate.odeint

    Notes
    -----
    The solution is generated by calling `scipy.signal.lsim2`, which uses
    the differential equation solver `scipy.integrate.odeint`.

    .. versionadded:: 0.8.0

    Examples
    --------
    Second order system with a repeated root: x''(t) + 2*x(t) + x(t) = u(t)

    >>> from scipy import signal
    >>> system = ([1.0], [1.0, 2.0, 1.0])
    >>> t, y = signal.impulse2(system)
    >>> import matplotlib.pyplot as plt
    >>> plt.plot(t, y)
    """
    if isinstance(system, lti):
        ss = system.to_ss()
    else:
        ss = lti(*system).to_ss()

    b = ss.B
    if b.shape[-1] != 1:
        raise ValueError("impulse2() requires a single-input system.")
    b = b.squeeze()

    if X0 is None:
        X0 = zeros_like(b)
    if N is None:
        N = 100
    if T is None:
        T = _default_response_times(ss.A, N)

    # Move the impulse in the input to the initial conditions, and then
    # solve using lsim2().
    tout, yout, _ = lsim2(ss, T=T, X0=b + X0, **kwargs)
    return tout, yout
def step(system, X0=None, T=None, N=None):
    """Step response of continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        Either an `lti` object or a tuple describing the system; the
        tuple length selects the interpretation:

            * 2 (num, den)
            * 3 (zeros, poles, gain)
            * 4 (A, B, C, D)

    X0 : array_like, optional
        Initial state-vector (default is zero).
    T : array_like, optional
        Time points (computed if not given).
    N : int, optional
        Number of time points to compute if `T` is not given.

    Returns
    -------
    T : 1D ndarray
        Output time points.
    yout : 1D ndarray
        Step response of system.

    See also
    --------
    scipy.signal.step2
    """
    sys = system.to_ss() if isinstance(system, lti) else lti(*system).to_ss()
    if N is None:
        N = 100
    T = _default_response_times(sys.A, N) if T is None else asarray(T)
    # Unit step: the input is one at every requested time point.
    U = ones(T.shape, sys.A.dtype)
    response = lsim(sys, U, T, X0=X0, interp=False)
    return response[0], response[1]
def step2(system, X0=None, T=None, N=None, **kwargs):
    """Step response of continuous-time system.

    Functionally the same as `scipy.signal.step`, but the response is
    computed with `scipy.signal.lsim2` instead of `scipy.signal.lsim`.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple of array_like
        Either an `lti` object or a tuple describing the system; the
        tuple length selects the interpretation:

            * 2 (num, den)
            * 3 (zeros, poles, gain)
            * 4 (A, B, C, D)

    X0 : array_like, optional
        Initial state-vector (default is zero).
    T : array_like, optional
        Time points (computed if not given).
    N : int, optional
        Number of time points to compute if `T` is not given.
    kwargs : various types
        Extra keyword arguments, forwarded to `scipy.signal.lsim2` and
        from there to `scipy.integrate.odeint`; see the latter's
        documentation for details.

    Returns
    -------
    T : 1D ndarray
        Output time points.
    yout : 1D ndarray
        Step response of system.

    See also
    --------
    scipy.signal.step

    Notes
    -----
    .. versionadded:: 0.8.0
    """
    sys = system.to_ss() if isinstance(system, lti) else lti(*system).to_ss()
    if N is None:
        N = 100
    T = _default_response_times(sys.A, N) if T is None else asarray(T)
    # Unit step: the input is one at every requested time point.
    U = ones(T.shape, sys.A.dtype)
    response = lsim2(sys, U, T, X0=X0, **kwargs)
    return response[0], response[1]
def bode(system, w=None, n=100):
    """
    Calculate Bode magnitude and phase data of a continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The tuple length selects the interpretation:

            * 2 (num, den)
            * 3 (zeros, poles, gain)
            * 4 (A, B, C, D)

    w : array_like, optional
        Frequencies (in rad/s) at which magnitude and phase are
        computed.  A reasonable set is generated when omitted.
    n : int, optional
        Number of frequency points when `w` is omitted.  The points are
        logarithmically spaced over a range chosen to cover the
        influence of the system's poles and zeros.

    Returns
    -------
    w : 1D ndarray
        Frequency array [rad/s]
    mag : 1D ndarray
        Magnitude array [dB]
    phase : 1D ndarray
        Phase array [deg]

    Notes
    -----
    .. versionadded:: 0.11.0

    Examples
    --------
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> s1 = signal.lti([1], [1, 1])
    >>> w, mag, phase = signal.bode(s1)
    >>> plt.figure()
    >>> plt.semilogx(w, mag)    # Bode magnitude plot
    >>> plt.figure()
    >>> plt.semilogx(w, phase)  # Bode phase plot
    >>> plt.show()
    """
    freq, resp = freqresp(system, w=w, n=n)
    # Magnitude in decibels, phase unwrapped and converted to degrees.
    mag = 20.0 * numpy.log10(abs(resp))
    phase = numpy.unwrap(numpy.arctan2(resp.imag, resp.real)) * 180.0 / numpy.pi
    return freq, mag, phase
def freqresp(system, w=None, n=10000):
    """Calculate the frequency response of a continuous-time system.

    Parameters
    ----------
    system : an instance of the LTI class or a tuple describing the system.
        The tuple length selects the interpretation:

            * 2 (num, den)
            * 3 (zeros, poles, gain)
            * 4 (A, B, C, D)

    w : array_like, optional
        Frequencies (in rad/s) at which the response is computed.  A
        reasonable set is generated when omitted.
    n : int, optional
        Number of frequency points when `w` is omitted.  The points are
        logarithmically spaced over a range chosen to cover the
        influence of the system's poles and zeros.

    Returns
    -------
    w : 1D ndarray
        Frequency array [rad/s]
    H : 1D ndarray
        Array of complex magnitude values

    Examples
    --------
    # Generating the Nyquist plot of a transfer function
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> s1 = signal.lti([], [1, 1, 1], [5])
    # transfer function: H(s) = 5 / (s-1)^3
    >>> w, H = signal.freqresp(s1)
    >>> plt.figure()
    >>> plt.plot(H.real, H.imag, "b")
    >>> plt.plot(H.real, -H.imag, "r")
    >>> plt.show()
    """
    sys = system.to_tf() if isinstance(system, lti) else lti(*system).to_tf()
    if sys.inputs != 1 or sys.outputs != 1:
        raise ValueError("freqresp() requires a SISO (single input, single "
                         "output) system.")
    worN = n if w is None else w
    # sys.num can be a 2-D array with a single row; flatten it before
    # handing it to freqs().
    return freqs(sys.num.ravel(), sys.den, worN=worN)
# Lightweight result container used by place_poles: every keyword given
# to the constructor becomes an attribute of the instance.
# see http://code.activestate.com/recipes/52308/
class Bunch:
    def __init__(self, **kwds):
        vars(self).update(kwds)
def _valid_inputs(A, B, poles, method, rtol, maxiter):
    """Validate place_poles arguments and choose the update algorithm.

    Ensures the poles come in complex conjugate pairs, that the shapes
    of A, B and poles are compatible, and that the selected method can
    handle the requested poles.  Returns the update loop to use and the
    poles reordered as the loop expects.
    """
    poles = np.asarray(poles)
    if poles.ndim > 1:
        raise ValueError("Poles must be a 1D array like.")
    # _order_complex_poles raises ValueError when a complex pole is
    # missing its conjugate.
    poles = _order_complex_poles(poles)
    if A.ndim > 2:
        raise ValueError("A must be a 2D array/matrix.")
    if B.ndim > 2:
        raise ValueError("B must be a 2D array/matrix")
    n = A.shape[0]
    if n != A.shape[1]:
        raise ValueError("A must be square")
    if len(poles) > n:
        raise ValueError("maximum number of poles is %d but you asked for %d" %
                         (n, len(poles)))
    if len(poles) < n:
        raise ValueError("number of poles is %d but you should provide %d" %
                         (len(poles), n))
    rank_B = np.linalg.matrix_rank(B)
    for p in poles:
        if sum(p == poles) > rank_B:
            raise ValueError("at least one of the requested pole is repeated "
                             "more than rank(B) times")
    if method not in ('KNV0', 'YT'):
        raise ValueError("The method keyword must be one of 'YT' or 'KNV0'")
    if method == "KNV0":
        update_loop = _KNV0_loop
        if not all(np.isreal(poles)):
            raise ValueError("Complex poles are not supported by KNV0")
    else:
        update_loop = _YT_loop
    if maxiter < 1:
        raise ValueError("maxiter must be at least equal to 1")
    # rtol <= 0 is deliberately allowed: a negative rtol lets the user
    # force maxiter iterations.
    if rtol > 1:
        raise ValueError("rtol can not be greater than 1")
    return update_loop, poles
def _order_complex_poles(poles):
"""
Check we have complex conjugates pairs and reorder P according to YT, ie
real_poles, complex_i, conjugate complex_i, ....
The lexicographic sort on the complex poles is added to help the user to
compare sets of poles.
"""
ordered_poles = np.sort(poles[np.isreal(poles)])
im_poles = []
for p in np.sort(poles[np.imag(poles) < 0]):
if np.conj(p) in poles:
im_poles.extend((p, np.conj(p)))
ordered_poles = np.hstack((ordered_poles, im_poles))
if poles.shape[0] != len(ordered_poles):
raise ValueError("Complex poles must come with their conjugates")
return ordered_poles
def _KNV0(B, ker_pole, transfer_matrix, j, poles):
"""
Algorithm "KNV0" Kautsky et Al. Robust pole
assignment in linear state feedback, Int journal of Control
1985, vol 41 p 1129->1155
http://la.epfl.ch/files/content/sites/la/files/
users/105941/public/KautskyNicholsDooren
"""
# Remove xj form the base
transfer_matrix_not_j = np.delete(transfer_matrix, j, axis=1)
# If we QR this matrix in full mode Q=Q0|Q1
# then Q1 will be a single column orthogonnal to
# Q0, that's what we are looking for !
# After merge of gh-4249 great speed improvements could be achieved
# using QR updates instead of full QR in the line below
# To debug with numpy qr uncomment the line below
# Q, R = np.linalg.qr(transfer_matrix_not_j, mode="complete")
Q, R = s_qr(transfer_matrix_not_j, mode="full")
mat_ker_pj = np.dot(ker_pole[j], ker_pole[j].T)
yj = np.dot(mat_ker_pj, Q[:, -1])
# If Q[:, -1] is "almost" orthogonal to ker_pole[j] its
# projection into ker_pole[j] will yield a vector
# close to 0. As we are looking for a vector in ker_pole[j]
# simply stick with transfer_matrix[:, j] (unless someone provides me with
# a better choice ?)
if not np.allclose(yj, 0):
xj = yj/np.linalg.norm(yj)
transfer_matrix[:, j] = xj
# KNV does not support complex poles, using YT technique the two lines
# below seem to work 9 out of 10 times but it is not reliable enough:
# transfer_matrix[:, j]=real(xj)
# transfer_matrix[:, j+1]=imag(xj)
# Add this at the beginning of this function if you wish to test
# complex support:
# if ~np.isreal(P[j]) and (j>=B.shape[0]-1 or P[j]!=np.conj(P[j+1])):
# return
# Problems arise when imag(xj)=>0 I have no idea on how to fix this
def _YT_real(ker_pole, Q, transfer_matrix, i, j):
    """
    Applies algorithm from YT section 6.1 page 19 related to real pairs

    Performs one rank-2 update of columns ``i`` and ``j`` of
    ``transfer_matrix`` (in place) for a pair of real poles.

    Parameters
    ----------
    ker_pole : list of ndarray
        Orthonormal kernel basis for each pole.
    Q : ndarray
        "Q" of the full QR decomposition of ``transfer_matrix`` with
        columns ``i`` and ``j`` removed; its last two columns span the
        orthogonal complement of the remaining columns.
    transfer_matrix : ndarray
        Current transfer matrix; columns ``i`` and ``j`` are updated
        in place.
    i, j : int
        Indices of the two real poles being updated.
    """
    # step 1 page 19
    u = Q[:, -2, np.newaxis]
    v = Q[:, -1, np.newaxis]
    # step 2 page 19
    m = np.dot(np.dot(ker_pole[i].T, np.dot(u, v.T) -
               np.dot(v, u.T)), ker_pole[j])
    # step 3 page 19
    um, sm, vm = np.linalg.svd(m)
    # mu1, mu2 two first columns of U => 2 first lines of U.T
    mu1, mu2 = um.T[:2, :, np.newaxis]
    # VM is V.T with numpy we want the first two lines of V.T
    nu1, nu2 = vm[:2, :, np.newaxis]
    # what follows is a rough python translation of the formulas
    # in section 6.2 page 20 (step 4)
    # stack the two current columns into one tall vector
    transfer_matrix_j_mo_transfer_matrix_j = np.vstack((
            transfer_matrix[:, i, np.newaxis],
            transfer_matrix[:, j, np.newaxis]))
    if not np.allclose(sm[0], sm[1]):
        # distinct singular values: use only the dominant pair of
        # singular vectors
        ker_pole_imo_mu1 = np.dot(ker_pole[i], mu1)
        ker_pole_i_nu1 = np.dot(ker_pole[j], nu1)
        ker_pole_mu_nu = np.vstack((ker_pole_imo_mu1, ker_pole_i_nu1))
    else:
        # (nearly) equal singular values: keep both singular-vector
        # pairs, combined through a block-diagonal arrangement
        ker_pole_ij = np.vstack((
                            np.hstack((ker_pole[i],
                                       np.zeros(ker_pole[i].shape))),
                            np.hstack((np.zeros(ker_pole[j].shape),
                                                ker_pole[j]))
                            ))
        mu_nu_matrix = np.vstack(
            (np.hstack((mu1, mu2)), np.hstack((nu1, nu2)))
            )
        ker_pole_mu_nu = np.dot(ker_pole_ij, mu_nu_matrix)
    transfer_matrix_ij = np.dot(np.dot(ker_pole_mu_nu, ker_pole_mu_nu.T),
                                transfer_matrix_j_mo_transfer_matrix_j)
    if not np.allclose(transfer_matrix_ij, 0):
        transfer_matrix_ij = (np.sqrt(2)*transfer_matrix_ij /
                              np.linalg.norm(transfer_matrix_ij))
        # split the tall result back into the two updated columns
        transfer_matrix[:, i] = transfer_matrix_ij[
            :transfer_matrix[:, i].shape[0], 0
            ]
        transfer_matrix[:, j] = transfer_matrix_ij[
            transfer_matrix[:, i].shape[0]:, 0
            ]
    else:
        # As in knv0 if transfer_matrix_j_mo_transfer_matrix_j is orthogonal to
        # Vect{ker_pole_mu_nu} assign transfer_matrixi/transfer_matrix_j to
        # ker_pole_mu_nu and iterate. As we are looking for a vector in
        # Vect{Matker_pole_MU_NU} (see section 6.1 page 19) this might help
        # (that's a guess, not a claim !)
        transfer_matrix[:, i] = ker_pole_mu_nu[
            :transfer_matrix[:, i].shape[0], 0
            ]
        transfer_matrix[:, j] = ker_pole_mu_nu[
            transfer_matrix[:, i].shape[0]:, 0
            ]
def _YT_complex(ker_pole, Q, transfer_matrix, i, j):
    """
    Applies algorithm from YT section 6.2 page 20 related to complex pairs

    Updates columns ``i`` and ``j`` of ``transfer_matrix`` in place for
    a complex conjugate pole pair; column ``i`` holds the real part of
    the complex transfer vector and column ``j`` its imaginary part.

    Parameters
    ----------
    ker_pole : list of ndarray
        Orthonormal kernel basis for each pole (the pair shares one
        basis, stored at index ``i``).
    Q : ndarray
        "Q" of the full QR decomposition of ``transfer_matrix`` with
        columns ``i`` and ``j`` removed; its last two columns span the
        orthogonal complement of the remaining columns.
    transfer_matrix : ndarray
        Current transfer matrix; columns ``i`` and ``j`` are updated
        in place.
    i, j : int
        Indices of the complex pole pair being updated.
    """
    # step 1 page 20
    ur = np.sqrt(2)*Q[:, -2, np.newaxis]
    ui = np.sqrt(2)*Q[:, -1, np.newaxis]
    u = ur + 1j*ui
    # step 2 page 20
    ker_pole_ij = ker_pole[i]
    m = np.dot(np.dot(np.conj(ker_pole_ij.T), np.dot(u, np.conj(u).T) -
               np.dot(np.conj(u), u.T)), ker_pole_ij)
    # step 3 page 20
    e_val, e_vec = np.linalg.eig(m)
    # sort eigenvalues according to their module
    e_val_idx = np.argsort(np.abs(e_val))
    mu1 = e_vec[:, e_val_idx[-1], np.newaxis]
    mu2 = e_vec[:, e_val_idx[-2], np.newaxis]
    # what follows is a rough python translation of the formulas
    # in section 6.2 page 20 (step 4)
    # remember transfer_matrix_i has been split as
    # transfer_matrix[i]=real(transfer_matrix_i) and
    # transfer_matrix[j]=imag(transfer_matrix_i)
    transfer_matrix_j_mo_transfer_matrix_j = (
        transfer_matrix[:, i, np.newaxis] +
        1j*transfer_matrix[:, j, np.newaxis]
        )
    if not np.allclose(np.abs(e_val[e_val_idx[-1]]),
                       np.abs(e_val[e_val_idx[-2]])):
        # dominant eigenvalue is isolated: use its eigenvector alone
        ker_pole_mu = np.dot(ker_pole_ij, mu1)
    else:
        # two largest eigenvalue moduli (nearly) tie: keep both
        # eigenvectors
        mu1_mu2_matrix = np.hstack((mu1, mu2))
        ker_pole_mu = np.dot(ker_pole_ij, mu1_mu2_matrix)
    transfer_matrix_i_j = np.dot(np.dot(ker_pole_mu, np.conj(ker_pole_mu.T)),
                                 transfer_matrix_j_mo_transfer_matrix_j)
    if not np.allclose(transfer_matrix_i_j, 0):
        transfer_matrix_i_j = (transfer_matrix_i_j /
                               np.linalg.norm(transfer_matrix_i_j))
        transfer_matrix[:, i] = np.real(transfer_matrix_i_j[:, 0])
        transfer_matrix[:, j] = np.imag(transfer_matrix_i_j[:, 0])
    else:
        # same idea as in YT_real
        transfer_matrix[:, i] = np.real(ker_pole_mu[:, 0])
        transfer_matrix[:, j] = np.imag(ker_pole_mu[:, 0])
def _YT_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
    """
    Algorithm "YT" Tits, Yang. Globally Convergent
    Algorithms for Robust Pole Assignment by State Feedback
    http://drum.lib.umd.edu/handle/1903/5598
    The poles P have to be sorted accordingly to section 6.2 page 20

    Repeatedly sweeps over the pole pairs listed in ``update_order``,
    improving ``transfer_matrix`` in place with rank-2 (YT) updates --
    or a rank-1 KNV0 update for a lone real pole -- until the relative
    change of ``abs(det(transfer_matrix))`` drops below `rtol` or
    `maxiter` sweeps have been performed.

    Returns
    -------
    stop : bool
        True if the convergence test was satisfied.
    cur_rtol : float
        Relative determinant change achieved on the last sweep.
    nb_try : int
        Number of sweeps performed.
    """
    # The IEEE edition of the YT paper gives useful information on the
    # optimal update order for the real poles in order to minimize the number
    # of times we have to loop over all poles, see page 1442
    nb_real = poles[np.isreal(poles)].shape[0]
    # hnb => Half Nb Real
    hnb = nb_real // 2
    # Stick to the indices in the paper and then remove one to get numpy array
    # index it is a bit easier to link the code to the paper this way even if
    # it is not very clean. The paper is unclear about what should be done
    # when there is only one real pole => use KNV0 on this real pole seem to
    # work
    if nb_real > 0:
        # update the biggest real pole with the smallest one
        update_order = [[nb_real], [1]]
    else:
        update_order = [[], []]
    r_comp = np.arange(nb_real+1, len(poles)+1, 2)
    # step 1.a
    r_p = np.arange(1, hnb+nb_real % 2)
    update_order[0].extend(2*r_p)
    update_order[1].extend(2*r_p+1)
    # step 1.b
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 1.c
    r_p = np.arange(1, hnb+1)
    update_order[0].extend(2*r_p-1)
    update_order[1].extend(2*r_p)
    # step 1.d
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 2.a
    r_j = np.arange(2, hnb+nb_real % 2)
    for j in r_j:
        for i in range(1, hnb+1):
            update_order[0].append(i)
            update_order[1].append(i+j)
    # step 2.b
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 2.c
    r_j = np.arange(2, hnb+nb_real % 2)
    for j in r_j:
        for i in range(hnb+1, nb_real+1):
            idx_1 = i+j
            if idx_1 > nb_real:
                idx_1 = i+j-nb_real
            update_order[0].append(i)
            update_order[1].append(idx_1)
    # step 2.d
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # step 3.a
    for i in range(1, hnb+1):
        update_order[0].append(i)
        update_order[1].append(i+hnb)
    # step 3.b
    if hnb == 0 and np.isreal(poles[0]):
        update_order[0].append(1)
        update_order[1].append(1)
    update_order[0].extend(r_comp)
    update_order[1].extend(r_comp+1)
    # switch from the paper's 1-based indices to numpy's 0-based ones
    update_order = np.array(update_order).T-1
    stop = False
    nb_try = 0
    while nb_try < maxiter and not stop:
        det_transfer_matrixb = np.abs(np.linalg.det(transfer_matrix))
        for i, j in update_order:
            if i == j:
                # a pair (i, i) encodes "apply KNV0 to the lone real
                # pole"; by construction this only happens for index 0
                assert i == 0, "i!=0 for KNV call in YT"
                assert np.isreal(poles[i]), "calling KNV on a complex pole"
                _KNV0(B, ker_pole, transfer_matrix, i, poles)
            else:
                transfer_matrix_not_i_j = np.delete(transfer_matrix, (i, j),
                                                    axis=1)
                # after merge of gh-4249 great speed improvements could be
                # achieved using QR updates instead of full QR in the line below
                # to debug with numpy qr uncomment the line below
                # Q, _ = np.linalg.qr(transfer_matrix_not_i_j, mode="complete")
                Q, _ = s_qr(transfer_matrix_not_i_j, mode="full")
                if np.isreal(poles[i]):
                    assert np.isreal(poles[j]), "mixing real and complex " + \
                        "in YT_real" + str(poles)
                    _YT_real(ker_pole, Q, transfer_matrix, i, j)
                else:
                    # BUGFIX: the message used to say "YT_real" even
                    # though this is the complex branch
                    assert ~np.isreal(poles[i]), "mixing real and complex " + \
                        "in YT_complex" + str(poles)
                    _YT_complex(ker_pole, Q, transfer_matrix, i, j)
        det_transfer_matrix = np.max((np.sqrt(np.spacing(1)),
                                      np.abs(np.linalg.det(transfer_matrix))))
        cur_rtol = np.abs(
            (det_transfer_matrix -
             det_transfer_matrixb) /
            det_transfer_matrix)
        if cur_rtol < rtol and det_transfer_matrix > np.sqrt(np.spacing(1)):
            # Convergence test from YT page 21
            stop = True
        nb_try += 1
    return stop, cur_rtol, nb_try
def _KNV0_loop(ker_pole, transfer_matrix, poles, B, maxiter, rtol):
    """Sweep KNV method 0 over every pole until convergence or maxiter.

    Kept separate from _KNV0 itself because _YT_loop needs to apply
    _KNV0 to a single pole without sweeping them all; otherwise the two
    could have been merged into one function.
    """
    stop = False
    nb_try = 0
    eps_floor = np.sqrt(np.spacing(1))
    while nb_try < maxiter and not stop:
        previous_det = np.abs(np.linalg.det(transfer_matrix))
        for j in range(B.shape[0]):
            _KNV0(B, ker_pole, transfer_matrix, j, poles)
        cur_det = np.max((eps_floor,
                          np.abs(np.linalg.det(transfer_matrix))))
        cur_rtol = np.abs((cur_det - previous_det) / cur_det)
        # Convergence test from YT page 21
        if cur_rtol < rtol and cur_det > eps_floor:
            stop = True
        nb_try += 1
    return stop, cur_rtol, nb_try
def place_poles(A, B, poles, method="YT", rtol=1e-3, maxiter=30):
    """
    Compute K such that eigenvalues (A - dot(B, K))=poles.
    K is the gain matrix such as the plant described by the linear system
    ``AX+BU`` will have its closed-loop poles, i.e the eigenvalues ``A - B*K``,
    as close as possible to those asked for in poles.
    SISO, MISO and MIMO systems are supported.
    Parameters
    ----------
    A, B : ndarray
        State-space representation of linear system ``AX + BU``.
    poles : array_like
        Desired real poles and/or complex conjugates poles.
        Complex poles are only supported with ``method="YT"`` (default).
    method: {'YT', 'KNV0'}, optional
        Which method to choose to find the gain matrix K. One of:
        - 'YT': Yang Tits
        - 'KNV0': Kautsky, Nichols, Van Dooren update method 0
        See References and Notes for details on the algorithms.
    rtol: float, optional
        After each iteration the determinant of the eigenvectors of
        ``A - B*K`` is compared to its previous value, when the relative
        error between these two values becomes lower than `rtol` the algorithm
        stops. Default is 1e-3.
    maxiter: int, optional
        Maximum number of iterations to compute the gain matrix.
        Default is 30.
    Returns
    -------
    full_state_feedback : Bunch object
        full_state_feedback is composed of:
            gain_matrix : 1-D ndarray
                The closed loop matrix K such as the eigenvalues of ``A-BK``
                are as close as possible to the requested poles.
            computed_poles : 1-D ndarray
                The poles corresponding to ``A-BK`` sorted as first the real
                poles in increasing order, then the complex congugates in
                lexicographic order.
            requested_poles : 1-D ndarray
                The poles the algorithm was asked to place sorted as above,
                they may differ from what was achieved.
            X : 2D ndarray
                The transfer matrix such as ``X * diag(poles) = (A - B*K)*X``
                (see Notes)
            rtol : float
                The relative tolerance achieved on ``det(X)`` (see Notes).
                `rtol` will be NaN if the optimisation algorithms can not run,
                i.e when ``B.shape[1] == 1``, or 0 when the solution is unique.
            nb_iter : int
                The number of iterations performed before converging.
                `nb_iter` will be NaN if the optimisation algorithms can
                not run, i.e when ``B.shape[1] == 1``, or 0 when the solution
                is unique.
    Notes
    -----
    The Tits and Yang (YT), [2]_ paper is an update of the original Kautsky et
    al. (KNV) paper [1]_. KNV relies on rank-1 updates to find the transfer
    matrix X such that ``X * diag(poles) = (A - B*K)*X``, whereas YT uses
    rank-2 updates. This yields on average more robust solutions (see [2]_
    pp 21-22), furthermore the YT algorithm supports complex poles whereas KNV
    does not in its original version. Only update method 0 proposed by KNV has
    been implemented here, hence the name ``'KNV0'``.
    KNV extended to complex poles is used in Matlab's ``place`` function, YT is
    distributed under a non-free licence by Slicot under the name ``robpole``.
    It is unclear and undocumented how KNV0 has been extended to complex poles
    (Tits and Yang claim on page 14 of their paper that their method can not be
    used to extend KNV to complex poles), therefore only YT supports them in
    this implementation.
    As the solution to the problem of pole placement is not unique for MIMO
    systems, both methods start with a tentative transfer matrix which is
    altered in various way to increase its determinant. Both methods have been
    proven to converge to a stable solution, however depending on the way the
    initial transfer matrix is chosen they will converge to different
    solutions and therefore there is absolutely no guarantee that using
    ``'KNV0'`` will yield results similar to Matlab's or any other
    implementation of these algorithms.
    Using the default method ``'YT'`` should be fine in most cases; ``'KNV0'``
    is only provided because it is needed by ``'YT'`` in some specific cases.
    Furthermore ``'YT'`` gives on average more robust results than ``'KNV0'``
    when ``abs(det(X))`` is used as a robustness indicator.
    [2]_ is available as a technical report on the following URL:
    http://drum.lib.umd.edu/handle/1903/5598
    References
    ----------
    .. [1] J. Kautsky, N.K. Nichols and P. van Dooren, "Robust pole assignment
        in linear state feedback", International Journal of Control, Vol. 41
        pp. 1129-1155, 1985.
    .. [2] A.L. Tits and Y. Yang, "Globally convergent algorithms for robust
        pole assignment by state feedback, IEEE Transactions on Automatic
        Control, Vol. 41, pp. 1432-1452, 1996.
    Examples
    --------
    A simple example demonstrating real pole placement using both KNV and YT
    algorithms. This is example number 1 from section 4 of the reference KNV
    publication ([1]_):
    >>> from scipy import signal
    >>> import matplotlib.pyplot as plt
    >>> A = np.array([[ 1.380, -0.2077, 6.715, -5.676 ],
    ... [-0.5814, -4.290, 0, 0.6750 ],
    ... [ 1.067, 4.273, -6.654, 5.893 ],
    ... [ 0.0480, 4.273, 1.343, -2.104 ]])
    >>> B = np.array([[ 0, 5.679 ],
    ... [ 1.136, 1.136 ],
    ... [ 0, 0, ],
    ... [-3.146, 0 ]])
    >>> P = np.array([-0.2, -0.5, -5.0566, -8.6659])
    Now compute K with KNV method 0, with the default YT method and with the YT
    method while forcing 100 iterations of the algorithm and print some results
    after each call.
    >>> fsf1 = signal.place_poles(A, B, P, method='KNV0')
    >>> fsf1.gain_matrix
    array([[ 0.20071427, -0.96665799, 0.24066128, -0.10279785],
    [ 0.50587268, 0.57779091, 0.51795763, -0.41991442]])
    >>> fsf2 = signal.place_poles(A, B, P) # uses YT method
    >>> fsf2.computed_poles
    array([-8.6659, -5.0566, -0.5 , -0.2 ])
    >>> fsf3 = signal.place_poles(A, B, P, rtol=-1, maxiter=100)
    >>> fsf3.X
    array([[ 0.52072442+0.j, -0.08409372+0.j, -0.56847937+0.j, 0.74823657+0.j],
    [-0.04977751+0.j, -0.80872954+0.j, 0.13566234+0.j, -0.29322906+0.j],
    [-0.82266932+0.j, -0.19168026+0.j, -0.56348322+0.j, -0.43815060+0.j],
    [ 0.22267347+0.j, 0.54967577+0.j, -0.58387806+0.j, -0.40271926+0.j]])
    The absolute value of the determinant of X is a good indicator to check the
    robustness of the results, both ``'KNV0'`` and ``'YT'`` aim at maximizing
    it. Below a comparison of the robustness of the results above:
    >>> abs(np.linalg.det(fsf1.X)) < abs(np.linalg.det(fsf2.X))
    True
    >>> abs(np.linalg.det(fsf2.X)) < abs(np.linalg.det(fsf3.X))
    True
    Now a simple example for complex poles:
    >>> A = np.array([[ 0, 7/3., 0, 0 ],
    ... [ 0, 0, 0, 7/9. ],
    ... [ 0, 0, 0, 0 ],
    ... [ 0, 0, 0, 0 ]])
    >>> B = np.array([[ 0, 0 ],
    ... [ 0, 0 ],
    ... [ 1, 0 ],
    ... [ 0, 1 ]])
    >>> P = np.array([-3, -1, -2-1j, -2+1j]) / 3.
    >>> fsf = signal.place_poles(A, B, P, method='YT')
    We can plot the desired and computed poles in the complex plane:
    >>> t = np.linspace(0, 2*np.pi, 401)
    >>> plt.plot(np.cos(t), np.sin(t), 'k--') # unit circle
    >>> plt.plot(fsf.requested_poles.real, fsf.requested_poles.imag,
    ... 'wo', label='Desired')
    >>> plt.plot(fsf.computed_poles.real, fsf.computed_poles.imag, 'bx',
    ... label='Placed')
    >>> plt.grid()
    >>> plt.axis('image')
    >>> plt.axis([-1.1, 1.1, -1.1, 1.1])
    >>> plt.legend(bbox_to_anchor=(1.05, 1), loc=2, numpoints=1)
    """
    # Move away all the inputs checking, it only adds noise to the code
    update_loop, poles = _valid_inputs(A, B, poles, method, rtol, maxiter)
    # The current value of the relative tolerance we achieved
    cur_rtol = np.nan
    # The number of iterations needed before converging
    nb_iter = np.nan
    # Step A: QR decomposition of B page 1132 KN
    # to debug with numpy qr uncomment the line below
    # u, z = np.linalg.qr(B, mode="complete")
    u, z = s_qr(B, mode="full")
    rankB = np.linalg.matrix_rank(B)
    # u0: first rankB columns of Q; u1: the remaining columns
    # (orthogonal complement); z: the first rankB rows of R
    u0 = u[:, :rankB]
    u1 = u[:, rankB:]
    z = z[:rankB, :]
    # If the solution is unique
    if B.shape[0] == rankB:
        # if B is square and full rank there is only one solution
        # such as (A+BK)=diag(P) i.e BK=diag(P)-A
        # if B has as many lines as its rank (but not square) the solution
        # is the same as above using least squares
        # => use lstsq in both cases
        # for complex poles we use the following trick
        #
        # |a -b| has for eigenvalues a+b and a-b
        # |b a|
        #
        # |a+bi 0| has the obvious eigenvalues a+bi and a-bi
        # |0 a-bi|
        #
        # e.g solving the first one in R gives the solution
        # for the second one in C
        diag_poles = np.zeros(A.shape)
        idx = 0
        while idx < poles.shape[0]:
            p = poles[idx]
            diag_poles[idx, idx] = np.real(p)
            if ~np.isreal(p):
                # complex pair: write the 2x2 real block shown above
                diag_poles[idx, idx+1] = -np.imag(p)
                diag_poles[idx+1, idx+1] = np.real(p)
                diag_poles[idx+1, idx] = np.imag(p)
                idx += 1  # skip next one
            idx += 1
        gain_matrix = np.linalg.lstsq(B, diag_poles-A)[0]
        transfer_matrix = np.eye(A.shape[0])
        cur_rtol = 0
        nb_iter = 0
    else:
        # step A (p1144 KNV) and begining of step F: decompose
        # dot(U1.T, A-P[i]*I).T and build our set of transfer_matrix vectors
        # in the same loop
        ker_pole = []
        # flag to skip the conjugate of a complex pole
        skip_conjugate = False
        # select orthonormal base ker_pole for each Pole and vectors for
        # transfer_matrix
        for j in range(B.shape[0]):
            if skip_conjugate:
                skip_conjugate = False
                continue
            pole_space_j = np.dot(u1.T, A-poles[j]*np.eye(B.shape[0])).T
            # after QR Q=Q0|Q1
            # only Q0 is used to reconstruct the qr'ed (dot Q, R) matrix.
            # Q1 is orthogonnal to Q0 and will be multiplied by the zeros in
            # R when using mode "complete". In default mode Q1 and the zeros
            # in R are not computed
            # To debug with numpy qr uncomment the line below
            # Q, _ = np.linalg.qr(pole_space_j, mode="complete")
            Q, _ = s_qr(pole_space_j, mode="full")
            ker_pole_j = Q[:, pole_space_j.shape[1]:]
            # We want to select one vector in ker_pole_j to build the transfer
            # matrix, however qr returns sometimes vectors with zeros on the
            # same line for each pole and this yields very long convergence
            # times.
            # Or some other times a set of vectors, one with zero imaginary
            # part and one (or several) with imaginary parts. After trying
            # many ways to select the best possible one (eg ditch vectors
            # with zero imaginary part for complex poles) I ended up summing
            # all vectors in ker_pole_j, this solves 100% of the problems and
            # is still a valid choice for transfer_matrix. Indeed for complex
            # poles we are sure to have a non zero imaginary part that way,
            # and the problem of lines full of zeros in transfer_matrix is
            # solved too as when a vector from ker_pole_j has a zero the
            # other one(s) (when ker_pole_j.shape[1]>1) for sure won't have
            # a zero there.
            transfer_matrix_j = np.sum(ker_pole_j, axis=1)[:, np.newaxis]
            transfer_matrix_j = (transfer_matrix_j /
                                 np.linalg.norm(transfer_matrix_j))
            if ~np.isreal(poles[j]):  # complex pole
                transfer_matrix_j = np.hstack([np.real(transfer_matrix_j),
                                               np.imag(transfer_matrix_j)])
                ker_pole.extend([ker_pole_j, ker_pole_j])
                # Skip next pole as it is the conjugate
                skip_conjugate = True
            else:  # real pole, nothing to do
                ker_pole.append(ker_pole_j)
            if j == 0:
                transfer_matrix = transfer_matrix_j
            else:
                transfer_matrix = np.hstack((transfer_matrix, transfer_matrix_j))
        if rankB > 1:  # otherwise there is nothing we can optimize
            stop, cur_rtol, nb_iter = update_loop(ker_pole, transfer_matrix,
                                                  poles, B, maxiter, rtol)
            if not stop and rtol > 0:
                # if rtol<=0 the user has probably done that on purpose,
                # don't annoy him
                err_msg = (
                    "Convergence was not reached after maxiter iterations.\n"
                    "You asked for a relative tolerance of %f we got %f" %
                    (rtol, cur_rtol)
                    )
                warnings.warn(err_msg)
        # reconstruct transfer_matrix to match complex conjugate pairs,
        # ie transfer_matrix_j/transfer_matrix_j+1 are
        # Re(Complex_pole), Im(Complex_pole) now and will be Re-Im/Re+Im after
        transfer_matrix = transfer_matrix.astype(complex)
        idx = 0
        while idx < poles.shape[0]-1:
            if ~np.isreal(poles[idx]):
                rel = transfer_matrix[:, idx].copy()
                img = transfer_matrix[:, idx+1]
                # rel will be an array referencing a column of transfer_matrix
                # if we don't copy() it will changer after the next line and
                # and the line after will not yield the correct value
                transfer_matrix[:, idx] = rel-1j*img
                transfer_matrix[:, idx+1] = rel+1j*img
                idx += 1  # skip next one
            idx += 1
        try:
            # solve X.T m.T = diag(poles) X.T for m, then K from the QR
            # factors of B
            m = np.linalg.solve(transfer_matrix.T, np.dot(np.diag(poles),
                                                          transfer_matrix.T)).T
            gain_matrix = np.linalg.solve(z, np.dot(u0.T, m-A))
        except np.linalg.LinAlgError:
            raise ValueError("The poles you've chosen can't be placed. "
                             "Check the controllability matrix and try "
                             "another set of poles")
    # Beware: Kautsky solves A+BK but the usual form is A-BK
    gain_matrix = -gain_matrix
    # K still contains complex with ~=0j imaginary parts, get rid of them
    gain_matrix = np.real(gain_matrix)
    # package everything into the Bunch result object
    full_state_feedback = Bunch()
    full_state_feedback.gain_matrix = gain_matrix
    full_state_feedback.computed_poles = _order_complex_poles(
        np.linalg.eig(A - np.dot(B, gain_matrix))[0]
        )
    full_state_feedback.requested_poles = poles
    full_state_feedback.X = transfer_matrix
    full_state_feedback.rtol = cur_rtol
    full_state_feedback.nb_iter = nb_iter
    return full_state_feedback
| bsd-3-clause |
mingwpy/scipy | scipy/stats/stats.py | 18 | 169352 | # Copyright (c) Gary Strangman. All rights reserved
#
# Disclaimer
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
#
# Heavily adapted for use by SciPy 2002 by Travis Oliphant
"""
A collection of basic statistical functions for python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package where they work on arbitrary sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
Central Tendency
----------------
.. autosummary::
:toctree: generated/
gmean
hmean
mode
Moments
-------
.. autosummary::
:toctree: generated/
moment
variation
skew
kurtosis
normaltest
Moments Handling NaN:
.. autosummary::
:toctree: generated/
nanmean
nanmedian
nanstd
Altered Versions
----------------
.. autosummary::
:toctree: generated/
tmean
tvar
tstd
tsem
describe
Frequency Stats
---------------
.. autosummary::
:toctree: generated/
itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
Variability
-----------
.. autosummary::
:toctree: generated/
obrientransform
signaltonoise
sem
Trimming Functions
------------------
.. autosummary::
:toctree: generated/
threshold
trimboth
trim1
Correlation Functions
---------------------
.. autosummary::
:toctree: generated/
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
linregress
theilslopes
Inferential Stats
-----------------
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
power_divergence
ks_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
combine_pvalues
Probability Calculations
------------------------
.. autosummary::
:toctree: generated/
chisqprob
betai
ANOVA Functions
---------------
.. autosummary::
:toctree: generated/
f_oneway
f_value
Support Functions
-----------------
.. autosummary::
:toctree: generated/
ss
square_of_sums
rankdata
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
from __future__ import division, print_function, absolute_import
import warnings
import math
from collections import namedtuple
from scipy._lib.six import xrange
# Scipy imports.
from scipy._lib.six import callable, string_types
from numpy import array, asarray, ma, zeros
import scipy.special as special
import scipy.linalg as linalg
import numpy as np
from . import distributions
from . import mstats_basic
from ._distn_infrastructure import _lazywhere
from ._stats_mstats_common import find_repeats, linregress, theilslopes
from ._rank import rankdata, tiecorrect
# Names exported by ``from scipy.stats.stats import *``.  A few entries
# (find_repeats, linregress, theilslopes, rankdata, tiecorrect) are
# re-exported from sibling modules imported above.
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
           'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
           'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
           'normaltest', 'jarque_bera', 'itemfreq',
           'scoreatpercentile', 'percentileofscore', 'histogram',
           'histogram2', 'cumfreq', 'relfreq', 'obrientransform',
           'signaltonoise', 'sem', 'zmap', 'zscore', 'threshold',
           'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
           'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
           'kendalltau', 'linregress', 'theilslopes', 'ttest_1samp',
           'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
           'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
           'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
           'chisqprob', 'betai',
           'f_value_wilks_lambda', 'f_value', 'f_value_multivariate',
           'ss', 'square_of_sums', 'fastsort', 'rankdata', 'nanmean',
           'nanstd', 'nanmedian', 'combine_pvalues', ]
def _chk_asarray(a, axis):
    """Coerce `a` to an ndarray and resolve the axis.

    ``axis=None`` means "operate on the flattened array", so the input is
    raveled and the effective axis becomes 0.  Zero-dimensional inputs are
    promoted to 1-D so that downstream reductions along an axis work.
    """
    arr = np.ravel(a) if axis is None else np.asarray(a)
    out_axis = 0 if axis is None else axis
    return (np.atleast_1d(arr) if arr.ndim == 0 else arr), out_axis
def _chk2_asarray(a, b, axis):
    """Coerce two inputs to ndarrays sharing one effective axis.

    Same contract as `_chk_asarray`, applied to both `a` and `b`:
    ``axis=None`` ravels both and maps to axis 0; 0-d inputs become 1-D.
    """
    if axis is None:
        a, b = np.ravel(a), np.ravel(b)
        out_axis = 0
    else:
        a, b = np.asarray(a), np.asarray(b)
        out_axis = axis
    a = np.atleast_1d(a) if a.ndim == 0 else a
    b = np.atleast_1d(b) if b.ndim == 0 else b
    return a, b, out_axis
def _contains_nan(a, nan_policy='propagate'):
    """Check `a` for nans and validate `nan_policy`.

    Parameters
    ----------
    a : array_like
        Input data.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        How the caller intends to handle nans.

    Returns
    -------
    contains_nan : bool
        True if a nan was detected in `a` (False when the check could not
        be performed at all).
    nan_policy : str
        The policy, possibly downgraded to 'omit' when the nan check
        could not be performed on this input type.

    Raises
    ------
    ValueError
        If `nan_policy` is not one of the three accepted values, or if it
        is 'raise' and `a` contains nan.
    """
    if nan_policy not in ('propagate', 'raise', 'omit'):
        # Bug fix: the message previously listed 'ignore', which is not an
        # accepted value; the valid third option is 'omit'.
        raise ValueError("nan_policy must be either 'propagate', 'raise', or "
                         "'omit'")
    try:
        # Calling np.sum to avoid creating a huge array into memory
        # e.g. np.isnan(a).any()
        with np.errstate(invalid='ignore'):
            contains_nan = np.isnan(np.sum(a))
    except TypeError:
        # If the check cannot be properly performed we fall back to omitting
        # nan values and raising a warning. This can happen when attempting to
        # sum things that are not numbers (e.g. as in the function `mode`).
        contains_nan = False
        nan_policy = 'omit'
        warnings.warn("The input array could not be properly checked for nan "
                      "values. nan values will be ignored.", RuntimeWarning)
    if contains_nan and nan_policy == 'raise':
        raise ValueError("The input contains nan values")
    return (contains_nan, nan_policy)
#######
# NAN friendly functions
########
@np.deprecate(message="scipy.stats.nanmean is deprecated in scipy 0.15.0 "
                      "in favour of numpy.nanmean.")
def nanmean(x, axis=0):
    """
    Compute the mean over the given axis ignoring nans.

    Parameters
    ----------
    x : ndarray
        Input array.
    axis : int or None, optional
        Axis along which the mean is computed. Default is 0.
        If None, compute over the whole array `x`.

    Returns
    -------
    m : float
        The mean of `x`, ignoring nans.

    See Also
    --------
    nanstd, nanmedian

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.linspace(0, 4, 3)
    >>> a
    array([ 0.,  2.,  4.])
    >>> a[-1] = np.nan
    >>> stats.nanmean(a)
    1.0
    """
    x, axis = _chk_asarray(x, axis)
    x = x.copy()
    nan_mask = np.isnan(x)
    # Fraction of valid (non-nan) entries per slice; dividing the
    # zero-filled mean by this rescales it to a mean over valid entries.
    valid_frac = 1.0 - np.sum(nan_mask, axis) / x.shape[axis]
    x[nan_mask] = 0.0
    return np.mean(x, axis) / valid_frac
@np.deprecate(message="scipy.stats.nanstd is deprecated in scipy 0.15 "
                      "in favour of numpy.nanstd.\nNote that numpy.nanstd "
                      "has a different signature.")
def nanstd(x, axis=0, bias=False):
    """
    Compute the standard deviation over the given axis, ignoring nans.

    Parameters
    ----------
    x : array_like
        Input array.
    axis : int or None, optional
        Axis along which the standard deviation is computed. Default is 0.
        If None, compute over the whole array `x`.
    bias : bool, optional
        If True, the biased (normalized by N) definition is used. If False
        (default), the unbiased definition is used.

    Returns
    -------
    s : float
        The standard deviation.

    See Also
    --------
    nanmean, nanmedian

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(10, dtype=float)
    >>> a[1:3] = np.nan
    >>> np.std(a)
    nan
    >>> stats.nanstd(a)
    2.9154759474226504
    >>> stats.nanstd(a.reshape(2, 5), axis=1)
    array([ 2.0817,  1.5811])
    >>> stats.nanstd(a.reshape(2, 5), axis=None)
    2.9154759474226504
    """
    x, axis = _chk_asarray(x, axis)
    x = x.copy()
    Norig = x.shape[axis]
    mask = np.isnan(x)
    Nnan = np.sum(mask, axis) * 1.0  # nan count per slice, forced to float
    n = Norig - Nnan                 # valid-sample count per slice
    x[mask] = 0.0                    # zero-fill nans so sums skip them
    m1 = np.sum(x, axis) / n         # mean over the valid entries only
    if axis:
        # Reduced mean lost the `axis` dimension; re-insert it so it
        # broadcasts back against `x`.
        d = x - np.expand_dims(m1, axis)
    else:
        # axis == 0: plain broadcasting already lines up correctly.
        d = x - m1
    d *= d
    # The zero-filled nan positions each contributed m1**2 to the sum of
    # squared deviations; subtract m1*m1*Nnan to remove that spurious part.
    m2 = np.sum(d, axis) - m1 * m1 * Nnan
    if bias:
        m2c = m2 / n          # biased: normalize by N
    else:
        m2c = m2 / (n - 1.0)  # unbiased: normalize by N-1
    return np.sqrt(m2c)
def _nanmedian(arr1d):  # This only works on 1d arrays
    """Compute the median of a rank-1 array, ignoring nans.

    Private helper, applied along an axis by `nanmedian`.

    Parameters
    ----------
    arr1d : ndarray
        Input array, of rank 1.

    Returns
    -------
    m : float
        The median of the non-nan values; nan (with a RuntimeWarning) if
        every value is nan.
    """
    x = arr1d.copy()
    c = np.isnan(x)
    s = np.where(c)[0]  # indices of the nan entries
    if s.size == x.size:
        # Every entry is nan: no median to compute.
        warnings.warn("All-NaN slice encountered", RuntimeWarning)
        return np.nan
    elif s.size != 0:
        # Compact the valid values into a prefix of `x` so the nans can be
        # sliced off the end in one step:
        # select non-nans at end of array
        enonan = x[-s.size:][~c[-s.size:]]
        # fill nans in beginning of array with non-nans of end
        x[s[:enonan.size]] = enonan
        # slice nans away
        x = x[:-s.size]
    # overwrite_input is safe: `x` is a private copy.
    return np.median(x, overwrite_input=True)
@np.deprecate(message="scipy.stats.nanmedian is deprecated in scipy 0.15 "
                      "in favour of numpy.nanmedian.")
def nanmedian(x, axis=0):
    """
    Compute the median along the given axis ignoring nan values.

    Parameters
    ----------
    x : array_like
        Input array.
    axis : int or None, optional
        Axis along which the median is computed. Default is 0.
        If None, compute over the whole array `x`.

    Returns
    -------
    m : float
        The median of `x` along `axis`.

    See Also
    --------
    nanstd, nanmean, numpy.nanmedian

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([0, 3, 1, 5, 5, np.nan])
    >>> stats.nanmedian(a)
    array(3.0)
    >>> b = np.array([0, 3, 1, 5, 5, np.nan, 5])
    >>> stats.nanmedian(b)
    array(4.0)
    """
    x, axis = _chk_asarray(x, axis)
    if x.ndim == 0:
        return float(x.item())
    if hasattr(np, 'nanmedian'):
        # numpy >= 1.9 ships a native nanmedian that is faster for some
        # cases; prefer it when available.
        return np.nanmedian(x, axis)
    result = np.apply_along_axis(_nanmedian, axis, x)
    # apply_along_axis over a 1-D input yields a 0-d array; unwrap it.
    return float(result.item()) if result.ndim == 0 else result
#####################################
# CENTRAL TENDENCY #
#####################################
def gmean(a, axis=0, dtype=None):
    """
    Compute the geometric mean along the specified axis.

    Returns the n-th root of ``(x1 * x2 * ... * xn)``, computed as
    ``exp(mean(log(a)))``.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : int or None, optional
        Axis along which the geometric mean is computed. Default is 0.
        If None, compute over the whole array `a`.
    dtype : dtype, optional
        Type of the returned array and of the accumulator in which the
        elements are summed. Defaults to the dtype of `a`, promoted to
        the default platform integer for narrow integer inputs.

    Returns
    -------
    gmean : ndarray
        See `dtype` parameter above.

    See Also
    --------
    numpy.mean : Arithmetic average
    numpy.average : Weighted average
    hmean : Harmonic mean

    Notes
    -----
    float64 intermediate and return values are used for integer inputs.
    Use masked arrays to ignore any non-finite values in the input or
    that arise in the calculations, since masked arrays automatically
    mask any non-finite values.
    """
    if not isinstance(a, np.ndarray):
        # Not an ndarray yet: convert, honoring any requested dtype.
        logs = np.log(np.array(a, dtype=dtype))
    elif dtype:
        # Already an ndarray but a dtype override was requested; preserve
        # masked-ness during the conversion.
        if isinstance(a, np.ma.MaskedArray):
            logs = np.log(np.ma.asarray(a, dtype=dtype))
        else:
            logs = np.log(np.asarray(a, dtype=dtype))
    else:
        logs = np.log(a)
    return np.exp(logs.mean(axis=axis))
def hmean(a, axis=0, dtype=None):
    """
    Calculate the harmonic mean along the specified axis.

    That is: ``n / (1/x1 + 1/x2 + ... + 1/xn)``.

    Parameters
    ----------
    a : array_like
        Input array, masked array or object that can be converted to an
        array.
    axis : int or None, optional
        Axis along which the harmonic mean is computed. Default is 0.
        If None, compute over the whole array `a`.
    dtype : dtype, optional
        Type of the returned array and of the accumulator in which the
        elements are summed. Defaults to the dtype of `a`, promoted to
        the default platform integer for narrow integer inputs.

    Returns
    -------
    hmean : ndarray
        See `dtype` parameter above.

    Raises
    ------
    ValueError
        If any element of `a` is not strictly positive (the harmonic
        mean is undefined there).

    See Also
    --------
    numpy.mean : Arithmetic average
    numpy.average : Weighted average
    gmean : Geometric mean

    Notes
    -----
    float64 intermediate and return values are used for integer inputs.
    Use masked arrays to ignore non-finite values in the input or that
    arise in the calculations.
    """
    if not isinstance(a, np.ndarray):
        a = np.array(a, dtype=dtype)
    if not np.all(a > 0):
        # Harmonic mean only defined if greater than zero.
        raise ValueError("Harmonic mean only defined if all elements greater than zero")
    if isinstance(a, np.ma.MaskedArray):
        # Masked entries must not count toward the sample size.
        size = a.count(axis)
    elif axis is None:
        a = a.ravel()
        size = a.shape[0]
    else:
        size = a.shape[axis]
    return size / np.sum(1.0/a, axis=axis, dtype=dtype)
def mode(a, axis=0, nan_policy='propagate'):
    """
    Returns an array of the modal (most common) value in the passed array.

    If there is more than one such value, only the first is returned.
    The bin-count for the modal bins is also returned.

    Parameters
    ----------
    a : array_like
        n-dimensional array of which to find mode(s).
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    mode : ndarray
        Array of modal values.
    count : ndarray
        Array of counts for each mode.

    Examples
    --------
    >>> a = np.array([[6, 8, 3, 0],
    ...               [3, 2, 1, 7],
    ...               [8, 1, 8, 4],
    ...               [5, 3, 0, 5],
    ...               [4, 7, 5, 9]])
    >>> from scipy import stats
    >>> stats.mode(a)
    (array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]]))

    To get mode of whole array, specify ``axis=None``:

    >>> stats.mode(a, axis=None)
    (array([3]), array([3]))
    """
    # Define the result type once; previously this namedtuple was created
    # twice in the function body (a harmless but confusing duplication).
    ModeResult = namedtuple('ModeResult', ('mode', 'count'))

    a, axis = _chk_asarray(a, axis)
    if a.size == 0:
        # Wrap the empty result in ModeResult for consistency with the
        # non-empty path; a namedtuple is still a plain tuple to callers.
        return ModeResult(np.array([]), np.array([]))

    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        a = ma.masked_invalid(a)
        return mstats_basic.mode(a, axis)

    scores = np.unique(np.ravel(a))  # get ALL unique values
    testshape = list(a.shape)
    testshape[axis] = 1
    oldmostfreq = np.zeros(testshape, dtype=a.dtype)
    oldcounts = np.zeros(testshape, dtype=int)
    # One pass per distinct value: count its occurrences along `axis` and
    # keep whichever value has the strictly higher count so far (ties keep
    # the earlier, i.e. smaller, value because `>` is strict).
    for score in scores:
        template = (a == score)
        counts = np.expand_dims(np.sum(template, axis), axis)
        mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
        oldcounts = np.maximum(counts, oldcounts)
        oldmostfreq = mostfrequent

    return ModeResult(mostfrequent, oldcounts)
def _mask_to_limits(a, limits, inclusive):
    """Return `a` as a MaskedArray with out-of-limits values masked.

    This is primarily a utility function.

    Parameters
    ----------
    a : array
    limits : (float or None, float or None)
        (lower limit, upper limit). Values below the lower limit or above
        the upper limit are masked. None means no limit on that side.
    inclusive : (bool, bool)
        (lower flag, upper flag). A True flag keeps values exactly equal
        to the corresponding limit unmasked.

    Returns
    -------
    A MaskedArray.

    Raises
    ------
    A ValueError if there are no values within the given limits.
    """
    lo, hi = limits
    lo_inclusive, hi_inclusive = inclusive
    masked = ma.MaskedArray(a)
    if lo is not None:
        mask_fn = ma.masked_less if lo_inclusive else ma.masked_less_equal
        masked = mask_fn(masked, lo)
    if hi is not None:
        mask_fn = ma.masked_greater if hi_inclusive else ma.masked_greater_equal
        masked = mask_fn(masked, hi)
    if masked.count() == 0:
        raise ValueError("No array values within given limits")
    return masked
def tmean(a, limits=None, inclusive=(True, True), axis=None):
    """
    Compute the trimmed mean.

    Arithmetic mean of the values of `a` that fall inside `limits`.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values outside this interval are ignored. None (default) keeps
        all values; either bound may itself be None for a half-open
        interval.
    inclusive : (bool, bool), optional
        (lower flag, upper flag): whether values exactly equal to a limit
        are kept. Default is (True, True).
    axis : int or None, optional
        Axis along which to compute test. Default is None.

    Returns
    -------
    tmean : float

    See also
    --------
    trim_mean : returns mean after trimming a proportion from both tails.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tmean(x)
    9.5
    >>> stats.tmean(x, (3,17))
    10.0
    """
    arr = asarray(a)
    if limits is None:
        # No trimming requested: plain mean over the whole array.
        return np.mean(arr, None)
    trimmed = _mask_to_limits(arr.ravel(), limits, inclusive)
    return trimmed.mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
    """
    Compute the trimmed variance.

    Sample variance of the values of `a` inside `limits`.

    Parameters
    ----------
    a : array_like
        Array of values.
    limits : None or (lower limit, upper limit), optional
        Values outside this interval are ignored. None (default) keeps
        all values; either bound may itself be None for a half-open
        interval.
    inclusive : (bool, bool), optional
        (lower flag, upper flag): whether values exactly equal to a limit
        are kept. Default is (True, True).
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Delta degrees of freedom. Default is 1.

    Returns
    -------
    tvar : float
        Trimmed variance.

    Notes
    -----
    `tvar` computes the unbiased sample variance, i.e. it uses a
    correction factor ``n / (n - 1)``.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tvar(x)
    35.0
    >>> stats.tvar(x, (3,17))
    20.0
    """
    arr = asarray(a).astype(float).ravel()
    if limits is None:
        # Untrimmed path: unbiased variance via the n/(n-1) correction.
        n = len(arr)
        return arr.var() * n/(n-1.)
    trimmed = _mask_to_limits(arr, limits, inclusive)
    return np.ma.var(trimmed, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
    """
    Compute the trimmed minimum.

    Minimum of `a` along `axis`, considering only values at or above
    `lowerlimit`.

    Parameters
    ----------
    a : array_like
        array of values
    lowerlimit : None or float, optional
        Values below this limit are ignored. None (default) keeps all
        values.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    inclusive : {True, False}, optional
        Whether values exactly equal to the lower limit are kept.
        Default is True.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    tmin : float, int or ndarray

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tmin(x)
    0
    >>> stats.tmin(x, 13)
    13
    >>> stats.tmin(x, 13, inclusive=False)
    14
    """
    a, axis = _chk_asarray(a, axis)
    # Mask everything below the lower limit; no upper bound.
    masked = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
    contains_nan, nan_policy = _contains_nan(masked, nan_policy)
    if contains_nan and nan_policy == 'omit':
        masked = ma.masked_invalid(masked)
    reduced = ma.minimum.reduce(masked, axis).data
    # Unwrap 0-d results to a scalar.
    return reduced[()] if reduced.ndim == 0 else reduced
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
    """
    Compute the trimmed maximum.

    Maximum of `a` along `axis`, considering only values at or below
    `upperlimit`.

    Parameters
    ----------
    a : array_like
        array of values
    upperlimit : None or float, optional
        Values above this limit are ignored. None (default) keeps all
        values.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    inclusive : {True, False}, optional
        Whether values exactly equal to the upper limit are kept.
        Default is True.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    tmax : float, int or ndarray

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tmax(x)
    19
    >>> stats.tmax(x, 13)
    13
    >>> stats.tmax(x, 13, inclusive=False)
    12
    """
    a, axis = _chk_asarray(a, axis)
    # Mask everything above the upper limit; no lower bound.
    masked = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
    contains_nan, nan_policy = _contains_nan(masked, nan_policy)
    if contains_nan and nan_policy == 'omit':
        masked = ma.masked_invalid(masked)
    reduced = ma.maximum.reduce(masked, axis).data
    # Unwrap 0-d results to a scalar.
    return reduced[()] if reduced.ndim == 0 else reduced
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
    """
    Compute the trimmed sample standard deviation.

    Square root of the trimmed variance `tvar` for the same arguments.

    Parameters
    ----------
    a : array_like
        array of values
    limits : None or (lower limit, upper limit), optional
        Values outside this interval are ignored. None (default) keeps
        all values; either bound may itself be None for a half-open
        interval.
    inclusive : (bool, bool), optional
        (lower flag, upper flag): whether values exactly equal to a limit
        are kept. Default is (True, True).
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Delta degrees of freedom. Default is 1.

    Returns
    -------
    tstd : float

    Notes
    -----
    `tstd` computes the unbiased sample standard deviation, i.e. it uses
    a correction factor ``n / (n - 1)``.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tstd(x)
    5.9160797830996161
    >>> stats.tstd(x, (3,17))
    4.4721359549995796
    """
    trimmed_variance = tvar(a, limits, inclusive, axis, ddof)
    return np.sqrt(trimmed_variance)
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
    """
    Compute the trimmed standard error of the mean.

    Standard error of the mean for the values of `a` inside `limits`.

    Parameters
    ----------
    a : array_like
        array of values
    limits : None or (lower limit, upper limit), optional
        Values outside this interval are ignored. None (default) keeps
        all values; either bound may itself be None for a half-open
        interval.
    inclusive : (bool, bool), optional
        (lower flag, upper flag): whether values exactly equal to a limit
        are kept. Default is (True, True).
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Delta degrees of freedom. Default is 1.

    Returns
    -------
    tsem : float

    Notes
    -----
    `tsem` uses unbiased sample standard deviation, i.e. it uses a
    correction factor ``n / (n - 1)``.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.tsem(x)
    1.3228756555322954
    >>> stats.tsem(x, (3,17))
    1.1547005383792515
    """
    arr = np.asarray(a).ravel()
    if limits is None:
        # Untrimmed path: classic s / sqrt(n).
        return arr.std(ddof=ddof) / np.sqrt(arr.size)
    trimmed = _mask_to_limits(arr, limits, inclusive)
    stdev = np.sqrt(np.ma.var(trimmed, ddof=ddof, axis=axis))
    return stdev / np.sqrt(trimmed.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
    r"""
    Calculate the nth moment about the mean for a sample.

    A moment is a specific quantitative measure of the shape of a set of
    points. It is often used to calculate coefficients of skewness and
    kurtosis due to its close relationship with them.

    Parameters
    ----------
    a : array_like
        data
    moment : int or array_like of ints, optional
        order of central moment that is returned. Default is 1.
    axis : int or None, optional
        Axis along which the central moment is computed. Default is 0.
        If None, compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    n-th central moment : ndarray or float
        The appropriate moment along the given axis or over all values if
        axis is None. The denominator for the moment calculation is the
        number of observations; no degrees of freedom correction is done.

    See also
    --------
    kurtosis, skew, describe

    Notes
    -----
    The k-th central moment of a data sample is

    .. math::

        m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k

    where n is the number of samples and x-bar is the mean. This function
    uses exponentiation by squares [1]_ for efficiency.

    References
    ----------
    .. [1] http://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
    """
    a, axis = _chk_asarray(a, axis)
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan:
        if nan_policy == 'omit':
            a = ma.masked_invalid(a)
            return mstats_basic.moment(a, moment, axis)
        if nan_policy == 'propagate':
            return np.nan

    if a.size == 0:
        # Empty input: nan(s), shaped to match a scalar or array `moment`.
        if np.isscalar(moment):
            return np.nan
        return np.ones(np.asarray(moment).shape, dtype=np.float64) * np.nan

    if np.isscalar(moment):
        return _moment(a, moment, axis)
    # array_like moment input: one value per requested order.
    return np.array([_moment(a, order, axis) for order in moment])
def _moment(a, moment, axis):
    """Compute the `moment`-th central moment of `a` along `axis`.

    Uses exponentiation by squares to evaluate the power efficiently.
    `moment` must be integer-valued (a float with zero fractional part is
    accepted).
    """
    if np.abs(moment - np.round(moment)) > 0:
        raise ValueError("All moment parameters must be integers")

    if moment == 0 or moment == 1:
        # Moments 0 and 1 are 1 and 0 by definition; build a constant
        # result of the right shape (a scalar for 1-D input).
        shape = list(a.shape)
        del shape[axis]
        if shape:
            return (np.ones(shape, dtype=float) if moment == 0
                    else np.zeros(shape, dtype=float))
        return 1.0 if moment == 0 else np.float64(0.0)

    # Exponentiation by squares: build the chain of exponents to visit,
    # halving (with adjustment for odd values) down to 2 or 1.
    n_list = [moment]
    current = moment
    while current > 2:
        current = (current - 1) / 2 if current % 2 else current / 2
        n_list.append(current)

    # Starting point for exponentiation by squares.
    a_centered = a - np.expand_dims(np.mean(a, axis), axis)
    s = a_centered.copy() if n_list[-1] == 1 else a_centered**2
    # Square up the chain, multiplying in one extra factor for odd steps.
    for exponent in n_list[-2::-1]:
        s = s**2
        if exponent % 2:
            s *= a_centered
    return np.mean(s, axis)
def variation(a, axis=0, nan_policy='propagate'):
    """
    Compute the coefficient of variation: the ratio of the biased
    standard deviation to the mean.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate the coefficient of variation.
        Default is 0. If None, compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    variation : ndarray
        The calculated variation along the requested axis.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.
    """
    a, axis = _chk_asarray(a, axis)
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan:
        if nan_policy == 'omit':
            return mstats_basic.variation(ma.masked_invalid(a), axis)
        if nan_policy == 'propagate':
            return np.nan
    return a.std(axis) / a.mean(axis)
def skew(a, axis=0, bias=True, nan_policy='propagate'):
    """
    Computes the skewness of a data set.

    For normally distributed data, the skewness should be about 0. A skewness
    value > 0 means that there is more weight in the left tail of the
    distribution. The function `skewtest` can be used to determine if the
    skewness value is close enough to 0, statistically speaking.

    Parameters
    ----------
    a : ndarray
        data
    axis : int or None, optional
        Axis along which skewness is calculated. Default is 0.
        If None, compute over the whole array `a`.
    bias : bool, optional
        If False, then the calculations are corrected for statistical bias.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    skewness : ndarray
        The skewness of values along an axis, returning 0 where all values
        are equal.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.
       Section 2.2.24.1
    """
    a, axis = _chk_asarray(a, axis)
    n = a.shape[axis]
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # Delegate to the masked-array implementation with nans masked out.
        a = ma.masked_invalid(a)
        return mstats_basic.skew(a, axis, bias)
    if contains_nan and nan_policy == 'propagate':
        return np.nan
    m2 = moment(a, 2, axis)  # biased variance
    m3 = moment(a, 3, axis)  # third central moment
    zero = (m2 == 0)
    # g1 = m3 / m2**1.5, defined as 0 where the variance is 0 (all-equal
    # slice); _lazywhere avoids evaluating the ratio at those positions.
    vals = _lazywhere(~zero, (m2, m3),
                      lambda m2, m3: m3 / m2**1.5,
                      0.)
    if not bias:
        # Bias-correct only where the correction is defined: needs more
        # than 2 observations and a nonzero variance.
        can_correct = (n > 2) & (m2 > 0)
        if can_correct.any():
            m2 = np.extract(can_correct, m2)
            m3 = np.extract(can_correct, m3)
            nval = np.sqrt((n-1.0)*n) / (n-2.0) * m3/m2**1.5
            # Write the corrected values back into the matching positions.
            np.place(vals, can_correct, nval)
    if vals.ndim == 0:
        # 0-d result: return a plain scalar.
        return vals.item()
    return vals
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
    """
    Computes the kurtosis (Fisher or Pearson) of a dataset.

    Kurtosis is the fourth central moment divided by the square of the
    variance. If Fisher's definition is used, then 3.0 is subtracted from
    the result to give 0.0 for a normal distribution.

    If bias is False then the kurtosis is calculated using k statistics to
    eliminate bias coming from biased moment estimators.

    Use `kurtosistest` to see if result is close enough to normal.

    Parameters
    ----------
    a : array
        data for which the kurtosis is calculated
    axis : int or None, optional
        Axis along which the kurtosis is calculated. Default is 0.
        If None, compute over the whole array `a`.
    fisher : bool, optional
        If True, Fisher's definition is used (normal ==> 0.0). If False,
        Pearson's definition is used (normal ==> 3.0).
    bias : bool, optional
        If False, then the calculations are corrected for statistical bias.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    kurtosis : array
        The kurtosis of values along an axis. If all values are equal,
        return -3 for Fisher's definition and 0 for Pearson's definition.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall: New
       York. 2000.
    """
    a, axis = _chk_asarray(a, axis)
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # Delegate to the masked-array implementation with nans masked out.
        a = ma.masked_invalid(a)
        return mstats_basic.kurtosis(a, axis, fisher, bias)
    if contains_nan and nan_policy == 'propagate':
        return np.nan
    n = a.shape[axis]
    m2 = moment(a, 2, axis)  # biased variance
    m4 = moment(a, 4, axis)  # fourth central moment
    zero = (m2 == 0)
    # Silence divide-by-zero warnings for the all-equal slices; np.where
    # substitutes 0 there (Pearson value adjusted to -3/0 below).
    olderr = np.seterr(all='ignore')
    try:
        vals = np.where(zero, 0, m4 / m2**2.0)
    finally:
        np.seterr(**olderr)
    if not bias:
        # Bias-correct only where the correction is defined: needs more
        # than 3 observations and a nonzero variance.
        can_correct = (n > 3) & (m2 > 0)
        if can_correct.any():
            m2 = np.extract(can_correct, m2)
            m4 = np.extract(can_correct, m4)
            nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
            # nval is the Fisher (excess) form; store Pearson (+3.0) since
            # the Fisher adjustment is applied uniformly at return time.
            np.place(vals, can_correct, nval + 3.0)
    if vals.ndim == 0:
        vals = vals.item()  # array scalar
    if fisher:
        return vals - 3
    else:
        return vals
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
    """
    Computes several descriptive statistics of the passed array.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int or None, optional
        Axis along which statistics are calculated. Default is 0.
        If None, compute over the whole array `a`.
    ddof : int, optional
        Delta degrees of freedom (only used for the variance). Default is 1.
    bias : bool, optional
        If False, the skewness and kurtosis calculations are corrected for
        statistical bias.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' performs the calculations
        ignoring nan values. Default is 'propagate'.

    Returns
    -------
    nobs : int
        Number of observations (length of data along `axis`).
    minmax : tuple of ndarrays or floats
        Minimum and maximum value of the data array.
    mean : ndarray or float
        Arithmetic mean of the data along axis.
    variance : ndarray or float
        Unbiased variance of the data along axis; denominator is the number
        of observations minus one.
    skewness : ndarray or float
        Skewness, based on moment calculations with denominator equal to
        the number of observations, i.e. no degrees of freedom correction.
    kurtosis : ndarray or float
        Kurtosis (Fisher), normalized so that it is zero for the normal
        distribution. No degrees of freedom are used.

    See Also
    --------
    skew, kurtosis

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(10)
    >>> stats.describe(a)
    DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.1666666666666661,
                   skewness=0.0, kurtosis=-1.2242424242424244)
    """
    a, axis = _chk_asarray(a, axis)

    # Named result type keeps the six returned statistics self-describing.
    DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean',
                                                   'variance', 'skewness',
                                                   'kurtosis'))

    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan:
        if nan_policy == 'omit':
            # Mask nans and delegate to the masked-array implementation.
            return mstats_basic.describe(ma.masked_invalid(a), axis,
                                         ddof, bias)
        if nan_policy == 'propagate':
            return DescribeResult(*np.full(6, np.nan))

    if a.size == 0:
        raise ValueError("The input must not be empty.")

    nobs = a.shape[axis]
    extremes = (np.min(a, axis=axis), np.max(a, axis=axis))
    avg = np.mean(a, axis=axis)
    variance = np.var(a, axis=axis, ddof=ddof)
    skewness = skew(a, axis, bias=bias)
    kurt = kurtosis(a, axis, bias=bias)
    return DescribeResult(nobs, extremes, avg, variance, skewness, kurt)
#####################################
# NORMALITY TESTS #
#####################################
def skewtest(a, axis=0, nan_policy='propagate'):
    """
    Tests whether the skew is different from the normal distribution.

    This function tests the null hypothesis that the skewness of
    the population that the sample was drawn from is the same
    as that of a corresponding normal distribution.

    Parameters
    ----------
    a : array
        The data to be tested
    axis : int or None, optional
        Axis along which statistics are calculated. Default is 0.
        If None, compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    statistic : float
        The computed z-score for this test.
    pvalue : float
        a 2-sided p-value for the hypothesis test

    Notes
    -----
    The sample size must be at least 8.
    """
    a, axis = _chk_asarray(a, axis)
    SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    # With 'omit', mask the nans and defer to the masked-array implementation.
    if contains_nan and nan_policy == 'omit':
        a = ma.masked_invalid(a)
        return mstats_basic.skewtest(a, axis)
    if contains_nan and nan_policy == 'propagate':
        return SkewtestResult(np.nan, np.nan)
    if axis is None:
        a = np.ravel(a)
        axis = 0
    b2 = skew(a, axis)
    n = float(a.shape[axis])
    if n < 8:
        raise ValueError(
            "skewtest is not valid with less than 8 samples; %i samples"
            " were given." % int(n))
    # Transform the sample skewness b2 into an approximately standard-normal
    # statistic Z.  NOTE(review): the constants below match the published
    # skewness-test transform; provenance is not visible in this file.
    y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
    beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
             ((n-2.0) * (n+5) * (n+7) * (n+9)))
    W2 = -1 + math.sqrt(2 * (beta2 - 1))
    delta = 1 / math.sqrt(0.5 * math.log(W2))
    alpha = math.sqrt(2.0 / (W2 - 1))
    # Guard against log(0) in the next line: zero skewness is mapped to
    # y == 1, which the asinh-form transform sends to Z == 0.
    y = np.where(y == 0, 1, y)
    # log(t + sqrt(t**2 + 1)) is the inverse hyperbolic sine of t = y/alpha.
    Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
    # Two-sided p-value from the standard normal survival function.
    return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
def kurtosistest(a, axis=0, nan_policy='propagate'):
    """
    Tests whether a dataset has normal kurtosis

    This function tests the null hypothesis that the kurtosis
    of the population from which the sample was drawn is that
    of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.

    Parameters
    ----------
    a : array
        array of the sample data
    axis : int or None, optional
        Axis along which to compute test. Default is 0. If None,
        compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns nan,
        'raise' throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    statistic : float
        The computed z-score for this test.
    pvalue : float
        The 2-sided p-value for the hypothesis test

    Notes
    -----
    Valid only for n>20. The Z-score is set to 0 for bad entries.
    """
    a, axis = _chk_asarray(a, axis)
    KurtosistestResult = namedtuple('KurtosistestResult', ('statistic',
                                                           'pvalue'))
    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    # With 'omit', mask the nans and defer to the masked-array implementation.
    if contains_nan and nan_policy == 'omit':
        a = ma.masked_invalid(a)
        return mstats_basic.kurtosistest(a, axis)
    if contains_nan and nan_policy == 'propagate':
        return KurtosistestResult(np.nan, np.nan)
    n = float(a.shape[axis])
    if n < 5:
        raise ValueError(
            "kurtosistest requires at least 5 observations; %i observations"
            " were given." % int(n))
    if n < 20:
        warnings.warn("kurtosistest only valid for n>=20 ... continuing "
                      "anyway, n=%i" % int(n))
    # Pearson kurtosis (normal ==> 3.0), standardized below against its
    # exact mean E and variance varb2 under normality.
    b2 = kurtosis(a, axis, fisher=False)
    E = 3.0*(n-1) / (n+1)
    varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))
    x = (b2-E) / np.sqrt(varb2)
    sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
                                                        (n*(n-2)*(n-3)))
    A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
    term1 = 1 - 2/(9.0*A)
    denom = 1 + x*np.sqrt(2/(A-4.0))
    # Entries with a negative denominator are "bad entries" (see Notes):
    # they are tagged with the sentinel value 99 and their Z is zeroed below.
    denom = np.where(denom < 0, 99, denom)
    # NOTE(review): after the sentinel substitution above, `denom < 0` can no
    # longer be true here, so this np.where always takes the power branch.
    term2 = np.where(denom < 0, term1, np.power((1-2.0/A)/denom, 1/3.0))
    Z = (term1 - term2) / np.sqrt(2/(9.0*A))
    # Zero out the Z-scores for the sentinel-tagged entries.
    Z = np.where(denom == 99, 0, Z)
    if Z.ndim == 0:
        Z = Z[()]  # unwrap 0-d array to an array scalar
    # zprob uses upper tail, so Z needs to be positive
    return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
def normaltest(a, axis=0, nan_policy='propagate'):
    """
    Tests whether a sample differs from a normal distribution.

    This function tests the null hypothesis that a sample comes from a
    normal distribution. It is based on D'Agostino and Pearson's [1]_,
    [2]_ test that combines skew and kurtosis to produce an omnibus test
    of normality.

    Parameters
    ----------
    a : array_like
        The array containing the data to be tested.
    axis : int or None, optional
        Axis along which to compute test. Default is 0. If None,
        compute over the whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' performs the calculations
        ignoring nan values. Default is 'propagate'.

    Returns
    -------
    statistic : float or array
        ``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
        ``k`` is the z-score returned by `kurtosistest`.
    pvalue : float or array
        A 2-sided chi squared probability for the hypothesis test.

    References
    ----------
    .. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
           moderate and large sample size," Biometrika, 58, 341-348
    .. [2] D'Agostino, R. and Pearson, E. S. (1973), "Testing for
           departures from normality," Biometrika, 60, 613-622
    """
    a, axis = _chk_asarray(a, axis)
    NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))

    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan:
        if nan_policy == 'omit':
            # Mask nans and delegate to the masked-array implementation.
            return mstats_basic.normaltest(ma.masked_invalid(a), axis)
        if nan_policy == 'propagate':
            return NormaltestResult(np.nan, np.nan)

    # Combine the two component z-scores into the omnibus statistic.
    s_z, _ = skewtest(a, axis)
    k_z, _ = kurtosistest(a, axis)
    statistic = s_z * s_z + k_z * k_z
    # Under the null, the statistic is chi-squared with 2 degrees of freedom.
    return NormaltestResult(statistic, distributions.chi2.sf(statistic, 2))
def jarque_bera(x):
    """
    Perform the Jarque-Bera goodness of fit test on sample data.

    The Jarque-Bera test tests whether the sample data has the skewness
    and kurtosis matching a normal distribution.

    Note that this test only works for a large enough number of data
    samples (>2000) as the test statistic asymptotically has a Chi-squared
    distribution with 2 degrees of freedom.

    Parameters
    ----------
    x : array_like
        Observations of a random variable.

    Returns
    -------
    jb_value : float
        The test statistic.
    p : float
        The p-value for the hypothesis test.

    References
    ----------
    .. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
           homoscedasticity and serial independence of regression residuals",
           6 Econometric Letters 255-259.

    Examples
    --------
    >>> from scipy import stats
    >>> np.random.seed(987654321)
    >>> x = np.random.normal(0, 1, 100000)
    >>> stats.jarque_bera(x)
    (4.7165707989581342, 0.09458225503041906)
    """
    x = np.asarray(x)
    n = float(x.size)
    if n == 0:
        raise ValueError('At least one observation is required.')

    # Central moments of order 2, 3 and 4 (biased, denominator n).
    deviations = x - x.mean()
    m2 = 1 / n * np.sum(deviations**2)
    m3 = 1 / n * np.sum(deviations**3)
    m4 = 1 / n * np.sum(deviations**4)

    skewness = m3 / m2**(3 / 2.)
    kurtosis = m4 / m2**2

    # JB statistic; chi-squared(2) under the null of normality.
    jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
    p = 1 - distributions.chi2.cdf(jb_value, 2)
    return jb_value, p
#####################################
# FREQUENCY FUNCTIONS #
#####################################
def itemfreq(a):
    """
    Returns a 2-D array of item frequencies.

    Parameters
    ----------
    a : (N,) array_like
        Input array.

    Returns
    -------
    itemfreq : (K, 2) ndarray
        A 2-D frequency table. Column 1 contains sorted, unique values from
        `a`, column 2 contains their respective counts.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
    >>> stats.itemfreq(a)
    array([[ 0.,  2.],
           [ 1.,  4.],
           [ 2.,  2.],
           [ 4.,  1.],
           [ 5.,  1.]])
    """
    # `np.unique` returns the sorted distinct values plus, for every element
    # of `a`, the index of its value in that sorted list; counting those
    # indices with bincount yields the per-value frequencies.
    values, inverse = np.unique(a, return_inverse=True)
    counts = np.bincount(inverse)
    return np.array([values, counts]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
                      axis=None):
    """
    Calculate the score at a given percentile of the input sequence.

    For example, the score at `per=50` is the median. If the desired
    quantile lies between two data points, we interpolate between them,
    according to the value of `interpolation_method`. If the parameter
    `limit` is provided, it should be a tuple (lower, upper) of two values.

    Parameters
    ----------
    a : array_like
        A 1-D array of values from which to extract score.
    per : array_like
        Percentile(s) at which to extract score. Values should be in range
        [0,100].
    limit : tuple, optional
        Tuple of two scalars, the lower and upper limits within which to
        compute the percentile. Values of `a` outside this (closed)
        interval will be ignored.
    interpolation_method : {'fraction', 'lower', 'higher'}, optional
        Interpolation to use when the desired quantile lies between two
        data points `i` and `j`:
          - fraction: ``i + (j - i) * fraction`` where ``fraction`` is the
            fractional part of the index surrounded by ``i`` and ``j``.
          - lower: ``i``.
          - higher: ``j``.
    axis : int, optional
        Axis along which the percentiles are computed. Default is None. If
        None, compute over the whole array `a`.

    Returns
    -------
    score : float or ndarray
        Score at percentile(s).

    See Also
    --------
    percentileofscore, numpy.percentile

    Notes
    -----
    This function will become obsolete in the future; for numpy >= 1.9,
    `numpy.percentile` provides the same functionality and is significantly
    faster.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(100)
    >>> stats.scoreatpercentile(a, 50)
    49.5
    """
    # adapted from NumPy's percentile function. When we require numpy >= 1.8,
    # the implementation of this function can be replaced by np.percentile.
    a = np.asarray(a)

    if a.size == 0:
        # Empty input: return nan(s) with a shape matching `per`.
        if np.isscalar(per):
            return np.nan
        return np.full(np.asarray(per).shape, np.nan, dtype=np.float64)

    if limit:
        # Keep only values inside the closed interval [limit[0], limit[1]].
        a = a[(limit[0] <= a) & (a <= limit[1])]

    sorted_data = np.sort(a, axis=axis)
    if axis is None:
        axis = 0

    return _compute_qth_percentile(sorted_data, per, interpolation_method,
                                   axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted, i, interpolation_method, axis)
for i in per]
return np.array(score)
if (per < 0) or (per > 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted.ndim
idx = per / 100. * (sorted.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted[indexer] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
    """
    The percentile rank of a score relative to a list of scores.

    A `percentileofscore` of, for example, 80% means that 80% of the
    scores in `a` are below the given score. In the case of gaps or
    ties, the exact definition depends on the optional keyword, `kind`.

    Parameters
    ----------
    a : array_like
        Array of scores to which `score` is compared.
    score : int or float
        Score that is compared to the elements in `a`.
    kind : {'rank', 'weak', 'strict', 'mean'}, optional
        Interpretation of the resulting score:
          - "rank": Average percentage ranking of score. In case of
            multiple matches, average the percentage rankings of all
            matching scores.
          - "weak": Corresponds to the definition of a cumulative
            distribution function; 80% means that 80% of values are less
            than or equal to the provided score.
          - "strict": Similar to "weak", except that only values strictly
            less than the given score are counted.
          - "mean": The average of the "weak" and "strict" scores.

    Returns
    -------
    pcos : float
        Percentile-position of score (0-100) relative to `a`.

    See Also
    --------
    numpy.percentile

    Examples
    --------
    >>> from scipy import stats
    >>> stats.percentileofscore([1, 2, 3, 4], 3)
    75.0
    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
    70.0
    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
    40.0
    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
    80.0
    >>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
    60.0
    """
    a = np.array(a)
    n = len(a)

    if kind == 'rank':
        if not np.any(a == score):
            # Score is absent: append it so it acquires a ranking position.
            a = np.append(a, score)
            a_len = np.arange(len(a))
        else:
            a_len = np.arange(len(a)) + 1.0

        a = np.sort(a)
        # Index directly with the boolean mask: wrapping it in a one-element
        # list (the old `[a == score]` form) is deprecated and an error in
        # modern NumPy.
        pct = (np.mean(a_len[a == score]) / n) * 100.0
        return pct
    elif kind == 'strict':
        return np.sum(a < score) / float(n) * 100
    elif kind == 'weak':
        return np.sum(a <= score) / float(n) * 100
    elif kind == 'mean':
        return (np.sum(a < score) + np.sum(a <= score)) * 50 / float(n)
    else:
        raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
@np.deprecate(message=("scipy.stats.histogram2 is deprecated in scipy 0.16.0; "
                       "use np.histogram2d instead"))
def histogram2(a, bins):
    """
    Compute histogram using divisions in bins.

    Count the number of times values from array `a` fall into numerical
    ranges defined by `bins`. Range x is given by
    ``bins[x] <= range_x < bins[x+1]`` where x = 0,N and N is the length of
    the `bins` array. The last range is given by
    ``bins[N] <= range_N < infinity``. Values less than ``bins[0]`` are not
    included in the histogram.

    Parameters
    ----------
    a : array_like of rank 1
        The array of values to be assigned into bins
    bins : array_like of rank 1
        Defines the ranges of values to use during histogramming.

    Returns
    -------
    histogram2 : ndarray of rank 1
        Each value represents the occurrences for a given bin (range) of
        values.
    """
    # comment: probably obsoleted by numpy.histogram()
    # searchsorted (side='left') gives, for each bin edge, the count of
    # values strictly below it; appending the total count and differencing
    # turns those cumulative positions into per-range occupancy counts.
    positions = np.searchsorted(np.sort(a), bins)
    positions = np.concatenate([positions, [len(a)]])
    return np.diff(positions)
def histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
    """
    Separates the range into several bins and returns the number of instances
    in each bin.

    Parameters
    ----------
    a : array_like
        Array of scores which will be put into bins.
    numbins : int, optional
        The number of bins to use for the histogram. Default is 10.
    defaultlimits : tuple (lower, upper), optional
        The lower and upper values for the range of the histogram.
        If no value is given, a range slightly larger than the range of the
        values in `a` is used: ``(a.min() - s, a.max() + s)``, where
        ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        The weights for each value in `a`. Default is None, which gives each
        value a weight of 1.0.
    printextras : bool, optional
        If True, warn when there are points falling outside the bin limits.
        Default is False.

    Returns
    -------
    count : ndarray
        Number of points (or sum of weights) in each bin.
    lowerlimit : float
        Lowest value of histogram, the lower limit of the first bin.
    binsize : float
        The size of the bins (all bins have the same size).
    extrapoints : int
        The number of points outside the range of the histogram.

    See Also
    --------
    numpy.histogram

    Notes
    -----
    This histogram is based on numpy's histogram but has a larger range by
    default if default limits is not set.
    """
    a = np.ravel(a)
    if defaultlimits is None:
        if a.size == 0:
            # Empty input: the range is undetermined, so use 0-1.
            defaultlimits = (0, 1)
        else:
            # Extend the bins slightly past the data's min and max.
            data_min = a.min()
            data_max = a.max()
            s = (data_max - data_min) / (2. * (numbins - 1.))
            defaultlimits = (data_min - s, data_max + s)

    # numpy does the actual binning.
    hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
                                   weights=weights)

    # Counts are not always floats; convert to keep the old output format.
    hist = np.array(hist, dtype=float)

    # An integer `bins` gives fixed-width bins, so one width describes all.
    binsize = bin_edges[1] - bin_edges[0]

    # Count the points falling outside the histogram range.
    extrapoints = int(np.count_nonzero((a < defaultlimits[0]) |
                                       (a > defaultlimits[1])))
    if extrapoints > 0 and printextras:
        warnings.warn("Points outside given histogram range = %s"
                      % extrapoints)

    HistogramResult = namedtuple('HistogramResult', ('count', 'lowerlimit',
                                                     'binsize', 'extrapoints'))
    return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
    """
    Returns a cumulative frequency histogram, using the histogram function.

    A cumulative histogram is a mapping that counts the cumulative number of
    observations in all of the bins up to the specified bin.

    Parameters
    ----------
    a : array_like
        Input array.
    numbins : int, optional
        The number of bins to use for the histogram. Default is 10.
    defaultreallimits : tuple (lower, upper), optional
        The lower and upper values for the range of the histogram.
        If no value is given, a range slightly larger than the range of the
        values in `a` is used: ``(a.min() - s, a.max() + s)``, where
        ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        The weights for each value in `a`. Default is None, which gives each
        value a weight of 1.0.

    Returns
    -------
    cumcount : ndarray
        Binned values of cumulative frequency.
    lowerlimit : float
        Lower real limit
    binsize : float
        Width of each bin.
    extrapoints : int
        Extra points.

    Examples
    --------
    >>> from scipy import stats
    >>> x = [1, 4, 2, 1, 3, 1]
    >>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
    >>> res.cumcount
    array([ 1.,  2.,  3.,  3.])
    >>> res.extrapoints
    3
    """
    # Bin the data first, then accumulate the per-bin counts.
    counts, lowlim, width, extra = histogram(a, numbins, defaultreallimits,
                                             weights=weights)
    cumcounts = np.cumsum(counts * 1, axis=0)

    CumfreqResult = namedtuple('CumfreqResult', ('cumcount', 'lowerlimit',
                                                 'binsize', 'extrapoints'))
    return CumfreqResult(cumcounts, lowlim, width, extra)
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
    """
    Returns a relative frequency histogram, using the histogram function.

    A relative frequency histogram is a mapping of the number of
    observations in each of the bins relative to the total of observations.

    Parameters
    ----------
    a : array_like
        Input array.
    numbins : int, optional
        The number of bins to use for the histogram. Default is 10.
    defaultreallimits : tuple (lower, upper), optional
        The lower and upper values for the range of the histogram.
        If no value is given, a range slightly larger than the range of the
        values in `a` is used: ``(a.min() - s, a.max() + s)``, where
        ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
    weights : array_like, optional
        The weights for each value in `a`. Default is None, which gives each
        value a weight of 1.0.

    Returns
    -------
    frequency : ndarray
        Binned values of relative frequency.
    lowerlimit : float
        Lower real limit
    binsize : float
        Width of each bin.
    extrapoints : int
        Extra points.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([2, 4, 1, 2, 3, 2])
    >>> res = stats.relfreq(a, numbins=4)
    >>> res.frequency
    array([ 0.16666667,  0.5       ,  0.16666667,  0.16666667])
    >>> np.sum(res.frequency)  # relative frequencies should add up to 1
    1.0
    """
    a = np.asanyarray(a)
    counts, lowlim, width, extra = histogram(a, numbins, defaultreallimits,
                                             weights=weights)
    # Normalize the counts by the number of observations along axis 0.
    frequency = counts / float(a.shape[0])

    RelfreqResult = namedtuple('RelfreqResult', ('frequency', 'lowerlimit',
                                                 'binsize', 'extrapoints'))
    return RelfreqResult(frequency, lowlim, width, extra)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
    """
    Computes the O'Brien transform on input data (any number of arrays).

    Used to test for homogeneity of variance prior to running one-way stats.
    Each array in ``*args`` is one level of a factor. If `f_oneway` is run
    on the transformed data and found significant, the variances are
    unequal. From Maxwell and Delaney [1]_, p.112.

    Parameters
    ----------
    args : tuple of array_like
        Any number of arrays.

    Returns
    -------
    obrientransform : ndarray
        Transformed data for use in an ANOVA. The first dimension
        of the result corresponds to the sequence of transformed
        arrays. If the arrays given are all 1-D of the same length,
        the return value is a 2-D array; otherwise it is a 1-D array
        of type object, with each element being an ndarray.

    References
    ----------
    .. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
           Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
    """
    TINY = np.sqrt(np.finfo(float).eps)

    transformed = []
    for sample in args:
        a = np.asarray(sample)
        n = len(a)
        mu = np.mean(a)
        sq = (a - mu)**2
        sumsq = sq.sum()

        # The O'Brien transform.
        t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))

        # Sanity check: the mean of the transformed data must equal the
        # sample variance of the original data.
        var = sumsq / (n - 1)
        if abs(var - np.mean(t)) > TINY:
            raise ValueError('Lack of convergence in obrientransform.')

        transformed.append(t)

    # np.array on a ragged sequence needs an explicit object dtype; use the
    # default dtype only when all transformed arrays share one shape.
    if len(transformed) < 2 or all(x.shape == transformed[0].shape
                                   for x in transformed[1:]):
        dt = None
    else:
        dt = object

    return np.array(transformed, dtype=dt)
@np.deprecate(message="scipy.stats.signaltonoise is deprecated in scipy 0.16.0")
def signaltonoise(a, axis=0, ddof=0):
    """
    The signal-to-noise ratio of the input data.

    Returns the signal-to-noise ratio of `a`, here defined as the mean
    divided by the standard deviation.

    Parameters
    ----------
    a : array_like
        An array_like object containing the sample data.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Degrees of freedom correction for standard deviation. Default is 0.

    Returns
    -------
    s2n : ndarray
        The mean to standard deviation ratio(s) along `axis`, or 0 where
        the standard deviation is 0.
    """
    a = np.asanyarray(a)
    mean_ = a.mean(axis)
    stdev = a.std(axis=axis, ddof=ddof)
    # Report 0 rather than inf/nan wherever the standard deviation vanishes.
    return np.where(stdev == 0, 0, mean_ / stdev)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
    """
    Calculates the standard error of the mean (or standard error of
    measurement) of the values in the input array.

    Parameters
    ----------
    a : array_like
        An array containing the values for which the standard error is
        returned.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Delta degrees-of-freedom. How many degrees of freedom to adjust
        for bias in limited samples relative to the population estimate
        of variance. Defaults to 1.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' performs the calculations
        ignoring nan values. Default is 'propagate'.

    Returns
    -------
    s : ndarray or float
        The standard error of the mean in the sample(s), along the input
        axis.

    Notes
    -----
    The default value for `ddof` is different to the default (0) used by
    other ddof containing routines, such as np.std and stats.nanstd.

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(20).reshape(5,4)
    >>> stats.sem(a)
    array([ 2.8284,  2.8284,  2.8284,  2.8284])
    """
    a, axis = _chk_asarray(a, axis)

    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan:
        if nan_policy == 'omit':
            # Mask nans and delegate to the masked-array implementation.
            return mstats_basic.sem(ma.masked_invalid(a), axis, ddof)
        if nan_policy == 'propagate':
            return np.nan

    # Standard error = sample standard deviation / sqrt(sample size).
    n = a.shape[axis]
    return np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
def zscore(a, axis=0, ddof=0):
    """
    Calculates the z score of each value in the sample, relative to the
    sample mean and standard deviation.

    Parameters
    ----------
    a : array_like
        An array like object containing the sample data.
    axis : int or None, optional
        Axis along which to operate. Default is 0. If None, compute over
        the whole array `a`.
    ddof : int, optional
        Degrees of freedom correction in the calculation of the
        standard deviation. Default is 0.

    Returns
    -------
    zscore : array_like
        The z-scores, standardized by mean and standard deviation of input
        array `a`.

    Notes
    -----
    This function preserves ndarray subclasses, and works also with
    matrices and masked arrays (it uses `asanyarray` instead of `asarray`
    for parameters).

    Examples
    --------
    >>> a = np.array([ 0.7972,  0.0767,  0.4383,  0.7866,  0.8091,  0.1954,
    ...                0.6307,  0.6599,  0.1065,  0.0508])
    >>> from scipy import stats
    >>> stats.zscore(a)
    array([ 1.1273, -1.247 , -0.0552,  1.0923,  1.1664, -0.8559,  0.5786,
            0.6748, -1.1488, -1.3324])
    """
    a = np.asanyarray(a)
    mean_ = a.mean(axis=axis)
    stdev = a.std(axis=axis, ddof=ddof)

    if axis and mean_.ndim < a.ndim:
        # Re-insert the reduced axis so the statistics broadcast against `a`.
        return ((a - np.expand_dims(mean_, axis=axis)) /
                np.expand_dims(stdev, axis=axis))
    return (a - mean_) / stdev
def zmap(scores, compare, axis=0, ddof=0):
    """
    Calculates the relative z-scores.

    Returns an array of z-scores, i.e., scores that are standardized to
    zero mean and unit variance, where mean and variance are calculated
    from the comparison array.

    Parameters
    ----------
    scores : array_like
        The input for which z-scores are calculated.
    compare : array_like
        The input from which the mean and standard deviation of the
        normalization are taken; assumed to have the same dimension as
        `scores`.
    axis : int or None, optional
        Axis over which mean and variance of `compare` are calculated.
        Default is 0. If None, compute over the whole array `scores`.
    ddof : int, optional
        Degrees of freedom correction in the calculation of the
        standard deviation. Default is 0.

    Returns
    -------
    zscore : array_like
        Z-scores, in the same shape as `scores`.

    Notes
    -----
    This function preserves ndarray subclasses, and works also with
    matrices and masked arrays (it uses `asanyarray` instead of
    `asarray` for parameters).

    Examples
    --------
    >>> from scipy.stats import zmap
    >>> a = [0.5, 2.0, 2.5, 3]
    >>> b = [0, 1, 2, 3, 4]
    >>> zmap(a, b)
    array([-1.06066017,  0.        ,  0.35355339,  0.70710678])
    """
    scores = np.asanyarray(scores)
    compare = np.asanyarray(compare)
    mean = compare.mean(axis=axis)
    std = compare.std(axis=axis, ddof=ddof)
    # Re-insert the reduced axis for nonzero `axis` so the statistics
    # broadcast against `scores`; axis 0 / None broadcast as-is.
    if axis and mean.ndim < compare.ndim:
        mean = np.expand_dims(mean, axis=axis)
        std = np.expand_dims(std, axis=axis)
    return (scores - mean) / std
#####################################
# TRIMMING FUNCTIONS #
#####################################
@np.deprecate(message="stats.threshold is deprecated in scipy 0.17.0")
def threshold(a, threshmin=None, threshmax=None, newval=0):
    """
    Clip array to a given value.

    Similar to numpy.clip(), except that values less than `threshmin` or
    greater than `threshmax` are replaced by `newval`, instead of by
    `threshmin` and `threshmax` respectively.

    Parameters
    ----------
    a : array_like
        Data to threshold.
    threshmin : float, int or None, optional
        Minimum threshold, defaults to None.
    threshmax : float, int or None, optional
        Maximum threshold, defaults to None.
    newval : float or int, optional
        Value to put in place of values in `a` outside of bounds.
        Defaults to 0.

    Returns
    -------
    out : ndarray
        The clipped input array, with values less than `threshmin` or
        greater than `threshmax` replaced with `newval`.

    Examples
    --------
    >>> a = np.array([9, 9, 6, 3, 1, 6, 1, 0, 0, 8])
    >>> from scipy import stats
    >>> stats.threshold(a, threshmin=2, threshmax=8, newval=-1)
    array([-1, -1,  6,  3, -1,  6, -1, -1, -1,  8])
    """
    out = asarray(a).copy()
    # Accumulate a boolean mask of out-of-bounds positions, then replace
    # them all in one assignment.
    outside = zeros(out.shape, dtype=bool)
    if threshmin is not None:
        outside |= out < threshmin
    if threshmax is not None:
        outside |= out > threshmax
    out[outside] = newval
    return out
def sigmaclip(a, low=4., high=4.):
"""
Iterative sigma-clipping of array elements.
The output array contains only those elements of the input array `c`
that satisfy the conditions ::
mean(c) - std(c)*low < c < mean(c) + std(c)*high
Starting from the full sample, all elements outside the critical range are
removed. The iteration continues with a new critical range until no
elements are outside the range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
clipped : ndarray
Input array with clipped elements removed.
lower : float
Lower threshold value use for clipping.
upper : float
Upper threshold value use for clipping.
Examples
--------
>>> from scipy.stats import sigmaclip
>>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
... np.linspace(0, 20, 5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
... np.linspace(-100, -50, 3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5, 10.5, 11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std*low
critupper = c_mean + c_std*high
c = c[(c > critlower) & (c < critupper)]
delta = size - c.size
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower',
'upper'))
return SigmaclipResult(c, critlower, critupper)
def trimboth(a, proportiontocut, axis=0):
    """
    Slices off a proportion of items from both ends of an array.

    Slices off the passed proportion of items from both ends of the
    passed array (i.e., with `proportiontocut` = 0.1, slices leftmost
    10% **and** rightmost 10% of scores).  The trimmed values are the
    lowest and highest ones.

    Slices off less if proportion results in a non-integer slice index
    (i.e., conservatively slices off `proportiontocut`).

    Parameters
    ----------
    a : array_like
        Data to trim.
    proportiontocut : float
        Proportion (in range 0-1) of total data set to trim of each end.
    axis : int or None, optional
        Axis along which to trim data. Default is 0. If None, compute
        over the whole array `a`.

    Returns
    -------
    out : ndarray
        Trimmed version of array `a`. The order of the trimmed content
        is undefined.

    Raises
    ------
    ValueError
        If trimming would remove everything (proportion too big).

    See Also
    --------
    trim_mean

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.arange(20)
    >>> b = stats.trimboth(a, 0.1)
    >>> b.shape
    (16,)
    """
    a = np.asarray(a)

    if a.size == 0:
        return a

    if axis is None:
        a = a.ravel()
        axis = 0

    nobs = a.shape[axis]
    lowercut = int(proportiontocut * nobs)
    uppercut = nobs - lowercut
    if (lowercut >= uppercut):
        raise ValueError("Proportion too big.")

    # np.partition is preferred but it only exist in numpy 1.8.0 and higher,
    # in those cases we use np.sort
    try:
        atmp = np.partition(a, (lowercut, uppercut - 1), axis)
    except AttributeError:
        atmp = np.sort(a, axis)

    sl = [slice(None)] * atmp.ndim
    sl[axis] = slice(lowercut, uppercut)
    # Index with a tuple: indexing an ndarray with a list of slices is
    # deprecated (and removed in later numpy releases).
    return atmp[tuple(sl)]
def trim1(a, proportiontocut, tail='right', axis=0):
    """
    Slices off a proportion from ONE end of the passed array distribution.

    If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
    10% of scores.  The lowest or highest values are trimmed (depending
    on the tail).

    Slices off less if proportion results in a non-integer slice index
    (i.e., conservatively slices off `proportiontocut`).

    Parameters
    ----------
    a : array_like
        Input array
    proportiontocut : float
        Fraction to cut off of 'left' or 'right' of distribution
    tail : {'left', 'right'}, optional
        Defaults to 'right'.
    axis : int or None, optional
        Axis along which to trim data. Default is 0. If None, compute
        over the whole array `a`.

    Returns
    -------
    trim1 : ndarray
        Trimmed version of array `a`. The order of the trimmed content
        is undefined.

    Raises
    ------
    ValueError
        If `tail` is not 'left' or 'right'.
    """
    a = np.asarray(a)
    if axis is None:
        a = a.ravel()
        axis = 0

    nobs = a.shape[axis]

    # avoid possible corner case
    if proportiontocut >= 1:
        return []

    if tail.lower() == 'right':
        lowercut = 0
        uppercut = nobs - int(proportiontocut * nobs)
    elif tail.lower() == 'left':
        lowercut = int(proportiontocut * nobs)
        uppercut = nobs
    else:
        # Previously an unrecognized tail fell through and raised an
        # UnboundLocalError; fail explicitly instead.
        raise ValueError("tail must be 'left' or 'right'")

    # np.partition is preferred but it only exist in numpy 1.8.0 and higher,
    # in those cases we use np.sort
    try:
        atmp = np.partition(a, (lowercut, uppercut - 1), axis)
    except AttributeError:
        atmp = np.sort(a, axis)

    # Slice along the requested axis (the old code always sliced axis 0,
    # giving wrong results for axis != 0); use a tuple index, matching
    # `trimboth`.
    sl = [slice(None)] * atmp.ndim
    sl[axis] = slice(lowercut, uppercut)
    return atmp[tuple(sl)]
def trim_mean(a, proportiontocut, axis=0):
    """
    Return mean of array after trimming distribution from both tails.

    If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost'
    10% of scores.  The input is sorted before slicing.  Slices off less
    if proportion results in a non-integer slice index (i.e.,
    conservatively slices off `proportiontocut`).

    Parameters
    ----------
    a : array_like
        Input array
    proportiontocut : float
        Fraction to cut off of both tails of the distribution
    axis : int or None, optional
        Axis along which the trimmed means are computed. Default is 0.
        If None, compute over the whole array `a`.

    Returns
    -------
    trim_mean : ndarray
        Mean of trimmed array.

    Raises
    ------
    ValueError
        If trimming would remove more than everything.

    See Also
    --------
    trimboth
    tmean : compute the trimmed mean ignoring values outside given `limits`.

    Examples
    --------
    >>> from scipy import stats
    >>> x = np.arange(20)
    >>> stats.trim_mean(x, 0.1)
    9.5
    >>> x2 = x.reshape(5, 4)
    >>> stats.trim_mean(x2, 0.25)
    array([  8.,   9.,  10.,  11.])
    >>> stats.trim_mean(x2, 0.25, axis=1)
    array([  1.5,   5.5,   9.5,  13.5,  17.5])
    """
    a = np.asarray(a)

    if a.size == 0:
        return np.nan

    if axis is None:
        a = a.ravel()
        axis = 0

    nobs = a.shape[axis]
    lowercut = int(proportiontocut * nobs)
    uppercut = nobs - lowercut
    # Note: strict `>` (not `>=`) so proportiontocut == 0.5 is allowed,
    # matching the historical behavior of this function.
    if (lowercut > uppercut):
        raise ValueError("Proportion too big.")

    # np.partition is preferred but it only exist in numpy 1.8.0 and higher,
    # in those cases we use np.sort
    try:
        atmp = np.partition(a, (lowercut, uppercut - 1), axis)
    except AttributeError:
        atmp = np.sort(a, axis)

    sl = [slice(None)] * atmp.ndim
    sl[axis] = slice(lowercut, uppercut)
    # Index with a tuple: indexing an ndarray with a list of slices is
    # deprecated (and removed in later numpy releases).
    return np.mean(atmp[tuple(sl)], axis=axis)
def f_oneway(*args):
    """
    Performs a 1-way ANOVA.

    The one-way ANOVA tests the null hypothesis that two or more groups
    have the same population mean.  The test is applied to samples from
    two or more groups, possibly with differing sizes.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        The sample measurements for each group.

    Returns
    -------
    statistic : float
        The computed F-value of the test.
    pvalue : float
        The associated p-value from the F-distribution.

    Notes
    -----
    The ANOVA test has important assumptions that must be satisfied in
    order for the associated p-value to be valid.

    1. The samples are independent.
    2. Each sample is from a normally distributed population.
    3. The population standard deviations of the groups are all equal.
       This property is known as homoscedasticity.

    If these assumptions are not true for a given set of data, it may
    still be possible to use the Kruskal-Wallis H-test
    (`scipy.stats.kruskal`) although with some loss of power.

    The algorithm is from Heiman[2], pp.394-7.

    References
    ----------
    .. [1] Lowry, Richard.  "Concepts and Applications of Inferential
           Statistics". Chapter 14.
           http://faculty.vassar.edu/lowry/ch14pt1.html
    .. [2] Heiman, G.W.  Research Methods in Statistics. 2002.
    .. [3] McDonald, G. H. "Handbook of Biological Statistics", One-way ANOVA.
           http://www.biostathandbook.com/onewayanova.html

    Examples
    --------
    >>> import scipy.stats as stats
    >>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
    ...              0.0659, 0.0923, 0.0836]
    >>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
    ...            0.0725]
    >>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
    >>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
    ...            0.0689]
    >>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
    >>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
    F_onewayResult(statistic=7.1210194716424473, pvalue=0.00028122423145345439)
    """
    samples = [np.asarray(sample, dtype=float) for sample in args]

    # ANOVA on N groups, each in its own array
    num_groups = len(samples)
    alldata = np.concatenate(samples)
    bign = len(alldata)

    # Center everything around the grand mean before the
    # sum-of-squares / square-of-sums arithmetic: variance is invariant
    # to a shift in location, and centering vastly improves numerical
    # stability.
    grand_mean = alldata.mean()
    alldata = alldata - grand_mean

    sstot = _sum_of_squares(alldata) - _square_of_sums(alldata) / float(bign)

    # Naming: variables ending in bn/b are for "between treatments",
    # wn/w are for "within treatments".
    ssbn = 0
    for sample in samples:
        ssbn += _square_of_sums(sample - grand_mean) / float(len(sample))
    ssbn -= _square_of_sums(alldata) / float(bign)
    sswn = sstot - ssbn

    dfbn = num_groups - 1
    dfwn = bign - num_groups
    msb = ssbn / float(dfbn)
    msw = sswn / float(dfwn)
    f = msb / msw
    prob = special.fdtrc(dfbn, dfwn, f)  # equivalent to stats.f.sf

    F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
    return F_onewayResult(f, prob)
def pearsonr(x, y):
    """
    Calculates a Pearson correlation coefficient and the p-value for
    testing non-correlation.

    The Pearson correlation coefficient measures the linear relationship
    between two datasets.  Strictly speaking, Pearson's correlation
    requires that each dataset be normally distributed.  Like other
    correlation coefficients, this one varies between -1 and +1 with 0
    implying no correlation.  Correlations of -1 or +1 imply an exact
    linear relationship.

    The p-value roughly indicates the probability of an uncorrelated
    system producing datasets that have a Pearson correlation at least
    as extreme as the one computed from these datasets.  The p-values
    are not entirely reliable but are probably reasonable for datasets
    larger than 500 or so.

    Parameters
    ----------
    x : (N,) array_like
        Input
    y : (N,) array_like
        Input

    Returns
    -------
    (Pearson's correlation coefficient,
     2-tailed p-value)

    References
    ----------
    http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
    """
    # x and y should have same length.
    x = np.asarray(x)
    y = np.asarray(y)
    n = len(x)
    xm = x - x.mean()
    ym = y - y.mean()
    numerator = np.add.reduce(xm * ym)
    denominator = np.sqrt(_sum_of_squares(xm) * _sum_of_squares(ym))
    r = numerator / denominator

    # If abs(r) > 1, it is presumably only a small artifact of floating
    # point arithmetic -- clamp into [-1, 1].
    r = max(min(r, 1.0), -1.0)

    df = n - 2
    if abs(r) == 1.0:
        prob = 0.0
    else:
        t_squared = r * r * (df / ((1.0 - r) * (1.0 + r)))
        prob = _betai(0.5 * df, 0.5, df / (df + t_squared))

    return r, prob
def fisher_exact(table, alternative='two-sided'):
    """Performs a Fisher exact test on a 2x2 contingency table.

    Parameters
    ----------
    table : array_like of ints
        A 2x2 contingency table.  Elements should be non-negative integers.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Which alternative hypothesis to the null hypothesis the test uses.
        Default is 'two-sided'.

    Returns
    -------
    oddsratio : float
        This is prior odds ratio and not a posterior estimate.
    p_value : float
        P-value, the probability of obtaining a distribution at least as
        extreme as the one that was actually observed, assuming that the
        null hypothesis is true.

    See Also
    --------
    chi2_contingency : Chi-square test of independence of variables in a
        contingency table.

    Notes
    -----
    The calculated odds ratio is different from the one R uses.  This scipy
    implementation returns the (more common) "unconditional Maximum
    Likelihood Estimate", while R uses the "conditional Maximum Likelihood
    Estimate".

    For tables with large numbers, the (inexact) chi-square test implemented
    in the function `chi2_contingency` can also be used.

    Examples
    --------
    Say we spend a few days counting whales and sharks in the Atlantic and
    Indian oceans.  In the Atlantic ocean we find 8 whales and 1 shark, in
    the Indian ocean 2 whales and 5 sharks.  Then our contingency table is::

                Atlantic  Indian
        whales     8        2
        sharks     1        5

    We use this table to find the p-value:

    >>> import scipy.stats as stats
    >>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
    >>> pvalue
    0.0349...
    """
    hypergeom = distributions.hypergeom
    c = np.asarray(table, dtype=np.int64)  # int32 is not enough for the algorithm
    if not c.shape == (2, 2):
        raise ValueError("The input `table` must be of shape (2, 2).")

    if np.any(c < 0):
        raise ValueError("All values in `table` must be nonnegative.")

    if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
        # If both values in a row or column are zero, the p-value is 1 and
        # the odds ratio is NaN.
        return np.nan, 1.0

    # Unconditional MLE odds ratio; infinite when either off-diagonal
    # cell is zero (the earlier all-zero-row/column case was handled above).
    if c[1,0] > 0 and c[0,1] > 0:
        oddsratio = c[0,0] * c[1,1] / float(c[1,0] * c[0,1])
    else:
        oddsratio = np.inf

    # Hypergeometric parameters: n1/n2 are the row sums, n is the first
    # column sum; c[0,0] is the observed count in the (0, 0) cell.
    n1 = c[0,0] + c[0,1]
    n2 = c[1,0] + c[1,1]
    n = c[0,0] + c[1,0]

    def binary_search(n, n1, n2, side):
        """Binary search for where to begin lower/upper halves in two-sided
        test.
        """
        # NOTE(review): this helper reads `mode`, `pexact` and `epsilon`
        # from the enclosing scope; they are bound in the 'two-sided'
        # branch below before this helper is ever called.
        if side == "upper":
            minval = mode
            maxval = n
        else:
            minval = 0
            maxval = mode
        guess = -1
        while maxval - minval > 1:
            if maxval == minval + 1 and guess == minval:
                guess = maxval
            else:
                guess = (maxval + minval) // 2
            pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
            if side == "upper":
                ng = guess - 1
            else:
                ng = guess + 1
            if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
                break
            elif pguess < pexact:
                maxval = guess
            else:
                minval = guess
        if guess == -1:
            guess = minval
        # Nudge the boundary so the pmf at `guess` sits just inside the
        # epsilon-tolerance band around `pexact`.
        if side == "upper":
            while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
                guess -= 1
            while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
                guess += 1
        else:
            while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
                guess += 1
            while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
                guess -= 1
        return guess

    if alternative == 'less':
        pvalue = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
    elif alternative == 'greater':
        # Same formula as the 'less' case, but with the second column.
        pvalue = hypergeom.cdf(c[0,1], n1 + n2, n1, c[0,1] + c[1,1])
    elif alternative == 'two-sided':
        # Mode of the hypergeometric distribution; counts on the other
        # side of the mode contribute the "other tail" of the p-value.
        mode = int(float((n + 1) * (n1 + 1)) / (n1 + n2 + 2))
        pexact = hypergeom.pmf(c[0,0], n1 + n2, n1, n)
        pmode = hypergeom.pmf(mode, n1 + n2, n1, n)

        epsilon = 1 - 1e-4
        if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
            # Observed count is (numerically) at the mode: everything is
            # at least as extreme, so the p-value is 1.
            return oddsratio, 1.

        elif c[0,0] < mode:
            plower = hypergeom.cdf(c[0,0], n1 + n2, n1, n)
            if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
                return oddsratio, plower

            guess = binary_search(n, n1, n2, "upper")
            pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
        else:
            pupper = hypergeom.sf(c[0,0] - 1, n1 + n2, n1, n)
            if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
                return oddsratio, pupper

            guess = binary_search(n, n1, n2, "lower")
            pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
    else:
        msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
        raise ValueError(msg)

    # Guard against accumulated floating-point error pushing the sum of
    # the two tails above 1.
    if pvalue > 1.0:
        pvalue = 1.0

    return oddsratio, pvalue
def spearmanr(a, b=None, axis=0, nan_policy='propagate'):
    """
    Calculates a Spearman rank-order correlation coefficient and the
    p-value to test for non-correlation.

    The Spearman correlation is a nonparametric measure of the
    monotonicity of the relationship between two datasets.  Unlike the
    Pearson correlation, the Spearman correlation does not assume that
    both datasets are normally distributed.  Like other correlation
    coefficients, this one varies between -1 and +1 with 0 implying no
    correlation.  Correlations of -1 or +1 imply an exact monotonic
    relationship.

    The p-value roughly indicates the probability of an uncorrelated
    system producing datasets that have a Spearman correlation at least
    as extreme as the one computed from these datasets.  The p-values
    are not entirely reliable but are probably reasonable for datasets
    larger than 500 or so.

    Parameters
    ----------
    a, b : 1D or 2D array_like, b is optional
        One or two 1-D or 2-D arrays containing multiple variables and
        observations.  When these are 1-D, each represents a vector of
        observations of a single variable.  For the behavior in the 2-D
        case, see under ``axis``, below.  Both arrays need to have the
        same length in the ``axis`` dimension.
    axis : int or None, optional
        If axis=0 (default), then each column represents a variable,
        with observations in the rows.  If axis=1, the relationship is
        transposed: each row represents a variable, while the columns
        contain observations.  If axis=None, then both arrays will be
        raveled.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate'
        returns nan, 'raise' throws an error, 'omit' performs the
        calculations ignoring nan values. Default is 'propagate'.

    Returns
    -------
    correlation : float or ndarray (2-D square)
        Spearman correlation matrix or correlation coefficient (if only
        2 variables are given as parameters).  Correlation matrix is
        square with length equal to total number of variables (columns
        or rows) in a and b combined.
    pvalue : float
        The two-sided p-value for a hypothesis test whose null
        hypothesis is that two sets of data are uncorrelated, has same
        dimension as rho.

    References
    ----------
    .. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
       Probability and Statistics Tables and Formulae. Chapman & Hall:
       New York. 2000.  Section 14.7

    Examples
    --------
    >>> from scipy import stats
    >>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
    (0.82078268166812329, 0.088587005313543798)
    """
    a, axisout = _chk_asarray(a, axis)

    SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))

    contains_nan, nan_policy = _contains_nan(a, nan_policy)

    if contains_nan and nan_policy == 'omit':
        # Delegate the nan-aware computation to the masked-array
        # implementation.
        a = ma.masked_invalid(a)
        b = ma.masked_invalid(b)
        return mstats_basic.spearmanr(a, b, axis)

    if contains_nan and nan_policy == 'propagate':
        return SpearmanrResult(np.nan, np.nan)

    if a.size <= 1:
        # A single observation has no rank-order correlation.
        return SpearmanrResult(np.nan, np.nan)

    # Replace the data with its ranks along the observation axis;
    # Spearman's rho is Pearson's r of the ranks.
    ar = np.apply_along_axis(rankdata, axisout, a)

    br = None
    if b is not None:
        b, axisout = _chk_asarray(b, axis)

        contains_nan, nan_policy = _contains_nan(b, nan_policy)

        if contains_nan and nan_policy == 'omit':
            b = ma.masked_invalid(b)
            return mstats_basic.spearmanr(a, b, axis)

        if contains_nan and nan_policy == 'propagate':
            return SpearmanrResult(np.nan, np.nan)

        br = np.apply_along_axis(rankdata, axisout, b)

    n = a.shape[axisout]
    # `br` may still be None here (single input); np.corrcoef accepts
    # that.  rowvar=axisout maps axis semantics onto corrcoef: for
    # axisout == 0 variables are columns, for axisout == 1 rows.
    rs = np.corrcoef(ar, br, rowvar=axisout)

    olderr = np.seterr(divide='ignore')  # rs can have elements equal to 1
    try:
        # t-statistic for each correlation; divide-by-zero for |rs| == 1
        # is deliberately silenced above and yields inf.
        t = rs * np.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
    finally:
        np.seterr(**olderr)

    prob = 2 * distributions.t.sf(np.abs(t), n-2)

    if rs.shape == (2, 2):
        # Exactly two variables: return scalars instead of 2x2 matrices.
        return SpearmanrResult(rs[1, 0], prob[1, 0])
    else:
        return SpearmanrResult(rs, prob)
def pointbiserialr(x, y):
    # Raw docstring (r""") is required: the original plain string turned
    # "\frac" into a form-feed escape and left "\sqrt"/"\overline" as
    # invalid escape sequences, corrupting the rendered documentation.
    r"""
    Calculates a point biserial correlation coefficient and its p-value.

    The point biserial correlation is used to measure the relationship
    between a binary variable, x, and a continuous variable, y.  Like
    other correlation coefficients, this one varies between -1 and +1
    with 0 implying no correlation.  Correlations of -1 or +1 imply a
    determinative relationship.

    This function uses a shortcut formula but produces the same result
    as `pearsonr`.

    Parameters
    ----------
    x : array_like of bools
        Input array.
    y : array_like
        Input array.

    Returns
    -------
    correlation : float
        R value
    pvalue : float
        2-tailed p-value

    Notes
    -----
    `pointbiserialr` uses a t-test with ``n-1`` degrees of freedom.
    It is equivalent to `pearsonr.`

    The value of the point-biserial correlation can be calculated from:

    .. math::

        r_{pb} = \frac{\overline{Y_{1}} -
                 \overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{1} N_{2}}{N (N - 1)}}

    Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
    observations coded 0 and 1 respectively; :math:`N_{0}` and
    :math:`N_{1}` are number of observations coded 0 and 1 respectively;
    :math:`N` is the total number of observations and :math:`s_{y}` is
    the standard deviation of all the metric observations.

    A value of :math:`r_{pb}` that is significantly different from zero is
    completely equivalent to a significant difference in means between the
    two groups.  Thus, an independent groups t Test with :math:`N-2`
    degrees of freedom may be used to test whether :math:`r_{pb}` is
    nonzero.  The relation between the t-statistic for comparing two
    independent groups and :math:`r_{pb}` is given by:

    .. math::

        t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}

    References
    ----------
    .. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
           Statist., Vol. 20, no.1, pp. 125-126, 1949.
    .. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
           Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
           np. 3, pp. 603-607, 1954.
    .. [3] http://onlinelibrary.wiley.com/doi/10.1002/9781118445112.stat06227/full

    Examples
    --------
    >>> from scipy import stats
    >>> a = np.array([0, 0, 0, 1, 1, 1, 1])
    >>> b = np.arange(7)
    >>> stats.pointbiserialr(a, b)
    (0.8660254037844386, 0.011724811003954652)
    >>> stats.pearsonr(a, b)
    (0.86602540378443871, 0.011724811003954626)
    """
    PointbiserialrResult = namedtuple('PointbiserialrResult', ('correlation',
                                                               'pvalue'))
    # The point-biserial coefficient is algebraically identical to
    # Pearson's r with one binary variable, so simply delegate.
    rpb, prob = pearsonr(x, y)
    return PointbiserialrResult(rpb, prob)
def kendalltau(x, y, initial_lexsort=True, nan_policy='propagate'):
    """
    Calculates Kendall's tau, a correlation measure for ordinal data.

    Kendall's tau is a measure of the correspondence between two
    rankings.  Values close to 1 indicate strong agreement, values close
    to -1 indicate strong disagreement.  This is the tau-b version of
    Kendall's tau which accounts for ties.

    Parameters
    ----------
    x, y : array_like
        Arrays of rankings, of the same shape. If arrays are not 1-D,
        they will be flattened to 1-D.
    initial_lexsort : bool, optional
        Whether to use lexsort or quicksort as the sorting method for
        the initial sort of the inputs. Default is lexsort (True), for
        which `kendalltau` is of complexity O(n log(n)). If False, the
        complexity is O(n^2), but with a smaller pre-factor (so
        quicksort may be faster for small arrays).
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate'
        returns nan, 'raise' throws an error, 'omit' performs the
        calculations ignoring nan values. Default is 'propagate'.

    Returns
    -------
    correlation : float
        The tau statistic.
    pvalue : float
        The two-sided p-value for a hypothesis test whose null
        hypothesis is an absence of association, tau = 0.

    See also
    --------
    spearmanr : Calculates a Spearman rank-order correlation coefficient.
    theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).

    Notes
    -----
    The definition of Kendall's tau that is used is::

        tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))

    where P is the number of concordant pairs, Q the number of
    discordant pairs, T the number of ties only in `x`, and U the number
    of ties only in `y`.  If a tie occurs for the same pair in both `x`
    and `y`, it is not added to either T or U.

    References
    ----------
    W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
    Ungrouped Data", Journal of the American Statistical Association,
    Vol. 61, No. 314, Part 1, pp. 436-439, 1966.

    Examples
    --------
    >>> from scipy import stats
    >>> x1 = [12, 2, 1, 12, 2]
    >>> x2 = [1, 4, 7, 1, 0]
    >>> tau, p_value = stats.kendalltau(x1, x2)
    >>> tau
    -0.47140452079103173
    >>> p_value
    0.24821309157521476
    """
    x = np.asarray(x).ravel()
    y = np.asarray(y).ravel()

    KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))

    if x.size != y.size:
        raise ValueError("All inputs to `kendalltau` must be of the same size, "
                         "found x-size %s and y-size %s" % (x.size, y.size))
    elif not x.size or not y.size:
        return KendalltauResult(np.nan, np.nan)  # Return NaN if arrays are empty

    # Check both x and y for nans.  BUG FIX: the previous code combined the
    # two checks as `_contains_nan(x, ...) or _contains_nan(y, ...)`, but
    # `_contains_nan` returns a non-empty tuple, which is always truthy, so
    # the `or` always yielded the x-result and nans only in `y` were never
    # detected.  Check each input explicitly instead.
    contains_nan_x, nan_policy = _contains_nan(x, nan_policy)
    contains_nan_y, nan_policy = _contains_nan(y, nan_policy)
    contains_nan = contains_nan_x or contains_nan_y

    if contains_nan and nan_policy == 'propagate':
        return KendalltauResult(np.nan, np.nan)

    elif contains_nan and nan_policy == 'omit':
        # Delegate nan-aware computation to the masked-array version.
        x = ma.masked_invalid(x)
        y = ma.masked_invalid(y)
        return mstats_basic.kendalltau(x, y)

    n = np.int64(len(x))
    temp = list(range(n))  # support structure used by mergesort
    # this closure recursively sorts sections of perm[] by comparing
    # elements of y[perm[]] using temp[] as support
    # returns the number of swaps required by an equivalent bubble sort

    def mergesort(offs, length):
        exchcnt = 0
        if length == 1:
            return 0
        if length == 2:
            if y[perm[offs]] <= y[perm[offs+1]]:
                return 0
            t = perm[offs]
            perm[offs] = perm[offs+1]
            perm[offs+1] = t
            return 1
        length0 = length // 2
        length1 = length - length0
        middle = offs + length0
        exchcnt += mergesort(offs, length0)
        exchcnt += mergesort(middle, length1)
        if y[perm[middle - 1]] < y[perm[middle]]:
            return exchcnt
        # merging, accumulating the displacement of each moved element
        # (equals the number of bubble-sort swaps it represents)
        i = j = k = 0
        while j < length0 or k < length1:
            if k >= length1 or (j < length0 and y[perm[offs + j]] <=
                                y[perm[middle + k]]):
                temp[i] = perm[offs + j]
                d = i - j
                j += 1
            else:
                temp[i] = perm[middle + k]
                d = (offs + i) - (middle + k)
                k += 1
            if d > 0:
                exchcnt += d
            i += 1
        perm[offs:offs+length] = temp[0:length]
        return exchcnt

    # initial sort on values of x and, if tied, on values of y
    if initial_lexsort:
        # sort implemented as mergesort, worst case: O(n log(n))
        perm = np.lexsort((y, x))
    else:
        # sort implemented as quicksort, 30% faster but with worst case: O(n^2)
        perm = list(range(n))
        perm.sort(key=lambda a: (x[a], y[a]))

    # NOTE(review): `xrange` comes from the module's Python-2 compat
    # imports elsewhere in this file.
    # compute joint ties
    first = 0
    t = 0
    for i in xrange(1, n):
        if x[perm[first]] != x[perm[i]] or y[perm[first]] != y[perm[i]]:
            t += ((i - first) * (i - first - 1)) // 2
            first = i
    t += ((n - first) * (n - first - 1)) // 2

    # compute ties in x
    first = 0
    u = 0
    for i in xrange(1, n):
        if x[perm[first]] != x[perm[i]]:
            u += ((i - first) * (i - first - 1)) // 2
            first = i
    u += ((n - first) * (n - first - 1)) // 2

    # count exchanges
    exchanges = mergesort(0, n)
    # compute ties in y after mergesort with counting
    first = 0
    v = 0
    for i in xrange(1, n):
        if y[perm[first]] != y[perm[i]]:
            v += ((i - first) * (i - first - 1)) // 2
            first = i
    v += ((n - first) * (n - first - 1)) // 2

    tot = (n * (n - 1)) // 2
    if tot == u or tot == v:
        # Special case for all ties in both ranks
        return KendalltauResult(np.nan, np.nan)

    # Prevent overflow; equal to np.sqrt((tot - u) * (tot - v))
    denom = np.exp(0.5 * (np.log(tot - u) + np.log(tot - v)))
    tau = ((tot - (v + u - t)) - 2.0 * exchanges) / denom

    # what follows reproduces the ending of Gary Strangman's original
    # stats.kendalltau() in SciPy
    svar = (4.0 * n + 10.0) / (9.0 * n * (n - 1))
    z = tau / np.sqrt(svar)
    prob = special.erfc(np.abs(z) / 1.4142136)

    return KendalltauResult(tau, prob)
#####################################
# INFERENTIAL STATISTICS #
#####################################
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'):
    """
    Calculates the T-test for the mean of ONE group of scores.

    Two-sided test of the null hypothesis that the expected value (mean)
    of a sample of independent observations `a` equals the given
    population mean, `popmean`.

    Parameters
    ----------
    a : array_like
        Sample observations.
    popmean : float or array_like
        Expected value under the null hypothesis; if array_like it must
        have the same shape as `a` excluding the test axis.
    axis : int or None, optional
        Axis along which to compute the test. If None, compute over the
        whole array `a`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        How to treat NaNs in the input: 'propagate' returns nan, 'raise'
        throws an error, 'omit' performs the calculations ignoring nan
        values. Default is 'propagate'.

    Returns
    -------
    statistic : float or array
        t-statistic.
    pvalue : float or array
        Two-tailed p-value.
    """
    a, axis = _chk_asarray(a, axis)
    Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))

    contains_nan, nan_policy = _contains_nan(a, nan_policy)
    if contains_nan and nan_policy == 'omit':
        # Delegate to the masked-array implementation, which skips NaNs.
        return mstats_basic.ttest_1samp(ma.masked_invalid(a), popmean, axis)

    num = a.shape[axis]
    dof = num - 1
    # t = (sample_mean - popmean) / sqrt(sample_var / n)
    diff = np.mean(a, axis) - popmean
    stderr = np.sqrt(np.var(a, axis, ddof=1) / float(num))
    stat, prob = _ttest_finish(dof, np.divide(diff, stderr))
    return Ttest_1sampResult(stat, prob)
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
    """Core of the independent t-test: statistic and two-sided p-value
    from the difference of means, a precomputed denominator, and the
    degrees of freedom."""
    stat = np.divide(mean1 - mean2, denom)
    stat, prob = _ttest_finish(df, stat)
    return (stat, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
df = ((vn1 + vn2)**2) / ((vn1**2) / (n1 - 1) + (vn2**2) / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / float(df)
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
                         equal_var=True):
    """
    T-test for means of two independent samples from descriptive statistics.

    Two-sided test of the null hypothesis that two independent samples
    have identical expected values, computed from summary statistics
    rather than the raw data.

    Parameters
    ----------
    mean1, mean2 : array_like
        Mean(s) of samples 1 and 2.
    std1, std2 : array_like
        Standard deviation(s) of samples 1 and 2.
    nobs1, nobs2 : array_like
        Number(s) of observations in samples 1 and 2.
    equal_var : bool, optional
        If True (default), perform a standard independent 2 sample test
        that assumes equal population variances [1]_. If False, perform
        Welch's t-test, which does not assume equal population
        variance [2]_.

    Returns
    -------
    statistic : float or array
        The calculated t-statistics
    pvalue : float or array
        The two-tailed p-value.

    See also
    --------
    scipy.stats.ttest_ind

    Notes
    -----
    .. versionadded:: 0.16.0

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
    """
    # Choose the pooled-variance or Welch denominator, then delegate.
    denom_func = _equal_var_ttest_denom if equal_var else _unequal_var_ttest_denom
    df, denom = denom_func(std1**2, nobs1, std2**2, nobs2)

    Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
    return Ttest_indResult(*_ttest_ind_from_stats(mean1, mean2, denom, df))
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'):
    """
    Calculates the T-test for the means of TWO INDEPENDENT samples of scores.

    This is a two-sided test for the null hypothesis that 2 independent
    samples have identical average (expected) values. This test assumes
    that the populations have identical variances by default.

    Parameters
    ----------
    a, b : array_like
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int or None, optional
        Axis along which to compute test. If None, compute over the whole
        arrays, `a`, and `b`.
    equal_var : bool, optional
        If True (default), perform a standard independent 2 sample test
        that assumes equal population variances [1]_.
        If False, perform Welch's t-test, which does not assume equal
        population variance [2]_.

        .. versionadded:: 0.11.0
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' performs the calculations
        ignoring nan values. Default is 'propagate'.

    Returns
    -------
    statistic : float or array
        The calculated t-statistic.
    pvalue : float or array
        The two-tailed p-value.

    Notes
    -----
    We can use this test, if we observe two independent samples from
    the same or different population, e.g. exam scores of boys and
    girls or of two ethnic groups. The test measures whether the
    average (expected) value differs significantly across samples. If
    we observe a large p-value, for example larger than 0.05 or 0.1,
    then we cannot reject the null hypothesis of identical average scores.
    If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%,
    then we reject the null hypothesis of equal averages.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
    .. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
    """
    a, b, axis = _chk2_asarray(a, b, axis)
    Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))

    # Check both inputs for NaNs.  BUG FIX: the previous code used
    # `_contains_nan(a, ...) or _contains_nan(b, ...)`, but _contains_nan
    # returns a (non-empty, hence always truthy) tuple, so the `or` always
    # yielded the first call's result and NaNs in `b` were never detected.
    contains_nan_a, nan_policy = _contains_nan(a, nan_policy)
    contains_nan_b, nan_policy = _contains_nan(b, nan_policy)
    contains_nan = contains_nan_a or contains_nan_b

    if contains_nan and nan_policy == 'omit':
        # Delegate to the masked-array implementation, which skips NaNs.
        a = ma.masked_invalid(a)
        b = ma.masked_invalid(b)
        return mstats_basic.ttest_ind(a, b, axis, equal_var)

    if a.size == 0 or b.size == 0:
        # Nothing to test against; the statistic is undefined.
        return Ttest_indResult(np.nan, np.nan)

    v1 = np.var(a, axis, ddof=1)
    v2 = np.var(b, axis, ddof=1)
    n1 = a.shape[axis]
    n2 = b.shape[axis]

    if equal_var:
        df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
    else:
        df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)

    res = _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis), denom, df)
    return Ttest_indResult(*res)
def ttest_rel(a, b, axis=0, nan_policy='propagate'):
    """
    Calculates the T-test on TWO RELATED samples of scores, a and b.

    This is a two-sided test for the null hypothesis that 2 related or
    repeated samples have identical average (expected) values.

    Parameters
    ----------
    a, b : array_like
        The arrays must have the same shape.
    axis : int or None, optional
        Axis along which to compute test. If None, compute over the whole
        arrays, `a`, and `b`.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' performs the calculations
        ignoring nan values. Default is 'propagate'.

    Returns
    -------
    statistic : float or array
        t-statistic
    pvalue : float or array
        two-tailed p-value

    Notes
    -----
    Examples for the use are scores of the same set of student in
    different exams, or repeated sampling from the same units. The
    test measures whether the average score differs significantly
    across samples (e.g. exams). Small p-values are associated with
    large t-statistics.

    References
    ----------
    http://en.wikipedia.org/wiki/T-test#Dependent_t-test
    """
    a, b, axis = _chk2_asarray(a, b, axis)
    Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))

    # Check both inputs for NaNs.  BUG FIX: the previous
    # `_contains_nan(a, ...) or _contains_nan(b, ...)` ORed two tuples;
    # a non-empty tuple is always truthy, so NaNs in `b` were ignored.
    contains_nan_a, nan_policy = _contains_nan(a, nan_policy)
    contains_nan_b, nan_policy = _contains_nan(b, nan_policy)

    if (contains_nan_a or contains_nan_b) and nan_policy == 'omit':
        # Mask invalid entries in *both* inputs (previously only `a` was
        # masked, letting NaNs in `b` propagate into the differences).
        a = ma.masked_invalid(a)
        b = ma.masked_invalid(b)
        return mstats_basic.ttest_rel(a, b, axis)

    if a.shape[axis] != b.shape[axis]:
        raise ValueError('unequal length arrays')

    if a.size == 0 or b.size == 0:
        # Return the namedtuple for consistency with the non-empty path
        # (previously a bare (nan, nan) tuple was returned here).
        return Ttest_relResult(np.nan, np.nan)

    n = a.shape[axis]
    df = float(n - 1)

    # Paired test: one-sample t-test on the element-wise differences.
    d = (a - b).astype(np.float64)
    v = np.var(d, axis, ddof=1)
    dm = np.mean(d, axis)
    denom = np.sqrt(v / float(n))

    t = np.divide(dm, denom)
    t, prob = _ttest_finish(df, t)

    return Ttest_relResult(t, prob)
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
    """
    Perform the Kolmogorov-Smirnov test for goodness of fit.

    Tests the distribution G(x) of an observed random variable against a
    given distribution F(x). Under the null hypothesis the two are
    identical, G(x)=F(x). The alternative hypothesis can be 'two-sided'
    (default), 'less' or 'greater'. The KS test is only valid for
    continuous distributions.

    Parameters
    ----------
    rvs : str, array or callable
        Name of a distribution in `scipy.stats`, a 1-D array of
        observations, or a callable generating random variates (it must
        accept a keyword argument `size`).
    cdf : str or callable
        Name of a distribution in `scipy.stats`, or a callable computing
        the cdf. If `rvs` is a string then `cdf` can be False or the
        same as `rvs`.
    args : tuple, sequence, optional
        Distribution parameters, used if `rvs` or `cdf` are strings.
    N : int, optional
        Sample size if `rvs` is string or callable. Default is 20.
    alternative : {'two-sided', 'less', 'greater'}, optional
        Defines the alternative hypothesis. Default is 'two-sided'.
    mode : 'approx' (default) or 'asymp', optional
        Distribution used for the p-value:

          - 'approx' : approximation to exact distribution of the statistic
          - 'asymp' : asymptotic distribution of the statistic

    Returns
    -------
    statistic : float
        KS test statistic, either D, D+ or D-.
    pvalue : float
        One-tailed or two-tailed p-value.

    Notes
    -----
    In the one-sided test, the alternative is that the empirical
    cumulative distribution function of the random variable is "less"
    or "greater" than the cumulative distribution function F(x) of the
    hypothesis, ``G(x)<=F(x)``, resp. ``G(x)>=F(x)``.
    """
    # Resolve string arguments to the matching scipy.stats distribution.
    if isinstance(rvs, string_types):
        if (not cdf) or (cdf == rvs):
            cdf = getattr(distributions, rvs).cdf
            rvs = getattr(distributions, rvs).rvs
        else:
            raise AttributeError("if rvs is string, cdf has to be the "
                                 "same distribution")

    if isinstance(cdf, string_types):
        cdf = getattr(distributions, cdf).cdf
    if callable(rvs):
        # Draw a fresh sorted sample of size N from the generator.
        vals = np.sort(rvs(*args, **{'size': N}))
    else:
        vals = np.sort(rvs)
        N = len(vals)
    cdfvals = cdf(vals, *args)

    # Accept the historical spelling for backward compatibility.
    if alternative == 'two_sided':
        alternative = 'two-sided'

    KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))

    if alternative in ['two-sided', 'greater']:
        # D+: largest excess of the empirical CDF above the hypothesized CDF.
        Dplus = (np.arange(1.0, N + 1) / N - cdfvals).max()
        if alternative == 'greater':
            return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))

    if alternative in ['two-sided', 'less']:
        # D-: largest excess of the hypothesized CDF above the empirical CDF.
        Dmin = (cdfvals - np.arange(0.0, N) / N).max()
        if alternative == 'less':
            return KstestResult(Dmin, distributions.ksone.sf(Dmin, N))

    if alternative == 'two-sided':
        D = np.max([Dplus, Dmin])
        if mode == 'asymp':
            return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N)))
        if mode == 'approx':
            pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
            # Use the asymptotic value for large N (or when it is already
            # large); otherwise approximate with twice the one-sided value.
            if N > 2666 or pval_two > 0.80 - N * 0.3 / 1000:
                return KstestResult(D,
                                    distributions.kstwobign.sf(D * np.sqrt(N)))
            else:
                return KstestResult(D, 2 * distributions.ksone.sf(D, N))
    # NOTE(review): an unrecognized `alternative`/`mode` falls through and
    # implicitly returns None, matching the original behavior.
# Map from names to lambda_ values used in power_divergence().
# Each entry is a named member of the Cressie-Read power divergence family.
_power_div_lambda_names = {
    "pearson": 1,               # Pearson's chi-squared statistic
    "log-likelihood": 0,        # log-likelihood ratio (G-test)
    "freeman-tukey": -0.5,      # Freeman-Tukey statistic
    "mod-log-likelihood": -1,   # modified log-likelihood ratio
    "neyman": -2,               # Neyman's statistic
    "cressie-read": 2/3,        # power recommended by Cressie & Read (1984)
}
def _count(a, axis=None):
"""
Count the number of non-masked elements of an array.
This function behaves like np.ma.count(), but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
    """
    Cressie-Read power divergence statistic and goodness of fit test.

    Tests the null hypothesis that the categorical data has the given
    frequencies, using the Cressie-Read power divergence statistic.

    Parameters
    ----------
    f_obs : array_like
        Observed frequencies in each category.
    f_exp : array_like, optional
        Expected frequencies in each category. By default the categories
        are assumed to be equally likely.
    ddof : int, optional
        "Delta degrees of freedom": adjustment to the degrees of freedom
        for the p-value, which is computed using a chi-squared
        distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
        is the number of observed frequencies. Default is 0.
    axis : int or None, optional
        The axis of the broadcast result of `f_obs` and `f_exp` along
        which to apply the test. If axis is None, all values in `f_obs`
        are treated as a single data set. Default is 0.
    lambda_ : float or str, optional
        Power in the Cressie-Read statistic; default is 1. The strings
        "pearson" (1), "log-likelihood" (0, the G-test),
        "freeman-tukey" (-1/2), "mod-log-likelihood" (-1), "neyman" (-2)
        and "cressie-read" (2/3) are accepted aliases for the
        corresponding numerical values.

    Returns
    -------
    statistic : float or ndarray
        The Cressie-Read power divergence test statistic. The value is
        a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
    pvalue : float or ndarray
        The p-value of the test. The value is a float if `ddof` and the
        return value `stat` are scalars.

    See Also
    --------
    chisquare

    Notes
    -----
    This test is invalid when the observed or expected frequencies in
    each category are too small. A typical rule is that all of the
    observed and expected frequencies should be at least 5.

    When `lambda_` is less than zero, the formula for the statistic
    involves dividing by `f_obs`, so a warning or error may be generated
    if any value in `f_obs` is 0. Similarly, a warning or error may be
    generated if any value in `f_exp` is zero when ``lambda_ >= 0``.

    This function handles masked arrays. If an element of `f_obs` or
    `f_exp` is masked, then data at that position is ignored, and does
    not count towards the size of the data set.

    .. versionadded:: 0.13.0

    References
    ----------
    .. [1] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
           Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
           pp. 440-464.
    """
    # Resolve a string alias for lambda_ to its numeric power.
    if isinstance(lambda_, string_types):
        if lambda_ not in _power_div_lambda_names:
            names = repr(list(_power_div_lambda_names.keys()))[1:-1]
            raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
                             "are {1}".format(lambda_, names))
        lambda_ = _power_div_lambda_names[lambda_]
    elif lambda_ is None:
        lambda_ = 1

    f_obs = np.asanyarray(f_obs)

    if f_exp is not None:
        f_exp = np.atleast_1d(np.asanyarray(f_exp))
    else:
        # Equivalent of f_obs.mean(axis=axis, keepdims=True) on numpy
        # versions that lack `keepdims`; 'invalid' errors are ignored so
        # a zero-length data set does not emit spurious warnings.
        with np.errstate(invalid='ignore'):
            f_exp = np.atleast_1d(f_obs.mean(axis=axis))
        if axis is not None:
            # Restore the reduced axis as length 1 so f_exp broadcasts
            # against f_obs.
            reduced_shape = list(f_obs.shape)
            reduced_shape[axis] = 1
            f_exp.shape = reduced_shape

    # Per-element terms summed along `axis` to form the statistic, with
    # specialized formulas for a few common powers.
    if lambda_ == 1:
        # Pearson's chi-squared statistic
        terms = (f_obs - f_exp)**2 / f_exp
    elif lambda_ == 0:
        # Log-likelihood ratio (i.e. G-test)
        terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
    elif lambda_ == -1:
        # Modified log-likelihood ratio
        terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
    else:
        # General Cressie-Read power divergence.
        terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
        terms /= 0.5 * lambda_ * (lambda_ + 1)

    stat = terms.sum(axis=axis)

    num_obs = _count(terms, axis=axis)
    # Broadcasting `stat` against `ddof` yields one p-value per ddof.
    ddof = asarray(ddof)
    p = distributions.chi2.sf(stat, num_obs - 1 - ddof)

    Power_divergenceResult = namedtuple('Power_divergenceResult', ('statistic',
                                                                   'pvalue'))
    return Power_divergenceResult(stat, p)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
    """
    Calculates a one-way chi square test.

    The chi square test tests the null hypothesis that the categorical
    data has the given frequencies.

    Parameters
    ----------
    f_obs : array_like
        Observed frequencies in each category.
    f_exp : array_like, optional
        Expected frequencies in each category. By default the categories
        are assumed to be equally likely.
    ddof : int, optional
        "Delta degrees of freedom": adjustment to the degrees of freedom
        for the p-value, which is computed using a chi-squared
        distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
        is the number of observed frequencies. Default is 0.
    axis : int or None, optional
        The axis of the broadcast result of `f_obs` and `f_exp` along
        which to apply the test. If axis is None, all values in `f_obs`
        are treated as a single data set. Default is 0.

    Returns
    -------
    chisq : float or ndarray
        The chi-squared test statistic. The value is a float if `axis`
        is None or `f_obs` and `f_exp` are 1-D.
    p : float or ndarray
        The p-value of the test. The value is a float if `ddof` and the
        return value `chisq` are scalars.

    See Also
    --------
    power_divergence
    mstats.chisquare

    Notes
    -----
    This test is invalid when the observed or expected frequencies in
    each category are too small. A typical rule is that all of the
    observed and expected frequencies should be at least 5.

    References
    ----------
    .. [1] Lowry, Richard. "Concepts and Applications of Inferential
           Statistics". Chapter 8. http://faculty.vassar.edu/lowry/ch8pt1.html
    .. [2] "Chi-squared test", http://en.wikipedia.org/wiki/Chi-squared_test
    """
    # Pearson's chi-squared is the lambda_=1 member of the Cressie-Read
    # power divergence family.
    return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
                            lambda_="pearson")
def ks_2samp(data1, data2):
    """
    Computes the Kolmogorov-Smirnov statistic on 2 samples.

    This is a two-sided test for the null hypothesis that 2 independent
    samples are drawn from the same continuous distribution.

    Parameters
    ----------
    data1, data2 : sequence of 1-D ndarrays
        two arrays of sample observations assumed to be drawn from a
        continuous distribution, sample sizes can be different

    Returns
    -------
    statistic : float
        KS statistic
    pvalue : float
        two-tailed p-value

    Notes
    -----
    As with the one-sample K-S test, the distribution is assumed to be
    continuous. This is the two-sided test; one-sided tests are not
    implemented. The test uses the two-sided asymptotic
    Kolmogorov-Smirnov distribution. If the K-S statistic is small or
    the p-value is high, we cannot reject the hypothesis that the
    distributions of the two samples are the same.
    """
    data1 = np.sort(data1)
    data2 = np.sort(data2)
    n1 = data1.shape[0]
    n2 = data2.shape[0]
    data_all = np.concatenate([data1, data2])
    # Empirical CDF of each sample evaluated at every pooled observation.
    cdf1 = np.searchsorted(data1, data_all, side='right') / (1.0*n1)
    cdf2 = np.searchsorted(data2, data_all, side='right') / (1.0*n2)
    d = np.max(np.absolute(cdf1 - cdf2))
    # Note: d absolute not signed distance
    en = np.sqrt(n1 * n2 / float(n1 + n2))
    try:
        # Asymptotic two-sided p-value with a small-sample correction factor.
        prob = distributions.kstwobign.sf((en + 0.12 + 0.11 / en) * d)
    except Exception:
        # FIX: previously a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt; keep the best-effort fallback but
        # only for ordinary exceptions.
        prob = 1.0

    Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue'))
    return Ks_2sampResult(d, prob)
def mannwhitneyu(x, y, use_continuity=True, alternative='two-sided'):
    """
    Computes the Mann-Whitney rank test on samples x and y.

    Parameters
    ----------
    x, y : array_like
        Array of samples, should be one-dimensional.
    use_continuity : bool, optional
        Whether a continuity correction (1/2.) should be taken into
        account. Default is True.
    alternative : {'less', 'greater', 'two-sided'}, optional
        Defines the alternative hypothesis. Default is 'two-sided'.

    Returns
    -------
    statistic : float
        The Mann-Whitney statistics.
    pvalue : float
        p-value assuming an asymptotic normal distribution. One-sided
        for ``alternative='less'``/``'greater'``; doubled (two-sided)
        for ``alternative='two-sided'``.

    Raises
    ------
    ValueError
        If all values in the pooled sample are identical (the normal
        approximation degenerates), or if `alternative` is invalid.

    Notes
    -----
    Use only when the number of observation in each sample is > 20 and
    you have 2 independent samples of ranks. Mann-Whitney U is
    significant if the u-obtained is LESS THAN or equal to the critical
    value of U.

    This test corrects for ties and by default uses a continuity correction.
    """
    x = np.asarray(x)
    y = np.asarray(y)
    n1 = len(x)
    n2 = len(y)
    ranked = rankdata(np.concatenate((x, y)))
    rankx = ranked[0:n1]  # get the x-ranks
    u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0)  # calc U for x
    u2 = n1*n2 - u1  # remainder is U for y
    T = tiecorrect(ranked)
    if T == 0:
        # All values tied: sd would be 0 and z undefined.
        # FIX: error message previously read 'amannwhitneyu' (typo).
        raise ValueError('All numbers are identical in mannwhitneyu')
    sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)

    fact2 = 1
    meanrank = n1*n2/2.0 + 0.5 * use_continuity
    if alternative == 'less':
        z = u1 - meanrank
    elif alternative == 'greater':
        z = u2 - meanrank
    elif alternative == 'two-sided':
        bigu = max(u1, u2)
        z = np.abs(bigu - meanrank)
        fact2 = 2.  # double the one-sided tail probability
    else:
        # FIX: the concatenated message previously lacked a space
        # ("'greater'or 'two-sided'").
        raise ValueError("alternative should be 'less', 'greater' "
                         "or 'two-sided'")
    z = z / sd

    # NOTE(review): the statistic returned is always u2, regardless of
    # `alternative` -- confirm this is the convention callers expect.
    MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic',
                                                           'pvalue'))
    return MannwhitneyuResult(u2, distributions.norm.sf(z) * fact2)
def ranksums(x, y):
    """
    Compute the Wilcoxon rank-sum statistic for two samples.

    Tests the null hypothesis that both sets of measurements are drawn
    from the same distribution, against the alternative that values in
    one sample tend to be larger than in the other.  Intended for
    continuous distributions; ties are not handled (see
    `scipy.stats.mannwhitneyu` for tie handling and a continuity
    correction).

    Parameters
    ----------
    x,y : array_like
        The data from the two samples.

    Returns
    -------
    statistic : float
        The z-statistic under the large-sample normal approximation of
        the rank sum.
    pvalue : float
        The two-sided p-value of the test.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
    """
    first_sample = np.asarray(x)
    second_sample = np.asarray(y)
    size_first = len(first_sample)
    size_second = len(second_sample)
    # Rank the pooled data; the first `size_first` ranks belong to x.
    pooled_ranks = rankdata(np.concatenate((first_sample, second_sample)))
    rank_sum = np.sum(pooled_ranks[:size_first], axis=0)
    # Mean and standard deviation of the rank sum under H0.
    mean_under_h0 = size_first * (size_first + size_second + 1) / 2.0
    sd_under_h0 = np.sqrt(
        size_first * size_second * (size_first + size_second + 1) / 12.0)
    z_stat = (rank_sum - mean_under_h0) / sd_under_h0
    two_sided_p = 2 * distributions.norm.sf(abs(z_stat))
    RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
    return RanksumsResult(z_stat, two_sided_p)
def kruskal(*args, **kwargs):
    """
    Compute the Kruskal-Wallis H-test for independent samples.

    The Kruskal-Wallis H-test tests the null hypothesis that the population
    median of all of the groups are equal.  It is a non-parametric version
    of ANOVA.  The test works on 2 or more independent samples, which may
    have different sizes.  Note that rejecting the null hypothesis does not
    indicate which of the groups differs.  Post-hoc comparisons between
    groups are required to determine which groups are different.

    Parameters
    ----------
    sample1, sample2, ... : array_like
        Two or more arrays with the sample measurements can be given as
        arguments.
    nan_policy : {'propagate', 'raise', 'omit'}, optional
        Defines how to handle when input contains nan. 'propagate' returns
        nan, 'raise' throws an error, 'omit' performs the calculations
        ignoring nan values. Default is 'propagate'.

    Returns
    -------
    statistic : float
        The Kruskal-Wallis H statistic, corrected for ties.
    pvalue : float
        The p-value for the test using the assumption that H has a chi
        square distribution.

    See Also
    --------
    f_oneway : 1-way ANOVA.
    mannwhitneyu : Mann-Whitney rank test on two samples.
    friedmanchisquare : Friedman test for repeated measurements.

    Notes
    -----
    Due to the assumption that H has a chi square distribution, the number
    of samples in each group must not be too small.  A typical rule is
    that each sample must have at least 5 measurements.

    References
    ----------
    .. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
       One-Criterion Variance Analysis", Journal of the American Statistical
       Association, Vol. 47, Issue 260, pp. 583-621, 1952.
    .. [2] http://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance

    Examples
    --------
    >>> from scipy import stats
    >>> x = [1, 3, 5, 7, 9]
    >>> y = [2, 4, 6, 8, 10]
    >>> stats.kruskal(x, y)
    KruskalResult(statistic=0.27272727272727337, pvalue=0.60150813444058948)
    >>> x = [1, 1, 1]
    >>> y = [2, 2, 2]
    >>> z = [2, 2]
    >>> stats.kruskal(x, y, z)
    KruskalResult(statistic=7.0, pvalue=0.030197383422318501)
    """
    args = list(map(np.asarray, args))
    num_groups = len(args)
    if num_groups < 2:
        raise ValueError("Need at least two groups in stats.kruskal()")

    # Build the result type once (the original created it twice).
    KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))

    for arg in args:
        if arg.size == 0:
            return KruskalResult(np.nan, np.nan)
    n = np.asarray(list(map(len, args)))

    nan_policy = kwargs.get('nan_policy', 'propagate')
    if nan_policy not in ('propagate', 'raise', 'omit'):
        # BUG FIX: the original message read "'raise' or'omit'".
        raise ValueError("nan_policy must be 'propagate', "
                         "'raise' or 'omit'")

    contains_nan = False
    for arg in args:
        cn = _contains_nan(arg, nan_policy)  # raises when nan_policy='raise'
        if cn[0]:
            contains_nan = True
            break

    if contains_nan and nan_policy == 'omit':
        # BUG FIX: the original rebound the loop variable instead of
        # replacing the entries of `args`, so the masking had no effect.
        args = [ma.masked_invalid(a) for a in args]
        return mstats_basic.kruskal(*args)
    if contains_nan and nan_policy == 'propagate':
        return KruskalResult(np.nan, np.nan)

    alldata = np.concatenate(args)
    ranked = rankdata(alldata)
    ties = tiecorrect(ranked)
    if ties == 0:
        raise ValueError('All numbers are identical in kruskal')

    # Compute sum^2/n for each group and sum.
    j = np.insert(np.cumsum(n), 0, 0)
    ssbn = 0
    for i in range(num_groups):
        ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / float(n[i])

    totaln = np.sum(n)
    h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
    df = num_groups - 1
    h /= ties  # correct the statistic for ties
    return KruskalResult(h, distributions.chi2.sf(h, df))
def friedmanchisquare(*args):
    """
    Computes the Friedman test for repeated measurements.

    Tests the null hypothesis that repeated measurements of the same
    individuals share a common distribution, e.g. to check that two or
    more measurement techniques applied to the same subjects are
    consistent.

    Parameters
    ----------
    measurements1, measurements2, measurements3... : array_like
        Arrays of measurements.  All arrays must have the same number of
        elements, and at least 3 sets of measurements must be given.

    Returns
    -------
    statistic : float
        the test statistic, correcting for ties
    pvalue : float
        the associated p-value assuming that the test statistic has a chi
        squared distribution

    Notes
    -----
    Due to the chi-squared approximation, the p-value is only reliable
    for n > 10 and more than 6 repeated measurements.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Friedman_test
    """
    n_conditions = len(args)
    if n_conditions < 3:
        raise ValueError('\nLess than 3 levels. Friedman test not appropriate.\n')
    n_subjects = len(args[0])
    for sample in args[1:]:
        if len(sample) != n_subjects:
            raise ValueError('Unequal N in friedmanchisquare. Aborting.')

    # Rank the measurements within each subject (row of the table).
    table = np.vstack(args).T.astype(float)
    for row in range(len(table)):
        table[row] = rankdata(table[row])

    # Tie correction: accumulate t*(t^2 - 1) over every tied group.
    tie_sum = 0
    for row in range(len(table)):
        _, repeat_counts = find_repeats(np.asarray(table[row]))
        for t in repeat_counts:
            tie_sum += t * (t * t - 1)
    correction = 1 - tie_sum / float(
        n_conditions * (n_conditions * n_conditions - 1) * n_subjects)

    rank_sum_sq = np.sum(table.sum(axis=0)**2)
    statistic = (12.0 / (n_conditions * n_subjects * (n_conditions + 1))
                 * rank_sum_sq - 3 * n_subjects * (n_conditions + 1)) / correction
    FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
                                         ('statistic', 'pvalue'))
    return FriedmanchisquareResult(
        statistic, distributions.chi2.sf(statistic, n_conditions - 1))
def combine_pvalues(pvalues, method='fisher', weights=None):
    """
    Methods for combining the p-values of independent tests bearing upon
    the same hypothesis.

    Parameters
    ----------
    pvalues : array_like, 1-D
        Array of p-values assumed to come from independent tests.
    method : {'fisher', 'stouffer'}, optional
        Name of method to use to combine p-values. The following methods
        are available:

        - "fisher": Fisher's method (Fisher's combined probability test),
          the default.
        - "stouffer": Stouffer's Z-score method.
    weights : array_like, 1-D, optional
        Optional array of weights used only for Stouffer's Z-score method.

    Returns
    -------
    statistic: float
        The statistic calculated by the specified method:

        - "fisher": The chi-squared statistic
        - "stouffer": The Z-score
    pval: float
        The combined p-value.

    Notes
    -----
    Fisher's method (also known as Fisher's combined probability test)
    [1]_ uses a chi-squared statistic to compute a combined p-value. The
    closely related Stouffer's Z-score method [2]_ uses Z-scores rather
    than p-values. The advantage of Stouffer's method is that it is
    straightforward to introduce weights, which can make Stouffer's method
    more powerful than Fisher's method when the p-values are from studies
    of different size [3]_ [4]_.

    .. versionadded:: 0.15.0

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
    .. [2] http://en.wikipedia.org/wiki/Fisher's_method#Relation_to_Stouffer.27s_Z-score_method
    .. [3] Whitlock, M. C. "Combining probability from independent tests: the
           weighted Z-method is superior to Fisher's approach." Journal of
           Evolutionary Biology 18, no. 5 (2005): 1368-1373.
    .. [4] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
           for combining probabilities in meta-analysis." Journal of
           Evolutionary Biology 24, no. 8 (2011): 1836-1841.
    .. [5] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
    """
    pvalues = np.asarray(pvalues)
    if pvalues.ndim != 1:
        raise ValueError("pvalues is not 1-D")

    if method == 'fisher':
        # -2 * sum(log(p)) ~ chi2 with 2k degrees of freedom under H0.
        Xsq = -2 * np.sum(np.log(pvalues))
        pval = distributions.chi2.sf(Xsq, 2 * len(pvalues))
        return (Xsq, pval)
    elif method == 'stouffer':
        if weights is None:
            weights = np.ones_like(pvalues)
        elif len(weights) != len(pvalues):
            raise ValueError("pvalues and weights must be of the same size.")

        weights = np.asarray(weights)
        if weights.ndim != 1:
            raise ValueError("weights is not 1-D")

        # Weighted sum of the per-test Z-scores, renormalized to unit
        # variance under H0.
        Zi = distributions.norm.isf(pvalues)
        Z = np.dot(weights, Zi) / np.linalg.norm(weights)
        pval = distributions.norm.sf(Z)
        return (Z, pval)
    else:
        # BUG FIX: the original passed `method` as a second argument to
        # ValueError instead of %-formatting it into the message.
        raise ValueError(
            "Invalid method '%s'. Options are 'fisher' or 'stouffer'" % method)
#####################################
# PROBABILITY CALCULATIONS #
#####################################
@np.deprecate(message="stats.chisqprob is deprecated in scipy 0.17.0; "
              "use stats.distributions.chi2.sf instead.")
def chisqprob(chisq, df):
    """
    Probability value (1-tail) for the Chi^2 probability distribution.

    Broadcasting rules apply.

    Parameters
    ----------
    chisq : array_like or float > 0
    df : array_like or float, probably int >= 1

    Returns
    -------
    chisqprob : ndarray
        The area from `chisq` to infinity under the Chi^2 probability
        distribution with degrees of freedom `df`.
    """
    # Deprecated thin wrapper: delegate to the chi-square survival function.
    return distributions.chi2.sf(chisq, df)
@np.deprecate(message="stats.betai is deprecated in scipy 0.17.0; "
              "use special.betainc instead")
def betai(a, b, x):
    """
    Returns the incomplete beta function.

    I_x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)

    where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
    function of a.

    The standard broadcasting rules apply to a, b, and x.

    Parameters
    ----------
    a : array_like or float > 0
    b : array_like or float > 0
    x : array_like or float
        x will be clipped to be no greater than 1.0 .

    Returns
    -------
    betai : ndarray
        Incomplete beta function.
    """
    # Deprecated public wrapper; _betai clips x and delegates to
    # special.betainc.
    return _betai(a, b, x)
def _betai(a, b, x):
x = np.asarray(x)
x = np.where(x < 1.0, x, 1.0) # if x > 1 then return 1.0
return special.betainc(a, b, x)
#####################################
# ANOVA CALCULATIONS #
#####################################
@np.deprecate(message="stats.f_value_wilks_lambda deprecated in scipy 0.17.0")
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
    """Calculation of Wilks lambda F-statistic for multivarite data, per
    Maxwell & Delaney p.657.

    Deprecated.  NOTE(review): `dfnum` and `dfden` are accepted but never
    used in the body; the statistic is built from `a`, `b` and the
    determinant ratio only.
    """
    # Promote scalar error terms to 1x1 matrices so linalg.det applies.
    if isinstance(ER, (int, float)):
        ER = array([[ER]])
    if isinstance(EF, (int, float)):
        EF = array([[EF]])
    lmbda = linalg.det(EF) / linalg.det(ER)  # Wilks' lambda
    # Special case where the exponent q degenerates to 1 (avoids 0/0 below).
    if (a-1)**2 + (b-1)**2 == 5:
        q = 1
    else:
        q = np.sqrt(((a-1)**2*(b-1)**2 - 2) / ((a-1)**2 + (b-1)**2 - 5))
    n_um = (1 - lmbda**(1.0/q))*(a-1)*(b-1)
    d_en = lmbda**(1.0/q) / (n_um*q - 0.5*(a-1)*(b-1) + 1)
    return n_um / d_en
@np.deprecate(message="stats.f_value deprecated in scipy 0.17.0")
def f_value(ER, EF, dfR, dfF):
    """
    Returns an F-statistic for a restricted vs. unrestricted model.

    Parameters
    ----------
    ER : float
         `ER` is the sum of squared residuals for the restricted model
          or null hypothesis
    EF : float
         `EF` is the sum of squared residuals for the unrestricted model
          or alternate hypothesis
    dfR : int
          `dfR` is the degrees of freedom in the restricted model
    dfF : int
          `dfF` is the degrees of freedom in the unrestricted model

    Returns
    -------
    F-statistic : float
    """
    # ((ER - EF) / (dfR - dfF)) / (EF / dfF): ratio of the per-df
    # improvement in fit to the per-df residual of the full model.
    return (ER - EF) / float(dfR - dfF) / (EF / float(dfF))
@np.deprecate(message="stats.f_value_multivariate deprecated in scipy 0.17.0")
def f_value_multivariate(ER, EF, dfnum, dfden):
    """
    Returns a multivariate F-statistic.

    Parameters
    ----------
    ER : ndarray
        Error associated with the null hypothesis (the Restricted model).
        From a multivariate F calculation.
    EF : ndarray
        Error associated with the alternate hypothesis (the Full model)
        From a multivariate F calculation.
    dfnum : int
        Degrees of freedom the Restricted model.
    dfden : int
        Degrees of freedom used in the denominator, i.e. associated with
        the Full model `EF` (the original docstring said "Restricted",
        but the code divides det(EF) by `dfden`).

    Returns
    -------
    fstat : float
        The computed F-statistic.
    """
    # Promote scalar error terms to 1x1 matrices so linalg.det applies.
    if isinstance(ER, (int, float)):
        ER = array([[ER]])
    if isinstance(EF, (int, float)):
        EF = array([[EF]])
    n_um = (linalg.det(ER) - linalg.det(EF)) / float(dfnum)
    d_en = linalg.det(EF) / float(dfden)
    return n_um / d_en
#####################################
# SUPPORT FUNCTIONS #
#####################################
@np.deprecate(message="scipy.stats.ss is deprecated in scipy 0.17.0")
def ss(a, axis=0):
    """Deprecated public alias for `_sum_of_squares`."""
    return _sum_of_squares(a, axis)
def _sum_of_squares(a, axis=0):
    """
    Squares each element of the input array, and returns the sum(s) of that.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    sum_of_squares : ndarray
        The sum along the given axis for (a**2).

    See also
    --------
    _square_of_sums : The square(s) of the sum(s) (the opposite of
        `_sum_of_squares`).
    """
    # Normalize input and axis through the shared helper, then reduce.
    a, axis = _chk_asarray(a, axis)
    return np.sum(np.square(a), axis)
@np.deprecate(message="scipy.stats.square_of_sums is deprecated "
              "in scipy 0.17.0")
def square_of_sums(a, axis=0):
    """Deprecated public alias for `_square_of_sums`."""
    return _square_of_sums(a, axis)
def _square_of_sums(a, axis=0):
    """
    Sums elements of the input array, and returns the square(s) of that sum.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int or None, optional
        Axis along which to calculate. Default is 0. If None, compute over
        the whole array `a`.

    Returns
    -------
    square_of_sums : float or ndarray
        The square of the sum over `axis`.

    See also
    --------
    _sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
    """
    a, axis = _chk_asarray(a, axis)
    total = np.sum(a, axis)
    # Promote to float before squaring to avoid integer overflow; scalars
    # and arrays take different paths.
    if np.isscalar(total):
        return float(total) * total
    return total.astype(float) * total
@np.deprecate(message="scipy.stats.fastsort is deprecated in scipy 0.16.0")
def fastsort(a):
    """
    Sort an array and provide the argsort.

    Parameters
    ----------
    a : array_like
        Input array.

    Returns
    -------
    fastsort : ndarray of type int
        sorted indices into the original array
    """
    # TODO: the wording in the docstring is nonsense.
    # Actually returns a 2-tuple (sorted values, argsort indices),
    # not just the indices as the docstring claims.
    it = np.argsort(a)
    as_ = a[it]
    return as_, it
| bsd-3-clause |
ndingwall/scikit-learn | sklearn/cross_decomposition/tests/test_pls.py | 1 | 20589 | import pytest
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition._pls import (
_center_scale_xy,
_get_first_singular_vectors_power_method,
_get_first_singular_vectors_svd,
_svd_flip_1d
)
from sklearn.cross_decomposition import CCA
from sklearn.cross_decomposition import PLSSVD, PLSRegression, PLSCanonical
from sklearn.datasets import make_regression
from sklearn.utils import check_random_state
from sklearn.utils.extmath import svd_flip
from sklearn.exceptions import ConvergenceWarning
def assert_matrix_orthogonal(M):
    """Assert that the columns of `M` are mutually orthogonal."""
    gram = np.dot(M.T, M)
    # The Gram matrix of an orthogonal set is diagonal: compare it against
    # its own diagonal part.
    assert_array_almost_equal(gram, np.diag(np.diag(gram)))
def test_pls_canonical_basics():
    """Basic structural checks for PLSCanonical on the Linnerud data."""
    # Basic checks for PLSCanonical
    d = load_linnerud()
    X = d.data
    Y = d.target
    pls = PLSCanonical(n_components=X.shape[1])
    pls.fit(X, Y)

    # Weights and scores of a canonical PLS fit must be orthogonal.
    assert_matrix_orthogonal(pls.x_weights_)
    assert_matrix_orthogonal(pls.y_weights_)
    assert_matrix_orthogonal(pls._x_scores)
    assert_matrix_orthogonal(pls._y_scores)

    # Check X = TP' and Y = UQ'
    T = pls._x_scores
    P = pls.x_loadings_
    U = pls._y_scores
    Q = pls.y_loadings_
    # Need to scale first
    Xc, Yc, x_mean, y_mean, x_std, y_std = _center_scale_xy(
        X.copy(), Y.copy(), scale=True)
    assert_array_almost_equal(Xc, np.dot(T, P.T))
    assert_array_almost_equal(Yc, np.dot(U, Q.T))

    # Check that rotations on training data lead to scores
    Xt = pls.transform(X)
    assert_array_almost_equal(Xt, pls._x_scores)
    Xt, Yt = pls.transform(X, Y)
    assert_array_almost_equal(Xt, pls._x_scores)
    assert_array_almost_equal(Yt, pls._y_scores)

    # Check that inverse_transform works
    X_back = pls.inverse_transform(Xt)
    assert_array_almost_equal(X_back, X)
def test_sanity_check_pls_regression():
    """Compare PLSRegression output against R reference values."""
    # Sanity check for PLSRegression
    # The results were checked against the R-packages plspm, misOmics and pls
    d = load_linnerud()
    X = d.data
    Y = d.target

    pls = PLSRegression(n_components=X.shape[1])
    pls.fit(X, Y)

    # Reference values computed with the R packages listed above.
    expected_x_weights = np.array(
        [[-0.61330704, -0.00443647, 0.78983213],
         [-0.74697144, -0.32172099, -0.58183269],
         [-0.25668686, 0.94682413, -0.19399983]])

    expected_x_loadings = np.array(
        [[-0.61470416, -0.24574278, 0.78983213],
         [-0.65625755, -0.14396183, -0.58183269],
         [-0.51733059, 1.00609417, -0.19399983]])

    expected_y_weights = np.array(
        [[+0.32456184, 0.29892183, 0.20316322],
         [+0.42439636, 0.61970543, 0.19320542],
         [-0.13143144, -0.26348971, -0.17092916]])

    expected_y_loadings = np.array(
        [[+0.32456184, 0.29892183, 0.20316322],
         [+0.42439636, 0.61970543, 0.19320542],
         [-0.13143144, -0.26348971, -0.17092916]])

    # Compare up to sign: the PLS solution is only defined up to a
    # simultaneous sign flip of weights and loadings.
    assert_array_almost_equal(np.abs(pls.x_loadings_),
                              np.abs(expected_x_loadings))
    assert_array_almost_equal(np.abs(pls.x_weights_),
                              np.abs(expected_x_weights))
    assert_array_almost_equal(np.abs(pls.y_loadings_),
                              np.abs(expected_y_loadings))
    assert_array_almost_equal(np.abs(pls.y_weights_),
                              np.abs(expected_y_weights))

    # The R / Python difference in the signs should be consistent across
    # loadings, weights, etc.
    x_loadings_sign_flip = np.sign(pls.x_loadings_ / expected_x_loadings)
    x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)
    y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)
    y_loadings_sign_flip = np.sign(pls.y_loadings_ / expected_y_loadings)
    assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip)
    assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip)
def test_sanity_check_pls_regression_constant_column_Y():
    """PLSRegression with a constant first Y column vs. R reference."""
    # Check behavior when the first column of Y is constant
    # The results are checked against a modified version of plsreg2
    # from the R-package plsdepot
    d = load_linnerud()
    X = d.data
    Y = d.target
    Y[:, 0] = 1  # constant column: its loadings/weights should be zero

    pls = PLSRegression(n_components=X.shape[1])
    pls.fit(X, Y)

    expected_x_weights = np.array(
        [[-0.6273573, 0.007081799, 0.7786994],
         [-0.7493417, -0.277612681, -0.6011807],
         [-0.2119194, 0.960666981, -0.1794690]])

    expected_x_loadings = np.array(
        [[-0.6273512, -0.22464538, 0.7786994],
         [-0.6643156, -0.09871193, -0.6011807],
         [-0.5125877, 1.01407380, -0.1794690]])

    expected_y_loadings = np.array(
        [[0.0000000, 0.0000000, 0.0000000],
         [0.4357300, 0.5828479, 0.2174802],
         [-0.1353739, -0.2486423, -0.1810386]])

    # Compare up to sign (PLS solutions are sign-indeterminate).
    assert_array_almost_equal(np.abs(expected_x_weights),
                              np.abs(pls.x_weights_))
    assert_array_almost_equal(np.abs(expected_x_loadings),
                              np.abs(pls.x_loadings_))
    # For the PLSRegression with default parameters, y_loadings == y_weights
    assert_array_almost_equal(np.abs(pls.y_loadings_),
                              np.abs(expected_y_loadings))
    assert_array_almost_equal(np.abs(pls.y_weights_),
                              np.abs(expected_y_loadings))

    x_loadings_sign_flip = np.sign(expected_x_loadings / pls.x_loadings_)
    x_weights_sign_flip = np.sign(expected_x_weights / pls.x_weights_)
    # we ignore the first full-zeros row for y
    y_loadings_sign_flip = np.sign(expected_y_loadings / pls.y_loadings_)

    assert_array_equal(x_loadings_sign_flip, x_weights_sign_flip)
    assert_array_equal(x_loadings_sign_flip[1:], y_loadings_sign_flip[1:])
def test_sanity_check_pls_canonical():
    """Compare PLSCanonical output against the R-package plspm."""
    # Sanity check for PLSCanonical
    # The results were checked against the R-package plspm
    d = load_linnerud()
    X = d.data
    Y = d.target

    pls = PLSCanonical(n_components=X.shape[1])
    pls.fit(X, Y)

    expected_x_weights = np.array(
        [[-0.61330704, 0.25616119, -0.74715187],
         [-0.74697144, 0.11930791, 0.65406368],
         [-0.25668686, -0.95924297, -0.11817271]])

    expected_x_rotations = np.array(
        [[-0.61330704, 0.41591889, -0.62297525],
         [-0.74697144, 0.31388326, 0.77368233],
         [-0.25668686, -0.89237972, -0.24121788]])

    expected_y_weights = np.array(
        [[+0.58989127, 0.7890047, 0.1717553],
         [+0.77134053, -0.61351791, 0.16920272],
         [-0.23887670, -0.03267062, 0.97050016]])

    expected_y_rotations = np.array(
        [[+0.58989127, 0.7168115, 0.30665872],
         [+0.77134053, -0.70791757, 0.19786539],
         [-0.23887670, -0.00343595, 0.94162826]])

    # Compare up to sign (PLS solutions are sign-indeterminate).
    assert_array_almost_equal(np.abs(pls.x_rotations_),
                              np.abs(expected_x_rotations))
    assert_array_almost_equal(np.abs(pls.x_weights_),
                              np.abs(expected_x_weights))
    assert_array_almost_equal(np.abs(pls.y_rotations_),
                              np.abs(expected_y_rotations))
    assert_array_almost_equal(np.abs(pls.y_weights_),
                              np.abs(expected_y_weights))

    # Sign flips relative to R must be consistent between rotations and
    # weights.
    x_rotations_sign_flip = np.sign(pls.x_rotations_ / expected_x_rotations)
    x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)
    y_rotations_sign_flip = np.sign(pls.y_rotations_ / expected_y_rotations)
    y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)
    assert_array_almost_equal(x_rotations_sign_flip, x_weights_sign_flip)
    assert_array_almost_equal(y_rotations_sign_flip, y_weights_sign_flip)

    assert_matrix_orthogonal(pls.x_weights_)
    assert_matrix_orthogonal(pls.y_weights_)
    assert_matrix_orthogonal(pls._x_scores)
    assert_matrix_orthogonal(pls._y_scores)
def test_sanity_check_pls_canonical_random():
    """Compare PLSCanonical on random latent-variable data against plspm."""
    # Sanity check for PLSCanonical on random data
    # The results were checked against the R-package plspm
    n = 500
    p_noise = 10
    q_noise = 5

    # 2 latents vars:
    rng = check_random_state(11)
    l1 = rng.normal(size=n)
    l2 = rng.normal(size=n)
    latents = np.array([l1, l1, l2, l2]).T
    # X and Y share the latent structure plus independent noise columns.
    X = latents + rng.normal(size=4 * n).reshape((n, 4))
    Y = latents + rng.normal(size=4 * n).reshape((n, 4))
    X = np.concatenate(
        (X, rng.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
    Y = np.concatenate(
        (Y, rng.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)

    pls = PLSCanonical(n_components=3)
    pls.fit(X, Y)

    expected_x_weights = np.array(
        [[0.65803719, 0.19197924, 0.21769083],
         [0.7009113, 0.13303969, -0.15376699],
         [0.13528197, -0.68636408, 0.13856546],
         [0.16854574, -0.66788088, -0.12485304],
         [-0.03232333, -0.04189855, 0.40690153],
         [0.1148816, -0.09643158, 0.1613305],
         [0.04792138, -0.02384992, 0.17175319],
         [-0.06781, -0.01666137, -0.18556747],
         [-0.00266945, -0.00160224, 0.11893098],
         [-0.00849528, -0.07706095, 0.1570547],
         [-0.00949471, -0.02964127, 0.34657036],
         [-0.03572177, 0.0945091, 0.3414855],
         [0.05584937, -0.02028961, -0.57682568],
         [0.05744254, -0.01482333, -0.17431274]])

    expected_x_loadings = np.array(
        [[0.65649254, 0.1847647, 0.15270699],
         [0.67554234, 0.15237508, -0.09182247],
         [0.19219925, -0.67750975, 0.08673128],
         [0.2133631, -0.67034809, -0.08835483],
         [-0.03178912, -0.06668336, 0.43395268],
         [0.15684588, -0.13350241, 0.20578984],
         [0.03337736, -0.03807306, 0.09871553],
         [-0.06199844, 0.01559854, -0.1881785],
         [0.00406146, -0.00587025, 0.16413253],
         [-0.00374239, -0.05848466, 0.19140336],
         [0.00139214, -0.01033161, 0.32239136],
         [-0.05292828, 0.0953533, 0.31916881],
         [0.04031924, -0.01961045, -0.65174036],
         [0.06172484, -0.06597366, -0.1244497]])

    expected_y_weights = np.array(
        [[0.66101097, 0.18672553, 0.22826092],
         [0.69347861, 0.18463471, -0.23995597],
         [0.14462724, -0.66504085, 0.17082434],
         [0.22247955, -0.6932605, -0.09832993],
         [0.07035859, 0.00714283, 0.67810124],
         [0.07765351, -0.0105204, -0.44108074],
         [-0.00917056, 0.04322147, 0.10062478],
         [-0.01909512, 0.06182718, 0.28830475],
         [0.01756709, 0.04797666, 0.32225745]])

    expected_y_loadings = np.array(
        [[0.68568625, 0.1674376, 0.0969508],
         [0.68782064, 0.20375837, -0.1164448],
         [0.11712173, -0.68046903, 0.12001505],
         [0.17860457, -0.6798319, -0.05089681],
         [0.06265739, -0.0277703, 0.74729584],
         [0.0914178, 0.00403751, -0.5135078],
         [-0.02196918, -0.01377169, 0.09564505],
         [-0.03288952, 0.09039729, 0.31858973],
         [0.04287624, 0.05254676, 0.27836841]])

    # Compare up to sign (PLS solutions are sign-indeterminate).
    assert_array_almost_equal(np.abs(pls.x_loadings_),
                              np.abs(expected_x_loadings))
    assert_array_almost_equal(np.abs(pls.x_weights_),
                              np.abs(expected_x_weights))
    assert_array_almost_equal(np.abs(pls.y_loadings_),
                              np.abs(expected_y_loadings))
    assert_array_almost_equal(np.abs(pls.y_weights_),
                              np.abs(expected_y_weights))

    # Sign flips relative to R must be consistent across attributes.
    x_loadings_sign_flip = np.sign(pls.x_loadings_ / expected_x_loadings)
    x_weights_sign_flip = np.sign(pls.x_weights_ / expected_x_weights)
    y_weights_sign_flip = np.sign(pls.y_weights_ / expected_y_weights)
    y_loadings_sign_flip = np.sign(pls.y_loadings_ / expected_y_loadings)
    assert_array_almost_equal(x_loadings_sign_flip, x_weights_sign_flip)
    assert_array_almost_equal(y_loadings_sign_flip, y_weights_sign_flip)

    assert_matrix_orthogonal(pls.x_weights_)
    assert_matrix_orthogonal(pls.y_weights_)
    assert_matrix_orthogonal(pls._x_scores)
    assert_matrix_orthogonal(pls._y_scores)
def test_convergence_fail():
    """PLSCanonical must emit ConvergenceWarning when max_iter is too small."""
    data = load_linnerud()
    features, targets = data.data, data.target
    # Two NIPALS iterations are far too few for convergence on this data.
    estimator = PLSCanonical(n_components=features.shape[1], max_iter=2)
    with pytest.warns(ConvergenceWarning):
        estimator.fit(features, targets)
@pytest.mark.filterwarnings('ignore:.*scores_ was deprecated')  # 0.26
@pytest.mark.parametrize('Est', (PLSSVD, PLSRegression, PLSCanonical))
def test_attibutes_shapes(Est):
    """Fitted score/weight attributes must have n_components columns."""
    data = load_linnerud()
    n_components = 2
    model = Est(n_components=n_components)
    model.fit(data.data, data.target)
    # Every per-component attribute carries one column per component.
    for attr in (model.x_scores_, model.y_scores_,
                 model.x_weights_, model.y_weights_):
        assert attr.shape[1] == n_components
@pytest.mark.parametrize('Est', (PLSRegression, PLSCanonical, CCA))
def test_univariate_equivalence(Est):
    """A single-column 2D Y must be equivalent to the same Y as 1D."""
    data = load_linnerud()
    X, Y = data.data, data.target
    model = Est(n_components=1)
    coef_from_1d = model.fit(X, Y[:, 0]).coef_
    coef_from_2d = model.fit(X, Y[:, :1]).coef_
    assert coef_from_1d.shape == coef_from_2d.shape
    assert_array_almost_equal(coef_from_1d, coef_from_2d)
@pytest.mark.parametrize('Est', (PLSRegression, PLSCanonical, CCA, PLSSVD))
def test_copy(Est):
    """Check that the `copy` keyword controls in-place modification."""
    # check that the "copy" keyword works
    d = load_linnerud()
    X = d.data
    Y = d.target
    X_orig = X.copy()

    # copy=True won't modify inplace
    pls = Est(copy=True).fit(X, Y)
    assert_array_equal(X, X_orig)

    # copy=False will modify inplace
    with pytest.raises(AssertionError):
        Est(copy=False).fit(X, Y)
        assert_array_almost_equal(X, X_orig)

    if Est is PLSSVD:
        return  # PLSSVD does not support copy param in predict or transform

    X_orig = X.copy()
    with pytest.raises(AssertionError):
        pls.transform(X, Y, copy=False),
        assert_array_almost_equal(X, X_orig)

    X_orig = X.copy()
    with pytest.raises(AssertionError):
        pls.predict(X, copy=False),
        assert_array_almost_equal(X, X_orig)

    # Make sure copy=True gives same transform and predictions as predict=False
    assert_array_almost_equal(pls.transform(X, Y, copy=True),
                              pls.transform(X.copy(), Y.copy(), copy=False))
    assert_array_almost_equal(pls.predict(X, copy=True),
                              pls.predict(X.copy(), copy=False))
def _generate_test_scale_and_stability_datasets():
    """Generate dataset for test_scale_and_stability"""
    # dataset for non-regression 7818
    rng = np.random.RandomState(0)
    n_samples = 1000
    n_targets = 5
    n_features = 10
    Q = rng.randn(n_targets, n_features)
    Y = rng.randn(n_samples, n_targets)
    X = np.dot(Y, Q) + 2 * rng.randn(n_samples, n_features) + 1
    X *= 1000  # very large scale stresses numerical stability
    yield X, Y

    # Data set where one of the features is constant
    X, Y = load_linnerud(return_X_y=True)
    # causes X[:, -1].std() to be zero
    X[:, -1] = 1.0
    yield X, Y

    # Small hand-crafted dataset.
    X = np.array([[0., 0., 1.],
                  [1., 0., 0.],
                  [2., 2., 2.],
                  [3., 5., 4.]])
    Y = np.array([[0.1, -0.2],
                  [0.9, 1.1],
                  [6.2, 5.9],
                  [11.9, 12.3]])
    yield X, Y

    # Seeds that provide a non-regression test for #18746, where CCA fails
    seeds = [530, 741]
    for seed in seeds:
        rng = np.random.RandomState(seed)
        X = rng.randn(4, 3)
        Y = rng.randn(4, 2)
        yield X, Y
@pytest.mark.parametrize('Est', (CCA, PLSCanonical, PLSRegression, PLSSVD))
@pytest.mark.parametrize('X, Y', _generate_test_scale_and_stability_datasets())
def test_scale_and_stability(Est, X, Y):
    """scale=True is equivalent to scale=False on centered/scaled data

    This allows to check numerical stability over platforms as well"""
    X_scaled, Y_scaled, *_ = _center_scale_xy(X, Y)
    scores_scaled_by_est = Est(scale=True).fit_transform(X, Y)
    scores_prescaled = Est(scale=False).fit_transform(X_scaled, Y_scaled)
    # Both pipelines must produce the same X and Y scores.
    assert_allclose(scores_prescaled[0], scores_scaled_by_est[0], atol=1e-4)
    assert_allclose(scores_prescaled[1], scores_scaled_by_est[1], atol=1e-4)
@pytest.mark.parametrize('Est', (PLSSVD, PLSCanonical, CCA))
@pytest.mark.parametrize('n_components', (0, 4))
def test_n_components_bounds(Est, n_components):
    """Out-of-range n_components must warn and fall back to the rank bound."""
    # n_components should be in [1, min(n_samples, n_features, n_targets)]
    # TODO: catch error instead of warning in 0.26
    rng = np.random.RandomState(0)
    X = rng.randn(10, 5)
    Y = rng.randn(10, 3)
    model = Est(n_components=n_components)
    with pytest.warns(FutureWarning,
                      match="n_components=3 will be used instead"):
        model.fit(X, Y)
    # make sure upper bound of rank is used as a fallback
    assert model.transform(X).shape[1] == 3
@pytest.mark.parametrize('n_components', (0, 6))
def test_n_components_bounds_pls_regression(n_components):
    """PLSRegression caps n_components at n_features, with a warning."""
    # TODO: catch error instead of warning in 0.26
    rng = np.random.RandomState(0)
    X = rng.randn(10, 5)
    Y = rng.randn(10, 3)
    model = PLSRegression(n_components=n_components)
    with pytest.warns(FutureWarning,
                      match="n_components=5 will be used instead"):
        model.fit(X, Y)
    # make sure upper bound of rank (n_features == 5) is used as a fallback
    assert model.transform(X).shape[1] == 5
@pytest.mark.parametrize('Est', (PLSSVD, CCA, PLSCanonical))
def test_scores_deprecations(Est):
    """x_scores_ / y_scores_ must warn and still match transform output.

    Not deprecated for PLSRegression, whose y_scores_ differs from
    transform(Y_train).
    """
    # TODO: remove attributes and test in 0.26
    rng = np.random.RandomState(0)
    X = rng.randn(10, 5)
    Y = rng.randn(10, 3)
    model = Est().fit(X, Y)
    with pytest.warns(FutureWarning, match="x_scores_ was deprecated"):
        assert_allclose(model.x_scores_, model.transform(X))
    with pytest.warns(FutureWarning, match="y_scores_ was deprecated"):
        assert_allclose(model.y_scores_, model.transform(X, Y)[1])
@pytest.mark.parametrize('Est', (PLSRegression, PLSCanonical, CCA))
def test_norm_y_weights_deprecation(Est):
    """Accessing norm_y_weights on a fitted estimator must warn."""
    rng = np.random.RandomState(0)
    X = rng.randn(10, 5)
    Y = rng.randn(10, 3)
    fitted = Est().fit(X, Y)
    with pytest.warns(FutureWarning, match="norm_y_weights was deprecated"):
        fitted.norm_y_weights
# TODO: Remove test in 0.26
@pytest.mark.parametrize('Estimator',
                         (PLSRegression, PLSCanonical, CCA, PLSSVD))
@pytest.mark.parametrize('attribute',
                         ("x_mean_", "y_mean_", "x_std_", "y_std_"))
def test_mean_and_std_deprecation(Estimator, attribute):
    """Deprecated mean/std attributes must emit FutureWarning on access."""
    rng = np.random.RandomState(0)
    fitted = Estimator().fit(rng.randn(10, 5), rng.randn(10, 3))
    with pytest.warns(FutureWarning, match=f"{attribute} was deprecated"):
        getattr(fitted, attribute)
@pytest.mark.parametrize('n_samples, n_features', [(100, 10), (100, 200)])
@pytest.mark.parametrize('seed', range(10))
def test_singular_value_helpers(n_samples, n_features, seed):
    """Power method and direct SVD must yield close first singular vectors."""
    X, Y = make_regression(n_samples, n_features, n_targets=5,
                           random_state=seed)
    u_power, v_power, _ = _get_first_singular_vectors_power_method(
        X, Y, norm_y_weights=True)
    u_svd, v_svd = _get_first_singular_vectors_svd(X, Y)

    # Align signs before comparing (vectors are sign-indeterminate).
    _svd_flip_1d(u_power, v_power)
    _svd_flip_1d(u_svd, v_svd)

    rtol = 1e-1
    assert_allclose(u_power, u_svd, rtol=rtol)
    assert_allclose(v_power, v_svd, rtol=rtol)
def test_one_component_equivalence():
    """PLSSVD, PLSRegression and PLSCanonical agree for n_components=1."""
    X, Y = make_regression(100, 10, n_targets=5, random_state=0)
    projections = [Cls(n_components=1).fit(X, Y).transform(X)
                   for Cls in (PLSSVD, PLSRegression, PLSCanonical)]
    svd_scores, reg_scores, canonical_scores = projections
    assert_allclose(svd_scores, reg_scores, rtol=1e-2)
    assert_allclose(svd_scores, canonical_scores, rtol=1e-2)
def test_svd_flip_1d():
    # _svd_flip_1d must agree with svd_flip on 1d vectors (modulo reshaping).
    u = np.array([1, -4, 2])
    v = np.array([1, 2, 3])

    u_expected, v_expected = svd_flip(u.reshape(-1, 1), v.reshape(1, -1))
    _svd_flip_1d(u, v)  # modifies u and v inplace

    # check against both svd_flip's output and the hand-computed result
    assert_allclose(u, u_expected.ravel())
    assert_allclose(v, v_expected.ravel())
    assert_allclose(u, [-1, 4, -2])
    assert_allclose(v, [-1, -2, -3])
| bsd-3-clause |
jburos/survivalstan | test/test_pem_survival_model_sim.py | 1 | 2040 |
import matplotlib as mpl
mpl.use('Agg')
import survivalstan
from stancache import stancache
import numpy as np
from nose.tools import ok_
from functools import partial
num_iter = 500
from .test_datasets import load_test_dataset_long, sim_test_dataset_long
model_code = survivalstan.models.pem_survival_model
make_inits = None
def test_pem_model_sim(**kwargs):
    ''' Test PEM (piecewise exponential) survival model on simulated dataset,
    using an intercept-only formula
    '''
    dlong = sim_test_dataset_long()
    # fit via stancache so repeated test runs reuse the compiled model
    testfit = survivalstan.fit_stan_survival_model(
        model_cohort = 'test model',
        model_code = model_code,
        df = dlong,
        sample_col = 'index',
        timepoint_end_col = 'end_time',
        event_col = 'end_failure',
        formula = '~ 1',
        iter = num_iter,
        chains = 2,
        seed = 9001,
        make_inits = make_inits,
        FIT_FUN = stancache.cached_stan_fit,
        **kwargs
        )
    # sanity-check the keys of the returned fit dictionary
    ok_('fit' in testfit)
    ok_('coefs' in testfit)
    ok_('loo' in testfit)
    # smoke-test the plotting helpers on the fitted model
    survivalstan.utils.plot_coefs([testfit])
    survivalstan.utils.plot_coefs([testfit], trans=np.exp)
    survivalstan.utils.plot_coefs([testfit], element='baseline')
    return(testfit)
def test_pem_model_sim_covar(**kwargs):
    ''' Test PEM (piecewise exponential) survival model on simulated dataset,
    with age and sex covariates
    '''
    dlong = sim_test_dataset_long()
    # fit via stancache so repeated test runs reuse the compiled model
    testfit = survivalstan.fit_stan_survival_model(
        model_cohort = 'test model',
        model_code = model_code,
        df = dlong,
        sample_col = 'index',
        timepoint_end_col = 'end_time',
        event_col = 'end_failure',
        formula = '~ age + sex',
        iter = num_iter,
        chains = 2,
        seed = 9001,
        make_inits = make_inits,
        FIT_FUN = stancache.cached_stan_fit,
        **kwargs
        )
    # sanity-check the keys of the returned fit dictionary
    ok_('fit' in testfit)
    ok_('coefs' in testfit)
    ok_('loo' in testfit)
    # smoke-test the plotting helpers on the fitted model
    survivalstan.utils.plot_coefs([testfit])
    survivalstan.utils.plot_coefs([testfit], trans=np.exp)
    survivalstan.utils.plot_coefs([testfit], element='baseline')
    return(testfit)
| apache-2.0 |
myuuuuun/RepeatedMatrixGame | PrisonersDilemma/experiment3/contest1/contest1.py | 2 | 1238 | #-*- encoding: utf-8 -*-
'''
Simulate finite repeated symmetric matrix game.
Copyright (c) 2015 @myuuuuun
Contest - perfect monitoring
'''
import sys
sys.path.append('../')
sys.path.append('../user_strategies')
import numpy as np
import pandas as pd
import play as pl
from Iida_perfect_monitoring import Iida_pm
from kato import KatoStrategy
from ikegami_perfect import Self_Centered_perfect
from mhanami_Public_Strategy import PubStrategy
from tsuyoshi import GrimTrigger
from gistfile1 import MyStrategy
from beeleb_Strategy import beeleb
from oyama import OyamaPerfectMonitoring
from ogawa import ogawa
from yamagishi_impd import yamagishi
np.set_printoptions(precision=3)
if __name__ == '__main__':
    # Stage-game payoff matrix of the symmetric 2x2 prisoner's dilemma
    # (rows: own action, columns: opponent's action).
    payoff = np.array([[4, 0], [5, 2]])
    seed = 282
    rs = np.random.RandomState(seed)
    # The first period is assumed to occur with probability 1; thereafter the
    # game continues each period with probability discount_v, so the length of
    # each of the 1000 repetitions is geometrically distributed.
    discount_v = 0.97
    ts_length = rs.geometric(p=1-discount_v, size=1000)
    # Contestant strategies competing under perfect monitoring.
    strategies = [Iida_pm, PubStrategy, KatoStrategy, Self_Centered_perfect, GrimTrigger, MyStrategy, beeleb, OyamaPerfectMonitoring, ogawa, yamagishi]
    game = pl.RepeatedMatrixGame(payoff, strategies, ts_length=ts_length, repeat=1000)
    game.play(mtype="perfect", random_seed=seed, record=True)
| mit |
Statoil/SegyIO | python/segyio/trace.py | 1 | 28059 | import collections
import contextlib
import itertools
import warnings
import sys
try: from future_builtins import zip
except ImportError: pass
import numpy as np
from .line import HeaderLine
from .field import Field
from .utils import castarray
# ``collections.Sequence`` moved to ``collections.abc`` in Python 3.3, and the
# top-level alias was removed entirely in Python 3.10. Resolve the ABC in a
# way that keeps working on both Python 2 and modern Python 3.
try:
    from collections.abc import Sequence as _AbcSequence
except ImportError:  # Python 2
    _AbcSequence = collections.Sequence


class Sequence(_AbcSequence):
    # unify the common optimisations and boilerplate of Trace, RawTrace, and
    # Header, which all obey the same index-oriented interface, and all share
    # length and wrap-around properties.
    #
    # It provides a useful negative-wrap index method which deals
    # appropriately with IndexError and python2-3 differences.

    def __init__(self, length):
        # number of addressable elements (traces / headers / text headers)
        self.length = length

    def __len__(self):
        """x.__len__() <==> len(x)"""
        return self.length

    def __iter__(self):
        """x.__iter__() <==> iter(x)"""
        # __iter__ has a reasonable default implementation from
        # collections.Sequence. It's essentially this loop:
        # for i in range(len(self)): yield self[i]
        # However, in segyio that means the double-buffering, buffer reuse does
        # not happen, which is *much* slower (the allocation of otherwised
        # reused numpy objects takes about half the execution time), so
        # explicitly implement it as [:]
        return self[:]

    def wrapindex(self, i):
        """Wrap a possibly-negative index and range-check it.

        Raises IndexError when out of range, and TypeError for non-int-like
        arguments (matching list semantics on Python 2 and 3).
        """
        if i < 0:
            i += len(self)

        if not 0 <= i < len(self):
            # in python2, int-slice comparison does not raise a type error,
            # (but returns False), so force a type-error if this still isn't an
            # int-like.
            _ = i + 0
            raise IndexError('trace index out of range')

        return i
class Trace(Sequence):
    """
    The Trace implements the array interface, where every array element, the
    data trace, is a numpy.ndarray. As all arrays, it can be random accessed,
    iterated over, and read strided. Data is read lazily from disk, so
    iteration does not consume much memory. If you want eager reading, use
    Trace.raw.

    This mode gives access to reading and writing functionality for traces.
    The primary data type is ``numpy.ndarray``. Traces can be accessed
    individually or with python slices, and writing is done via assignment.

    Notes
    -----
    .. versionadded:: 1.1

    .. versionchanged:: 1.6
       common list operations (collections.Sequence)

    Examples
    --------
    Read all traces in file f and store in a list:

    >>> l = [numpy.copy(tr) for tr in trace[:]]

    Do numpy operations on a trace:

    >>> tr = trace[10]
    >>> tr = tr * 2
    >>> tr = tr - 100
    >>> avg = numpy.average(tr)

    Double every trace value and write to disk. Since accessing a trace
    gives a numpy value, to write to the respective trace we need its index:

    >>> for i, tr in enumerate(trace):
    ...     tr = tr * 2
    ...     trace[i] = tr
    """

    def __init__(self, filehandle, dtype, tracecount, samples, readonly):
        # filehandle: low-level segyio file handle doing the actual I/O
        # dtype: numpy dtype of a single sample
        # samples: samples per trace, i.e. the shape of each returned array
        # readonly: True when the underlying file was opened read-only
        super(Trace, self).__init__(tracecount)
        self.filehandle = filehandle
        self.dtype = dtype
        self.shape = samples
        self.readonly = readonly

    def __getitem__(self, i):
        """trace[i]

        ith trace of the file, starting at 0. trace[i] returns a numpy array,
        and changes to this array will *not* be reflected on disk.

        When i is a slice, a generator of numpy arrays is returned.

        Parameters
        ----------
        i : int or slice

        Returns
        -------
        trace : numpy.ndarray of dtype or generator of numpy.ndarray of dtype

        Notes
        -----
        .. versionadded:: 1.1

        Behaves like [] for lists.

        .. note::

            This operator reads lazily from the file, meaning the file is read
            on ``next()``, and only one trace is fixed in memory. This means
            segyio can run through arbitrarily large files without consuming
            much memory, but it is potentially slow if the goal is to read the
            entire file into memory. If that is the case, consider using
            `trace.raw`, which reads eagerly.

        Examples
        --------
        Read every other trace:

        >>> for tr in trace[::2]:
        ...     print(tr)

        Read all traces, last-to-first:

        >>> for tr in trace[::-1]:
        ...     tr.mean()

        Read a single value. The second [] is regular numpy array indexing, and
        supports all numpy operations, including negative indexing and slicing:

        >>> trace[0][0]
        1490.2
        >>> trace[0][1]
        1490.8
        >>> trace[0][-1]
        1871.3
        >>> trace[-1][100]
        1562.0
        """
        try:
            # int-like index: read one trace into a fresh buffer
            i = self.wrapindex(i)
            buf = np.zeros(self.shape, dtype = self.dtype)
            return self.filehandle.gettr(buf, i, 1, 1)

        except TypeError:
            # we assume this is a generator. extract the indices-tuple right
            # away, because if this is NOT a slice we want to fail with a type
            # error right away. If this is done inside gen() then the call will
            # succeed with a generator, which fails on first next() with an
            # attribute error
            try:
                indices = i.indices(len(self))
            except AttributeError:
                msg = 'trace indices must be integers or slices, not {}'
                raise TypeError(msg.format(type(i).__name__))

            def gen():
                # double-buffer the trace. when iterating over a range, we want
                # to make sure the visible change happens as late as possible,
                # and that in the case of exception the last valid trace was
                # untouched. this allows for some fancy control flow, and more
                # importantly helps debugging because you can fully inspect and
                # interact with the last good value.
                x = np.zeros(self.shape, dtype=self.dtype)
                y = np.zeros(self.shape, dtype=self.dtype)

                for j in range(*indices):
                    self.filehandle.gettr(x, j, 1, 1)
                    # swap buffers so the freshly-read trace is yielded and the
                    # previously-yielded one becomes the next read target
                    x, y = y, x
                    yield y

            return gen()

    def __setitem__(self, i, val):
        """trace[i] = val

        Write the ith trace of the file, starting at 0. It accepts any
        array_like, but val must be at least as big as the underlying data
        trace.

        If val is longer than the underlying trace, it is essentially
        truncated.

        For the best performance, val should be a numpy.ndarray of sufficient
        size and same dtype as the file. segyio will warn on mismatched types,
        and attempt a conversion for you.

        Data is written immediately to disk. If writing multiple traces at
        once, and a write fails partway through, the resulting file is left in
        an unspecified state.

        Parameters
        ----------
        i : int or slice
        val : array_like

        Notes
        -----
        .. versionadded:: 1.1

        Behaves like [] for lists.

        Examples
        --------
        Write a single trace:

        >>> trace[10] = list(range(1000))

        Write multiple traces:

        >>> trace[10:15] = np.array([cube[i] for i in range(5)])

        Write multiple traces with stride:

        >>> trace[10:20:2] = np.array([cube[i] for i in range(5)])
        """
        if isinstance(i, slice):
            # recurse on every (index, trace) pair of the slice assignment
            for j, x in zip(range(*i.indices(len(self))), val):
                self[j] = x

            return

        # coerce val to a compatible numpy array before writing
        xs = castarray(val, self.dtype)

        # TODO: check if len(xs) > shape, and optionally warn on truncating
        # writes
        self.filehandle.puttr(self.wrapindex(i), xs)

    def __repr__(self):
        return "Trace(traces = {}, samples = {})".format(len(self), self.shape)

    @property
    def raw(self):
        """
        An eager version of Trace

        Returns
        -------
        raw : RawTrace
        """
        return RawTrace(self.filehandle,
                        self.dtype,
                        len(self),
                        self.shape,
                        self.readonly,
                        )

    @property
    @contextlib.contextmanager
    def ref(self):
        """
        A write-back version of Trace

        Returns
        -------
        ref : RefTrace
            `ref` is returned in a context manager, and must be in a ``with``
            statement

        Notes
        -----
        .. versionadded:: 1.6

        Examples
        --------
        >>> with trace.ref as ref:
        ...     ref[10] += 1.617
        """
        x = RefTrace(self.filehandle,
                     self.dtype,
                     len(self),
                     self.shape,
                     self.readonly,
                     )
        yield x
        # leaving the with-block: write back any modified cached traces
        x.flush()
class RawTrace(Trace):
    """
    Behaves exactly like trace, except reads are done eagerly and returned as
    numpy.ndarray, instead of generators of numpy.ndarray.
    """
    def __init__(self, *args):
        super(RawTrace, self).__init__(*args)

    def __getitem__(self, i):
        """trace[i]

        Eagerly read the ith trace of the file, starting at 0. trace[i] returns
        a numpy array, and changes to this array will *not* be reflected on
        disk.

        When i is a slice, this returns a 2-dimensional numpy.ndarray .

        Parameters
        ----------
        i : int or slice

        Returns
        -------
        trace : numpy.ndarray of dtype

        Notes
        -----
        .. versionadded:: 1.1

        Behaves like [] for lists.

        .. note::

            Reading this way is more efficient if you know you can afford the
            extra memory usage. It reads the requested traces immediately to
            memory.
        """
        try:
            # int-like index: one trace, same as the lazy Trace
            i = self.wrapindex(i)
            buf = np.zeros(self.shape, dtype = self.dtype)
            return self.filehandle.gettr(buf, i, 1, 1)

        except TypeError:
            try:
                indices = i.indices(len(self))
            except AttributeError:
                msg = 'trace indices must be integers or slices, not {}'
                raise TypeError(msg.format(type(i).__name__))

            start, _, step = indices
            length = len(range(*indices))
            # read all requested traces in one call, into one 2d array
            buf = np.empty((length, self.shape), dtype = self.dtype)
            return self.filehandle.gettr(buf, start, step, length)
def fingerprint(x):
    # Checksum of the raw bytes backing the array - a cheap way to detect
    # that a trace has been modified in-place since it was last synced.
    raw = bytes(x.data)
    return hash(raw)
class RefTrace(Trace):
    """
    Behaves like trace, except changes to the returned numpy arrays *are*
    reflected on disk. Operations have to be in-place on the numpy array, so
    assignment on a trace will not work.

    This feature exists to support code like::

        >>> with ref as r:
        ...     for x, y in zip(r, src):
        ...         numpy.copyto(x, y + 10)

    This class is not meant to be instantiated directly, but returned by
    :attr:`Trace.ref`. This feature requires a context manager, to guarantee
    modifications are written back to disk.
    """
    def __init__(self, *args):
        super(RefTrace, self).__init__(*args)
        # traceno -> (trace array, fingerprint at last sync); flush() uses
        # the fingerprint to detect traces modified in-place
        self.refs = {}

    def flush(self):
        """
        Commit cached writes to the file handle. Does not flush libc buffers or
        notifies the kernel, so these changes may not immediately be visible to
        other processes.

        Updates the fingerprints when writes happen, so successive ``flush()``
        invocations are no-ops.

        It is not necessary to call this method in user code.

        Notes
        -----
        .. versionadded:: 1.6

        This method is not intended as user-oriented functionality, but might
        be useful in certain contexts to provide stronger guarantees.
        """
        garbage = []
        for i, (x, signature) in self.refs.items():
            # refcount == 3 means the dict entry, the loop variable, and the
            # getrefcount argument hold the only references - the user is done
            # with this trace, so the entry can be dropped afterwards
            if sys.getrefcount(x) == 3:
                garbage.append(i)

            if fingerprint(x) == signature: continue
            self.filehandle.puttr(i, x)
            # bug fix: store the new fingerprint back into the cache. the
            # previous code assigned it to a dead local, so a second flush()
            # would re-write the same unmodified trace
            self.refs[i] = (x, fingerprint(x))

        # to avoid too many resource leaks, when this dict is the only one
        # holding references to already-produced traces, clear them
        for i in garbage:
            del self.refs[i]

    def fetch(self, i, buf = None):
        # Read trace i into buf (allocated on demand). For a writable file an
        # unwritten trace yields zeros instead of failing, which is what makes
        # ref usable for filling brand-new files.
        if buf is None:
            buf = np.zeros(self.shape, dtype = self.dtype)

        try:
            self.filehandle.gettr(buf, i, 1, 1)
        except IOError:
            if not self.readonly:
                # if the file is opened read-only and this happens, there's no
                # way to actually write and the error is an actual error
                buf.fill(0)
            else: raise

        return buf

    def __getitem__(self, i):
        """trace[i]

        Read the ith trace of the file, starting at 0. trace[i] returns a numpy
        array, but unlike Trace, changes to this array *will* be reflected on
        disk. The modifications must happen to the actual array (views are ok),
        so in-place operations work, but assignments will not::

            >>> with ref as ref:
            ...     x = ref[10]
            ...     x += 1.617 # in-place, works
            ...     numpy.copyto(x, x + 10) # works
            ...     x = x + 10 # re-assignment, won't change the original x

        Works on newly created files that has yet to have any traces written,
        which opens up a natural way of filling newly created files with data.
        When getting unwritten traces, a trace filled with zeros is returned.

        Parameters
        ----------
        i : int or slice

        Returns
        -------
        trace : numpy.ndarray of dtype

        Notes
        -----
        .. versionadded:: 1.6

        Behaves like [] for lists.

        Examples
        --------
        Merge two files with a binary operation. Relies on python3 iterator
        zip:

        >>> with ref as ref:
        ...     for x, lhs, rhs in zip(ref, L, R):
        ...         numpy.copyto(x, lhs + rhs)

        Create a file and fill with data (the repeated trace index):

        >>> f = create()
        >>> with f.trace.ref as ref:
        ...     for i, x in enumerate(ref):
        ...         x.fill(i)
        """
        try:
            i = self.wrapindex(i)
            # we know this class is only used in context managers, so we know
            # refs don't escape (with expectation of being written), so
            # preserve all refs yielded with getitem(int)
            #
            # using ref[int] is problematic and pointless, we need to handle
            # this scenario gracefully:
            # with f.trace.ref as ref:
            #   x = ref[10]
            #   x[5] = 0
            #   # invalidate other refs
            #   y = ref[11]
            #   y[6] = 1.6721
            #
            #   # if we don't preserve returned individual getitems, this
            #   # write is lost
            #   x[5] = 52
            #
            # for slices, we know that references terminate with every
            # iteration anyway, multiple live references cannot happen
            if i in self.refs:
                return self.refs[i][0]

            x = self.fetch(i)
            self.refs[i] = (x, fingerprint(x))
            return x

        except TypeError:
            try:
                indices = i.indices(len(self))
            except AttributeError:
                msg = 'trace indices must be integers or slices, not {}'
                raise TypeError(msg.format(type(i).__name__))

            def gen():
                x = np.zeros(self.shape, dtype = self.dtype)
                # track the last visited index; None means the range was
                # empty and there is nothing to preserve on exit
                j = None
                y = None
                try:
                    for j in range(*indices):
                        x = self.fetch(j, x)
                        y = fingerprint(x)
                        yield x

                        if not fingerprint(x) == y:
                            self.filehandle.puttr(j, x)
                finally:
                    # the last yielded item is available after the loop, so
                    # preserve it and check if it's been updated on exit.
                    # bug fix: guard against an empty range, where j and y
                    # were never bound and the original raised NameError
                    if j is not None:
                        self.refs[j] = (x, y)

            return gen()
class Header(Sequence):
    """Interact with segy in header mode

    This mode gives access to reading and writing functionality of headers,
    both in individual (trace) mode and line mode. The returned header
    implements a dict_like object with a fixed set of keys, given by the SEG-Y
    standard.

    The Header implements the array interface, where every array element, the
    data trace, is a numpy.ndarray. As all arrays, it can be random accessed,
    iterated over, and read strided. Data is read lazily from disk, so
    iteration does not consume much memory.

    Notes
    -----
    .. versionadded:: 1.1

    .. versionchanged:: 1.6
       common list operations (collections.Sequence)
    """
    def __init__(self, segy):
        # segy: the owning file object; provides tracecount, line metadata
        # and the trace-header I/O used by Field
        self.segy = segy
        super(Header, self).__init__(segy.tracecount)

    def __getitem__(self, i):
        """header[i]

        ith header of the file, starting at 0.

        Parameters
        ----------
        i : int or slice

        Returns
        -------
        field : Field
            dict_like header

        Notes
        -----
        .. versionadded:: 1.1

        Behaves like [] for lists.

        Examples
        --------
        Reading a header:

        >>> header[10]

        Read a field in the first 5 headers:

        >>> [x[25] for x in header[:5]]
        [1, 2, 3, 4]

        Read a field in every other header:

        >>> [x[37] for x in header[::2]]
        [1, 3, 1, 3, 1, 3]
        """
        try:
            # int-like index: one dict_like Field bound to that trace
            i = self.wrapindex(i)
            return Field.trace(traceno = i, segy = self.segy)

        except TypeError:
            try:
                indices = i.indices(len(self))
            except AttributeError:
                msg = 'trace indices must be integers or slices, not {}'
                raise TypeError(msg.format(type(i).__name__))

            def gen():
                # double-buffer the header. when iterating over a range, we
                # want to make sure the visible change happens as late as
                # possible, and that in the case of exception the last valid
                # header was untouched. this allows for some fancy control
                # flow, and more importantly helps debugging because you can
                # fully inspect and interact with the last good value.
                x = Field.trace(None, self.segy)
                buf = bytearray(x.buf)
                for j in range(*indices):
                    # skip re-invoking __getitem__, just update the buffer
                    # directly with fetch, and save some initialisation work
                    buf = x.fetch(buf, j)
                    x.buf[:] = buf
                    x.traceno = j
                    yield x

            return gen()

    def __setitem__(self, i, val):
        """header[i] = val

        Write the ith header of the file, starting at 0. Unlike data traces
        (which return numpy.ndarrays), changes to returned headers being
        iterated over *will* be reflected on disk.

        Parameters
        ----------
        i : int or slice
        val : Field or array_like of dict_like

        Notes
        -----
        .. versionadded:: 1.1

        Behaves like [] for lists

        Examples
        --------
        Copy a header to a different trace:

        >>> header[28] = header[29]

        Write multiple fields in a trace:

        >>> header[10] = { 37: 5, TraceField.INLINE_3D: 2484 }

        Set a fixed set of values in all headers:

        >>> for x in header[:]:
        ...     x[37] = 1
        ...     x.update({ TraceField.offset: 1, 2484: 10 })

        Write a field in multiple headers

        >>> for x in header[:10]:
        ...     x.update({ TraceField.offset : 2 })

        Write a field in every other header:

        >>> for x in header[::2]:
        ...     x.update({ TraceField.offset : 2 })
        """
        x = self[i]

        try:
            # x is a single Field (int index); update it directly
            x.update(val)
        except AttributeError:
            # x is a generator of Fields (slice index). if val is one
            # header/dict, broadcast it to every yielded header
            if isinstance(val, Field) or isinstance(val, dict):
                val = itertools.repeat(val)

            for h, v in zip(x, val):
                h.update(v)

    @property
    def iline(self):
        """
        Headers, accessed by inline

        Returns
        -------
        line : HeaderLine
        """
        return HeaderLine(self, self.segy.iline, 'inline')

    @iline.setter
    def iline(self, value):
        """Write iterables to lines

        Examples:
            Supports writing to *all* crosslines via assignment, regardless of
            data source and format. Will respect the sample size and structure
            of the file being assigned to, so if the argument traces are longer
            than that of the file being written to the surplus data will be
            ignored. Uses same rules for writing as `f.iline[i] = x`.
        """
        for i, src in zip(self.segy.ilines, value):
            self.iline[i] = src

    @property
    def xline(self):
        """
        Headers, accessed by crossline

        Returns
        -------
        line : HeaderLine
        """
        return HeaderLine(self, self.segy.xline, 'crossline')

    @xline.setter
    def xline(self, value):
        """Write iterables to lines

        Examples:
            Supports writing to *all* crosslines via assignment, regardless of
            data source and format. Will respect the sample size and structure
            of the file being assigned to, so if the argument traces are longer
            than that of the file being written to the surplus data will be
            ignored. Uses same rules for writing as `f.xline[i] = x`.
        """
        for i, src in zip(self.segy.xlines, value):
            self.xline[i] = src
class Attributes(Sequence):
    """File-wide attribute (header word) reading

    Lazily read a single header word for every trace in the file. The
    Attributes implement the array interface, and will behave as expected when
    indexed and sliced.

    Notes
    -----
    .. versionadded:: 1.1
    """
    def __init__(self, field, filehandle, tracecount):
        # field: the header word (TraceField) to read for every trace
        super(Attributes, self).__init__(tracecount)
        self.field = field
        self.filehandle = filehandle
        self.tracecount = tracecount
        # header words are raw C ints
        self.dtype = np.intc

    def __iter__(self):
        # attributes requires a custom iter, because self[:] returns a numpy
        # array, which in itself is iterable, but not an iterator
        return iter(self[:])

    def __getitem__(self, i):
        """attributes[:]

        Parameters
        ----------
        i : int or slice or array_like

        Returns
        -------
        attributes : array_like of dtype

        Examples
        --------
        Read all unique sweep frequency end:

        >>> end = segyio.TraceField.SweepFrequencyEnd
        >>> sfe = np.unique(f.attributes( end )[:])

        Discover the first traces of each unique sweep frequency end:

        >>> end = segyio.TraceField.SweepFrequencyEnd
        >>> attrs = f.attributes(end)
        >>> sfe, tracenos = np.unique(attrs[:], return_index = True)

        Scatter plot group x/y-coordinates with SFEs (using matplotlib):

        >>> end = segyio.TraceField.SweepFrequencyEnd
        >>> attrs = f.attributes(end)
        >>> _, tracenos = np.unique(attrs[:], return_index = True)
        >>> gx = f.attributes(segyio.TraceField.GroupX)[tracenos]
        >>> gy = f.attributes(segyio.TraceField.GroupY)[tracenos]
        >>> scatter(gx, gy)
        """
        try:
            # fancy-indexing path: i is an array_like of trace numbers;
            # read the field for exactly those traces
            xs = np.asarray(i, dtype = self.dtype)
            xs = xs.astype(dtype = self.dtype, order = 'C', copy = False)
            attrs = np.empty(len(xs), dtype = self.dtype)
            return self.filehandle.field_foreach(attrs, xs, self.field)

        except TypeError:
            # scalar or slice path; a scalar is promoted to a 1-element slice
            try:
                i = slice(i, i + 1, 1)
            except TypeError:
                pass

            traces = self.tracecount
            filehandle = self.filehandle
            field = self.field

            start, stop, step = i.indices(traces)
            indices = range(start, stop, step)
            attrs = np.empty(len(indices), dtype = self.dtype)
            return filehandle.field_forall(attrs, start, stop, step, field)
class Text(Sequence):
    """Interact with segy in text mode

    This mode gives access to reading and writing functionality for textual
    headers.

    The primary data type is the python string. Reading textual headers is done
    with [], and writing is done via assignment. No additional structure is
    built around the textual header, so everything is treated as one long
    string without line breaks.

    Notes
    -----
    .. versionchanged:: 1.7
       common list operations (collections.Sequence)
    """
    def __init__(self, filehandle, textcount):
        super(Text, self).__init__(textcount)
        self.filehandle = filehandle

    def __getitem__(self, i):
        """text[i]

        Read the text header at i. 0 is the mandatory, main

        Examples
        --------
        Print the textual header:

        >>> print(f.text[0])

        Print the first extended textual header:

        >>> print(f.text[1])

        Print a textual header line-by-line:

        >>> # using zip, from the zip documentation
        >>> text = str(f.text[0])
        >>> lines = map(''.join, zip( *[iter(text)] * 80))
        >>> for line in lines:
        ...     print(line)
        ...
        """
        try:
            # int-like index: one textual header, as produced by the handle
            i = self.wrapindex(i)
            return self.filehandle.gettext(i)

        except TypeError:
            try:
                indices = i.indices(len(self))
            except AttributeError:
                msg = 'trace indices must be integers or slices, not {}'
                raise TypeError(msg.format(type(i).__name__))

            def gen():
                for j in range(*indices):
                    yield self.filehandle.gettext(j)

            return gen()

    def __setitem__(self, i, val):
        """text[i] = val

        Write the ith text header of the file, starting at 0.
        If val is instance of Text or iterable of Text,
        value is set to be the first element of every Text

        Parameters
        ----------
        i : int or slice
        val : str, Text or iterable if i is slice

        Examples
        --------
        Write a new textual header:

        >>> f.text[0] = make_new_header()
        >>> f.text[1:3] = ["new_header1", "new_header_2"]

        Copy a textual header:

        >>> f.text[1] = g.text[0]

        Write a textual header based on Text:

        >>> f.text[1] = g.text
        >>> assert f.text[1] == g.text[0]

        >>> f.text[1:3] = [g1.text, g2.text]
        >>> assert f.text[1] == g1.text[0]
        >>> assert f.text[2] == g2.text[0]
        """
        if isinstance(val, Text):
            # a whole Text object stands in for its main (first) header
            self[i] = val[0]
            return

        try:
            i = self.wrapindex(i)
            self.filehandle.puttext(i, val)

        except TypeError:
            try:
                indices = i.indices(len(self))
            except AttributeError:
                msg = 'trace indices must be integers or slices, not {}'
                raise TypeError(msg.format(type(i).__name__))

            for i, text in zip(range(*indices), val):
                if isinstance(text, Text):
                    text = text[0]
                self.filehandle.puttext(i, text)

    def __str__(self):
        msg = 'str(text) is deprecated, use explicit format instead'
        warnings.warn(msg, DeprecationWarning)
        # reflow the main header into 80-character lines
        return '\n'.join(map(''.join, zip(*[iter(str(self[0]))] * 80)))
| lgpl-3.0 |
yudingding6197/fin_script | kzz/kzz.py | 1 | 11334 | #!/usr/bin/env python
# -*- coding:gbk -*-
#¿Éתծ
import sys
import re
import urllib2
import time
import datetime
import zlib
import json
import getopt
import pandas as pd
sys.path.append('.')
from internal.dfcf_inf import *
'''
urlall = "http://data.eastmoney.com/kzz/default.html"
==========
[STARTDATE]='2017-12-04T00:00:00' É깺ÈÕÆÚ
[QSCFJ]='7.31' Ç¿Êê´¥·¢¼Û
[TEXCH]='CNSESZ' ½»Ò×Ëù´úÂë
[AISSUEVOL]='10.0' ·¢ÐйæÄ£
[ISSUEPRICE]='100.0' ·¢ÐмÛ
[DELISTDATE]='-' ÕªÅÆÈÕÆÚ
[BCODE]='17270600001JBV' £¿£¿±àÂë
[SWAPSCODE]='002284' Õý¹É´úÂë
[CORRESNAME]='ÑÇÌ«·¢Õ®' ծȯ·¢ÐÐÃû³Æ
[BONDCODE]='128023' ծȯ´úÂë
[PB]='2.36' PB
[SNAME]='ÑÇ̫תծ' ծȯÃû³Æ
[PARVALUE]='100.0' £¿£¿
[MarketType]='2' Êг¡ÀàÐÍ 1£ºÉϽ»£¬2£ºÉî½»
[LISTDATE]='2017-12-26T00:00:00' ÉÏÊн»Ò×ÈÕÆÚ
[HSCFJ]='13.57' »ØÊÛ´¥·¢¼Û
[SECURITYSHORTNAME]='ÑÇÌ«¹É·Ý' Õý¹ÉÃû³Æ
[GDYX_STARTDATE]='2017-12-01T00:00:00' ¹ÉȨµÇ¼ÇÈÕ
[LUCKRATE]='0.0245376003' ÖÐÇ©ÂÊ
[ZGJZGJJZ]='81.6091954023' µ±Ç°×ª¹É¼ÛÖµ
[LIMITBUYIPUB]='100.0' ×î´óÉ깺ÉÏÏÞ
[MEMO]='' ±¸×¢
[ZGJ]='8.52' ת¹É¼Û
[ZQHDATE]='2017-12-06T00:00:00' ÖÐÇ©ºÅ·¢²¼ÈÕ
[YJL]='22.5352112676' Òç¼ÛÂÊ
[FSTPLACVALPERSTK]='1.3558' ÿ¹ÉÅäÊÛ¶î
[ZQNEW]='100.0' ծȯ×îм۸ñ
[CORRESCODE]='072284' É깺´úÂë
[SWAPPRICE]='10.44' £¿£¿³õʼת¹É¼Û
[ZGJ_HQ]='8.52' Õý¹É¼Û
[ZGJZGJ]='10.44' µ±Ç°×ª¹É¼Û
'''
urlfmt = 'http://dcfm.eastmoney.com/em_mutisvcexpandinterface/api/js/get?type=KZZ_LB&token=70f12f2f4f091e459a279469fe49eca5&cmd=&st=STARTDATE&sr=-1&p=%d&ps=50&js=iGeILSKk={pages:(tp),data:(x)}&rt=50463927'
send_headers = {
'Host':'dcfm.eastmoney.com',
'Connection':'keep-alive',
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36',
'Accept':'*/*',
'DNT': '1',
'Referer': 'http://data.eastmoney.com/kzz/default.html',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.8',
'Cookie': 'st_pvi=33604263703666; emstat_bc_emcount=300285734935048278; \
pi=6100112247957528%3byudingding6197%3bgoutou%3bcUC1rQLS6GFOJIpJ%2b0I6Wt5AdIat%2bLRj2ZvGrlzeObRNvIHEcy62FDfQ%2boIImkvxiIyCd9QIklChsWI2qjINWL\
5DdBKYMZ71JGBsgoVjEXYjdw1rWDHu45I%2bNugmP4pbtdgvhUf884FcXhI1tqTCeHOobtdLSzpfA7h3MiSCx5rf8AdOH0uOhUuvYFxLUOx0oD6KGdMI%3bJ7wwpyq2YPDBfbw\
AqENYGA8GKWnFXYc1dIR5LuUNwKNYfZKtn2ufQSBXaOy%2fJuok5A10Hmp70CM%2bH4jRSUeLe8OOEOwSG5o1tvO4rx%2fTjNoq%2fbM2d6QYkUKtXL0XTX8nREubTh%2bPugi\
WdGxX3hARJpanE0qULw%3d%3d; \
uidal=6100112247957528goutou; vtpst=|; '
}
rturl = 'http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeDataSimple?page=%d&num=80&sort=symbol&asc=1&node=hskzz_z&_s_r_a=page'
rt_headers = {
'Host': 'vip.stock.finance.sina.com.cn',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.101 Safari/537.36',
'Content-type': 'application/x-www-form-urlencoded',
'Accept': '*/*',
'DNT': 1,
'Referer': 'http://vip.stock.finance.sina.com.cn/mkt/',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.8',
'Cookie': 'U_TRS1=0000000e.db0f67d9.58743532.0ab3855e; UOR=www.baidu.com,blog.sina.com.cn,; vjuids=-4f7e5f1b8.15985efc8ab.0.b0009b04a9d3a; \
SINAGLOBAL=114.243.223.213_1484010803.467733; SGUID=1490330513641_6143460; \
SCF=ApQZBkYNx5ED9eRh4x7RWZjDJZfZGEsCEcgqoaFHnaP7DqJZQpUkYRbUtwv1spWbrMvv9eU5YBJ8U5RXwjUggcc.; \
FINA_V_S_2=sz300648,sh603600; vjlast=1504425305; Apache=10.13.240.35_1513310793.422171; U_TRS2=00000016.b8c612f7.5a3398c2.608c14c5; \
SessionID=oe6kbh7j9v4usqdkigqe4inb71; ULV=1513330875049:56:5:3:10.13.240.35_1513310793.422171:1513225944670; \
sso_info=v02m6alo5qztbmdlpGpm6adpJqWuaeNo4S5jbKZtZqWkL2Mk5i1jaOktYyDmLOMsMDA; \
SUB=_2A253Rcj3DeThGedI7lQY9S7KyD-IHXVUMr0_rDV_PUNbm9AKLWTjkW9NVwM9cn_D0fMlGi8-URaLNK3j_mTGomwb; \
SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9W5.n90RO.U79QHoy5Rk17Op5NHD95QpSo-c1K-7Soe0Ws4Dqcjdi--NiKyFiK.Ni--4i-zpi-ihi--fi-2Xi-zX; \
ALF=1545792551; rotatecount=2; SR_SEL=1_511; lxlrttp=1513341002; FINANCE2=8d5626b3132364178ca15d9e87dc4f27; \
SINA_FINANCE=yudingding6197%3A1656950633%3A4'
}
'''
def getKZZConnect(page):
global content
LOOP_COUNT=0
urllink = urlfmt % (page)
res_data = None
while LOOP_COUNT<3:
try:
#print urllink
req = urllib2.Request(urllink,headers=send_headers)
res_data = urllib2.urlopen(req)
except:
print "Exception kzz urlopen"
LOOP_COUNT += 1
else:
break
if res_data is None:
print "Error: Fail to get request"
content = ''
return
content = res_data.read().decode('utf8')
return
'''
# Fetch one page of real-time convertible-bond (KZZ) quotes from sina's paged
# quote API, appending one dict per bond to `list`. Returns 'ok' on success,
# 'null' when the page is past the end of the data, and 'error' on a
# network/parse failure. The raw payload is kept in the module-global
# `content`, which this function consumes while parsing.
# NOTE(review): the first parameter shadows the builtin ``list``; this is
# Python 2 code (print statements, urllib2).
def getKZZRtSinaByPage(list, page):
    global content
    LOOP_COUNT=0
    urllink = rturl % (page)
    res_data = None
    # retry the request up to three times before giving up
    while LOOP_COUNT<3:
        try:
            #print urllink
            req = urllib2.Request(urllink,headers=rt_headers)
            res_data = urllib2.urlopen(req)
        except:
            if LOOP_COUNT==1:
                print "Exception kzz urlopen"
                print urllink
            time.sleep(0.5)
            LOOP_COUNT += 1
        else:
            break
    #print ("res_data", res_data)
    if res_data is None:
        print "Error: Fail to get request"
        content = ''
        return 'error'
    content = res_data.read()
    respInfo = res_data.info()
    # transparently decompress gzip'ed responses
    if( ("Content-Encoding" in respInfo) and (respInfo['Content-Encoding'] == "gzip")) :
        content = zlib.decompress(content, 16+zlib.MAX_WBITS);
    #print (content)
    if content=='null':
        return content
    if len(content)<=2:
        content = ''
        return 'error'
    # strip the enclosing [ ] of the JSON-ish array
    if content[0]=='[' and content[-1]==']':
        content = content [1:-1]
    # numeric columns to force-convert; 'settlement' is the previous close
    conv_clmn=['changepercent','trade','pricechange','open','high','low','settlement']
    # peel one {...} record off the front of `content` per iteration
    while 1:
        obj = re.match(r'{(.*?)},?(.*)', content)
        if obj is None:
            break
        record = obj.group(1)
        #print record
        content = obj.group(2)
        dict = {}
        ll = record.split(',')
        for item in ll:
            # the second group matches the (possibly quoted) value up to the
            # closing quote
            dictObj = re.match(r'(.*?):"?([^"]*)', item)
            if dictObj is None:
                print "Invalid item in dict", item
                break
            # strip the leading 'sh'/'sz' exchange prefix from symbol
            if dictObj.group(1)=='symbol':
                dict[dictObj.group(1)] = dictObj.group(2)[2:]
            # force numeric columns (change percentage etc.) to float
            elif dictObj.group(1) in conv_clmn:
                dict[dictObj.group(1)] = float(dictObj.group(2))
            else:
                dict[dictObj.group(1)] = dictObj.group(2)
        list.append(dict)
    return 'ok'
def getKZZRtSina(list):
    # Walk the paged quote API (pages 1..9), stopping early once a page
    # reports no more data ('null'). Results accumulate into `list`.
    page = 1
    while page < 10:
        if getKZZRtSinaByPage(list, page) == 'null':
            break
        page += 1
#['BONDCODE','SNAME','ZQNEW','YJL','ZGJZGJJZ','ZGJ_HQ','SWAPSCODE','SECURITYSHORTNAME','ZGJZGJ']
def output(kdf):
    """Print convertible bonds ranked by premium rate (YJL), ascending.

    kdf: DataFrame whose first two positional columns are bond code and bond
    name, with named columns ZQNEW (bond last price), YJL (premium rate),
    ZGJZGJJZ (current conversion value), ZGJ_HQ (underlying stock price) and
    ZGJZGJ (current conversion price).
    """
    fmt = u"%2d %6s %8s %8s %5.2f %5.2f %5.2f %5.2f"
    df = kdf.sort_values(['YJL'], 0, True)
    rank = 0
    for index, items in df.iterrows():
        # pad the name to a display width of 8 (gbk: CJK chars count as 2)
        # so the columns line up
        value = items[1]
        nmlen = len(items[1].encode('gbk'))
        if nmlen < 8:
            value = ' ' * (8 - nmlen) + value
        ZQNEW = items['ZQNEW']
        YJL = float(items['YJL'])
        ZGJZ = float(items['ZGJZGJJZ'])
        ZGJ_HQ = float(items['ZGJ_HQ'])
        ZGJZGJ = float(items['ZGJZGJ'])
        # bug fix: print the padded name (value); the original computed the
        # padding but then formatted the raw items[1], misaligning columns.
        # also avoid shadowing the builtin ``str``.
        line = fmt % (rank+1, items[0], value, ZQNEW, YJL, ZGJZ, ZGJ_HQ, ZGJZGJ)
        rank += 1
        print(line)
    return
def show_item(rank, items):
    """Print one bond row: rank, code, name, %-change, last price, then
    open/high/low expressed as % of previous close (raw prices when the bond
    has not traded), premium rate (YJL) and conversion value (ZGJZ).

    items may be None (prints nothing) or a dict/Series of quote fields as
    produced by getKZZRtSinaByPage.
    """
    fmt = "%2d %6s %8s %5.2f %5.2f %5.2f %5.2f %5.2f %5.2f %5.2f"
    if items is not None:
        # pad the name to a display width of 8 so columns line up
        value = items['name']
        nmlen = len(value)
        if nmlen < 8:
            value = ' ' * (8 - nmlen) + value
        code = items['code']
        cp = items['changepercent']
        trade = items['trade']
        # renamed from ``open``/``str`` to avoid shadowing builtins
        open_px = items['open']
        high = items['high']
        low = items['low']
        z_close = items['settlement']  # previous close
        # express open/high/low as percentage change from previous close
        open_p = (open_px - z_close) * 100 / z_close
        high_p = (high - z_close) * 100 / z_close
        low_p = (low - z_close) * 100 / z_close
        if high == 0:
            # no trades yet today - suppress the derived percentages
            open_p = 0
            high_p = 0
            low_p = 0
        YJL = (items['YJL'])
        ZGJZ = (items['ZGJZGJJZ'])
        if trade == 0:
            # untraded/suspended: show the raw prices instead of percentages
            line = fmt % (rank, code, value.decode('gbk'), cp, trade, open_px, high, low, YJL, ZGJZ)
        else:
            line = fmt % (rank, code, value.decode('gbk'), cp, trade, open_p, high_p, low_p, YJL, ZGJZ)
        print(line)
def output_rank(mgdf, priority):
fmt = " %6s %8s %-7s %-7s %-7s %-7s %-7s %-7s %-7s"
print fmt%('code', 'name', 'change', 'price', 'open', 'high', 'low', 'YJL', 'ZGJZ')
flag = False
sortitem = 'changepercent'
if param_config['Daoxu']==1:
flag = True
if param_config['Price']==1:
sortitem = 'trade'
elif param_config['YJL']==1:
sortitem = 'YJL'
elif param_config['ZGJZ']==1:
sortitem = 'ZGJZGJJZ'
df = mgdf.sort_values([sortitem],0, flag)
rank = 0
for code in priority:
#print code
items = df.ix[code]
#print items
show_item(rank, items)
print '=================================================='
print ''
for index,items in df.iterrows():
if param_config['ALL']==0 and rank>21:
break
rank += 1
show_item(rank, items)
return
# Global CLI flags, toggled by the command-line options in the
# __main__ block below:
#   Daoxu -> sort descending, Price -> sort by latest price,
#   YJL   -> sort by premium rate, ZGJZ -> sort by conversion value,
#   ALL   -> print every row instead of only the top ~22.
param_config = {
    "Daoxu":0,
    "Price":0,
    "YJL":0,
    "ZGJZ":0,
    "ALL":0
}
#Main
if __name__=="__main__":
    # Command-line switches (original comment was GBK-encoded Chinese:
    # "descending, price, premium rate"):
    #   -d descending sort, -p sort by price, -y sort by premium rate,
    #   -z sort by conversion value, -a show all rows.
    optlist, args = getopt.getopt(sys.argv[1:], 'dpyza')
    for option, value in optlist:
        if option in ["-d","--daoxu"]:
            param_config["Daoxu"] = 1
        elif option in ["-p","--price"]:
            param_config["Price"] = 1
        elif option in ["-y","--yjl"]:
            param_config["YJL"] = 1
        elif option in ["-z","--zgjz"]:
            param_config["ZGJZ"] = 1
        elif option in ["-a","--all"]:
            param_config["ALL"] = 1
        pass
    # Read the user's watch list: codes between a 'KZZ' line and an 'END'
    # line in debug/_self_define.txt; only 6-digit numeric codes are kept.
    priority = []
    flag = 0
    data_path = "debug/_self_define.txt"
    file = open(data_path, 'r')
    while 1:
        lines = file.readlines(100000)
        if not lines:
            break
        for line in lines:
            line=line.strip()
            if line=='KZZ':
                flag=1
                continue
            elif flag==1 and line=='END':
                break
            if flag==0:
                continue
            code=line.strip()
            if len(code)!=6:
                continue;
            if not code.isdigit():
                continue;
            priority.append(code)
    file.close()
    # Page through the remote convertible-bond list (at most 10 requests).
    # urlfmt, send_headers, getKZZConnect and getFilterZhai are defined
    # earlier in this file (outside this excerpt).
    req_count=0
    curpage = 1
    kzzlist = []
    content=''
    totalpage = 0
    while 1:
        req_count += 1
        if req_count>10:
            break
        content = getKZZConnect(urlfmt, send_headers, curpage)
        if content=='':
            break
        dataObj = re.match('^iGeILSKk={pages:(\d+),data:\[(.*)\]}', content)
        if dataObj is None:
            print "Content format not match"
            break
        if totalpage < 1:
            totalpage = int(dataObj.group(1))
        if curpage>totalpage:
            break
        curpage += 1
        # Parse each data item of this page (translated from GBK comment).
        getFilterZhai(dataObj.group(2), 1, kzzlist)
        #print curpage, totalpage
        #break
    if len(kzzlist)==0:
        print "Not find data"
        exit()
    print datetime.datetime.now()
    # NOTE(review): this full key list is immediately replaced by the
    # shorter one below; it appears to be kept only as documentation.
    keylist = ["STARTDATE", "QSCFJ", "TEXCH", "AISSUEVOL", "ISSUEPRICE", "DELISTDATE",
        "BCODE", "SWAPSCODE", "CORRESNAME", "BONDCODE", "PB", "SNAME", "PARVALUE", "MarketType",
        "LISTDATE", "HSCFJ", "SECURITYSHORTNAME", "GDYX_STARTDATE", "LUCKRATE", "ZGJZGJJZ",
        "LIMITBUYIPUB", "MEMO", "ZGJ", "ZQHDATE", "YJL", "FSTPLACVALPERSTK", "ZQNEW",
        "CORRESCODE", "SWAPPRICE", "ZGJ_HQ", "ZGJZGJ"]
    keylist = ['BONDCODE','SNAME','ZQNEW','YJL','ZGJZGJJZ','ZGJ_HQ','SWAPSCODE','SECURITYSHORTNAME','ZGJZGJ']
    kzzdf = pd.DataFrame(kzzlist, columns=keylist)
    kzzdf1 = kzzdf.set_index('BONDCODE')
    sinakey = ['sell', 'volume', 'buy', 'name', 'ticktime', 'symbol', 'pricechange', 'changepercent', 'trade', 'high', 'amount','code', 'low', 'settlement', 'open']
    sina_rt = []
    getKZZRtSina(sina_rt)
    if len(sina_rt)==0:
        print "Get Sina KZZ fail"
        exit()
    sinadf=pd.DataFrame(sina_rt, columns=sinakey)
    sinadf=sinadf.set_index('symbol')
    # Merge the two DataFrames directly on their (bond code) indexes
    # (translated from GBK comment).
    mgdf = pd.merge(sinadf, kzzdf1, how='left', left_index=True, right_index=True)
    #output(kzzdf)
    # for indexs in kzzdf.index:
    #     print(kzzdf.loc[indexs].values[0:-1])
    output_rank(mgdf, priority)
| gpl-2.0 |
alexeyum/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
    """Exercise the BaseEnsemble plumbing through BaggingClassifier."""
    clf = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
    data = load_iris()
    clf.fit(data.data, data.target)
    # Empty the estimator list, then rebuild it by hand via the
    # private factory: three appended plus one detached instance.
    clf.estimators_ = []
    for _ in range(3):
        clf._make_estimator()
    clf._make_estimator(append=False)
    assert_equal(3, len(clf))
    assert_equal(3, len(clf.estimators_))
    assert_true(isinstance(clf[0], Perceptron))
def test_base_zero_n_estimators():
    """BaseEnsemble must reject n_estimators <= 0 with a ValueError."""
    data = load_iris()
    bad = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
    expected = "n_estimators must be greater than zero, got 0."
    assert_raise_message(ValueError, expected, bad.fit, data.data, data.target)
| bsd-3-clause |
Oscarlight/PiNN_Caffe2 | ac_qv_api.py | 1 | 11919 | import caffe2_paths
import os
import pickle
from caffe2.python import (
workspace, layer_model_helper, schema, optimizer, net_drawer
)
import caffe2.python.layer_model_instantiator as instantiator
import numpy as np
from pinn.adjoint_mlp_lib import build_adjoint_mlp, init_model_with_schemas
import pinn.data_reader as data_reader
import pinn.preproc as preproc
import pinn.parser as parser
import pinn.visualizer as visualizer
import pinn.exporter as exporter
from shutil import copyfile
# import logging
import matplotlib.pyplot as plt
class ACQVModel:
    """Adjoint-MLP model for AC charge/voltage (Q-V) fitting on caffe2.

    Wraps the full workflow: writing preprocessed training data to a
    minidb database, building train/eval/predict nets, training with
    optional periodic evaluation, and saving/plotting loss reports.
    """
    def __init__(
        self,
        model_name,
        input_dim=1,
        output_dim=1,
    ):
        """Create the underlying caffe2 model schema for the given dims."""
        self.model_name = model_name
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.model = init_model_with_schemas(
            model_name, self.input_dim, self.output_dim)
        # data_tag -> [minidb file name, number of examples]
        self.input_data_store = {}
        self.preproc_param = {}
        # net name ('train_net'/'eval_net'/'pred_net') -> caffe2 net
        self.net_store = {}
        self.reports = {'epoch':[],'train_loss':[], 'eval_loss':[]}

    def add_data(
        self,
        data_tag,
        data_arrays,
        preproc_param,
        override=True,
    ):
        '''
        data_arrays are in the order of origin_input, adjoint_label
        origin_input and adjoint_label must be numpy arrays
        '''
        #check length and dimensions of origin input and adjoint label
        assert len(data_arrays) == 2, 'Incorrect number of input data'
        voltages = data_arrays[0]
        capas = data_arrays[1]
        assert voltages.shape == capas.shape, 'Mismatch dimensions'
        #Set preprocess parameters and database name
        self.preproc_param = preproc_param
        self.pickle_file_name = self.model_name + '_preproc_param' + '.p'
        db_name = self.model_name + '_' + data_tag + '.minidb'
        if os.path.isfile(db_name):
            if override:
                print("XXX Delete the old database...")
                os.remove(db_name)
                os.remove(self.pickle_file_name)
            else:
                raise Exception('Encounter database with the same name. ' +
                    'Choose the other model name or set override to True.')
        print("+++ Create a new database...")
        self.preproc_param.setdefault('max_loss_scale', 1.)
        # Persist the preprocessing parameters next to the database so
        # prediction can undo the normalization later.
        pickle.dump(
            self.preproc_param,
            open(self.pickle_file_name, 'wb')
        )
        #Preprocess the data
        voltages, capas = preproc.ac_qv_preproc(
            voltages, capas,
            self.preproc_param['scale'],
            self.preproc_param['vg_shift']
        )
        # Only expand the dim if the number of dimension is 1
        origin_input = np.expand_dims(
            voltages, axis=1) if voltages.ndim == 1 else voltages
        adjoint_label = np.expand_dims(
            capas, axis=1) if capas.ndim == 1 else capas
        # Create adjoint_input data
        adjoint_input = np.ones((origin_input.shape[0], 1))
        # Set the data type to np float for origin input, adjoint input, adjoint label
        origin_input = origin_input.astype(np.float32)
        adjoint_input = adjoint_input.astype(np.float32)
        adjoint_label = adjoint_label.astype(np.float32)
        # Write to database
        data_reader.write_db(
            'minidb', db_name,
            [origin_input, adjoint_input, adjoint_label]
        )
        self.input_data_store[data_tag] = [db_name, origin_input.shape[0]]

    # add_data_base: add the database file directly
    def add_database(
        self,
        data_tag,
        db_name,
        num_example,
        preproc_param_pickle_name,
    ):
        """Register an existing minidb file and copy its preproc pickle
        so it is stored alongside this model's files."""
        self.input_data_store[data_tag] = [db_name, num_example]
        # Save the preproc_param with the model
        self.pickle_file_name = self.model_name + '_' + preproc_param_pickle_name
        copyfile(preproc_param_pickle_name, self.pickle_file_name)

    def build_nets(
        self,
        hidden_dims,
        batch_size=1,
        optim_method = 'AdaGrad',
        optim_param = {'alpha':0.01, 'epsilon':1e-4},
    ):
        """Build the training net (and eval net when 'eval' data exists)
        plus the predict net, and register them in self.net_store.

        NOTE(review): optim_param is a mutable default argument; it is
        only read here, so this is benign, but callers must not mutate it.
        """
        assert len(self.input_data_store) > 0, 'Input data store is empty.'
        assert 'train' in self.input_data_store, 'Missing training data.'
        self.batch_size = batch_size
        # Build the date reader net for train net
        input_data_train = data_reader.build_input_reader(
            self.model,
            self.input_data_store['train'][0],
            'minidb',
            ['origin_input', 'adjoint_input', 'label'],
            batch_size=batch_size,
            data_type='train',
        )
        if 'eval' in self.input_data_store:
            # Build the data reader net for eval net
            input_data_eval = data_reader.build_input_reader(
                self.model,
                self.input_data_store['eval'][0],
                'minidb',
                ['origin_input', 'adjoint_input', 'label'],
                batch_size=batch_size,
                data_type='eval',
            )
        # Build the computational nets
        # Create train net
        self.model.input_feature_schema.origin_input.set_value(
            input_data_train[0].get(), unsafe=True)
        self.model.input_feature_schema.adjoint_input.set_value(
            input_data_train[1].get(), unsafe=True)
        self.model.trainer_extra_schema.label.set_value(
            input_data_train[2].get(), unsafe=True)
        self.origin_pred, self.adjoint_pred, self.loss = build_adjoint_mlp(
            self.model,
            input_dim = self.input_dim,
            hidden_dims = hidden_dims,
            output_dim = self.output_dim,
            optim=_build_optimizer(
                optim_method, optim_param),
        )
        train_init_net, train_net = instantiator.generate_training_nets(self.model)
        workspace.RunNetOnce(train_init_net)
        workspace.CreateNet(train_net)
        self.net_store['train_net'] = train_net
        pred_net = instantiator.generate_predict_net(self.model)
        workspace.CreateNet(pred_net)
        self.net_store['pred_net'] = pred_net
        if 'eval' in self.input_data_store:
            # Create eval net: rebind the schema blobs to the eval reader
            # before instantiating it.
            self.model.input_feature_schema.origin_input.set_value(
                input_data_eval[0].get(), unsafe=True)
            self.model.input_feature_schema.adjoint_input.set_value(
                input_data_eval[1].get(), unsafe=True)
            self.model.trainer_extra_schema.label.set_value(
                input_data_eval[2].get(), unsafe=True)
            eval_net = instantiator.generate_eval_net(self.model)
            workspace.CreateNet(eval_net)
            self.net_store['eval_net'] = eval_net

    def train_with_eval(
        self,
        num_epoch=1,
        report_interval=0,
        eval_during_training=False,
    ):
        ''' Fastest mode: report_interval = 0
        Medium mode: report_interval > 0, eval_during_training=False
        Slowest mode: report_interval > 0, eval_during_training=True
        '''
        num_batch_per_epoch = int(
            self.input_data_store['train'][1] /
            self.batch_size
        )
        if not self.input_data_store['train'][1] % self.batch_size == 0:
            # Round up so every example is seen at least once per epoch.
            num_batch_per_epoch += 1
            print('[Warning]: batch_size cannot be divided. ' +
                'Run on {} example instead of {}'.format(
                    num_batch_per_epoch * self.batch_size,
                    self.input_data_store['train'][1]
                )
            )
        print('<<< Run {} iteration'.format(num_epoch * num_batch_per_epoch))
        train_net = self.net_store['train_net']
        if report_interval > 0:
            print('>>> Training with Reports')
            num_eval = int(num_epoch / report_interval)
            num_unit_iter = int((num_batch_per_epoch * num_epoch)/num_eval)
            if eval_during_training and 'eval_net' in self.net_store:
                print('>>> Training with Eval Reports (Slowest mode)')
                eval_net = self.net_store['eval_net']
            for i in range(num_eval):
                workspace.RunNet(
                    train_net.Proto().name,
                    num_iter=num_unit_iter
                )
                self.reports['epoch'].append((i + 1) * report_interval)
                train_loss = np.asscalar(schema.FetchRecord(self.loss).get())
                self.reports['train_loss'].append(train_loss)
                if eval_during_training and 'eval_net' in self.net_store:
                    workspace.RunNet(
                        eval_net.Proto().name,
                        num_iter=num_unit_iter)
                    # NOTE(review): the eval loss is fetched from the same
                    # self.loss record right after running the eval net --
                    # confirm the blob is refreshed by the eval pass.
                    eval_loss = np.asscalar(schema.FetchRecord(self.loss).get())
                    self.reports['eval_loss'].append(eval_loss)
        else:
            print('>>> Training without Reports (Fastest mode)')
            num_iter = num_epoch*num_batch_per_epoch
            workspace.RunNet(
                train_net,
                num_iter=num_iter
            )
        print('>>> Saving test model')
        exporter.save_net(
            self.net_store['pred_net'],
            self.model,
            self.model_name+'_init', self.model_name+'_predict'
        )

    def draw_nets(self):
        """Render every stored net to '<net name>.png' via pydot."""
        for net_name in self.net_store:
            net = self.net_store[net_name]
            graph = net_drawer.GetPydotGraph(net.Proto().op, rankdir='TB')
            with open(net.Name() + ".png",'wb') as f:
                f.write(graph.create_png())

    def plot_loss_trend(self):
        """Show train loss (and eval loss, when recorded) vs. epoch."""
        plt.plot(self.reports['epoch'], self.reports['train_loss'])
        if len(self.reports['eval_loss']) > 0:
            plt.plot(self.reports['epoch'], self.reports['eval_loss'], 'r--')
        plt.show()

    def save_loss_trend(self,save_name):
        """Dump the recorded losses to '<save_name>_loss_trend.csv'."""
        if len(self.reports['eval_loss'])>0:
            f = open(save_name+'_loss_trend.csv', "w")
            f.write(
                "{},{},{}\n".format(
                    "epoch", "train_loss","eval_loss"))
            for x in zip(
                self.reports['epoch'],
                self.reports['train_loss'],
                self.reports['eval_loss']):
                f.write("{},{},{}\n".format(
                    x[0], x[1], x[2]))
            f.close()
        else:
            f = open(save_name+'_loss_trend.csv', "w")
            f.write("{},{}\n".format("epoch", "train_loss"))
            for x in zip(
                self.reports['epoch'],
                self.reports['train_loss']):
                f.write("{},{}\n".format(x[0], x[1]))
            f.close()
# --------------------------------------------------------
# ---------------- Global functions -------------------
# --------------------------------------------------------
def predict_qs(model_name, terminal, voltages):
    """Predict charges and their gradients with a previously saved model.

    model_name: prefix used when the model/preproc files were saved.
    terminal: terminal tag embedded in the preproc pickle file name.
    voltages: numpy array; per the original comment, shape
        (batch size, input_dimension) with Vg in the first column and Vd
        in the second.
    Returns (original_qs, original_gradients), de-normalized back to the
    original scale.
    """
    workspace.ResetWorkspace()
    # requires voltages is an numpy array of size
    # (batch size, input_dimension)
    # the first dimension is Vg and the second dimension is Vd
    # preprocess the origin input and create adjoint input
    # NOTE(review): add_data() saves the pickle as
    # '<model_name>_preproc_param.p' (no terminal tag) -- confirm which
    # naming convention the saved models actually use.
    preproc_param = pickle.load(
        open(model_name+'_' + terminal + '_preproc_param.p', "rb" )
    )
    # NOTE(review): voltages[0].shape[0] is the input dimension, not the
    # batch size -- presumably voltages.shape[0] was intended; verify.
    dummy_qs = np.zeros(voltages[0].shape[0])
    voltages, dummy_qs = preproc.ac_qv_preproc(
        voltages, dummy_qs,
        preproc_param['scale'],
        preproc_param['vg_shift']
    )
    adjoint_input = np.ones((voltages.shape[0], 1))
    # Expand dimensions of input and set data type of inputs
    origin_input = np.expand_dims(
        voltages, axis=1)
    origin_input = origin_input.astype(np.float32)
    adjoint_input = adjoint_input.astype(np.float32)
    # NOTE(review): 'origin_input' is computed above but the raw
    # 'voltages' array is fed below -- confirm which shape/dtype the net
    # actually expects before changing this.
    workspace.FeedBlob('DBInput_train/origin_input', voltages)
    workspace.FeedBlob('DBInput_train/adjoint_input', adjoint_input)
    pred_net = exporter.load_net(model_name+'_init', model_name+'_predict')
    workspace.RunNet(pred_net)
    qs = np.squeeze(workspace.FetchBlob('origin/NanCheck/origin_pred'))
    gradients = np.squeeze(workspace.FetchBlob('adjoint/fc0/output'))
    # Undo the normalization applied during preprocessing.
    restore_integral_func, restore_gradient_func = preproc.get_restore_q_func(
        preproc_param['scale'],
        preproc_param['vg_shift']
    )
    original_qs = restore_integral_func(qs)
    original_gradients = restore_gradient_func(gradients)
    return original_qs, original_gradients
def plot_iv(
    vg, vd, ids,
    vg_comp = None, vd_comp = None, ids_comp = None,
    styles = ['vg_major_linear', 'vd_major_linear', 'vg_major_log', 'vd_major_log']
):
    """Plot I-V curves in every requested style, optionally overlaying a
    comparison dataset (vg_comp/vd_comp/ids_comp)."""
    plotters = (
        ('vg_major_linear', visualizer.plot_linear_Id_vs_Vd_at_Vg),
        ('vd_major_linear', visualizer.plot_linear_Id_vs_Vg_at_Vd),
        ('vg_major_log', visualizer.plot_log_Id_vs_Vd_at_Vg),
        ('vd_major_log', visualizer.plot_log_Id_vs_Vg_at_Vd),
    )
    for style, plot_func in plotters:
        if style in styles:
            plot_func(
                vg, vd, ids,
                vg_comp=vg_comp, vd_comp=vd_comp, ids_comp=ids_comp,
            )
def _build_optimizer(optim_method, optim_param):
    """Instantiate a caffe2 optimizer by name.

    optim_method: one of 'AdaGrad', 'SgdOptimizer', 'Adam'.
    optim_param: keyword arguments forwarded to the optimizer constructor.
    Raises Exception for an unknown method name.
    """
    # Lazy lookup table (attribute resolved only on success) so an
    # unknown method name never touches the optimizer module.
    factory_names = {
        'AdaGrad': 'AdagradOptimizer',
        'SgdOptimizer': 'SgdOptimizer',
        'Adam': 'AdamOptimizer',
    }
    if optim_method not in factory_names:
        # FIX: corrected typo in the error message ('foget' -> 'forget').
        raise Exception(
            'Did you forget to implement {}?'.format(optim_method))
    return getattr(optimizer, factory_names[optim_method])(**optim_param)
| mit |
miltondp/ukbrest | tests/test_rest_api_phenotype.py | 1 | 148176 | import io
import json
import unittest
import tempfile
from base64 import b64encode
from ukbrest import app
import pandas as pd
from tests.settings import POSTGRESQL_ENGINE
from tests.utils import get_repository_path, DBTest
from ukbrest.common.pheno2sql import Pheno2SQL
from ukbrest.common.utils.auth import PasswordHasher
class TestRestApiPhenotype(DBTest):
def _make_yaml_request(self, yaml_def, section, n_expected_rows, expected_columns):
    """POST a YAML query definition and return the parsed CSV result.

    Asserts a 200 response and that the result has the expected row
    count and column set; returns the DataFrame indexed by eid.
    """
    response = self.app.post('/ukbrest/api/v1.0/query', data=
        {
            'file': (io.BytesIO(yaml_def), 'data.yaml'),
            'section': section,
        }, headers={'accept': 'text/csv'})
    assert response.status_code == 200, response.status_code
    pheno_file = pd.read_csv(
        io.StringIO(response.data.decode('utf-8')),
        header=0, index_col='eid', dtype=str,
        na_values='', keep_default_na=False,
    )
    assert pheno_file is not None
    expected_shape = (n_expected_rows, len(expected_columns))
    assert pheno_file.shape == expected_shape, pheno_file.shape
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(x in expected_columns for x in pheno_file.columns)
    return pheno_file
def setUp(self, filename=None, load_data=True, wipe_database=True, **kwargs):
    """Prepare the fixture: optionally wipe the DB, load example data,
    register the Pheno2SQL instance with the Flask app, build a client."""
    if wipe_database:
        super(TestRestApiPhenotype, self).setUp()
    pheno2sql = self._get_p2sql(filename, **kwargs)
    if load_data:
        pheno2sql.load_data()
    app.app.config['pheno2sql'] = pheno2sql
    self.configureApp()
def _get_p2sql(self, filename, **kwargs):
    """Build a Pheno2SQL instance from *filename*.

    filename may be None (default example02.csv), a str, or a
    tuple/list of repository-relative paths; anything else raises
    ValueError. Default db_uri / n_columns_per_table are filled in
    unless supplied via kwargs.
    """
    if filename is None:
        data_file = get_repository_path('pheno2sql/example02.csv')
    elif isinstance(filename, (tuple, list)):
        data_file = tuple(get_repository_path(f) for f in filename)
    elif isinstance(filename, str):
        data_file = get_repository_path(filename)
    else:
        raise ValueError('filename unknown type')
    kwargs.setdefault('db_uri', POSTGRESQL_ENGINE)
    kwargs.setdefault('n_columns_per_table', 2)
    return Pheno2SQL(data_file, **kwargs)
def configureApp(self, app_func=None):
    """Put the Flask app into testing mode with auth disabled and create
    a test client. *app_func*, when given, receives the Flask app for
    extra configuration (e.g. enabling auth)."""
    flask_app = app.app
    flask_app.config['testing'] = True
    flask_app.config['auth'] = None
    if app_func is not None:
        app_func(flask_app)
    self.app = flask_app.test_client()
def configureAppWithAuth(self, user_pass_line):
    """Configure the app with HTTP basic auth backed by a temp passwd file.

    user_pass_line: one or more 'user: password' lines.
    """
    # FIX: write the credentials through the open handle instead of the
    # original close-then-reopen dance, which briefly left an empty file
    # on disk and duplicated the open.
    f = tempfile.NamedTemporaryFile(mode='w', delete=False)
    with f:
        f.write(user_pass_line)
    # NOTE(review): the temp file is never deleted (PasswordHasher may
    # read it lazily); acceptable in tests, but a tearDown cleanup would
    # be tidier.
    ph = PasswordHasher(f.name, method='pbkdf2:sha256')

    def conf(a):
        a.config['auth'] = ph.setup_http_basic_auth()

    self.configureApp(conf)
def _get_http_basic_auth_header(self, user, password):
    """Return an HTTP Basic 'Authorization' header for user/password."""
    credentials = '{}:{}'.format(user, password).encode()
    token = b64encode(credentials).decode("ascii")
    return {'Authorization': 'Basic %s' % token}
def test_not_found(self):
    """The bare API root is not a valid endpoint."""
    resp = self.app.get('/ukbrest/api/v1.0/')
    assert resp.status_code == 404, resp.status_code
def test_phenotype_fields(self):
    """GET /phenotype/fields returns the 8 known field names as JSON."""
    resp = self.app.get('/ukbrest/api/v1.0/phenotype/fields')
    assert resp.status_code == 200, resp.status_code
    fields = json.loads(resp.data.decode('utf-8'))
    assert len(fields) == 8
def test_phenotype_fields_http_auth_no_credentials(self):
    """Without credentials an auth-enabled endpoint responds 401."""
    self.configureAppWithAuth('user: thepassword2')
    resp = self.app.get('/ukbrest/api/v1.0/phenotype/fields')
    assert resp.status_code == 401, resp.status_code
def test_phenotype_fields_http_auth_with_credentials(self):
    """With valid credentials the fields endpoint works normally."""
    self.configureAppWithAuth('user: thepassword2')
    auth_header = self._get_http_basic_auth_header('user', 'thepassword2')
    resp = self.app.get('/ukbrest/api/v1.0/phenotype/fields',
                        headers=auth_header)
    assert resp.status_code == 200, resp.status_code
    fields = json.loads(resp.data.decode('utf-8'))
    assert len(fields) == 8
def test_phenotype_fields_http_auth_multiple_users(self):
    """Each user listed in the passwd data can authenticate independently."""
    self.configureAppWithAuth(
        'user: thepassword2\n'
        'another_user: another_password'
    )
    for user, password in (('user', 'thepassword2'),
                           ('another_user', 'another_password')):
        resp = self.app.get(
            '/ukbrest/api/v1.0/phenotype/fields',
            headers=self._get_http_basic_auth_header(user, password),
        )
        assert resp.status_code == 200, resp.status_code
        fields = json.loads(resp.data.decode('utf-8'))
        assert len(fields) == 8
def test_phenotype_query_single_column_format_csv(self):
    """A single-column query returns a CSV indexed by eid."""
    columns = ['c21_0_0']
    resp = self.app.get('/ukbrest/api/v1.0/phenotype',
                        query_string={'columns': columns},
                        headers={'accept': 'text/csv'})
    assert resp.status_code == 200, resp.status_code
    csv_file = pd.read_csv(io.StringIO(resp.data.decode('utf-8')), index_col='eid', dtype=str)
    assert csv_file is not None
    assert not csv_file.empty
    assert csv_file.shape == (4, 1)
    assert csv_file.index.name == 'eid'
    assert len(csv_file.index) == 4
    assert all(x in csv_file.index for x in range(1, 4 + 1))
    assert len(csv_file.columns) == len(columns)
    assert all(x in columns for x in csv_file.columns)
    # example02.csv stores 'Option number <eid>' in c21_0_0
    for eid in range(1, 4 + 1):
        assert csv_file.loc[eid, 'c21_0_0'] == 'Option number {}'.format(eid)
def test_phenotype_query_error_column_does_not_exist(self):
    """Requesting an unknown column yields a 400 with an explanatory message."""
    params = {'columns': ['nonexistent_column']}
    resp = self.app.get('/ukbrest/api/v1.0/phenotype',
                        query_string=params, headers={'accept': 'text/csv'})
    assert resp.status_code == 400, resp.status_code
    data = json.load(io.StringIO(resp.data.decode('utf-8')))
    assert 'message' in data, data
    assert 'column "nonexistent_column" does not exist' in data['message'], data['message']
    assert 'output' not in data, data
def test_phenotype_query_error_column_does_not_exist_standard_column_name(self):
    """An unknown but well-formed column name is reported as a SQL error."""
    params = {'columns': ['c999_0_0']}
    resp = self.app.get('/ukbrest/api/v1.0/phenotype',
                        query_string=params, headers={'accept': 'text/csv'})
    assert resp.status_code == 400, resp.status_code
    data = json.load(io.StringIO(resp.data.decode('utf-8')))
    assert 'status_code' in data, data
    assert data['status_code'] == 400, data['status_code']
    assert 'error_type' in data, data
    assert data['error_type'] == 'SQL_EXECUTION_ERROR'
    assert 'message' in data, data
    assert 'column "c999_0_0" does not exist' in data['message'], data['message']
    assert 'output' not in data, data
def test_phenotype_query_error_cannot_connect_to_database(self):
    """An unreachable database host surfaces as a 500 'UNKNOWN' error."""
    self.setUp(load_data=False, db_uri='postgresql://test:test@wronghost:5432/ukb')
    params = {'columns': ['c21_0_0', 'invalid value here']}
    resp = self.app.get('/ukbrest/api/v1.0/phenotype',
                        query_string=params, headers={'accept': 'text/csv'})
    assert resp.status_code == 500, resp.status_code
    data = json.load(io.StringIO(resp.data.decode('utf-8')))
    assert 'status_code' in data, data
    assert data['status_code'] == 500, data['status_code']
    assert 'error_type' in data, data
    assert data['error_type'] == 'UNKNOWN', data['error_type']
    assert 'message' in data, data
    assert 'psycopg2.OperationalError' in data['message'], data['message']
    assert 'wronghost' in data['message'], data['message']
def test_phenotype_query_multiple_column_format_csv(self):
    """Two columns can be requested at once in CSV format."""
    columns = ['c21_0_0', 'c48_0_0']
    resp = self.app.get('/ukbrest/api/v1.0/phenotype',
                        query_string={'columns': columns},
                        headers={'accept': 'text/csv'})
    assert resp.status_code == 200, resp.status_code
    csv_file = pd.read_csv(io.StringIO(resp.data.decode('utf-8')), index_col='eid', dtype=str)
    assert csv_file is not None
    assert not csv_file.empty
    assert csv_file.shape == (4, 2)
    assert csv_file.index.name == 'eid'
    assert len(csv_file.index) == 4
    assert all(x in csv_file.index for x in range(1, 4 + 1))
    assert len(csv_file.columns) == len(columns)
    assert all(x in columns for x in csv_file.columns)
    expected = {
        'c21_0_0': ['Option number 1', 'Option number 2', 'Option number 3', 'Option number 4'],
        'c48_0_0': ['2011-08-14', '2016-11-30', '2010-01-01', '2011-02-15'],
    }
    for col, values in expected.items():
        for eid, value in zip(range(1, 4 + 1), values):
            assert csv_file.loc[eid, col] == value
def test_phenotype_query_multiple_column_format_pheno(self):
    """plink2 output is tab-separated with FID index plus an IID column."""
    columns = ['c21_0_0', 'c48_0_0']
    resp = self.app.get('/ukbrest/api/v1.0/phenotype',
                        query_string={'columns': columns},
                        headers={'accept': 'text/plink2'})
    assert resp.status_code == 200, resp.status_code
    pheno_file = pd.read_csv(io.StringIO(resp.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str)
    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (4, 2 + 1)  # plus IID
    assert pheno_file.index.name == 'FID'
    assert len(pheno_file.index) == 4
    assert all(x in pheno_file.index for x in range(1, 4 + 1))
    expected_columns = ['IID'] + columns
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(x in expected_columns for x in pheno_file.columns)
    expected = {
        'IID': ['1', '2', '3', '4'],
        'c21_0_0': ['Option number 1', 'Option number 2', 'Option number 3', 'Option number 4'],
        'c48_0_0': ['2011-08-14', '2016-11-30', '2010-01-01', '2011-02-15'],
    }
    for col, values in expected.items():
        for eid, value in zip(range(1, 4 + 1), values):
            assert pheno_file.loc[eid, col] == value
def test_phenotype_query_multiple_column_renaming(self):
    """Columns can be renamed with either 'as alias' or bare-alias syntax."""
    columns = ['c21_0_0 as c21', 'c31_0_0 c31', 'c48_0_0']
    resp = self.app.get('/ukbrest/api/v1.0/phenotype',
                        query_string={'columns': columns},
                        headers={'accept': 'text/plink2'})
    assert resp.status_code == 200, resp.status_code
    pheno_file = pd.read_csv(io.StringIO(resp.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str)
    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (4, 3 + 1)  # plus IID
    assert pheno_file.index.name == 'FID'
    assert len(pheno_file.index) == 4
    assert all(x in pheno_file.index for x in range(1, 4 + 1))
    expected_columns = ['IID'] + ['c21', 'c31', 'c48_0_0']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(x in expected_columns for x in pheno_file.columns)
    expected = {
        'IID': ['1', '2', '3', '4'],
        'c21': ['Option number 1', 'Option number 2', 'Option number 3', 'Option number 4'],
        'c31': ['2012-01-05', '2015-12-30', '2007-03-19', '2002-05-09'],
        'c48_0_0': ['2011-08-14', '2016-11-30', '2010-01-01', '2011-02-15'],
    }
    for col, values in expected.items():
        for eid, value in zip(range(1, 4 + 1), values):
            assert pheno_file.loc[eid, col] == value
def test_phenotype_query_filtering_with_column_no_mentioned_in_select(self):
    """Filters may reference columns absent from the SELECT list."""
    params = {
        'columns': ['c21_0_0 as c21', 'c21_2_0 c21_2'],
        'filters': ["c46_0_0 < 0", "c48_0_0 > '2011-01-01'"],
    }
    resp = self.app.get('/ukbrest/api/v1.0/phenotype', query_string=params)
    assert resp.status_code == 200, resp.status_code
    pheno_file = pd.read_csv(io.StringIO(resp.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str)
    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape[0] == 2
    assert pheno_file.shape[1] == 2 + 1  # plus IID
    assert pheno_file.index.name == 'FID'
    assert len(pheno_file.index) == 2
    assert all(x in pheno_file.index for x in (1, 2))
    expected_columns = ['IID'] + ['c21', 'c21_2']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(x in expected_columns for x in pheno_file.columns)
    # IID must stay the leftmost column
    assert pheno_file.columns.tolist()[0] == 'IID'
    expected = {
        'IID': ['1', '2'],
        'c21': ['Option number 1', 'Option number 2'],
        'c21_2': ['Yes', 'No'],
    }
    for col, values in expected.items():
        for eid, value in zip((1, 2), values):
            assert pheno_file.loc[eid, col] == value
def test_phenotype_query_multiple_column_integer_values(self):
    """Integer and float columns are returned verbatim as text."""
    columns = ['c34_0_0', 'c46_0_0', 'c47_0_0']
    resp = self.app.get('/ukbrest/api/v1.0/phenotype',
                        query_string={'columns': columns},
                        headers={'accept': 'text/plink2'})
    assert resp.status_code == 200, resp.status_code
    pheno_file = pd.read_csv(io.StringIO(resp.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str)
    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (4, 3 + 1)  # plus IID
    assert pheno_file.index.name == 'FID'
    assert len(pheno_file.index) == 4
    assert all(x in pheno_file.index for x in range(1, 4 + 1))
    expected_columns = ['IID'] + columns
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(x in expected_columns for x in pheno_file.columns)
    expected = {
        'IID': ['1', '2', '3', '4'],
        'c34_0_0': ['21', '12', '1', '17'],
        'c46_0_0': ['-9', '-2', '-7', '4'],
        'c47_0_0': ['45.55412', '-0.55461', '-5.32471', '55.19832'],
    }
    for col, values in expected.items():
        for eid, value in zip(range(1, 4 + 1), values):
            assert pheno_file.loc[eid, col] == value
def test_phenotype_query_multiple_column_integer_values_with_nan(self):
    """Missing integer values come back as the literal string 'NA'."""
    self.setUp('pheno2sql/example06_nan_integer.csv')
    columns = ['c34_0_0', 'c46_0_0', 'c47_0_0']
    resp = self.app.get('/ukbrest/api/v1.0/phenotype',
                        query_string={'columns': columns},
                        headers={'accept': 'text/plink2'})
    assert resp.status_code == 200, resp.status_code
    # keep 'NA' as text: disable default NaN parsing
    pheno_file = pd.read_csv(io.StringIO(resp.data.decode('utf-8')), sep='\t', na_values='',
                             keep_default_na=False, index_col='FID', dtype=str)
    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (4, 3 + 1)  # plus IID
    assert pheno_file.index.name == 'FID'
    assert len(pheno_file.index) == 4
    assert all(x in pheno_file.index for x in range(1, 4 + 1))
    expected_columns = ['IID'] + columns
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(x in expected_columns for x in pheno_file.columns)
    expected = {
        'IID': ['1', '2', '3', '4'],
        'c34_0_0': ['21', '12', '1', '17'],
        'c46_0_0': ['-9', 'NA', '-7', '4'],
        'c47_0_0': ['45.55412', '-0.55461', '-5.32471', '55.19832'],
    }
    for col, values in expected.items():
        for eid, value in zip(range(1, 4 + 1), values):
            assert pheno_file.loc[eid, col] == value
def test_phenotype_query_multiple_column_integer_values_with_nan_using_columns_renaming_with_as(self):
    """Renaming columns with lowercase 'as' keeps NaN integer values rendered as 'NA'."""
    # Prepare
    self.setUp('pheno2sql/example06_nan_integer.csv')
    columns = ['c34_0_0 as c34', 'c46_0_0 as c46', 'c47_0_0 as c47']

    # Run
    response = self.app.get('/ukbrest/api/v1.0/phenotype',
                            query_string={'columns': columns},
                            headers={'accept': 'text/plink2'})

    # Validate
    assert response.status_code == 200, response.status_code
    # na_values='' + keep_default_na=False preserve the literal 'NA' strings
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', na_values='',
                             keep_default_na=False, index_col='FID', dtype=str)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (4, 3 + 1)  # plus IID
    assert pheno_file.index.name == 'FID'
    assert len(pheno_file.index) == 4
    assert set(range(1, 4 + 1)).issubset(pheno_file.index)

    expected_columns = ['IID', 'c34', 'c46', 'c47']  # renamed columns only
    assert len(pheno_file.columns) == len(expected_columns)
    assert not set(pheno_file.columns) - set(expected_columns)

    expected_values = {
        'IID': ('1', '2', '3', '4'),
        'c34': ('21', '12', '1', '17'),
        'c46': ('-9', 'NA', '-7', '4'),
        'c47': ('45.55412', '-0.55461', '-5.32471', '55.19832'),
    }
    for column, values in expected_values.items():
        for row, value in enumerate(values, start=1):
            assert pheno_file.loc[row, column] == value, (row, column)
def test_phenotype_query_multiple_column_integer_values_with_nan_using_columns_renaming_with_as_uppercase(self):
    """The 'AS' rename keyword must be accepted case-insensitively (uppercase here)."""
    # Prepare
    self.setUp('pheno2sql/example06_nan_integer.csv')
    # only the middle column uses the uppercase keyword
    columns = ['c34_0_0 as c34', 'c46_0_0 AS c46', 'c47_0_0 as c47']

    # Run
    response = self.app.get('/ukbrest/api/v1.0/phenotype',
                            query_string={'columns': columns},
                            headers={'accept': 'text/plink2'})

    # Validate
    assert response.status_code == 200, response.status_code
    # na_values='' + keep_default_na=False preserve the literal 'NA' strings
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', na_values='',
                             keep_default_na=False, index_col='FID', dtype=str)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (4, 3 + 1)  # plus IID
    assert pheno_file.index.name == 'FID'
    assert len(pheno_file.index) == 4
    assert set(range(1, 4 + 1)).issubset(pheno_file.index)

    expected_columns = ['IID', 'c34', 'c46', 'c47']
    assert len(pheno_file.columns) == len(expected_columns)
    assert not set(pheno_file.columns) - set(expected_columns)

    expected_values = {
        'IID': ('1', '2', '3', '4'),
        'c34': ('21', '12', '1', '17'),
        'c46': ('-9', 'NA', '-7', '4'),
        'c47': ('45.55412', '-0.55461', '-5.32471', '55.19832'),
    }
    for column, values in expected_values.items():
        for row, value in enumerate(values, start=1):
            assert pheno_file.loc[row, column] == value, (row, column)
def test_phenotype_query_multiple_column_integer_values_with_nan_using_columns_renaming_with_space(self):
    """Column renaming with just a space (implicit alias, no 'as') must also work."""
    # Prepare
    self.setUp('pheno2sql/example06_nan_integer.csv')
    # the middle column uses the implicit (space-only) alias syntax
    columns = ['c34_0_0 as c34', 'c46_0_0 c46', 'c47_0_0 as c47']

    # Run
    response = self.app.get('/ukbrest/api/v1.0/phenotype',
                            query_string={'columns': columns},
                            headers={'accept': 'text/plink2'})

    # Validate
    assert response.status_code == 200, response.status_code
    # na_values='' + keep_default_na=False preserve the literal 'NA' strings
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', na_values='',
                             keep_default_na=False, index_col='FID', dtype=str)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (4, 3 + 1)  # plus IID
    assert pheno_file.index.name == 'FID'
    assert len(pheno_file.index) == 4
    assert set(range(1, 4 + 1)).issubset(pheno_file.index)

    expected_columns = ['IID', 'c34', 'c46', 'c47']
    assert len(pheno_file.columns) == len(expected_columns)
    assert not set(pheno_file.columns) - set(expected_columns)

    expected_values = {
        'IID': ('1', '2', '3', '4'),
        'c34': ('21', '12', '1', '17'),
        'c46': ('-9', 'NA', '-7', '4'),
        'c47': ('45.55412', '-0.55461', '-5.32471', '55.19832'),
    }
    for column, values in expected_values.items():
        for row, value in enumerate(values, start=1):
            assert pheno_file.loc[row, column] == value, (row, column)
def test_phenotype_query_multiple_column_integer_values_with_nan_using_reg_exp(self):
    """NaN integer values selected via an 'ecolumns' regular expression are rendered as 'NA'.

    The regex 'c4[67]_0_0' expands to the non-renamed columns c46_0_0 and c47_0_0,
    alongside the explicitly renamed c34_0_0 -> c34.
    """
    # Prepare
    self.setUp('pheno2sql/example06_nan_integer.csv')

    columns = ['c34_0_0 as c34']
    reg_exp_columns = ['c4[67]_0_0']

    parameters = {
        'columns': columns,
        'ecolumns': reg_exp_columns,
    }

    # Run
    response = self.app.get('/ukbrest/api/v1.0/phenotype',
                            query_string=parameters, headers={'accept': 'text/plink2'})

    # Validate
    assert response.status_code == 200, response.status_code

    # na_values='' + keep_default_na=False preserve the literal 'NA' strings
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', na_values='',
                             keep_default_na=False, index_col='FID', dtype=str)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (4, 3 + 1)  # plus IID
    assert pheno_file.index.name == 'FID'
    assert len(pheno_file.index) == 4
    assert all(x in pheno_file.index for x in range(1, 4 + 1))

    expected_columns = ['IID'] + ['c34', 'c46_0_0', 'c47_0_0']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(x in expected_columns for x in pheno_file.columns)

    assert pheno_file.loc[1, 'IID'] == '1'
    assert pheno_file.loc[2, 'IID'] == '2'
    assert pheno_file.loc[3, 'IID'] == '3'
    assert pheno_file.loc[4, 'IID'] == '4'

    assert pheno_file.loc[1, 'c34'] == '21'
    assert pheno_file.loc[2, 'c34'] == '12'
    assert pheno_file.loc[3, 'c34'] == '1'
    assert pheno_file.loc[4, 'c34'] == '17'

    # BUG FIX: the assertion message previously read pheno_file.loc[1, 'c46'], but in
    # this test the column keeps its original name 'c46_0_0'; on a failing assertion
    # the message expression itself would raise KeyError and mask the real failure.
    assert pheno_file.loc[1, 'c46_0_0'] == '-9', pheno_file.loc[1, 'c46_0_0']
    assert pheno_file.loc[2, 'c46_0_0'] == 'NA'
    assert pheno_file.loc[3, 'c46_0_0'] == '-7'
    assert pheno_file.loc[4, 'c46_0_0'] == '4'

    assert pheno_file.loc[1, 'c47_0_0'] == '45.55412'
    assert pheno_file.loc[2, 'c47_0_0'] == '-0.55461'
    assert pheno_file.loc[3, 'c47_0_0'] == '-5.32471'
    assert pheno_file.loc[4, 'c47_0_0'] == '55.19832'
def test_phenotype_query_multiple_column_create_field_from_integer(self):
    """Squaring an integer column ('c46_0_0^2') creates a new, float-typed output field."""
    # Prepare
    columns = ['c34_0_0', 'c46_0_0', 'c47_0_0', 'c46_0_0^2 as squared']

    # Run
    response = self.app.get('/ukbrest/api/v1.0/phenotype',
                            query_string={'columns': columns},
                            headers={'accept': 'text/plink2'})

    # Validate
    assert response.status_code == 200, response.status_code
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (4, 4 + 1)  # plus IID
    assert pheno_file.index.name == 'FID'
    assert len(pheno_file.index) == 4
    assert set(range(1, 4 + 1)).issubset(pheno_file.index)

    expected_columns = ['IID'] + columns
    assert len(pheno_file.columns) == len(expected_columns)
    # only the final alias of each column expression appears in the output
    for column_spec in expected_columns:
        assert column_spec.split()[-1] in pheno_file.columns, column_spec

    expected_values = {
        'IID': ('1', '2', '3', '4'),
        'c34_0_0': ('21', '12', '1', '17'),
        'c46_0_0': ('-9', '-2', '-7', '4'),
        'c47_0_0': ('45.55412', '-0.55461', '-5.32471', '55.19832'),
        # square results in float type
        'squared': ('81.0', '4.0', '49.0', '16.0'),
    }
    for column, values in expected_values.items():
        for row, value in enumerate(values, start=1):
            assert pheno_file.loc[row, column] == value, (row, column)
def test_phenotype_query_multiple_column_create_field_from_integer_return_integer(self):
    """An integer expression ('c46_0_0 + 1') must keep the integer type in the output."""
    # Prepare
    columns = ['c34_0_0', 'c46_0_0', 'c47_0_0', 'c46_0_0 + 1 as sum']

    parameters = {
        'columns': columns,
    }

    # Run
    response = self.app.get('/ukbrest/api/v1.0/phenotype',
                            query_string=parameters, headers={'accept': 'text/plink2'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (4, 4 + 1)  # plus IID
    assert pheno_file.index.name == 'FID'
    assert len(pheno_file.index) == 4
    assert all(x in pheno_file.index for x in range(1, 4 + 1))

    expected_columns = ['IID'] + columns
    assert len(pheno_file.columns) == len(expected_columns)
    # only the final alias of each column expression appears in the output
    assert all(x.split()[-1] in pheno_file.columns for x in expected_columns)

    assert pheno_file.loc[1, 'IID'] == '1'
    assert pheno_file.loc[2, 'IID'] == '2'
    assert pheno_file.loc[3, 'IID'] == '3'
    assert pheno_file.loc[4, 'IID'] == '4'

    assert pheno_file.loc[1, 'c34_0_0'] == '21'
    assert pheno_file.loc[2, 'c34_0_0'] == '12'
    assert pheno_file.loc[3, 'c34_0_0'] == '1'
    assert pheno_file.loc[4, 'c34_0_0'] == '17'

    assert pheno_file.loc[1, 'c46_0_0'] == '-9'
    assert pheno_file.loc[2, 'c46_0_0'] == '-2'
    assert pheno_file.loc[3, 'c46_0_0'] == '-7'
    assert pheno_file.loc[4, 'c46_0_0'] == '4'

    assert pheno_file.loc[1, 'c47_0_0'] == '45.55412'
    assert pheno_file.loc[2, 'c47_0_0'] == '-0.55461'
    assert pheno_file.loc[3, 'c47_0_0'] == '-5.32471'
    assert pheno_file.loc[4, 'c47_0_0'] == '55.19832'

    # integer + integer stays integer: no '.0' suffix, unlike the squared/float case
    assert pheno_file.loc[1, 'sum'] == '-8'
    assert pheno_file.loc[2, 'sum'] == '-1'
    assert pheno_file.loc[3, 'sum'] == '-6'
    assert pheno_file.loc[4, 'sum'] == '5'
def test_phenotype_query_multiple_column_create_field_from_float(self):
    """Squaring a float column ('c47_0_0^2') creates a new float-typed output field."""
    # Prepare
    columns = ['c34_0_0', 'c46_0_0', 'c47_0_0', 'c47_0_0^2 as squared']

    # Run
    response = self.app.get('/ukbrest/api/v1.0/phenotype',
                            query_string={'columns': columns},
                            headers={'accept': 'text/plink2'})

    # Validate
    assert response.status_code == 200, response.status_code
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (4, 4 + 1)  # plus IID
    assert pheno_file.index.name == 'FID'
    assert len(pheno_file.index) == 4
    assert set(range(1, 4 + 1)).issubset(pheno_file.index)

    expected_columns = ['IID'] + columns
    assert len(pheno_file.columns) == len(expected_columns)
    # only the final alias of each column expression appears in the output
    for column_spec in expected_columns:
        assert column_spec.split()[-1] in pheno_file.columns, column_spec

    expected_values = {
        'IID': ('1', '2', '3', '4'),
        'c34_0_0': ('21', '12', '1', '17'),
        'c46_0_0': ('-9', '-2', '-7', '4'),
        'c47_0_0': ('45.55412', '-0.55461', '-5.32471', '55.19832'),
        # square results in float type
        'squared': ('2075.1778489744', '0.3075922521', '28.3525365841', '3046.8545308224'),
    }
    for column, values in expected_values.items():
        for row, value in enumerate(values, start=1):
            assert pheno_file.loc[row, column] == value, (row, column)
def test_phenotype_query_multiple_column_create_field_from_str(self):
    """A SQL string expression ('||' concatenation) can define a new output column."""
    # Prepare
    columns = ['c34_0_0', 'c46_0_0', 'c47_0_0', 'c21_0_0', '(c21_0_0 || \' end \' || eid) as result']

    parameters = {
        'columns': columns,
    }

    # Run
    response = self.app.get('/ukbrest/api/v1.0/phenotype',
                            query_string=parameters, headers={'accept': 'text/plink2'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (4, 5 + 1)  # plus IID
    assert pheno_file.index.name == 'FID'
    assert len(pheno_file.index) == 4
    assert all(x in pheno_file.index for x in range(1, 4 + 1))

    expected_columns = ['IID'] + columns
    assert len(pheno_file.columns) == len(expected_columns)
    # only the final alias of each column expression appears in the output
    assert all(x.split()[-1] in pheno_file.columns for x in expected_columns)

    assert pheno_file.loc[1, 'IID'] == '1'
    assert pheno_file.loc[2, 'IID'] == '2'
    assert pheno_file.loc[3, 'IID'] == '3'
    assert pheno_file.loc[4, 'IID'] == '4'

    # '||' concatenates the c21_0_0 text, the literal ' end ' and the sample eid
    assert pheno_file.loc[1, 'result'] == 'Option number 1 end 1'
    assert pheno_file.loc[2, 'result'] == 'Option number 2 end 2'
    assert pheno_file.loc[3, 'result'] == 'Option number 3 end 3'
    assert pheno_file.loc[4, 'result'] == 'Option number 4 end 4'
def test_phenotype_query_format_pheno_missing_data(self):
    """Missing (NULL) values must appear as the literal 'NA' in the pheno output."""
    # Prepare
    columns = ['c21_0_0', 'c21_1_0', 'c48_0_0']

    # Run
    response = self.app.get('/ukbrest/api/v1.0/phenotype',
                            query_string={'columns': columns},
                            headers={'accept': 'text/plink2'})

    # Validate
    assert response.status_code == 200, response.status_code

    # na_values='' is necessary to not overwrite NA strings here
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t',
                             na_values='', keep_default_na=False, index_col='FID', dtype=str)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (4, 3 + 1)  # plus IID
    assert pheno_file.index.name == 'FID'
    assert len(pheno_file.index) == 4
    assert set(range(1, 4 + 1)).issubset(pheno_file.index)

    expected_columns = ['IID'] + columns
    assert len(pheno_file.columns) == len(expected_columns)
    assert not set(pheno_file.columns) - set(expected_columns)

    expected_values = {
        'IID': ('1', '2', '3', '4'),
        'c21_0_0': ('Option number 1', 'Option number 2', 'Option number 3', 'Option number 4'),
        'c21_1_0': ('No response', 'NA', 'Of course', "I don't know"),
        'c48_0_0': ('2011-08-14', '2016-11-30', '2010-01-01', '2011-02-15'),
    }
    for column, values in expected_values.items():
        for row, value in enumerate(values, start=1):
            assert pheno_file.loc[row, column] == value, (row, column)
def test_phenotype_query_format_pheno_missing_date(self):
    """A missing date value must be rendered as the literal 'NA'."""
    # Prepare
    self.setUp('pheno2sql/example05_missing_date.csv')
    columns = ['c21_0_0', 'c21_1_0', 'c48_0_0']

    # Run
    response = self.app.get('/ukbrest/api/v1.0/phenotype',
                            query_string={'columns': columns},
                            headers={'accept': 'text/plink2'})

    # Validate
    assert response.status_code == 200, response.status_code

    # na_values='' is necessary to not overwrite NA strings here
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t',
                             na_values='', keep_default_na=False, index_col='FID', dtype=str)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (4, 3 + 1)  # plus IID
    assert pheno_file.index.name == 'FID'
    assert len(pheno_file.index) == 4
    assert set(range(1, 4 + 1)).issubset(pheno_file.index)

    expected_columns = ['IID'] + columns
    assert len(pheno_file.columns) == len(expected_columns)
    assert not set(pheno_file.columns) - set(expected_columns)

    expected_values = {
        'IID': ('1', '2', '3', '4'),
        # sample 3's date is missing in the source file and must surface as 'NA'
        'c48_0_0': ('2011-08-14', '2016-11-30', 'NA', '2011-02-15'),
    }
    for column, values in expected_values.items():
        for row, value in enumerate(values, start=1):
            assert pheno_file.loc[row, column] == value, (row, column)
def test_phenotype_query_multiple_column_no_format(self):
    """Without an 'accept' header the endpoint returns the default tab-separated format."""
    # Prepare
    columns = ['c21_0_0', 'c48_0_0']

    # Run: note that no 'accept' header is sent
    response = self.app.get('/ukbrest/api/v1.0/phenotype',
                            query_string={'columns': columns})

    # Validate
    assert response.status_code == 200, response.status_code
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (4, 2 + 1)  # plus IID
    assert pheno_file.index.name == 'FID'
    assert len(pheno_file.index) == 4
    assert set(range(1, 4 + 1)).issubset(pheno_file.index)

    expected_columns = ['IID'] + columns
    assert len(pheno_file.columns) == len(expected_columns)
    assert not set(pheno_file.columns) - set(expected_columns)

    # column order: IID must come first
    assert pheno_file.columns.tolist()[0] == 'IID'

    expected_values = {
        'IID': ('1', '2', '3', '4'),
        'c21_0_0': ('Option number 1', 'Option number 2', 'Option number 3', 'Option number 4'),
        'c48_0_0': ('2011-08-14', '2016-11-30', '2010-01-01', '2011-02-15'),
    }
    for column, values in expected_values.items():
        for row, value in enumerate(values, start=1):
            assert pheno_file.loc[row, column] == value, (row, column)
def test_phenotype_query_multiple_column_format_not_supported(self):
    """Requesting an unsupported accept type must return a 400 with an explanatory message."""
    # Prepare
    parameters = {
        'columns': ['c21_0_0', 'c48_0_0'],
    }

    # Run: 'application/json' is not a supported output format for this endpoint
    response = self.app.get('/ukbrest/api/v1.0/phenotype',
                            query_string=parameters, headers={'accept': 'application/json'})

    # Validate
    assert response.status_code == 400, response.status_code

    data = json.loads(response.data.decode('utf-8'))
    assert 'status_code' in data, data
    assert data['status_code'] == 400, data['status_code']
    assert 'error_type' in data, data
    assert data['error_type'] == 'UNKNOWN', data['error_type']
    assert 'message' in data, data
    # the error message lists the supported formats
    message = str(data['message'])
    assert 'are supported' in message, data['message']
    assert 'text/plink2' in message, data['message']
def test_phenotype_query_with_filtering(self):
    """'filters' parameters restrict the returned samples (conditions combined)."""
    # Prepare
    columns = ['c21_0_0', 'c21_2_0', 'c47_0_0', 'c48_0_0']
    filtering = ["c48_0_0 > '2011-01-01'", "c21_2_0 <> ''"]

    # Run
    response = self.app.get('/ukbrest/api/v1.0/phenotype',
                            query_string={'columns': columns, 'filters': filtering})

    # Validate
    assert response.status_code == 200, response.status_code
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str)

    assert pheno_file is not None
    assert not pheno_file.empty
    # only samples 1 and 2 satisfy both filter conditions
    assert pheno_file.shape[0] == 2
    assert pheno_file.shape[1] == 4 + 1  # plus IID
    assert pheno_file.index.name == 'FID'
    assert len(pheno_file.index) == 2
    assert {1, 2}.issubset(pheno_file.index)

    expected_columns = ['IID'] + columns
    assert len(pheno_file.columns) == len(expected_columns)
    assert not set(pheno_file.columns) - set(expected_columns)

    # column order
    assert pheno_file.columns.tolist()[0] == 'IID'

    expected_values = {
        'IID': ('1', '2'),
        'c21_0_0': ('Option number 1', 'Option number 2'),
        'c21_2_0': ('Yes', 'No'),
        'c47_0_0': ('45.55412', '-0.55461'),
        'c48_0_0': ('2011-08-14', '2016-11-30'),
    }
    for column, values in expected_values.items():
        for row, value in enumerate(values, start=1):
            assert pheno_file.loc[row, column] == value, (row, column)
def test_phenotype_query_columns_with_regular_expression_and_standard_columns(self):
    """Standard 'columns' and 'ecolumns' regular expressions can be combined in one query."""
    # Prepare
    self.setUp('pheno2sql/example09_with_arrays.csv')

    columns = ['c21_0_0', 'c48_0_0']
    reg_exp_columns = ['c84_0_\d+']

    parameters = {
        'columns': columns,
        'ecolumns': reg_exp_columns,
    }

    # Run
    response = self.app.get('/ukbrest/api/v1.0/phenotype', query_string=parameters)

    # Validate
    assert response.status_code == 200, response.status_code

    # na_values='' + keep_default_na=False keep the literal 'NA' strings intact
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', na_values='',
                             keep_default_na=False, index_col='FID', dtype=str)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (5, 5 + 1), pheno_file.shape  # plus IID
    assert pheno_file.index.name == 'FID'
    assert len(pheno_file.index) == 5
    assert all(x in pheno_file.index for x in range(1, 5 + 1))

    # the regular expression expands to the three c84_0_* array columns
    expected_columns = ['IID'] + columns + ['c84_0_0', 'c84_0_1', 'c84_0_2']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(x in expected_columns for x in pheno_file.columns)

    # column order
    assert pheno_file.columns.tolist()[0] == 'IID'

    assert pheno_file.loc[1, 'IID'] == '1'
    assert pheno_file.loc[2, 'IID'] == '2'
    assert pheno_file.loc[3, 'IID'] == '3'
    assert pheno_file.loc[4, 'IID'] == '4'
    assert pheno_file.loc[5, 'IID'] == '5'

    assert pheno_file.loc[1, 'c21_0_0'] == 'Option number 1'
    assert pheno_file.loc[2, 'c21_0_0'] == 'Option number 2'
    assert pheno_file.loc[3, 'c21_0_0'] == 'Option number 3'
    assert pheno_file.loc[4, 'c21_0_0'] == "Option number 4"
    assert pheno_file.loc[5, 'c21_0_0'] == "Option number 5"

    assert pheno_file.loc[1, 'c48_0_0'] == '2010-07-14'
    assert pheno_file.loc[2, 'c48_0_0'] == '2017-11-30'
    assert pheno_file.loc[3, 'c48_0_0'] == '2020-01-01'
    assert pheno_file.loc[4, 'c48_0_0'] == '1990-02-15'
    assert pheno_file.loc[5, 'c48_0_0'] == '1999-10-11'

    assert pheno_file.loc[1, 'c84_0_0'] == '11', pheno_file.loc[1, 'c84_0_0']
    assert pheno_file.loc[2, 'c84_0_0'] == '-21'
    # missing array value must surface as the literal 'NA'
    assert pheno_file.loc[3, 'c84_0_0'] == 'NA'
    assert pheno_file.loc[4, 'c84_0_0'] == '41'
    assert pheno_file.loc[5, 'c84_0_0'] == '51'

    assert pheno_file.loc[1, 'c84_0_1'] == '1', pheno_file.loc[1, 'c84_0_1']
    assert pheno_file.loc[2, 'c84_0_1'] == '99'
    assert pheno_file.loc[3, 'c84_0_1'] == '98'
    assert pheno_file.loc[4, 'c84_0_1'] == '-37'
    assert pheno_file.loc[5, 'c84_0_1'] == '36'

    assert pheno_file.loc[1, 'c84_0_2'] == '999'
    assert pheno_file.loc[2, 'c84_0_2'] == '152'
    assert pheno_file.loc[3, 'c84_0_2'] == '-68'
    assert pheno_file.loc[4, 'c84_0_2'] == 'NA'
    assert pheno_file.loc[5, 'c84_0_2'] == '-445'
def test_phenotype_query_columns_with_regular_expression_only(self):
    """An 'ecolumns' regular expression alone selects all matching columns."""
    # Prepare
    self.setUp('pheno2sql/example09_with_arrays.csv')
    reg_exp_columns = [r'c84_0_\d+']

    # Run: no explicit 'columns' parameter, only the regular expression
    response = self.app.get('/ukbrest/api/v1.0/phenotype',
                            query_string={'ecolumns': reg_exp_columns})

    # Validate
    assert response.status_code == 200, response.status_code
    # na_values='' + keep_default_na=False preserve the literal 'NA' strings
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', na_values='',
                             keep_default_na=False, index_col='FID', dtype=str)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (5, 3 + 1), pheno_file.shape  # plus IID
    assert pheno_file.index.name == 'FID'
    assert len(pheno_file.index) == 5
    assert set(range(1, 5 + 1)).issubset(pheno_file.index)

    expected_columns = ['IID', 'c84_0_0', 'c84_0_1', 'c84_0_2']
    assert len(pheno_file.columns) == len(expected_columns)
    assert not set(pheno_file.columns) - set(expected_columns)

    # column order
    assert pheno_file.columns.tolist()[0] == 'IID'

    expected_values = {
        'IID': ('1', '2', '3', '4', '5'),
        'c84_0_0': ('11', '-21', 'NA', '41', '51'),
        'c84_0_1': ('1', '99', '98', '-37', '36'),
        'c84_0_2': ('999', '152', '-68', 'NA', '-445'),
    }
    for column, values in expected_values.items():
        for row, value in enumerate(values, start=1):
            assert pheno_file.loc[row, column] == value, (row, column)
def test_phenotype_query_columns_pheno2sql_instance_not_loaded(self):
    """This test uses a different Pheno2SQL instance without previous loading.

    Data is loaded once, then a second instance is created with load_data=False and
    wipe_database=False, and the query must still work against the existing database.
    """
    # Prepare
    csv01 = get_repository_path('pheno2sql/example08_01.csv')
    csv02 = get_repository_path('pheno2sql/example08_02.csv')
    csvs = (csv01, csv02)

    # first load data
    self.setUp(csvs)

    # then create another instance without executing load_data method
    self.setUp(csvs, load_data=False, wipe_database=False)

    columns = ['c48_0_0', 'c120_0_0 as c120', 'c150_0_0 c150']
    reg_exp_columns = ['c21_[01]_0', 'c100_\d_0']

    parameters = {
        'columns': columns,
        'ecolumns': reg_exp_columns,
    }

    # Run
    response = self.app.get('/ukbrest/api/v1.0/phenotype', query_string=parameters)

    # Validate
    assert response.status_code == 200, response.status_code

    # na_values='' + keep_default_na=False keep the literal 'NA' strings intact
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', na_values='',
                             keep_default_na=False, index_col='FID', dtype=str)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (5, 8 + 1), pheno_file.shape  # plus IID
    assert pheno_file.index.name == 'FID'
    assert len(pheno_file.index) == 5
    assert all(x in pheno_file.index for x in range(1, 5 + 1))

    # note: the renamed columns c120 and c150 are present but their values
    # are not checked below
    expected_columns = ['IID'] + ['c21_0_0', 'c21_1_0', 'c48_0_0', 'c120', 'c150', 'c100_0_0', 'c100_1_0', 'c100_2_0']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(x in expected_columns for x in pheno_file.columns)

    # column order
    assert pheno_file.columns.tolist()[0] == 'IID'

    assert pheno_file.loc[1, 'IID'] == '1'
    assert pheno_file.loc[2, 'IID'] == '2'
    assert pheno_file.loc[3, 'IID'] == '3'
    assert pheno_file.loc[4, 'IID'] == '4'
    assert pheno_file.loc[5, 'IID'] == '5'

    assert pheno_file.loc[1, 'c21_0_0'] == 'Option number 1'
    assert pheno_file.loc[2, 'c21_0_0'] == 'Option number 2'
    assert pheno_file.loc[3, 'c21_0_0'] == 'Option number 3'
    assert pheno_file.loc[4, 'c21_0_0'] == 'Option number 4'
    assert pheno_file.loc[5, 'c21_0_0'] == 'Option number 5'

    assert pheno_file.loc[1, 'c21_1_0'] == 'No response'
    assert pheno_file.loc[2, 'c21_1_0'] == 'NA'
    assert pheno_file.loc[3, 'c21_1_0'] == 'Of course'
    assert pheno_file.loc[4, 'c21_1_0'] == "I don't know"
    assert pheno_file.loc[5, 'c21_1_0'] == 'Maybe'

    assert pheno_file.loc[1, 'c48_0_0'] == '2010-07-14'
    assert pheno_file.loc[2, 'c48_0_0'] == '2017-11-30'
    assert pheno_file.loc[3, 'c48_0_0'] == '2020-01-01'
    assert pheno_file.loc[4, 'c48_0_0'] == '1990-02-15'
    assert pheno_file.loc[5, 'c48_0_0'] == '1999-10-11'

    assert pheno_file.loc[1, 'c100_0_0'] == '-9', pheno_file.loc[1, 'c100_0_0']
    assert pheno_file.loc[2, 'c100_0_0'] == '-2'
    assert pheno_file.loc[3, 'c100_0_0'] == 'NA'
    assert pheno_file.loc[4, 'c100_0_0'] == 'NA'
    assert pheno_file.loc[5, 'c100_0_0'] == 'NA'

    assert pheno_file.loc[1, 'c100_1_0'] == '3', pheno_file.loc[1, 'c100_1_0']
    assert pheno_file.loc[2, 'c100_1_0'] == '3'
    assert pheno_file.loc[3, 'c100_1_0'] == '-4'
    assert pheno_file.loc[4, 'c100_1_0'] == 'NA'
    assert pheno_file.loc[5, 'c100_1_0'] == 'NA'

    assert pheno_file.loc[1, 'c100_2_0'] == 'NA', pheno_file.loc[1, 'c100_2_0']
    assert pheno_file.loc[2, 'c100_2_0'] == '1'
    assert pheno_file.loc[3, 'c100_2_0'] == '-10'
    assert pheno_file.loc[4, 'c100_2_0'] == 'NA'
    assert pheno_file.loc[5, 'c100_2_0'] == 'NA'
def test_phenotype_query_http_basic_auth_is_null(self):
    """When app.config['auth'] is None, authentication is disabled and requests succeed."""
    # Prepare
    csv01 = get_repository_path('pheno2sql/example08_01.csv')
    csv02 = get_repository_path('pheno2sql/example08_02.csv')
    csvs = (csv01, csv02)

    # first load data
    self.setUp(csvs)

    # then create another instance without executing load_data method
    self.setUp(csvs, load_data=False, wipe_database=False)

    def configure_http_auth(theapp):
        theapp.config['auth'] = None

    self.configureApp(configure_http_auth)

    columns = ['c48_0_0', 'c120_0_0 as c120', 'c150_0_0 c150']
    reg_exp_columns = ['c21_[01]_0', 'c100_\d_0']

    parameters = {
        'columns': columns,
        'ecolumns': reg_exp_columns,
    }

    # Run: no credentials are sent
    response = self.app.get('/ukbrest/api/v1.0/phenotype', query_string=parameters)

    # Validate
    # auth is disabled (None), so the request succeeds even without credentials
    assert response.status_code == 200, response.status_code
def test_phenotype_query_http_basic_auth_no_user_pass(self):
    """With HTTP basic auth configured, a request without credentials is rejected (401)."""
    # Prepare
    csvs = (get_repository_path('pheno2sql/example08_01.csv'),
            get_repository_path('pheno2sql/example08_02.csv'))

    # first load data
    self.setUp(csvs)
    # then create another instance without executing the load_data method
    self.setUp(csvs, load_data=False, wipe_database=False)

    self.configureAppWithAuth('user: thepassword2')

    parameters = {
        'columns': ['c48_0_0', 'c120_0_0 as c120', 'c150_0_0 c150'],
        'ecolumns': [r'c21_[01]_0', r'c100_\d_0'],
    }

    # Run: no Authorization header is sent
    response = self.app.get('/ukbrest/api/v1.0/phenotype', query_string=parameters)

    # Validate
    # unauthorized
    assert response.status_code == 401, response.status_code
def test_phenotype_query_http_basic_auth_with_user_pass(self):
    """With HTTP basic auth configured, correct credentials are accepted (200)."""
    # Prepare
    csv01 = get_repository_path('pheno2sql/example08_01.csv')
    csv02 = get_repository_path('pheno2sql/example08_02.csv')
    csvs = (csv01, csv02)

    # first load data
    self.setUp(csvs)

    # then create another instance without executing load_data method
    self.setUp(csvs, load_data=False, wipe_database=False)

    self.configureAppWithAuth('user: thepassword2')

    columns = ['c48_0_0', 'c120_0_0 as c120', 'c150_0_0 c150']
    reg_exp_columns = ['c21_[01]_0', 'c100_\d_0']

    parameters = {
        'columns': columns,
        'ecolumns': reg_exp_columns,
    }

    # Run: the matching username/password pair is sent
    response = self.app.get(
        '/ukbrest/api/v1.0/phenotype',
        query_string=parameters,
        headers=self._get_http_basic_auth_header('user', 'thepassword2'),
    )

    # Validate
    # authorized: the supplied credentials match the configured ones
    assert response.status_code == 200, response.status_code

    # na_values='' + keep_default_na=False keep the literal 'NA' strings intact
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', na_values='',
                             keep_default_na=False, index_col='FID', dtype=str)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (5, 8 + 1), pheno_file.shape  # plus IID
def test_phenotype_query_http_basic_auth_with_wrong_pass(self):
    """A request whose password does not match the configured one is rejected (401)."""
    # Prepare
    csvs = (get_repository_path('pheno2sql/example08_01.csv'),
            get_repository_path('pheno2sql/example08_02.csv'))

    # first load data
    self.setUp(csvs)
    # then create another instance without executing the load_data method
    self.setUp(csvs, load_data=False, wipe_database=False)

    # the app expects 'anotherpass', but the client will send 'thepassword2'
    self.configureAppWithAuth('user: anotherpass')

    parameters = {
        'columns': ['c48_0_0', 'c120_0_0 as c120', 'c150_0_0 c150'],
        'ecolumns': [r'c21_[01]_0', r'c100_\d_0'],
    }

    # Run
    response = self.app.get(
        '/ukbrest/api/v1.0/phenotype',
        query_string=parameters,
        headers=self._get_http_basic_auth_header('user', 'thepassword2'),
    )

    # Validate
    # unauthorized
    assert response.status_code == 401, response.status_code
def test_phenotype_query_http_basic_auth_with_wrong_user(self):
    """A wrong username in HTTP Basic Auth must be rejected with 401."""
    # Prepare
    csv01 = get_repository_path('pheno2sql/example08_01.csv')
    csv02 = get_repository_path('pheno2sql/example08_02.csv')
    csvs = (csv01, csv02)

    # first load data
    self.setUp(csvs)
    # then create another instance without executing load_data method
    self.setUp(csvs, load_data=False, wipe_database=False)

    # server expects 'anotheruser'; the request below authenticates as 'user'
    self.configureAppWithAuth('anotheruser: thepassword2')

    columns = ['c48_0_0', 'c120_0_0 as c120', 'c150_0_0 c150']
    # raw strings: '\d' inside a plain string literal is an invalid escape sequence
    reg_exp_columns = [r'c21_[01]_0', r'c100_\d_0']

    parameters = {
        'columns': columns,
        'ecolumns': reg_exp_columns,
    }

    # Run
    response = self.app.get(
        '/ukbrest/api/v1.0/phenotype',
        query_string=parameters,
        headers=self._get_http_basic_auth_header('user', 'thepassword2'),
    )

    # Validate
    # unauthorized
    assert response.status_code == 401, response.status_code
def test_phenotype_query_yaml_get_covariates(self):
    """Requesting the 'covariates' section of a YAML query returns exactly those columns."""
    # Prepare
    self.setUp('pheno2sql/example10/example10_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example10/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    yaml_data = b"""
    covariates:
        field_name_34: c34_0_0
        field_name_47: c47_0_0

    fields:
        instance0: c21_0_0
        instance1: c21_1_0
        instance2: c21_2_0
    """

    # Run
    post_data = {
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'covariates',
    }
    response = self.app.post('/ukbrest/api/v1.0/query', data=post_data)

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str,
                             na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (5, 2 + 1)  # plus IID
    assert pheno_file.index.name == 'FID'

    sample_ids = (1000010, 1000020, 1000030, 1000040, 1000050)
    assert all(eid in pheno_file.index for eid in sample_ids)

    expected_columns = ['IID'] + ['field_name_34', 'field_name_47']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(col in expected_columns for col in pheno_file.columns)
    # column order: IID must come first
    assert pheno_file.columns.tolist()[0] == 'IID'

    # per-sample expected values: (IID, field_name_34, field_name_47)
    expected_rows = {
        1000010: ('1000010', '-33', '41.55312'),
        1000020: ('1000020', '34', '-10.51461'),
        1000030: ('1000030', '0', '-35.31471'),
        1000040: ('1000040', '3', '5.20832'),
        1000050: ('1000050', '-4', 'NA'),
    }
    for eid, (iid, c34, c47) in expected_rows.items():
        assert pheno_file.loc[eid, 'IID'] == iid
        assert pheno_file.loc[eid, 'field_name_34'] == c34
        assert pheno_file.loc[eid, 'field_name_47'] == c47
def test_phenotype_query_yaml_get_covariates_http_auth_with_no_credentials(self):
    """A YAML query sent without credentials is rejected once auth is enabled."""
    # Prepare
    self.setUp('pheno2sql/example10/example10_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example10/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    self.configureAppWithAuth('user: thepassword2')

    yaml_data = b"""
    covariates:
        field_name_34: c34_0_0
        field_name_47: c47_0_0

    fields:
        instance0: c21_0_0
        instance1: c21_1_0
        instance2: c21_2_0
    """

    # Run: no Authorization header on purpose
    post_data = {
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'covariates',
    }
    response = self.app.post('/ukbrest/api/v1.0/query', data=post_data)

    # Validate: unauthorized
    assert response.status_code == 401, response.status_code
def test_phenotype_query_yaml_get_covariates_http_auth_with_credentials(self):
    """A YAML query with correct credentials succeeds and returns the covariates."""
    # Prepare
    self.setUp('pheno2sql/example10/example10_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example10/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    self.configureAppWithAuth('user: thepassword2')

    yaml_data = b"""
    covariates:
        field_name_34: c34_0_0
        field_name_47: c47_0_0

    fields:
        instance0: c21_0_0
        instance1: c21_1_0
        instance2: c21_2_0
    """

    # Run: same query as above, but now with the expected credentials
    auth_header = self._get_http_basic_auth_header('user', 'thepassword2')
    response = self.app.post(
        '/ukbrest/api/v1.0/query',
        data={
            'file': (io.BytesIO(yaml_data), 'data.yaml'),
            'section': 'covariates',
        },
        headers=auth_header,
    )

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str,
                             na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (5, 2 + 1)  # plus IID
def test_phenotype_query_yaml_get_fields(self):
    """Requesting the 'fields' section of a YAML query returns exactly those columns."""
    # Prepare
    self.setUp('pheno2sql/example10/example10_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example10/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    yaml_data = b"""
    covariates:
        field_name_34: c34_0_0
        field_name_47: c47_0_0

    fields:
        instance0: c21_0_0
        instance1: c21_1_0
        instance2: c21_2_0
    """

    # Run
    post_data = {
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'fields',
    }
    response = self.app.post('/ukbrest/api/v1.0/query', data=post_data)

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str,
                             na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (5, 3 + 1)  # plus IID
    assert pheno_file.index.name == 'FID'
    assert all(eid in pheno_file.index for eid in (1000010, 1000020, 1000030, 1000040, 1000050))

    expected_columns = ['IID'] + ['instance0', 'instance1', 'instance2']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(col in expected_columns for col in pheno_file.columns)
    # column order: IID must come first
    assert pheno_file.columns.tolist()[0] == 'IID'

    # spot-check two samples: (IID, instance0, instance1, instance2)
    expected_rows = {
        1000010: ('1000010', 'Option number 1', 'No response', 'Yes'),
        1000040: ('1000040', 'Option number 4', "I don't know", 'NA'),
    }
    for eid, (iid, i0, i1, i2) in expected_rows.items():
        assert pheno_file.loc[eid, 'IID'] == iid
        assert pheno_file.loc[eid, 'instance0'] == i0
        assert pheno_file.loc[eid, 'instance1'] == i1
        assert pheno_file.loc[eid, 'instance2'] == i2
def test_phenotype_query_yaml_filter_samples_with_include_only(self):
    """samples_filters restricts both 'fields' and 'covariates' sections to matching samples."""
    # Prepare
    self.setUp('pheno2sql/example10/example10_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example10/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    yaml_data = b"""
    samples_filters:
        - c47_0_0 > 0

    covariates:
        field_name_34: c34_0_0
        field_name_47: c47_0_0

    fields:
        instance0: c21_0_0
        instance1: c21_1_0
        instance2: c21_2_0
    """

    N_EXPECTED_SAMPLES = 2

    def run_section(section_name):
        # a fresh BytesIO is needed per request, since the previous one is consumed
        resp = self.app.post('/ukbrest/api/v1.0/query', data={
            'file': (io.BytesIO(yaml_data), 'data.yaml'),
            'section': section_name,
        })
        assert resp.status_code == 200, resp.status_code
        return pd.read_csv(io.StringIO(resp.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str,
                           na_values='', keep_default_na=False)

    #
    # Ask fields
    #
    pheno_file = run_section('fields')

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (N_EXPECTED_SAMPLES, 3 + 1), pheno_file.shape  # plus IID
    assert pheno_file.index.name == 'FID'
    assert all(eid in pheno_file.index for eid in (1000010, 1000040)), pheno_file.index.tolist()

    expected_columns = ['IID'] + ['instance0', 'instance1', 'instance2']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(col in expected_columns for col in pheno_file.columns)
    # column order: IID must come first
    assert pheno_file.columns.tolist()[0] == 'IID'

    fields_expected = {
        1000010: ('1000010', 'Option number 1', 'No response', 'Yes'),
        1000040: ('1000040', 'Option number 4', "I don't know", 'NA'),
    }
    for eid, (iid, i0, i1, i2) in fields_expected.items():
        assert pheno_file.loc[eid, 'IID'] == iid
        assert pheno_file.loc[eid, 'instance0'] == i0
        assert pheno_file.loc[eid, 'instance1'] == i1
        assert pheno_file.loc[eid, 'instance2'] == i2

    #
    # Ask covariates
    #
    pheno_file = run_section('covariates')

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (N_EXPECTED_SAMPLES, 2 + 1)  # plus IID
    assert pheno_file.index.name == 'FID'
    assert all(eid in pheno_file.index for eid in (1000010, 1000040))

    expected_columns = ['IID'] + ['field_name_34', 'field_name_47']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(col in expected_columns for col in pheno_file.columns)
    # column order: IID must come first
    assert pheno_file.columns.tolist()[0] == 'IID'

    covariates_expected = {
        1000010: ('1000010', '-33', '41.55312'),
        1000040: ('1000040', '3', '5.20832'),
    }
    for eid, (iid, c34, c47) in covariates_expected.items():
        assert pheno_file.loc[eid, 'IID'] == iid
        assert pheno_file.loc[eid, 'field_name_34'] == c34
        assert pheno_file.loc[eid, 'field_name_47'] == c47
def test_phenotype_query_yaml_filter_samples_condition_breaking_for_fields_and_covariates(self):
    """Multiple samples_filters conditions are ANDed and applied to both sections."""
    # Prepare
    self.setUp('pheno2sql/example10/example10_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example10/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    yaml_data = b"""
    samples_filters:
        - c47_0_0 > 0
        - c46_0_0 < 0 or c46_0_0 = 4 or c46_0_0 = 1

    covariates:
        field_name_34: c34_0_0
        field_name_47: c47_0_0

    fields:
        instance0: c21_0_0
        instance1: c21_1_0
        instance2: c21_2_0
    """

    N_EXPECTED_SAMPLES = 2

    def run_section(section_name):
        # a fresh BytesIO is needed per request, since the previous one is consumed
        resp = self.app.post('/ukbrest/api/v1.0/query', data={
            'file': (io.BytesIO(yaml_data), 'data.yaml'),
            'section': section_name,
        })
        assert resp.status_code == 200, resp.status_code
        return pd.read_csv(io.StringIO(resp.data.decode('utf-8')), sep='\t', index_col='FID', dtype=str,
                           na_values='', keep_default_na=False)

    #
    # Ask fields
    #
    pheno_file = run_section('fields')

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (N_EXPECTED_SAMPLES, 3 + 1), pheno_file.shape  # plus IID
    assert pheno_file.index.name == 'FID'
    assert all(eid in pheno_file.index for eid in (1000010, 1000040)), pheno_file.index.tolist()

    expected_columns = ['IID'] + ['instance0', 'instance1', 'instance2']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(col in expected_columns for col in pheno_file.columns)
    # column order: IID must come first
    assert pheno_file.columns.tolist()[0] == 'IID'

    fields_expected = {
        1000010: ('1000010', 'Option number 1', 'No response', 'Yes'),
        1000040: ('1000040', 'Option number 4', "I don't know", 'NA'),
    }
    for eid, (iid, i0, i1, i2) in fields_expected.items():
        assert pheno_file.loc[eid, 'IID'] == iid
        assert pheno_file.loc[eid, 'instance0'] == i0
        assert pheno_file.loc[eid, 'instance1'] == i1
        assert pheno_file.loc[eid, 'instance2'] == i2

    #
    # Ask covariates
    #
    pheno_file = run_section('covariates')

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (N_EXPECTED_SAMPLES, 2 + 1)  # plus IID
    assert pheno_file.index.name == 'FID'
    assert all(eid in pheno_file.index for eid in (1000010, 1000040))

    expected_columns = ['IID'] + ['field_name_34', 'field_name_47']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(col in expected_columns for col in pheno_file.columns)
    # column order: IID must come first
    assert pheno_file.columns.tolist()[0] == 'IID'

    covariates_expected = {
        1000010: ('1000010', '-33', '41.55312'),
        1000040: ('1000040', '3', '5.20832'),
    }
    for eid, (iid, c34, c47) in covariates_expected.items():
        assert pheno_file.loc[eid, 'IID'] == iid
        assert pheno_file.loc[eid, 'field_name_34'] == c34
        assert pheno_file.loc[eid, 'field_name_47'] == c47
def test_phenotype_query_yaml_specify_bgenie_format(self):
    """'text/bgenie' output keeps every genotyped sample (BGEN sample order) and writes
    the requested missing code for samples excluded by samples_filters."""
    # Prepare
    self.setUp('pheno2sql/example10/example10_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example10/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    yaml_data = b"""
    samples_filters:
        - c47_0_0 > 0

    covariates:
        field_name_34: c34_0_0
        field_name_47: c47_0_0

    fields:
        instance0: c21_0_0
        instance1: c21_1_0
        instance2: c21_2_0
    """

    # all 5 genotyped samples appear, even though only 2 pass the filter
    N_EXPECTED_SAMPLES = 5

    #
    # Ask fields
    #
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'fields',
        'missing_code': '-999',
    }, headers={'accept': 'text/bgenie'})

    # Validate
    assert response.status_code == 200, response.status_code

    # pd.read_csv instead of the deprecated pd.read_table alias (same parser;
    # consistent with the rest of this file)
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,
                             dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (N_EXPECTED_SAMPLES, 3), pheno_file.shape

    expected_columns = ['instance0', 'instance1', 'instance2']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(x in expected_columns for x in pheno_file.columns)

    # rows in BGEN sample order; filtered-out samples carry the missing code
    expected_fields = [
        ('-999', '-999', '-999'),
        ('-999', '-999', '-999'),
        ('Option number 4', "I don't know", '-999'),
        ('Option number 1', 'No response', 'Yes'),
        ('-999', '-999', '-999'),
    ]
    for row, (i0, i1, i2) in enumerate(expected_fields):
        assert pheno_file.loc[row, 'instance0'] == i0, pheno_file.loc[row, 'instance0']
        assert pheno_file.loc[row, 'instance1'] == i1
        assert pheno_file.loc[row, 'instance2'] == i2

    #
    # Ask covariates
    #
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'covariates',
        'missing_code': '-999',
    }, headers={'accept': 'text/bgenie'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,
                             dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (N_EXPECTED_SAMPLES, 2)

    expected_columns = ['field_name_34', 'field_name_47']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(x in expected_columns for x in pheno_file.columns)

    expected_covariates = [
        ('-999', '-999'),
        ('-999', '-999'),
        ('3', '5.20832'),
        ('-33', '41.55312'),
        ('-999', '-999'),
    ]
    for row, (c34, c47) in enumerate(expected_covariates):
        assert pheno_file.loc[row, 'field_name_34'] == c34
        assert pheno_file.loc[row, 'field_name_47'] == c47
def test_phenotype_query_yaml_specify_csv_format(self):
    """'text/csv' output contains only the filtered samples, indexed by 'eid'."""
    # Prepare
    self.setUp('pheno2sql/example10/example10_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example10/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    yaml_data = b"""
    samples_filters:
        - c47_0_0 > 0

    covariates:
        field_name_34: c34_0_0
        field_name_47: c47_0_0

    fields:
        instance0: c21_0_0
        instance1: c21_1_0
        instance2: c21_2_0
    """

    N_EXPECTED_SAMPLES = 2

    def run_section(section_name):
        # a fresh BytesIO is needed per request, since the previous one is consumed
        resp = self.app.post('/ukbrest/api/v1.0/query', data={
            'file': (io.BytesIO(yaml_data), 'data.yaml'),
            'section': section_name,
        }, headers={'accept': 'text/csv'})
        assert resp.status_code == 200, resp.status_code
        return pd.read_csv(io.StringIO(resp.data.decode('utf-8')), header=0,
                           index_col='eid', dtype=str, na_values='', keep_default_na=False)

    #
    # Ask fields
    #
    pheno_file = run_section('fields')

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (N_EXPECTED_SAMPLES, 3), pheno_file.shape

    expected_columns = ['instance0', 'instance1', 'instance2']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(col in expected_columns for col in pheno_file.columns)

    fields_expected = {
        1000040: ('Option number 4', "I don't know", 'NA'),
        1000010: ('Option number 1', 'No response', 'Yes'),
    }
    for eid, (i0, i1, i2) in fields_expected.items():
        assert pheno_file.loc[eid, 'instance0'] == i0
        assert pheno_file.loc[eid, 'instance1'] == i1
        assert pheno_file.loc[eid, 'instance2'] == i2

    #
    # Ask covariates
    #
    pheno_file = run_section('covariates')

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (N_EXPECTED_SAMPLES, 2)

    expected_columns = ['field_name_34', 'field_name_47']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(col in expected_columns for col in pheno_file.columns)

    covariates_expected = {
        1000040: ('3', '5.20832'),
        1000010: ('-33', '41.55312'),
    }
    for eid, (c34, c47) in covariates_expected.items():
        assert pheno_file.loc[eid, 'field_name_34'] == c34
        assert pheno_file.loc[eid, 'field_name_47'] == c47
def test_phenotype_query_yaml_specify_bgenie_format_missing_code_default(self):
    """When no missing_code is given, BGENIE output uses the default 'NA' for filtered-out samples."""
    # Prepare
    self.setUp('pheno2sql/example10/example10_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example10/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    yaml_data = b"""
    samples_filters:
        - c47_0_0 > 0

    covariates:
        field_name_34: c34_0_0
        field_name_47: c47_0_0

    fields:
        instance0: c21_0_0
        instance1: c21_1_0
        instance2: c21_2_0
    """

    # all 5 genotyped samples appear, even though only 2 pass the filter
    N_EXPECTED_SAMPLES = 5

    #
    # Ask fields
    #
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'fields',
    }, headers={'accept': 'text/bgenie'})

    # Validate
    assert response.status_code == 200, response.status_code

    # pd.read_csv instead of the deprecated pd.read_table alias (same parser;
    # consistent with the rest of this file)
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,
                             dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (N_EXPECTED_SAMPLES, 3), pheno_file.shape

    expected_columns = ['instance0', 'instance1', 'instance2']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(x in expected_columns for x in pheno_file.columns)

    # rows in BGEN sample order; filtered-out samples default to 'NA'
    expected_fields = [
        ('NA', 'NA', 'NA'),
        ('NA', 'NA', 'NA'),
        ('Option number 4', "I don't know", 'NA'),
        ('Option number 1', 'No response', 'Yes'),
        ('NA', 'NA', 'NA'),
    ]
    for row, (i0, i1, i2) in enumerate(expected_fields):
        assert pheno_file.loc[row, 'instance0'] == i0
        assert pheno_file.loc[row, 'instance1'] == i1
        assert pheno_file.loc[row, 'instance2'] == i2
def test_phenotype_query_yaml_specify_csv_format_missing_code_changed(self):
    """In CSV output, a custom missing_code replaces missing values for included samples."""
    # Prepare
    self.setUp('pheno2sql/example10/example10_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example10/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    yaml_data = b"""
    samples_filters:
        - c47_0_0 > 0

    covariates:
        field_name_34: c34_0_0
        field_name_47: c47_0_0

    fields:
        instance0: c21_0_0
        instance1: c21_1_0
        instance2: c21_2_0
    """

    N_EXPECTED_SAMPLES = 2

    #
    # Ask fields
    #
    post_data = {
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'fields',
        'missing_code': '-999',
    }
    response = self.app.post('/ukbrest/api/v1.0/query', data=post_data,
                             headers={'accept': 'text/csv'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,
                             index_col='eid', dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (N_EXPECTED_SAMPLES, 3), pheno_file.shape

    expected_columns = ['instance0', 'instance1', 'instance2']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(col in expected_columns for col in pheno_file.columns)

    # missing values for included samples use the custom code '-999'
    expected_rows = {
        1000040: ('Option number 4', "I don't know", '-999'),
        1000010: ('Option number 1', 'No response', 'Yes'),
    }
    for eid, (i0, i1, i2) in expected_rows.items():
        assert pheno_file.loc[eid, 'instance0'] == i0
        assert pheno_file.loc[eid, 'instance1'] == i1
        assert pheno_file.loc[eid, 'instance2'] == i2
def test_phenotype_query_yaml_disease_by_coding_first_bgenie(self):
    """case_control data defined by ICD coding is rendered in BGEN sample order as 0/1/NA."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=10)

    yaml_data = b"""
    samples_filters:
        - c34_0_0 >= -5

    data:
        disease0:
            case_control:
                84:
                    coding: [N308]
    """

    N_EXPECTED_SAMPLES = 6

    #
    # Ask data
    #
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/bgenie'})

    # Validate
    assert response.status_code == 200, response.status_code

    # pd.read_csv instead of the deprecated pd.read_table alias (same parser;
    # consistent with the rest of this file)
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,
                             dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shape

    expected_columns = ['disease0']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(x in expected_columns for x in pheno_file.columns)

    # rows in BGEN sample order: 1000050, 1000030, 1000040, 1000010, 1000020, 1000070
    expected = ['0', 'NA', '1', 'NA', '1', '0']
    for row, value in enumerate(expected):
        assert pheno_file.loc[row, 'disease0'] == value, (row, pheno_file.loc[row, 'disease0'])

    # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)
def test_phenotype_query_yaml_disease_by_coding_second_bgenie(self):
    """case_control queried by a different coding (E103) still follows BGEN sample order."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=20)

    yaml_data = b"""
    samples_filters:
        - c34_0_0 >= -5

    data:
        disease0:
            case_control:
                84:
                    coding: [E103]
    """

    N_EXPECTED_SAMPLES = 6

    #
    # Ask data
    #
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/bgenie'})

    # Validate
    assert response.status_code == 200, response.status_code

    # pd.read_csv instead of the deprecated pd.read_table alias (same parser;
    # consistent with the rest of this file)
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,
                             dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shape

    expected_columns = ['disease0']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(x in expected_columns for x in pheno_file.columns)

    # rows in BGEN sample order: 1000050, 1000030, 1000040, 1000010, 1000020, 1000070
    expected = ['1', 'NA', '1', 'NA', '1', '0']
    for row, value in enumerate(expected):
        assert pheno_file.loc[row, 'disease0'] == value, (row, pheno_file.loc[row, 'disease0'])

    # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)
def test_phenotype_query_yaml_disease_by_coding_different_filter_bgenie(self):
    """A date-typed samples_filters condition changes which samples are cases/controls."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=20)

    yaml_data = b"""
    samples_filters:
        - c31_0_0 > '2001-01-01'

    data:
        disease0:
            case_control:
                84:
                    coding: [E103]
    """

    N_EXPECTED_SAMPLES = 6

    #
    # Ask data
    #
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/bgenie'})

    # Validate
    assert response.status_code == 200, response.status_code

    # pd.read_csv instead of the deprecated pd.read_table alias (same parser;
    # consistent with the rest of this file)
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,
                             dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shape

    expected_columns = ['disease0']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(x in expected_columns for x in pheno_file.columns)

    # rows in BGEN sample order: 1000050, 1000030, 1000040, 1000010, 1000020, 1000070
    expected = ['NA', 'NA', 'NA', '1', '1', '0']
    for row, value in enumerate(expected):
        assert pheno_file.loc[row, 'disease0'] == value, (row, pheno_file.loc[row, 'disease0'])

    # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)
def test_phenotype_query_yaml_disease_by_coding_filter_includes_nulls_bgenie(self):
    """An 'is null or ...' filter also keeps samples with a NULL value in the filter column."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=20)

    yaml_data = b"""
    samples_filters:
        - c31_0_0 is null or c31_0_0 > '2001-01-01'

    data:
        disease0:
            case_control:
                84:
                    coding: [E103]
    """

    N_EXPECTED_SAMPLES = 6

    #
    # Ask data
    #
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/bgenie'})

    # Validate
    assert response.status_code == 200, response.status_code

    # pd.read_csv instead of the deprecated pd.read_table alias (same parser;
    # consistent with the rest of this file)
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,
                             dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shape

    expected_columns = ['disease0']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(x in expected_columns for x in pheno_file.columns)

    # rows in BGEN sample order: 1000050, 1000030, 1000040, 1000010, 1000020, 1000070
    expected = ['1', 'NA', 'NA', '1', '1', '0']
    for row, value in enumerate(expected):
        assert pheno_file.loc[row, 'disease0'] == value, (row, pheno_file.loc[row, 'disease0'])

    # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)
def test_phenotype_query_yaml_disease_by_coding_multiple_filters_using_like_bgenie(self):
    """Several filters (null-safe date check plus a NOT LIKE pattern) are ANDed together."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=20)

    # '%%' stays doubled: the filter text goes through a %-style formatting step
    yaml_data = b"""
    samples_filters:
        - c31_0_0 is null or c31_0_0 > '2001-01-01'
        - c21_2_0 not like '%%obab%%'

    data:
        disease0:
            case_control:
                84:
                    coding: [E103]
    """

    N_EXPECTED_SAMPLES = 6

    #
    # Ask data
    #
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/bgenie'})

    # Validate
    assert response.status_code == 200, response.status_code

    # pd.read_csv instead of the deprecated pd.read_table alias (same parser;
    # consistent with the rest of this file)
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,
                             dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shape

    expected_columns = ['disease0']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(x in expected_columns for x in pheno_file.columns)

    # rows in BGEN sample order: 1000050, 1000030, 1000040, 1000010, 1000020, 1000070
    expected = ['NA', 'NA', 'NA', '1', '1', '0']
    for row, value in enumerate(expected):
        assert pheno_file.loc[row, 'disease0'] == value, (row, pheno_file.loc[row, 'disease0'])

    # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)
def test_phenotype_query_yaml_disease_by_coding_fields_in_filters_are_in_different_tables_bgenie(self):
    """Filters over fields stored in different DB tables are combined correctly
    (n_columns_per_table=2 forces the filter columns into separate tables)."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    # '%%' stays doubled: the filter text goes through a %-style formatting step
    yaml_data = b"""
    samples_filters:
        - c21_1_0 not like '%%respo%%'
        - c47_0_0 > 0

    data:
        disease0:
            case_control:
                84:
                    coding: [Q750]
    """

    N_EXPECTED_SAMPLES = 6

    #
    # Ask data
    #
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/bgenie'})

    # Validate
    assert response.status_code == 200, response.status_code

    # pd.read_csv instead of the deprecated pd.read_table alias (same parser;
    # consistent with the rest of this file)
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,
                             dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shape

    expected_columns = ['disease0']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(x in expected_columns for x in pheno_file.columns)

    # rows in BGEN sample order: 1000050, 1000030, 1000040, 1000010, 1000020, 1000070
    expected = ['NA', 'NA', '1', 'NA', 'NA', '0']
    for row, value in enumerate(expected):
        assert pheno_file.loc[row, 'disease0'] == value, (row, pheno_file.loc[row, 'disease0'])

    # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)
def test_phenotype_query_yaml_disease_by_coding_different_data_field_bgenie(self):
    """case_control over another data field (85) with SQL-function and subquery filters."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    yaml_data = b"""
    samples_filters:
        - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')
        - eid not in (select eid from events where field_id = 84 and event in ('Q750'))

    data:
        disease0:
            case_control:
                85:
                    coding: [1114]
    """

    N_EXPECTED_SAMPLES = 6

    #
    # Ask data
    #
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/bgenie'})

    # Validate
    assert response.status_code == 200, response.status_code

    # pd.read_csv instead of the deprecated pd.read_table alias (same parser;
    # consistent with the rest of this file)
    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), sep=' ', header=0,
                             dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (N_EXPECTED_SAMPLES, 1), pheno_file.shape

    expected_columns = ['disease0']
    assert len(pheno_file.columns) == len(expected_columns)
    assert all(x in expected_columns for x in pheno_file.columns)

    # rows in BGEN sample order: 1000050, 1000030, 1000040, 1000010, 1000020, 1000070
    expected = ['1', 'NA', 'NA', 'NA', '1', '0']
    for row, value in enumerate(expected):
        assert pheno_file.loc[row, 'disease0'] == value, (row, pheno_file.loc[row, 'disease0'])

    # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)
def test_phenotype_query_yaml_disease_by_coding_different_disease_name_bgenie(self):
    """Same case/control query as the previous test, but under a user-chosen column name."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    yaml_data = b"""
    samples_filters:
      - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')
      - eid not in (select eid from events where field_id = 84 and event in ('Q750'))
    data:
      another_disease_name:
        case_control:
          85:
            coding: [1114]
    """

    # Ask fields
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/bgenie'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ',
                               header=0, dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (6, 1), pheno_file.shape

    expected_columns = ['another_disease_name']
    assert len(pheno_file.columns) == len(expected_columns)
    assert set(pheno_file.columns) <= set(expected_columns)

    # Rows follow the BGEN sample-file order: 1000050, 1000030, 1000040, 1000010, 1000020, 1000070.
    for row, expected in enumerate(['1', 'NA', 'NA', 'NA', '1', '0']):
        assert pheno_file.loc[row, 'another_disease_name'] == expected
    # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)
def test_phenotype_query_yaml_disease_by_coding_coding_not_list_bgenie(self):
    """`coding` given as a scalar (1114) instead of a list must behave like a one-item list."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    yaml_data = b"""
    samples_filters:
      - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')
      - eid not in (select eid from events where field_id = 84 and event in ('Q750'))
    data:
      another_disease_name:
        case_control:
          85:
            coding: 1114
    """

    # Ask fields
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/bgenie'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ',
                               header=0, dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (6, 1), pheno_file.shape

    expected_columns = ['another_disease_name']
    assert len(pheno_file.columns) == len(expected_columns)
    assert set(pheno_file.columns) <= set(expected_columns)

    # Rows follow the BGEN sample-file order: 1000050, 1000030, 1000040, 1000010, 1000020, 1000070.
    for row, expected in enumerate(['1', 'NA', 'NA', 'NA', '1', '0']):
        assert pheno_file.loc[row, 'another_disease_name'] == expected
    # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)
def test_phenotype_query_yaml_disease_by_coding_coding_not_list_csv(self):
    """Scalar `coding` with CSV output: only samples passing the filters appear (4 rows)."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    yaml_data = b"""
    samples_filters:
      - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')
      - eid not in (select eid from events where field_id = 84 and event in ('Q750'))
    data:
      another_disease_name:
        case_control:
          85:
            coding: 1114
    """

    # Ask fields
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/csv'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,
                             index_col='eid', dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (4, 1), pheno_file.shape

    expected_columns = ['another_disease_name']
    assert len(pheno_file.columns) == len(expected_columns)
    assert set(pheno_file.columns) <= set(expected_columns)

    # 1000030, 1000040 and 1000010 are excluded by the samples_filters.
    for eid, expected in [(1000050, '1'), (1000020, '1'), (1000070, '0'), (1000060, '1')]:
        assert pheno_file.loc[eid, 'another_disease_name'] == expected
def test_phenotype_query_yaml_disease_by_coding_many_codings_bgenie(self):
    """Several codings for one data-field: a sample is a case if it matches any of them."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    yaml_data = b"""
    samples_filters:
      - lower(c21_2_0) in ('yes', 'no', 'maybe')
    data:
      another_disease_name:
        case_control:
          85:
            coding: [1114, 1701]
    """

    # Ask fields
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/bgenie'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ',
                               header=0, dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (6, 1), pheno_file.shape

    expected_columns = ['another_disease_name']
    assert len(pheno_file.columns) == len(expected_columns)
    assert set(pheno_file.columns) <= set(expected_columns)

    # Rows follow the BGEN sample-file order: 1000050, 1000030, 1000040, 1000010, 1000020, 1000070.
    for row, expected in enumerate(['NA', '0', 'NA', '1', '1', '0']):
        assert pheno_file.loc[row, 'another_disease_name'] == expected
    # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)
def test_phenotype_query_yaml_disease_by_coding_many_codings_csv(self):
    """Several codings with CSV output; filtered samples are simply absent from the file."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    yaml_data = b"""
    samples_filters:
      - lower(c21_2_0) in ('yes', 'no', 'maybe')
    data:
      another_disease_name:
        case_control:
          85:
            coding: [1114, 1701]
    """

    # Ask fields
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/csv'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,
                             index_col='eid', dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    # text/csv does not fetch all samples in 'samples' table by default
    assert pheno_file.shape == (5, 1), pheno_file.shape

    expected_columns = ['another_disease_name']
    assert len(pheno_file.columns) == len(expected_columns)
    assert set(pheno_file.columns) <= set(expected_columns)

    # 1000050 and 1000040 are excluded by the samples_filters.
    for eid, expected in [(1000030, '0'), (1000010, '1'), (1000020, '1'),
                          (1000070, '0'), (1000060, '1')]:
        assert pheno_file.loc[eid, 'another_disease_name'] == expected
def test_phenotype_query_yaml_disease_by_coding_many_data_fields_bgenie(self):
    """One case/control column combining codings from two data-fields (85 and 84)."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    # The filters are not strictly necessary here, but they are kept to avoid a joining
    # problem that is exercised in another unit test.
    yaml_data = b"""
    samples_filters:
      - c21_2_0 is null or lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')
    data:
      another_disease_name:
        case_control:
          85:
            coding: [978, 1701]
          84:
            coding: [Z876, Z678]
    """

    # Ask fields
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/bgenie'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ',
                               header=0, dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (6, 1), pheno_file.shape

    expected_columns = ['another_disease_name']
    assert len(pheno_file.columns) == len(expected_columns)
    assert set(pheno_file.columns) <= set(expected_columns)

    # Rows follow the BGEN sample-file order: 1000050, 1000030, 1000040, 1000010, 1000020, 1000070.
    for row, expected in enumerate(['1', '1', '0', '1', '0', '1']):
        assert pheno_file.loc[row, 'another_disease_name'] == expected
    # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)
def test_phenotype_query_yaml_disease_by_coding_many_data_fields_csv(self):
    """Two data-fields combined into one case/control column, CSV output (all 7 samples)."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    # The filters are not strictly necessary here, but they are kept to avoid a joining
    # problem that is exercised in another unit test.
    yaml_data = b"""
    samples_filters:
      - c21_2_0 is null or lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')
    data:
      another_disease_name:
        case_control:
          85:
            coding: [978, 1701]
          84:
            coding: [Z876, Z678]
    """

    # Ask fields
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/csv'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,
                             index_col='eid', dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (7, 1), pheno_file.shape

    expected_columns = ['another_disease_name']
    assert len(pheno_file.columns) == len(expected_columns)
    assert set(pheno_file.columns) <= set(expected_columns)

    for eid, expected in [(1000050, '1'), (1000030, '1'), (1000040, '0'), (1000010, '1'),
                          (1000020, '0'), (1000070, '1'), (1000060, '1')]:
        assert pheno_file.loc[eid, 'another_disease_name'] == expected
def test_phenotype_query_yaml_disease_filters_not_referencing_table_bgenie(self):
    """This test forces a global table to obtain eid from for controls.

    The single filter (`1 = 1`) references no column, so the server must pick a table
    to enumerate all eids for the control group.
    """
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    yaml_data = b"""
    samples_filters:
      - 1 = 1
    data:
      another_disease_name:
        case_control:
          85:
            coding: [978, 1701]
          84:
            coding: [Z876, Z678]
    """

    # Ask fields
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/bgenie'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ',
                               header=0, dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (6, 1), pheno_file.shape

    expected_columns = ['another_disease_name']
    assert len(pheno_file.columns) == len(expected_columns)
    assert set(pheno_file.columns) <= set(expected_columns)

    # Rows follow the BGEN sample-file order: 1000050, 1000030, 1000040, 1000010, 1000020, 1000070.
    for row, expected in enumerate(['1', '1', '0', '1', '0', '1']):
        assert pheno_file.loc[row, 'another_disease_name'] == expected
    # 1000060 is "not genotyped" (it is not listed in BGEN's samples file)
def test_phenotype_query_yaml_disease_filters_not_referencing_table_csv(self):
    """This test forces a global table to obtain eid from for controls (CSV output)."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    yaml_data = b"""
    samples_filters:
      - 1 = 1
    data:
      another_disease_name:
        case_control:
          85:
            coding: [978, 1701]
          84:
            coding: [Z876, Z678]
    """

    # Ask fields
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/csv'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,
                             index_col='eid', dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (7, 1), pheno_file.shape

    expected_columns = ['another_disease_name']
    assert len(pheno_file.columns) == len(expected_columns)
    assert set(pheno_file.columns) <= set(expected_columns)

    for eid, expected in [(1000050, '1'), (1000030, '1'), (1000040, '0'), (1000010, '1'),
                          (1000020, '0'), (1000070, '1'), (1000060, '1')]:
        assert pheno_file.loc[eid, 'another_disease_name'] == expected
def test_phenotype_query_yaml_disease_no_filters_csv(self):
    """This test forces a global table to obtain eid from for controls.

    No samples_filters section at all: every known sample (7) must appear.
    """
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    yaml_data = b"""
    data:
      another_disease_name:
        case_control:
          85:
            coding: [978, 1701]
          84:
            coding: [Z876, Z678]
    """

    # Ask fields
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/csv'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,
                             index_col='eid', dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (7, 1), pheno_file.shape

    expected_columns = ['another_disease_name']
    assert len(pheno_file.columns) == len(expected_columns)
    assert set(pheno_file.columns) <= set(expected_columns)

    for eid, expected in [(1000050, '1'), (1000030, '1'), (1000040, '0'), (1000010, '1'),
                          (1000020, '0'), (1000070, '1'), (1000060, '1')]:
        assert pheno_file.loc[eid, 'another_disease_name'] == expected
def test_phenotype_query_yaml_disease_many_columns_bgenie(self):
    """Three case/control columns in one request, BGENIE output."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    # The filters are not strictly necessary here, but they are kept to avoid a joining
    # problem that is exercised in another unit test.
    yaml_data = b"""
    samples_filters:
      - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')
      - c34_0_0 > -10
    data:
      another_disease_name:
        case_control:
          85:
            coding: [978, 1701]
          84:
            coding: [Z876, Z678]
      second_column:
        case_control:
          85:
            coding: 1114
      third_column:
        case_control:
          84:
            coding: [E103, Z678]
    """

    # Ask fields
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/bgenie'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_table(io.StringIO(response.data.decode('utf-8')), sep=' ',
                               header=0, dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (6, 3), pheno_file.shape

    expected_columns = ['another_disease_name', 'second_column', 'third_column']
    assert len(pheno_file.columns) == len(expected_columns)
    assert set(pheno_file.columns) <= set(expected_columns)

    # Rows follow the BGEN sample-file order: 1000050, 1000030, 1000040, 1000010, 1000020, 1000070.
    # 1000060 is "not genotyped" (it is not listed in BGEN's samples file).
    expected_values = {
        'another_disease_name': ['1', '1', 'NA', 'NA', '0', '1'],
        'second_column': ['1', '0', 'NA', 'NA', '1', '0'],
        'third_column': ['1', '0', 'NA', 'NA', '1', '1'],
    }
    for column, per_row in expected_values.items():
        for row, expected in enumerate(per_row):
            assert pheno_file.loc[row, column] == expected
def test_phenotype_query_yaml_disease_many_columns_csv(self):
    """Three case/control columns in one request, CSV output (4 filtered samples)."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    # The filters are not strictly necessary here, but they are kept to avoid a joining
    # problem that is exercised in another unit test.
    yaml_data = b"""
    samples_filters:
      - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')
      - c34_0_0 > -10
    data:
      another_disease_name:
        case_control:
          85:
            coding: [978, 1701]
          84:
            coding: [Z876, Z678]
      second_column:
        case_control:
          85:
            coding: 1114
      third_column:
        case_control:
          84:
            coding: [E103, Z678]
    """

    # Ask fields
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/csv'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,
                             index_col='eid', dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (4, 3), pheno_file.shape

    expected_columns = ['another_disease_name', 'second_column', 'third_column']
    assert len(pheno_file.columns) == len(expected_columns)
    assert set(pheno_file.columns) <= set(expected_columns)

    expected_values = {
        'another_disease_name': {1000050: '1', 1000030: '1', 1000020: '0', 1000070: '1'},
        'second_column': {1000050: '1', 1000030: '0', 1000020: '1', 1000070: '0'},
        'third_column': {1000050: '1', 1000030: '0', 1000020: '1', 1000070: '1'},
    }
    for column, by_eid in expected_values.items():
        for eid, expected in by_eid.items():
            assert pheno_file.loc[eid, column] == expected
def test_phenotype_query_yaml_disease_sql_alone_csv(self):
    """Binary column defined via raw `sql` category conditions instead of case_control."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    # The filters are not strictly necessary here, but they are kept to avoid a joining
    # problem that is exercised in another unit test.
    yaml_data = b"""
    samples_filters:
      - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')
      - c34_0_0 is null or c34_0_0 > -10
    data:
      mydisease:
        sql:
          1: c46_0_0 > 0
          0: c46_0_0 < 0
    """

    # Ask fields
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/csv'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,
                             index_col='eid', dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (4, 1), pheno_file.shape

    expected_columns = ['mydisease']
    assert len(pheno_file.columns) == len(expected_columns)
    assert set(pheno_file.columns) <= set(expected_columns)

    for eid, expected in [(1000050, '1'), (1000030, '0'), (1000020, '0'), (1000070, '1')]:
        assert pheno_file.loc[eid, 'mydisease'] == expected
@unittest.skip("We should check if there are repeated eid values, like in this case, due to bad specification of conditions for categories")
def test_phenotype_query_yaml_disease_sql_conflicting_duplicated_samples_csv(self):
    """Overlapping sql categories (c46_0_0 >= 1 AND c46_0_0 <= 1 both match 1) should be rejected."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    # The filters are not strictly necessary here, but they are kept to avoid a joining
    # problem that is exercised in another unit test.
    yaml_data = b"""
    samples_filters:
      - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')
      - c34_0_0 is null or c34_0_0 > -10
    data:
      mydisease:
        sql:
          1: c46_0_0 >= 1
          0: c46_0_0 <= 1
    """

    # Ask fields
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/csv'})

    # Validate: a bad specification is expected to yield an HTTP 400
    assert response.status_code == 400, response.status_code
def test_phenotype_query_yaml_disease_sql_with_many_columns_csv(self):
    """Emulates case_control with raw `sql` for one column, alongside two case_control columns."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    # Here case_control is emulated with sql
    yaml_data = b"""
    samples_filters:
      - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')
      - c34_0_0 > -10
    data:
      another_disease_name:
        sql:
          1: >
            eid in (select eid from events where field_id = 85 and event in ('978', '1701'))
            OR
            eid in (select eid from events where field_id = 84 and event in ('Z876', 'Z678'))
          0: >
            eid not in (
            (select eid from events where field_id = 85 and event in ('978', '1701'))
            union
            (select eid from events where field_id = 84 and event in ('Z876', 'Z678'))
            )
      second_column:
        case_control:
          85:
            coding: 1114
      third_column:
        case_control:
          84:
            coding: [E103, Z678]
    """

    # Ask fields
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/csv'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,
                             index_col='eid', dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (4, 3), pheno_file.shape

    expected_columns = ['another_disease_name', 'second_column', 'third_column']
    assert len(pheno_file.columns) == len(expected_columns)
    assert set(pheno_file.columns) <= set(expected_columns)

    expected_values = {
        'another_disease_name': {1000050: '1', 1000030: '1', 1000020: '0', 1000070: '1'},
        'second_column': {1000050: '1', 1000030: '0', 1000020: '1', 1000070: '0'},
        'third_column': {1000050: '1', 1000030: '0', 1000020: '1', 1000070: '1'},
    }
    for column, by_eid in expected_values.items():
        for eid, expected in by_eid.items():
            assert pheno_file.loc[eid, column] == expected
def test_phenotype_query_yaml_disease_sql_no_filters_csv(self):
    """This test forces a global table to obtain eid from for controls (sql-defined column)."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    yaml_data = b"""
    data:
      another_disease_name:
        sql:
          1: >
            eid in (select eid from events where field_id = 85 and event in ('978', '1701'))
            OR
            eid in (select eid from events where field_id = 84 and event in ('Z876', 'Z678'))
          0: >
            eid not in (
            (select eid from events where field_id = 85 and event in ('978', '1701'))
            union
            (select eid from events where field_id = 84 and event in ('Z876', 'Z678'))
            )
    """

    # Ask fields
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/csv'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,
                             index_col='eid', dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (7, 1), pheno_file.shape

    expected_columns = ['another_disease_name']
    assert len(pheno_file.columns) == len(expected_columns)
    assert set(pheno_file.columns) <= set(expected_columns)

    for eid, expected in [(1000050, '1'), (1000030, '1'), (1000040, '0'), (1000010, '1'),
                          (1000020, '0'), (1000070, '1'), (1000060, '1')]:
        assert pheno_file.loc[eid, 'another_disease_name'] == expected
def test_phenotype_query_yaml_samples_filters_condition_breaking_for_data(self):
    """A filter with chained `or` conditions must be parenthesized correctly by the server."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    # There is an `or` condition here that could break everything if it is not
    # surrounded by parentheses.
    yaml_data = b"""
    samples_filters:
      - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')
      - c34_0_0 is null or c34_0_0 > -10 or c34_0_0 > -11
    data:
      mydisease:
        sql:
          1: c46_0_0 > 0
          0: c46_0_0 < 0
    """

    # Ask fields
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/csv'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,
                             index_col='eid', dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (4, 1), pheno_file.shape

    expected_columns = ['mydisease']
    assert len(pheno_file.columns) == len(expected_columns)
    assert set(pheno_file.columns) <= set(expected_columns)

    for eid, expected in [(1000050, '1'), (1000030, '0'), (1000020, '0'), (1000070, '1')]:
        assert pheno_file.loc[eid, 'mydisease'] == expected
def test_phenotype_query_yaml_samples_including_numerical(self):
    """A plain column expression (c47_0_0) in the data section returns the raw numerical values."""
    # Prepare
    self.setUp('pheno2sql/example13/example13_diseases.csv',
               bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
               sql_chunksize=2, n_columns_per_table=2)

    yaml_data = b"""
    samples_filters:
      - lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')
      - c34_0_0 is null or c34_0_0 > -10 or c34_0_0 > -11
    data:
      continuous_data: c47_0_0
    """

    expected_columns = ['continuous_data']

    # Ask fields
    response = self.app.post('/ukbrest/api/v1.0/query', data={
        'file': (io.BytesIO(yaml_data), 'data.yaml'),
        'section': 'data',
    }, headers={'accept': 'text/csv'})

    # Validate
    assert response.status_code == 200, response.status_code

    pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,
                             index_col='eid', dtype=str, na_values='', keep_default_na=False)

    assert pheno_file is not None
    assert not pheno_file.empty
    assert pheno_file.shape == (5, len(expected_columns)), pheno_file.shape
    assert len(pheno_file.columns) == len(expected_columns)
    assert set(pheno_file.columns) <= set(expected_columns)

    for eid, expected in [(1000050, 'NA'), (1000030, '-35.31471'), (1000020, '-10.51461'),
                          (1000060, '-0.5864'), (1000070, '3.5584')]:
        assert pheno_file.loc[eid, 'continuous_data'] == expected
def test_phenotype_query_yaml_samples_including_numerical_integer(self):
# Prepare
self.setUp('pheno2sql/example13/example13_diseases.csv',
bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
sql_chunksize=2, n_columns_per_table=2)
# in this case there is an or condition that could break all if it is not surrounding by ()
yaml_data = b"""
samples_filters:
- lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')
- c34_0_0 is null or c34_0_0 > -10 or c34_0_0 > -11
data:
integer_data:
(case when c46_0_0 < -5 then NULL else c46_0_0 end)
"""
N_EXPECTED_SAMPLES = 5
expected_columns = ['integer_data']
#
# Ask fields
#
response = self.app.post('/ukbrest/api/v1.0/query', data=
{
'file': (io.BytesIO(yaml_data), 'data.yaml'),
'section': 'data',
}, headers={'accept': 'text/csv'})
# Validate
assert response.status_code == 200, response.status_code
pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,
index_col='eid', dtype=str, na_values='', keep_default_na=False)
assert pheno_file is not None
assert not pheno_file.empty
assert pheno_file.shape == (N_EXPECTED_SAMPLES, len(expected_columns)), pheno_file.shape
assert len(pheno_file.columns) == len(expected_columns)
assert all(x in expected_columns for x in pheno_file.columns)
assert pheno_file.loc[1000050, 'integer_data'] == '1'
assert pheno_file.loc[1000030, 'integer_data'] == 'NA'
assert pheno_file.loc[1000020, 'integer_data'] == '-2'
assert pheno_file.loc[1000060, 'integer_data'] == 'NA'
assert pheno_file.loc[1000070, 'integer_data'] == '2'
def test_phenotype_query_yaml_samples_including_categorical_and_numerical(self):
# Prepare
self.setUp('pheno2sql/example13/example13_diseases.csv',
bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
sql_chunksize=2, n_columns_per_table=2)
# in this case there is an or condition that could break all if it is not surrounding by ()
yaml_data = b"""
samples_filters:
- lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')
- c34_0_0 is null or c34_0_0 > -10 or c34_0_0 > -11
data:
mydisease:
sql:
1: c46_0_0 > 0
0: c46_0_0 < 0
third_column:
case_control:
84:
coding: [E103, Z678]
continuous_data:
c47_0_0
integer_data: (case when c46_0_0 < 0 then NULL else c46_0_0 end)
"""
N_EXPECTED_SAMPLES = 5
expected_columns = ['mydisease', 'third_column', 'continuous_data', 'integer_data']
#
# Ask fields
#
response = self.app.post('/ukbrest/api/v1.0/query', data=
{
'file': (io.BytesIO(yaml_data), 'data.yaml'),
'section': 'data',
}, headers={'accept': 'text/csv'})
# Validate
assert response.status_code == 200, response.status_code
pheno_file = pd.read_csv(io.StringIO(response.data.decode('utf-8')), header=0,
index_col='eid', dtype=str, na_values='', keep_default_na=False)
assert pheno_file is not None
assert not pheno_file.empty
assert pheno_file.shape == (N_EXPECTED_SAMPLES, len(expected_columns)), pheno_file.shape
assert len(pheno_file.columns) == len(expected_columns)
assert all(x in expected_columns for x in pheno_file.columns)
assert pheno_file.loc[1000050, 'mydisease'] == '1'
assert pheno_file.loc[1000030, 'mydisease'] == '0'
assert pheno_file.loc[1000020, 'mydisease'] == '0'
assert pheno_file.loc[1000060, 'mydisease'] == 'NA'
assert pheno_file.loc[1000070, 'mydisease'] == '1'
assert pheno_file.loc[1000050, 'third_column'] == '1'
assert pheno_file.loc[1000030, 'third_column'] == '0'
assert pheno_file.loc[1000020, 'third_column'] == '1'
assert pheno_file.loc[1000060, 'third_column'] == '0'
assert pheno_file.loc[1000070, 'third_column'] == '1'
assert pheno_file.loc[1000050, 'continuous_data'] == 'NA'
assert pheno_file.loc[1000030, 'continuous_data'] == '-35.31471'
assert pheno_file.loc[1000020, 'continuous_data'] == '-10.51461'
assert pheno_file.loc[1000060, 'continuous_data'] == '-0.5864'
assert pheno_file.loc[1000070, 'continuous_data'] == '3.5584'
assert pheno_file.loc[1000050, 'integer_data'] == '1'
assert pheno_file.loc[1000030, 'integer_data'] == 'NA'
assert pheno_file.loc[1000020, 'integer_data'] == 'NA'
assert pheno_file.loc[1000060, 'integer_data'] == 'NA'
assert pheno_file.loc[1000070, 'integer_data'] == '2'
def test_phenotype_query_yaml_multiple_files_in_one_yaml(self):
# Prepare
self.setUp('pheno2sql/example13/example13_diseases.csv',
bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
sql_chunksize=2, n_columns_per_table=2)
# in this case there is an or condition that could break all if it is not surrounding by ()
yaml_data = b"""
samples_filters:
- lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')
- c34_0_0 is null or c34_0_0 > -10 or c34_0_0 > -11
covariates:
field_name_34: c34_0_0
field_name_47: c47_0_0
my_first_dataset:
mydisease:
sql:
1: c46_0_0 > 0
0: c46_0_0 < 0
continuous_data:
c47_0_0
my_second_dataset:
third_column:
case_control:
84:
coding: [E103, Z678]
integer_data: (case when c46_0_0 < 0 then NULL else c46_0_0 end)
"""
# covariates
data_fetched =\
self._make_yaml_request(
yaml_data, 'covariates', 5,
['field_name_34', 'field_name_47']
)
assert data_fetched.loc[1000020, 'field_name_34'] == '34'
assert data_fetched.loc[1000030, 'field_name_34'] == '-6'
assert data_fetched.loc[1000050, 'field_name_34'] == '-4'
assert data_fetched.loc[1000060, 'field_name_34'] == 'NA'
assert data_fetched.loc[1000070, 'field_name_34'] == '-5'
# my_first_dataset
data_fetched =\
self._make_yaml_request(
yaml_data, 'my_first_dataset', 5,
['mydisease', 'continuous_data']
)
assert data_fetched.loc[1000050, 'mydisease'] == '1'
assert data_fetched.loc[1000030, 'mydisease'] == '0'
assert data_fetched.loc[1000020, 'mydisease'] == '0'
assert data_fetched.loc[1000060, 'mydisease'] == 'NA'
assert data_fetched.loc[1000070, 'mydisease'] == '1'
assert data_fetched.loc[1000050, 'continuous_data'] == 'NA'
assert data_fetched.loc[1000030, 'continuous_data'] == '-35.31471'
assert data_fetched.loc[1000020, 'continuous_data'] == '-10.51461'
assert data_fetched.loc[1000060, 'continuous_data'] == '-0.5864'
assert data_fetched.loc[1000070, 'continuous_data'] == '3.5584'
# my_second_dataset
data_fetched =\
self._make_yaml_request(
yaml_data, 'my_second_dataset', 5,
['third_column', 'integer_data']
)
assert data_fetched.loc[1000050, 'third_column'] == '1'
assert data_fetched.loc[1000030, 'third_column'] == '0'
assert data_fetched.loc[1000020, 'third_column'] == '1'
assert data_fetched.loc[1000060, 'third_column'] == '0'
assert data_fetched.loc[1000070, 'third_column'] == '1'
assert data_fetched.loc[1000050, 'integer_data'] == '1'
assert data_fetched.loc[1000030, 'integer_data'] == 'NA'
assert data_fetched.loc[1000020, 'integer_data'] == 'NA'
assert data_fetched.loc[1000060, 'integer_data'] == 'NA'
assert data_fetched.loc[1000070, 'integer_data'] == '2'
def test_phenotype_query_yaml_simple_query(self):
# Prepare
self.setUp('pheno2sql/example13/example13_diseases.csv',
bgen_sample_file=get_repository_path('pheno2sql/example13/impv2.sample'),
sql_chunksize=2, n_columns_per_table=2)
# this type of query, with 'simple_' at the begining of the data section, makes direct queries to the
# database
yaml_data = b"""
samples_filters:
- lower(c21_2_0) in ('yes', 'no', 'maybe', 'probably')
- c34_0_0 is null or c34_0_0 > -10 or c34_0_0 > -11
simple_covariates:
field_name_34: c34_0_0
field_name_47: c47_0_0
"""
# simple_covariates
data_fetched =\
self._make_yaml_request(
yaml_data, 'simple_covariates', 5,
['field_name_34', 'field_name_47']
)
assert data_fetched.loc[1000020, 'field_name_34'] == '34'
assert data_fetched.loc[1000030, 'field_name_34'] == '-6'
assert data_fetched.loc[1000050, 'field_name_34'] == '-4'
assert data_fetched.loc[1000060, 'field_name_34'] == 'NA'
assert data_fetched.loc[1000070, 'field_name_34'] == '-5'
| gpl-3.0 |
tomsilver/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/legend.py | 69 | 30705 | """
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
Return value is a sequence of text, line instances that make
up the legend
"""
from __future__ import division
import warnings
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist
from matplotlib.cbook import is_string_like, iterable, silent_list, safezip
from matplotlib.font_manager import FontProperties
from matplotlib.lines import Line2D
from matplotlib.patches import Patch, Rectangle, Shadow, FancyBboxPatch
from matplotlib.collections import LineCollection, RegularPolyCollection
from matplotlib.transforms import Bbox
from matplotlib.offsetbox import HPacker, VPacker, PackerBase, TextArea, DrawingArea
class Legend(Artist):
"""
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are::
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
loc can be a tuple of the noramilzed coordinate values with
respect its parent.
Return value is a sequence of text, line instances that make
up the legend
"""
codes = {'best' : 0, # only implemented for axis legends
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
}
zorder = 5
def __str__(self):
return "Legend"
def __init__(self, parent, handles, labels,
loc = None,
numpoints = None, # the number of points in the legend line
markerscale = None, # the relative size of legend markers vs. original
scatterpoints = 3, # TODO: may be an rcParam
scatteryoffsets=None,
prop = None, # properties for the legend texts
# the following dimensions are in axes coords
pad = None, # deprecated; use borderpad
labelsep = None, # deprecated; use labelspacing
handlelen = None, # deprecated; use handlelength
handletextsep = None, # deprecated; use handletextpad
axespad = None, # deprecated; use borderaxespad
# spacing & pad defined as a fractionof the font-size
borderpad = None, # the whitespace inside the legend border
labelspacing=None, #the vertical space between the legend entries
handlelength=None, # the length of the legend handles
handletextpad=None, # the pad between the legend handle and text
borderaxespad=None, # the pad between the axes and legend border
columnspacing=None, # spacing between columns
ncol=1, # number of columns
mode=None, # mode for horizontal distribution of columns. None, "expand"
fancybox=None, # True use a fancy box, false use a rounded box, none use rc
shadow = None,
):
"""
- *parent* : the artist that contains the legend
- *handles* : a list of artists (lines, patches) to add to the legend
- *labels* : a list of strings to label the legend
Optional keyword arguments:
================ ==================================================================
Keyword Description
================ ==================================================================
loc a location code or a tuple of coordinates
numpoints the number of points in the legend line
prop the font property
markerscale the relative size of legend markers vs. original
fancybox if True, draw a frame with a round fancybox. If None, use rc
shadow if True, draw a shadow behind legend
scatteryoffsets a list of yoffsets for scatter symbols in legend
borderpad the fractional whitespace inside the legend border
labelspacing the vertical space between the legend entries
handlelength the length of the legend handles
handletextpad the pad between the legend handle and text
borderaxespad the pad between the axes and legend border
columnspacing the spacing between columns
================ ==================================================================
The dimensions of pad and spacing are given as a fraction of the
fontsize. Values from rcParams will be used if None.
"""
from matplotlib.axes import Axes # local import only to avoid circularity
from matplotlib.figure import Figure # local import only to avoid circularity
Artist.__init__(self)
if prop is None:
self.prop=FontProperties(size=rcParams["legend.fontsize"])
else:
self.prop=prop
self.fontsize = self.prop.get_size_in_points()
propnames=['numpoints', 'markerscale', 'shadow', "columnspacing",
"scatterpoints"]
localdict = locals()
for name in propnames:
if localdict[name] is None:
value = rcParams["legend."+name]
else:
value = localdict[name]
setattr(self, name, value)
# Take care the deprecated keywords
deprecated_kwds = {"pad":"borderpad",
"labelsep":"labelspacing",
"handlelen":"handlelength",
"handletextsep":"handletextpad",
"axespad":"borderaxespad"}
# convert values of deprecated keywords (ginve in axes coords)
# to new vaules in a fraction of the font size
# conversion factor
bbox = parent.bbox
axessize_fontsize = min(bbox.width, bbox.height)/self.fontsize
for k, v in deprecated_kwds.items():
# use deprecated value if not None and if their newer
# counter part is None.
if localdict[k] is not None and localdict[v] is None:
warnings.warn("Use '%s' instead of '%s'." % (v, k),
DeprecationWarning)
setattr(self, v, localdict[k]*axessize_fontsize)
continue
# Otherwise, use new keywords
if localdict[v] is None:
setattr(self, v, rcParams["legend."+v])
else:
setattr(self, v, localdict[v])
del localdict
self._ncol = ncol
if self.numpoints <= 0:
raise ValueError("numpoints must be >= 0; it was %d"% numpoints)
# introduce y-offset for handles of the scatter plot
if scatteryoffsets is None:
self._scatteryoffsets = np.array([3./8., 4./8., 2.5/8.])
else:
self._scatteryoffsets = np.asarray(scatteryoffsets)
reps = int(self.numpoints / len(self._scatteryoffsets)) + 1
self._scatteryoffsets = np.tile(self._scatteryoffsets, reps)[:self.scatterpoints]
# _legend_box is an OffsetBox instance that contains all
# legend items and will be initialized from _init_legend_box()
# method.
self._legend_box = None
if isinstance(parent,Axes):
self.isaxes = True
self.set_figure(parent.figure)
elif isinstance(parent,Figure):
self.isaxes = False
self.set_figure(parent)
else:
raise TypeError("Legend needs either Axes or Figure as parent")
self.parent = parent
if loc is None:
loc = rcParams["legend.loc"]
if not self.isaxes and loc in [0,'best']:
loc = 'upper right'
if is_string_like(loc):
if loc not in self.codes:
if self.isaxes:
warnings.warn('Unrecognized location "%s". Falling back on "best"; '
'valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.keys())))
loc = 0
else:
warnings.warn('Unrecognized location "%s". Falling back on "upper right"; '
'valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.keys())))
loc = 1
else:
loc = self.codes[loc]
if not self.isaxes and loc == 0:
warnings.warn('Automatic legend placement (loc="best") not implemented for figure legend. '
'Falling back on "upper right".')
loc = 1
self._loc = loc
self._mode = mode
# We use FancyBboxPatch to draw a legend frame. The location
# and size of the box will be updated during the drawing time.
self.legendPatch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.fontsize,
snap=True
)
# The width and height of the legendPatch will be set (in the
# draw()) to the length that includes the padding. Thus we set
# pad=0 here.
if fancybox is None:
fancybox = rcParams["legend.fancybox"]
if fancybox == True:
self.legendPatch.set_boxstyle("round",pad=0,
rounding_size=0.2)
else:
self.legendPatch.set_boxstyle("square",pad=0)
self._set_artist_props(self.legendPatch)
self._drawFrame = True
# init with null renderer
self._init_legend_box(handles, labels)
self._last_fontsize_points = self.fontsize
def _set_artist_props(self, a):
"""
set the boilerplate props for artists added to axes
"""
a.set_figure(self.figure)
for c in self.get_children():
c.set_figure(self.figure)
a.set_transform(self.get_transform())
def _findoffset_best(self, width, height, xdescent, ydescent, renderer):
"Heper function to locate the legend at its best position"
ox, oy = self._find_best_position(width, height, renderer)
return ox+xdescent, oy+ydescent
def _findoffset_loc(self, width, height, xdescent, ydescent, renderer):
"Heper function to locate the legend using the location code"
if iterable(self._loc) and len(self._loc)==2:
# when loc is a tuple of axes(or figure) coordinates.
fx, fy = self._loc
bbox = self.parent.bbox
x, y = bbox.x0 + bbox.width * fx, bbox.y0 + bbox.height * fy
else:
bbox = Bbox.from_bounds(0, 0, width, height)
x, y = self._get_anchored_bbox(self._loc, bbox, self.parent.bbox, renderer)
return x+xdescent, y+ydescent
def draw(self, renderer):
"Draw everything that belongs to the legend"
if not self.get_visible(): return
self._update_legend_box(renderer)
renderer.open_group('legend')
# find_offset function will be provided to _legend_box and
# _legend_box will draw itself at the location of the return
# value of the find_offset.
if self._loc == 0:
_findoffset = self._findoffset_best
else:
_findoffset = self._findoffset_loc
def findoffset(width, height, xdescent, ydescent):
return _findoffset(width, height, xdescent, ydescent, renderer)
self._legend_box.set_offset(findoffset)
fontsize = renderer.points_to_pixels(self.fontsize)
# if mode == fill, set the width of the legend_box to the
# width of the paret (minus pads)
if self._mode in ["expand"]:
pad = 2*(self.borderaxespad+self.borderpad)*fontsize
self._legend_box.set_width(self.parent.bbox.width-pad)
if self._drawFrame:
# update the location and size of the legend
bbox = self._legend_box.get_window_extent(renderer)
self.legendPatch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
self.legendPatch.set_mutation_scale(fontsize)
if self.shadow:
shadow = Shadow(self.legendPatch, 2, -2)
shadow.draw(renderer)
self.legendPatch.draw(renderer)
self._legend_box.draw(renderer)
renderer.close_group('legend')
def _approx_text_height(self, renderer=None):
"""
Return the approximate height of the text. This is used to place
the legend handle.
"""
if renderer is None:
return self.fontsize
else:
return renderer.points_to_pixels(self.fontsize)
def _init_legend_box(self, handles, labels):
"""
Initiallize the legend_box. The legend_box is an instance of
the OffsetBox, which is packed with legend handles and
texts. Once packed, their location is calculated during the
drawing time.
"""
fontsize = self.fontsize
# legend_box is a HPacker, horizontally packed with
# columns. Each column is a VPacker, vertically packed with
# legend items. Each legend item is HPacker packed with
# legend handleBox and labelBox. handleBox is an instance of
# offsetbox.DrawingArea which contains legend handle. labelBox
# is an instance of offsetbox.TextArea which contains legend
# text.
text_list = [] # the list of text instances
handle_list = [] # the list of text instances
label_prop = dict(verticalalignment='baseline',
horizontalalignment='left',
fontproperties=self.prop,
)
labelboxes = []
for l in labels:
textbox = TextArea(l, textprops=label_prop,
multilinebaseline=True, minimumdescent=True)
text_list.append(textbox._text)
labelboxes.append(textbox)
handleboxes = []
# The approximate height and descent of text. These values are
# only used for plotting the legend handle.
height = self._approx_text_height() * 0.7
descent = 0.
# each handle needs to be drawn inside a box of (x, y, w, h) =
# (0, -descent, width, height). And their corrdinates should
# be given in the display coordinates.
# NOTE : the coordinates will be updated again in
# _update_legend_box() method.
# The transformation of each handle will be automatically set
# to self.get_trasnform(). If the artist does not uses its
# default trasnform (eg, Collections), you need to
# manually set their transform to the self.get_transform().
for handle in handles:
if isinstance(handle, RegularPolyCollection):
npoints = self.scatterpoints
else:
npoints = self.numpoints
if npoints > 1:
# we put some pad here to compensate the size of the
# marker
xdata = np.linspace(0.3*fontsize,
(self.handlelength-0.3)*fontsize,
npoints)
xdata_marker = xdata
elif npoints == 1:
xdata = np.linspace(0, self.handlelength*fontsize, 2)
xdata_marker = [0.5*self.handlelength*fontsize]
if isinstance(handle, Line2D):
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
legline.update_from(handle)
self._set_artist_props(legline) # after update
legline.set_clip_box(None)
legline.set_clip_path(None)
legline.set_drawstyle('default')
legline.set_marker('None')
handle_list.append(legline)
legline_marker = Line2D(xdata_marker, ydata[:len(xdata_marker)])
legline_marker.update_from(handle)
self._set_artist_props(legline_marker)
legline_marker.set_clip_box(None)
legline_marker.set_clip_path(None)
legline_marker.set_linestyle('None')
# we don't want to add this to the return list because
# the texts and handles are assumed to be in one-to-one
# correpondence.
legline._legmarker = legline_marker
elif isinstance(handle, Patch):
p = Rectangle(xy=(0., 0.),
width = self.handlelength*fontsize,
height=(height-descent),
)
p.update_from(handle)
self._set_artist_props(p)
p.set_clip_box(None)
p.set_clip_path(None)
handle_list.append(p)
elif isinstance(handle, LineCollection):
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
self._set_artist_props(legline)
legline.set_clip_box(None)
legline.set_clip_path(None)
lw = handle.get_linewidth()[0]
dashes = handle.get_dashes()[0]
color = handle.get_colors()[0]
legline.set_color(color)
legline.set_linewidth(lw)
legline.set_dashes(dashes)
handle_list.append(legline)
elif isinstance(handle, RegularPolyCollection):
#ydata = self._scatteryoffsets
ydata = height*self._scatteryoffsets
size_max, size_min = max(handle.get_sizes()),\
min(handle.get_sizes())
# we may need to scale these sizes by "markerscale"
# attribute. But other handle types does not seem
# to care about this attribute and it is currently ignored.
if self.scatterpoints < 4:
sizes = [.5*(size_max+size_min), size_max,
size_min]
else:
sizes = (size_max-size_min)*np.linspace(0,1,self.scatterpoints)+size_min
p = type(handle)(handle.get_numsides(),
rotation=handle.get_rotation(),
sizes=sizes,
offsets=zip(xdata_marker,ydata),
transOffset=self.get_transform(),
)
p.update_from(handle)
p.set_figure(self.figure)
p.set_clip_box(None)
p.set_clip_path(None)
handle_list.append(p)
else:
handle_list.append(None)
handlebox = DrawingArea(width=self.handlelength*fontsize,
height=height,
xdescent=0., ydescent=descent)
handle = handle_list[-1]
handlebox.add_artist(handle)
if hasattr(handle, "_legmarker"):
handlebox.add_artist(handle._legmarker)
handleboxes.append(handlebox)
# We calculate number of lows in each column. The first
# (num_largecol) columns will have (nrows+1) rows, and remaing
# (num_smallcol) columns will have (nrows) rows.
nrows, num_largecol = divmod(len(handleboxes), self._ncol)
num_smallcol = self._ncol-num_largecol
# starting index of each column and number of rows in it.
largecol = safezip(range(0, num_largecol*(nrows+1), (nrows+1)),
[nrows+1] * num_largecol)
smallcol = safezip(range(num_largecol*(nrows+1), len(handleboxes), nrows),
[nrows] * num_smallcol)
handle_label = safezip(handleboxes, labelboxes)
columnbox = []
for i0, di in largecol+smallcol:
# pack handleBox and labelBox into itemBox
itemBoxes = [HPacker(pad=0,
sep=self.handletextpad*fontsize,
children=[h, t], align="baseline")
for h, t in handle_label[i0:i0+di]]
# minimumdescent=False for the text of the last row of the column
itemBoxes[-1].get_children()[1].set_minimumdescent(False)
# pack columnBox
columnbox.append(VPacker(pad=0,
sep=self.labelspacing*fontsize,
align="baseline",
children=itemBoxes))
if self._mode == "expand":
mode = "expand"
else:
mode = "fixed"
sep = self.columnspacing*fontsize
self._legend_box = HPacker(pad=self.borderpad*fontsize,
sep=sep, align="baseline",
mode=mode,
children=columnbox)
self._legend_box.set_figure(self.figure)
self.texts = text_list
self.legendHandles = handle_list
def _update_legend_box(self, renderer):
"""
Update the dimension of the legend_box. This is required
becuase the paddings, the hadle size etc. depends on the dpi
of the renderer.
"""
# fontsize in points.
fontsize = renderer.points_to_pixels(self.fontsize)
if self._last_fontsize_points == fontsize:
# no update is needed
return
# each handle needs to be drawn inside a box of
# (x, y, w, h) = (0, -descent, width, height).
# And their corrdinates should be given in the display coordinates.
# The approximate height and descent of text. These values are
# only used for plotting the legend handle.
height = self._approx_text_height(renderer) * 0.7
descent = 0.
for handle in self.legendHandles:
if isinstance(handle, RegularPolyCollection):
npoints = self.scatterpoints
else:
npoints = self.numpoints
if npoints > 1:
# we put some pad here to compensate the size of the
# marker
xdata = np.linspace(0.3*fontsize,
(self.handlelength-0.3)*fontsize,
npoints)
xdata_marker = xdata
elif npoints == 1:
xdata = np.linspace(0, self.handlelength*fontsize, 2)
xdata_marker = [0.5*self.handlelength*fontsize]
if isinstance(handle, Line2D):
legline = handle
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline.set_data(xdata, ydata)
legline_marker = legline._legmarker
legline_marker.set_data(xdata_marker, ydata[:len(xdata_marker)])
elif isinstance(handle, Patch):
p = handle
p.set_bounds(0., 0.,
self.handlelength*fontsize,
(height-descent),
)
elif isinstance(handle, RegularPolyCollection):
p = handle
ydata = height*self._scatteryoffsets
p.set_offsets(zip(xdata_marker,ydata))
# correction factor
cor = fontsize / self._last_fontsize_points
# helper function to iterate over all children
def all_children(parent):
yield parent
for c in parent.get_children():
for cc in all_children(c): yield cc
#now update paddings
for box in all_children(self._legend_box):
if isinstance(box, PackerBase):
box.pad = box.pad * cor
box.sep = box.sep * cor
elif isinstance(box, DrawingArea):
box.width = self.handlelength*fontsize
box.height = height
box.xdescent = 0.
box.ydescent=descent
self._last_fontsize_points = fontsize
def _auto_legend_data(self):
"""
Returns list of vertices and extents covered by the plot.
Returns a two long list.
First element is a list of (x, y) vertices (in
display-coordinates) covered by all the lines and line
collections, in the legend's handles.
Second element is a list of bounding boxes for all the patches in
the legend's handles.
"""
assert self.isaxes # should always hold because function is only called internally
ax = self.parent
vertices = []
bboxes = []
lines = []
for handle in ax.lines:
assert isinstance(handle, Line2D)
path = handle.get_path()
trans = handle.get_transform()
tpath = trans.transform_path(path)
lines.append(tpath)
for handle in ax.patches:
assert isinstance(handle, Patch)
if isinstance(handle, Rectangle):
transform = handle.get_data_transform()
bboxes.append(handle.get_bbox().transformed(transform))
else:
transform = handle.get_transform()
bboxes.append(handle.get_path().get_extents(transform))
return [vertices, bboxes, lines]
def draw_frame(self, b):
'b is a boolean. Set draw frame to b'
self._drawFrame = b
def get_children(self):
'return a list of child artists'
children = []
if self._legend_box:
children.append(self._legend_box)
return children
def get_frame(self):
'return the Rectangle instance used to frame the legend'
return self.legendPatch
def get_lines(self):
'return a list of lines.Line2D instances in the legend'
return [h for h in self.legendHandles if isinstance(h, Line2D)]
def get_patches(self):
'return a list of patch instances in the legend'
return silent_list('Patch', [h for h in self.legendHandles if isinstance(h, Patch)])
def get_texts(self):
'return a list of text.Text instance in the legend'
return silent_list('Text', self.texts)
def get_window_extent(self):
'return a extent of the the legend'
return self.legendPatch.get_window_extent()
def _get_anchored_bbox(self, loc, bbox, parentbbox, renderer):
"""
Place the *bbox* inside the *parentbbox* according to a given
location code. Return the (x,y) coordinate of the bbox.
- loc: a location code in range(1, 11).
This corresponds to the possible values for self._loc, excluding "best".
- bbox: bbox to be placed, display coodinate units.
- parentbbox: a parent box which will contain the bbox. In
display coordinates.
"""
assert loc in range(1,11) # called only internally
BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = range(11)
anchor_coefs={UR:"NE",
UL:"NW",
LL:"SW",
LR:"SE",
R:"E",
CL:"W",
CR:"E",
LC:"S",
UC:"N",
C:"C"}
c = anchor_coefs[loc]
fontsize = renderer.points_to_pixels(self.fontsize)
container = parentbbox.padded(-(self.borderaxespad) * fontsize)
anchored_box = bbox.anchored(c, container=container)
return anchored_box.x0, anchored_box.y0
def _find_best_position(self, width, height, renderer, consider=None):
"""
Determine the best location to place the legend.
`consider` is a list of (x, y) pairs to consider as a potential
lower-left corner of the legend. All are display coords.
"""
assert self.isaxes # should always hold because function is only called internally
verts, bboxes, lines = self._auto_legend_data()
bbox = Bbox.from_bounds(0, 0, width, height)
consider = [self._get_anchored_bbox(x, bbox, self.parent.bbox, renderer) for x in range(1, len(self.codes))]
#tx, ty = self.legendPatch.get_x(), self.legendPatch.get_y()
candidates = []
for l, b in consider:
legendBox = Bbox.from_bounds(l, b, width, height)
badness = 0
badness = legendBox.count_contains(verts)
badness += legendBox.count_overlaps(bboxes)
for line in lines:
if line.intersects_bbox(legendBox):
badness += 1
ox, oy = l, b
if badness == 0:
return ox, oy
candidates.append((badness, (l, b)))
# rather than use min() or list.sort(), do this so that we are assured
# that in the case of two equal badnesses, the one first considered is
# returned.
# NOTE: list.sort() is stable.But leave as it is for now. -JJL
minCandidate = candidates[0]
for candidate in candidates:
if candidate[0] < minCandidate[0]:
minCandidate = candidate
ox, oy = minCandidate[1]
return ox, oy
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.