repo_name stringlengths 6 67 | path stringlengths 5 185 | copies stringlengths 1 3 | size stringlengths 4 6 | content stringlengths 1.02k 962k | license stringclasses 15 values |
|---|---|---|---|---|---|
CforED/Machine-Learning | sklearn/utils/tests/test_extmath.py | 19 | 21979 | # Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis Engemann <d.engemann@fz-juelich.de>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
    # The density of a matrix must not depend on its sparse storage format.
    random_state = np.random.RandomState(0)
    dense = random_state.randint(10, size=(10, 5))
    dense[1, 2] = 0
    dense[5, 3] = 0
    expected = density(dense)
    converters = (sparse.csr_matrix, sparse.csc_matrix,
                  sparse.coo_matrix, sparse.lil_matrix)
    for to_sparse in converters:
        assert_equal(density(to_sparse(dense)), expected)
def test_uniform_weights():
    # With all weights equal to one, weighted_mode must agree with
    # scipy.stats.mode along every axis (including axis=None).
    random_state = np.random.RandomState(0)
    data = random_state.randint(10, size=(10, 5))
    uniform = np.ones(data.shape)
    for axis in (None, 0, 1):
        expected_mode, expected_score = stats.mode(data, axis)
        actual_mode, actual_score = weighted_mode(data, uniform, axis)
        assert_true(np.all(expected_mode == actual_mode))
        assert_true(np.all(expected_score == actual_score))
def test_random_weights():
    # set this up so that each row should have a weighted mode of 6,
    # with a score that is easily reproduced
    mode_result = 6
    rng = np.random.RandomState(0)
    # random values in [0, 6) everywhere ...
    x = rng.randint(mode_result, size=(100, 10))
    w = rng.random_sample(x.shape)
    # ... except the first five columns, which are forced to the value 6
    # and given boosted weights so 6 must win the weighted vote per row
    x[:, :5] = mode_result
    w[:, :5] += 1
    mode, score = weighted_mode(x, w, axis=1)
    assert_array_equal(mode, mode_result)
    # the winning score is exactly the sum of the boosted weights per row
    assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
    # Adding many tiny numbers in log space must round-trip to the plain
    # sum once exponentiated, for a 1-D input and along both axes of a
    # 2-D input.
    values = np.array([1e-40] * 1000000)
    log_values = np.log(values)
    assert_almost_equal(np.exp(logsumexp(log_values)), values.sum())
    stacked = np.vstack([values, values])
    log_stacked = np.vstack([log_values, log_values])
    assert_array_almost_equal(np.exp(logsumexp(log_stacked, axis=0)),
                              stacked.sum(axis=0))
    assert_array_almost_equal(np.exp(logsumexp(log_stacked, axis=1)),
                              stacked.sum(axis=1))
def test_randomized_svd_low_rank():
    # Check that extmath.randomized_svd is consistent with linalg.svd
    n_samples = 100
    n_features = 500
    rank = 5
    k = 10
    # generate a matrix X of approximate effective rank `rank` and no noise
    # component (very structured signal):
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=0.0,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))
    # compute the singular values of X using the slow exact method
    U, s, V = linalg.svd(X, full_matrices=False)
    # every power-iteration normalization scheme must give the same answer
    for normalizer in ['auto', 'none', 'LU', 'QR']:
        # compute the singular values of X using the fast approximate method
        Ua, sa, Va = \
            randomized_svd(X, k, power_iteration_normalizer=normalizer)
        assert_equal(Ua.shape, (n_samples, k))
        assert_equal(sa.shape, (k,))
        assert_equal(Va.shape, (k, n_features))
        # ensure that the singular values of both methods are equal up to the
        # real rank of the matrix
        assert_almost_equal(s[:k], sa)
        # check the singular vectors too (while not checking the sign)
        assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
        # check the sparse matrix representation
        # (X stays sparse for the remaining loop iterations)
        X = sparse.csr_matrix(X)
        # compute the singular values of X using the fast approximate method
        Ua, sa, Va = \
            randomized_svd(X, k, power_iteration_normalizer=normalizer)
        assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
    # norm and squared_norm must agree with numpy's reference results,
    # even for entries large enough to stress numerical stability.
    data = np.random.RandomState(42).randn(50, 63)
    data = data * 100 + 200
    assert_almost_equal(np.linalg.norm(data.ravel()), norm(data))
    assert_almost_equal(norm(data) ** 2, squared_norm(data), decimal=6)
    assert_almost_equal(np.linalg.norm(data), np.sqrt(squared_norm(data)),
                        decimal=6)
def test_row_norms():
    # row_norms must match explicitly computed row-wise (squared)
    # Euclidean norms for a dense array and for its CSR counterpart.
    data = np.random.RandomState(42).randn(100, 100)
    expected_sq = (data ** 2).sum(axis=1)
    for matrix in (data, sparse.csr_matrix(data, dtype=np.float32)):
        assert_array_almost_equal(expected_sq,
                                  row_norms(matrix, squared=True), 5)
        assert_array_almost_equal(np.sqrt(expected_sq), row_norms(matrix))
def test_randomized_svd_low_rank_with_noise():
    # Check that extmath.randomized_svd can handle noisy matrices
    n_samples = 100
    n_features = 500
    rank = 5
    k = 10
    # generate a matrix X with structure approximate rank `rank` and an
    # important noisy component
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=0.1,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))
    # compute the singular values of X using the slow exact method
    _, s, _ = linalg.svd(X, full_matrices=False)
    for normalizer in ['auto', 'none', 'LU', 'QR']:
        # compute the singular values of X using the fast approximate
        # method without the iterated power method
        _, sa, _ = randomized_svd(X, k, n_iter=0,
                                  power_iteration_normalizer=normalizer)
        # the approximation does not tolerate the noise:
        assert_greater(np.abs(s[:k] - sa).max(), 0.01)
        # compute the singular values of X using the fast approximate
        # method with iterated power method (default n_iter)
        _, sap, _ = randomized_svd(X, k,
                                   power_iteration_normalizer=normalizer)
        # the iterated power method is helping getting rid of the noise:
        assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
    # Check that extmath.randomized_svd can handle noisy matrices
    n_samples = 100
    n_features = 500
    rank = 5
    k = 10
    # let us try again without 'low_rank component': just regularly but slowly
    # decreasing singular values: the rank of the data matrix is infinite
    # (tail_strength=1.0 means the spectrum has no sharp cutoff)
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=1.0,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))
    # compute the singular values of X using the slow exact method
    _, s, _ = linalg.svd(X, full_matrices=False)
    for normalizer in ['auto', 'none', 'LU', 'QR']:
        # compute the singular values of X using the fast approximate method
        # without the iterated power method
        _, sa, _ = randomized_svd(X, k, n_iter=0,
                                  power_iteration_normalizer=normalizer)
        # the approximation does not tolerate the noise:
        assert_greater(np.abs(s[:k] - sa).max(), 0.1)
        # compute the singular values of X using the fast approximate method
        # with iterated power method
        _, sap, _ = randomized_svd(X, k, n_iter=5,
                                   power_iteration_normalizer=normalizer)
        # the iterated power method is still managing to get most of the
        # structure at the requested rank
        assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
    # Check that transposing the design matrix has limited impact
    n_samples = 100
    n_features = 500
    rank = 4
    k = 10
    X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
                             effective_rank=rank, tail_strength=0.5,
                             random_state=0)
    assert_equal(X.shape, (n_samples, n_features))
    # run the approximate SVD with all three transpose policies
    U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
                                random_state=0)
    U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
                                random_state=0)
    U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
                                random_state=0)
    # exact reference decomposition
    U4, s4, V4 = linalg.svd(X, full_matrices=False)
    # all policies must recover the leading singular values ...
    assert_almost_equal(s1, s4[:k], decimal=3)
    assert_almost_equal(s2, s4[:k], decimal=3)
    assert_almost_equal(s3, s4[:k], decimal=3)
    # ... and the rank-k reconstruction (sign-insensitive comparison)
    assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
                        decimal=2)
    assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
                        decimal=2)
    # in this case 'auto' is equivalent to transpose
    assert_almost_equal(s2, s3)
def test_randomized_svd_power_iteration_normalizer():
    # randomized_svd with power_iteration_normalizer='none' diverges for
    # a large number of power iterations on this dataset
    rng = np.random.RandomState(42)
    X = make_low_rank_matrix(300, 1000, effective_rank=50, random_state=rng)
    X += 3 * rng.randint(0, 2, size=X.shape)
    n_components = 50

    # Check that it diverges with many (non-normalized) power iterations:
    # compare the Frobenius reconstruction error after 2 vs. 20 iterations.
    U, s, V = randomized_svd(X, n_components, n_iter=2,
                             power_iteration_normalizer='none')
    A = X - U.dot(np.diag(s).dot(V))
    error_2 = linalg.norm(A, ord='fro')
    U, s, V = randomized_svd(X, n_components, n_iter=20,
                             power_iteration_normalizer='none')
    A = X - U.dot(np.diag(s).dot(V))
    error_20 = linalg.norm(A, ord='fro')
    # without normalization the error blows up between 2 and 20 iterations
    # (debug prints removed; the assertion carries the check)
    assert_greater(np.abs(error_2 - error_20), 100)

    # With LU/QR normalization (and 'auto'), the reconstruction error must
    # stay stable as the number of power iterations grows.
    for normalizer in ['LU', 'QR', 'auto']:
        U, s, V = randomized_svd(X, n_components, n_iter=2,
                                 power_iteration_normalizer=normalizer)
        A = X - U.dot(np.diag(s).dot(V))
        error_2 = linalg.norm(A, ord='fro')
        for i in [5, 10, 50]:
            U, s, V = randomized_svd(X, n_components, n_iter=i,
                                     power_iteration_normalizer=normalizer)
            A = X - U.dot(np.diag(s).dot(V))
            error = linalg.norm(A, ord='fro')
            assert_greater(15, np.abs(error_2 - error))
def test_svd_flip():
    # Check that svd_flip works in both situations, and reconstructs input.
    rs = np.random.RandomState(1999)
    n_samples = 20
    n_features = 10
    X = rs.randn(n_samples, n_features)
    # Check matrix reconstruction
    U, S, V = linalg.svd(X, full_matrices=False)
    U1, V1 = svd_flip(U, V, u_based_decision=False)
    assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
    # Check transposed matrix reconstruction
    XT = X.T
    U, S, V = linalg.svd(XT, full_matrices=False)
    U2, V2 = svd_flip(U, V, u_based_decision=True)
    assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
    # Check that different flip methods are equivalent under reconstruction:
    # flipping signs in U/V must never change the product U * S * V
    U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
    assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
    U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
    assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
    # With flip_sign=True the decomposition must be deterministic across
    # random seeds, reconstruct the input, and keep U and V orthonormal.
    matrix = np.array([[2.0, 0.0], [0.0, 1.0]])
    u_ref, s_ref, v_ref = randomized_svd(matrix, 2, flip_sign=True,
                                         random_state=41)
    identity = np.eye(2)
    for seed in range(10):
        u, s, v = randomized_svd(matrix, 2, flip_sign=True,
                                 random_state=seed)
        assert_almost_equal(u_ref, u)
        assert_almost_equal(v_ref, v)
        assert_almost_equal(np.dot(u * s, v), matrix)
        assert_almost_equal(np.dot(u.T, u), identity)
        assert_almost_equal(np.dot(v.T, v), identity)
def test_cartesian():
    # cartesian must enumerate all combinations with the last axis
    # varying fastest, matching a nested-loop enumeration.
    first = np.array([1, 2, 3])
    second = np.array([4, 5])
    third = np.array([6, 7])
    expected = np.array([[a, b, c]
                         for a in first
                         for b in second
                         for c in third])
    assert_array_equal(expected, cartesian((first, second, third)))
    # a single axis comes back as a column vector
    single = np.arange(3)
    assert_array_equal(single[:, np.newaxis], cartesian((single,)))
def test_logistic_sigmoid():
    # Check correctness and robustness of logistic sigmoid implementation.
    # Reference implementations as plain functions rather than lambdas
    # bound to names (PEP 8 E731).
    def naive_logistic(x):
        return 1 / (1 + np.exp(-x))

    def naive_log_logistic(x):
        return np.log(naive_logistic(x))

    # agreement with the naive formula in the well-conditioned range
    x = np.linspace(-2, 2, 50)
    assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
    # robustness: at +/-100 the naive formula under/overflows, while
    # log_logistic must return the asymptotic values -100 and 0
    extreme_x = np.array([-100., 100.])
    assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
def test_fast_dot():
    # Check fast dot blas wrapper function
    # (skip entirely when fast_dot fell back to plain np.dot)
    if fast_dot is np.dot:
        return
    rng = np.random.RandomState(42)
    A = rng.random_sample([2, 10])
    B = rng.random_sample([2, 10])
    # probe for a usable BLAS gemm; without it _fast_dot validation
    # cannot be exercised
    try:
        linalg.get_blas_funcs(['gemm'])[0]
        has_blas = True
    except (AttributeError, ValueError):
        has_blas = False
    if has_blas:
        # Test _fast_dot for invalid input.
        # Maltyped data: mismatched or non-float dtypes must be rejected.
        for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
            assert_raises(ValueError, _fast_dot, A.astype(dt1),
                          B.astype(dt2).T)
        # Malformed data.
        # ndim == 0
        E = np.empty(0)
        assert_raises(ValueError, _fast_dot, E, E)
        # ndim == 1
        assert_raises(ValueError, _fast_dot, A, A[0])
        # ndim > 2
        assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
        # min(shape) == 1
        assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
        # test for matrix mismatch error
        assert_raises(ValueError, _fast_dot, A, A)
    # Test cov-like use case + dtypes: fast_dot must agree with np.dot.
    for dtype in ['f8', 'f4']:
        A = A.astype(dtype)
        B = B.astype(dtype)
        # col < row
        C = np.dot(A.T, A)
        C_ = fast_dot(A.T, A)
        assert_almost_equal(C, C_, decimal=5)
        C = np.dot(A.T, B)
        C_ = fast_dot(A.T, B)
        assert_almost_equal(C, C_, decimal=5)
        C = np.dot(A, B.T)
        C_ = fast_dot(A, B.T)
        assert_almost_equal(C, C_, decimal=5)
    # Test square matrix * rectangular use case.
    A = rng.random_sample([2, 2])
    for dtype in ['f8', 'f4']:
        A = A.astype(dtype)
        B = B.astype(dtype)
        C = np.dot(A, B)
        C_ = fast_dot(A, B)
        assert_almost_equal(C, C_, decimal=5)
        C = np.dot(A.T, B)
        C_ = fast_dot(A.T, B)
        assert_almost_equal(C, C_, decimal=5)
    # non-finite inputs must also be rejected when BLAS is available
    if has_blas:
        for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
            assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
    # Test Youngs and Cramer incremental variance formulas.
    # Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
    A = np.array([[600, 470, 170, 430, 300],
                  [600, 470, 170, 430, 300],
                  [600, 470, 170, 430, 300],
                  [600, 470, 170, 430, 300]]).T
    # split the samples in two batches: stats of the first batch seed the
    # incremental update, the second batch is folded in
    idx = 2
    X1 = A[:idx, :]
    X2 = A[idx:, :]
    old_means = X1.mean(axis=0)
    old_variances = X1.var(axis=0)
    old_sample_count = X1.shape[0]
    final_means, final_variances, final_count = \
        _incremental_mean_and_var(X2, old_means, old_variances,
                                  old_sample_count)
    # incremental result must match the batch statistics over all of A
    assert_almost_equal(final_means, A.mean(axis=0), 6)
    assert_almost_equal(final_variances, A.var(axis=0), 6)
    assert_almost_equal(final_count, A.shape[0])
@skip_if_32bit
def test_incremental_variance_numerical_stability():
    # Test Youngs and Cramer incremental variance formulas.
    # stable reference when numpy's own var is precise enough
    def np_var(A):
        return A.var(axis=0)
    # Naive one pass variance computation - not numerically stable
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
    def one_pass_var(X):
        n = X.shape[0]
        exp_x2 = (X ** 2).sum(axis=0) / n
        expx_2 = (X.sum(axis=0) / n) ** 2
        return exp_x2 - expx_2
    # Two-pass algorithm, stable.
    # We use it as a benchmark. It is not an online algorithm
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass_algorithm
    def two_pass_var(X):
        mean = X.mean(axis=0)
        Y = X.copy()
        return np.mean((Y - mean)**2, axis=0)
    # Naive online implementation
    # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
    # This works only for chunks of size 1
    def naive_mean_variance_update(x, last_mean, last_variance,
                                   last_sample_count):
        updated_sample_count = (last_sample_count + 1)
        samples_ratio = last_sample_count / float(updated_sample_count)
        updated_mean = x / updated_sample_count + last_mean * samples_ratio
        updated_variance = last_variance * samples_ratio + \
            (x - last_mean) * (x - updated_mean) / updated_sample_count
        return updated_mean, updated_variance, updated_sample_count
    # We want to show a case when one_pass_var has error > 1e-3 while
    # _batch_mean_variance_update has less.
    tol = 200
    n_features = 2
    n_samples = 10000
    # two widely separated magnitudes make the naive formulas cancel badly
    x1 = np.array(1e8, dtype=np.float64)
    x2 = np.log(1e-5, dtype=np.float64)
    A0 = x1 * np.ones((n_samples // 2, n_features), dtype=np.float64)
    A1 = x2 * np.ones((n_samples // 2, n_features), dtype=np.float64)
    A = np.vstack((A0, A1))
    # Older versions of numpy have different precision
    # In some old version, np.var is not stable
    if np.abs(np_var(A) - two_pass_var(A)).max() < 1e-6:
        stable_var = np_var
    else:
        stable_var = two_pass_var
    # Naive one pass var: >tol (=1063)
    assert_greater(np.abs(stable_var(A) - one_pass_var(A)).max(), tol)
    # Starting point for online algorithms: after A0
    # Naive implementation: >tol (436)
    mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
    for i in range(A1.shape[0]):
        mean, var, n = \
            naive_mean_variance_update(A1[i, :], mean, var, n)
    assert_equal(n, A.shape[0])
    # the mean is also slightly unstable
    assert_greater(np.abs(A.mean(axis=0) - mean).max(), 1e-6)
    assert_greater(np.abs(stable_var(A) - var).max(), tol)
    # Robust implementation: <tol (177)
    mean, var, n = A0[0, :], np.zeros(n_features), n_samples // 2
    for i in range(A1.shape[0]):
        mean, var, n = \
            _incremental_mean_and_var(A1[i, :].reshape((1, A1.shape[1])),
                                      mean, var, n)
    assert_equal(n, A.shape[0])
    assert_array_almost_equal(A.mean(axis=0), mean)
    assert_greater(tol, np.abs(stable_var(A) - var).max())
def test_incremental_variance_ddof():
    # Test that degrees of freedom parameter for calculations are correct.
    rng = np.random.RandomState(1999)
    X = rng.randn(50, 10)
    n_samples, n_features = X.shape
    for batch_size in [11, 20, 37]:
        steps = np.arange(0, X.shape[0], batch_size)
        # make sure the final (possibly partial) batch reaches the end
        if steps[-1] != X.shape[0]:
            steps = np.hstack([steps, n_samples])
        for i, j in zip(steps[:-1], steps[1:]):
            batch = X[i:j, :]
            if i == 0:
                # first batch seeds the incremental statistics directly
                incremental_means = batch.mean(axis=0)
                incremental_variances = batch.var(axis=0)
                # Assign this twice so that the test logic is consistent
                incremental_count = batch.shape[0]
                sample_count = batch.shape[0]
            else:
                result = _incremental_mean_and_var(
                    batch, incremental_means, incremental_variances,
                    sample_count)
                (incremental_means, incremental_variances,
                 incremental_count) = result
                sample_count += batch.shape[0]
            # after each batch, incremental stats must match the batch
            # statistics over all samples seen so far
            calculated_means = np.mean(X[:j], axis=0)
            calculated_variances = np.var(X[:j], axis=0)
            assert_almost_equal(incremental_means, calculated_means, 6)
            assert_almost_equal(incremental_variances,
                                calculated_variances, 6)
            assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
    # After the deterministic sign flip, the entry with the largest
    # absolute value in each row must be positive, and the result must be
    # a pure per-row sign change of the input.
    rows = np.random.RandomState(36).randn(5, 5)
    argmax_abs = np.argmax(np.abs(rows), axis=1)
    flipped = _deterministic_vector_sign_flip(rows)
    assert_array_equal(argmax_abs, np.argmax(flipped, axis=1))
    row_signs = np.sign(rows[range(rows.shape[0]), argmax_abs])
    assert_array_equal(rows, flipped * row_signs[:, np.newaxis])
def test_softmax():
    # softmax must equal exp(X) normalized by its row-wise sums.
    random_state = np.random.RandomState(0)
    data = random_state.randn(3, 5)
    exponentials = np.exp(data)
    row_totals = np.sum(exponentials, axis=1).reshape((-1, 1))
    assert_array_almost_equal(softmax(data), exponentials / row_totals)
| bsd-3-clause |
willettk/rgz-analysis | python/test_consensus.py | 2 | 15556 | from __future__ import division
# Local RGZ modules
import collinearity
from load_contours import get_contours,make_pathdict
# Default packages
import datetime
import operator
from collections import Counter
import cStringIO
import urllib
import json
import os.path
import time
import shutil
# Other packages
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.pyplot import cm
from matplotlib.path import Path
import matplotlib.patches as patches
from scipy.ndimage.filters import maximum_filter
from scipy import stats
from scipy.ndimage.morphology import generate_binary_structure, binary_erosion
from scipy.linalg.basic import LinAlgError
from astropy.io import fits
from astropy import wcs
from pymongo import MongoClient
from PIL import Image
# MongoDB parameters
# NOTE(review): importing this module opens a live connection to a local
# MongoDB instance, so a running mongod is an import-time requirement.
client = MongoClient('localhost', 27017)
db = client['radio']
subjects = db['radio_subjects'] # subjects = images
classifications = db['radio_classifications'] # classifications = classifications of each subject per user
# General variables for the RGZ sample
# classifications older than this launch date are ignored by checksum()
main_release_date = datetime.datetime(2013, 12, 17, 0, 0, 0, 0)
IMG_HEIGHT_OLD = 424.0 # number of pixels in the original JPG image along the y axis
IMG_WIDTH_OLD = 424.0 # number of pixels in the original JPG image along the x axis
IMG_HEIGHT_NEW = 500.0 # number of pixels in the downloaded JPG image along the y axis
IMG_WIDTH_NEW = 500.0 # number of pixels in the downloaded JPG image along the x axis
FITS_HEIGHT = 301.0 # number of pixels in the FITS image (?) along the y axis
FITS_WIDTH = 301.0 # number of pixels in the FITS image (?) along the x axis
FIRST_FITS_HEIGHT = 132.0 # number of pixels in the FITS image along the y axis
FIRST_FITS_WIDTH = 132.0 # number of pixels in the FITS image along the y axis
# Need to add parameters for ATLAS, both IR and radio.
PIXEL_SIZE = 0.00016667#/3600.0 # the number of arcseconds per pixel in the FITS image
# bounds of the IR pixel grid used for the kernel density estimate
xmin = 1.
xmax = IMG_HEIGHT_NEW
ymin = 1.
ymax = IMG_WIDTH_NEW
# annotation keys that are classification metadata, not galaxy markings
bad_keys = ('finished_at','started_at','user_agent','lang','pending')
expert_names = [u'42jkb', u'ivywong', u'stasmanian', u'klmasters', u'Kevin', u'akapinska', u'enno.middelberg', u'xDocR', u'vrooje', u'KWillett', u'DocR']
# Paths
rgz_dir = '/Users/willettk/Astronomy/Research/GalaxyZoo/rgz-analysis'
pathdict = make_pathdict()
# Find the consensus classification for a single subject
@profile
def checksum(zid='ARG000255x',experts_only=False,excluded=[],no_anonymous=False,write_peak_data=False):
    """Compute the consensus radio/IR classification for one RGZ subject.

    Aggregates all MongoDB classifications for the subject `zid`, keeps one
    classification per user, identifies the most common combination of radio
    components via a floating-point checksum of their xmax coordinates, and
    estimates the consensus IR counterpart position with a 2-D KDE.

    Returns a dict with keys 'zid', 'source', 'n_users', 'n_total' and
    'answer' (per-component consensus data), or None if no usable
    classifications exist.

    NOTE(review): `excluded=[]` is a mutable default argument; it is only
    read here, but replacing it with None would be safer.
    """
    # Find the consensus for all users who have classified a particular galaxy
    sub = subjects.find_one({'zooniverse_id':zid})
    imgid = sub['_id']
    # Classifications for this subject after launch date
    class_params = {"subject_ids": imgid, "updated_at": {"$gt": main_release_date}}
    # Only get the consensus classification for the science team members
    if experts_only:
        class_params['expert'] = True
    # If comparing a particular volunteer (such as an expert), don't include self-comparison
    if len(excluded) > 0:
        class_params['user_name'] = {"$nin":excluded}
    '''
    # To exclude the experts:
    class_params['expert'] = {"$exists":False}
    '''
    # To exclude anonymous classifications (registered users only):
    if no_anonymous:
        if class_params.has_key('user_name'):
            class_params['user_name']["$exists"] = True
        else:
            class_params['user_name'] = {"$exists":True}
    _c = classifications.find(class_params)
    #clist_all = list(classifications.find(class_params))
    # Empty dicts and lists
    cdict = {}
    checksum_list = []
    unique_users = set()
    #clen_start = len(clist_all)
    clen_start = 0
    listcount = []
    # Compute the most popular combination for each NUMBER of galaxies identified in image
    clist_all = []
    #for c in clist_all:
    for c in _c:
        clist_all.append(c)
        clen_start += 1
        # Skip classification if they already did one?
        try:
            user_name = c['user_name']
        except KeyError:
            user_name = 'Anonymous'
        # NOTE(review): `is 'Anonymous'` compares identity, not equality, and
        # only works because CPython interns this literal; should be ==.
        if user_name not in unique_users or user_name is 'Anonymous':
            unique_users.add(user_name)
            listcount.append(True)
            sumlist = [] # List of the checksums over all possible combinations
            # Only find data that was an actual marking, not metadata
            goodann = [x for x in c['annotations'] if (x.keys()[0] not in bad_keys)]
            n_galaxies = len(goodann)
            if n_galaxies > 0: # There must be at least one galaxy!
                for idx,ann in enumerate(goodann):
                    xmaxlist = []
                    try:
                        radio_comps = ann['radio']
                        # loop over all the radio components within an galaxy
                        if radio_comps != 'No Contours':
                            for rc in radio_comps:
                                xmaxlist.append(float(radio_comps[rc]['xmax']))
                        # or make the value -99 if there are no contours
                        else:
                            xmaxlist.append(-99)
                    except KeyError:
                        xmaxlist.append(-99)
                    # To create a unique ID for the combination of radio components,
                    # take the product of all the xmax coordinates and sum them together.
                    product = reduce(operator.mul, xmaxlist, 1)
                    sumlist.append(round(product,3))
                checksum = sum(sumlist)
            else:
                checksum = -99
            checksum_list.append(checksum)
            c['checksum'] = checksum
            # Insert checksum into dictionary with number of galaxies as the index
            if cdict.has_key(n_galaxies):
                cdict[n_galaxies].append(checksum)
            else:
                cdict[n_galaxies] = [checksum]
        else:
            listcount.append(False)
            checksum_list.append(-99)
            #print 'Removing classification for %s' % user_name
    # Remove duplicates and classifications for no object
    #clist = [c for lc,c in zip(listcount,checksum_list) if lc and c != -99]
    clist = [c for lc,c in zip(listcount,clist_all) if lc and c['checksum'] != -99]
    clen_diff = clen_start - len(clist)
    '''
    if clen_diff > 0:
        print '\nSkipping %i duplicated classifications for %s. %i good classifications total.' % (clen_diff,zid,len(clist))
    '''
    maxval=0
    mc_checksum = 0.
    # Find the number of galaxies that has the highest number of consensus classifications
    for k,v in cdict.iteritems():
        mc = Counter(v).most_common()
        # Check if the most common selection coordinate was for no radio contours
        if mc[0][0] == -99.0:
            if len(mc) > 1:
                # If so, take the selection with the next-highest number of counts
                mc_best = mc[1]
            else:
                continue
        # Selection with the highest number of counts
        else:
            mc_best = mc[0]
        # If the new selection has more counts than the previous one, choose it as the best match;
        # if tied or less than this, remain with the current consensus number of galaxies
        if mc_best[1] > maxval:
            maxval = mc_best[1]
            mc_checksum = mc_best[0]
    # Find a galaxy that matches the checksum (easier to keep track as a list)
    try:
        cmatch = next(i for i in clist if i['checksum'] == mc_checksum)
    except StopIteration:
        # Necessary for objects like ARG0003par; one classifier recorded 22 "No IR","No Contours" in a short space. Still shouldn't happen.
        print 'No non-zero classifications recorded for %s' % zid
        return None
    '''
    try:
        index = clist.index(mc_checksum)
        cmatch = _c[index]
    except ValueError:
        # Necessary for objects like ARG0003par; one classifier recorded 22 "No IR","No Contours" in a short space. Still shouldn't happen.
        print 'No non-zero classifications recorded for %s' % zid
        return None
    '''
    # Find IR peak for the checksummed galaxies
    goodann = [x for x in cmatch['annotations'] if x.keys()[0] not in bad_keys]
    # Find the sum of the xmax coordinates for each galaxy. This gives the index to search on.
    cons = {}
    cons['zid'] = zid
    cons['source'] = sub['metadata']['source']
    ir_x,ir_y = {},{}
    cons['answer'] = {}
    cons['n_users'] = maxval
    cons['n_total'] = len(clist)
    answer = cons['answer']
    # index the consensus answer by the per-galaxy xmax checksum
    for k,gal in enumerate(goodann):
        xmax_temp = []
        bbox_temp = []
        try:
            for v in gal['radio'].itervalues():
                xmax_temp.append(float(v['xmax']))
                bbox_temp.append((v['xmax'],v['ymax'],v['xmin'],v['ymin']))
            checksum2 = round(sum(xmax_temp),3)
            answer[checksum2] = {}
            answer[checksum2]['ind'] = k
            answer[checksum2]['xmax'] = xmax_temp
            answer[checksum2]['bbox'] = bbox_temp
        except KeyError:
            print gal, zid
        except AttributeError:
            print 'No Sources, No IR recorded for %s' % zid
        # Make empty copy of next dict in same loop
        ir_x[k] = []
        ir_y[k] = []
    # Now loop over all sets of classifications to get the IR counterparts
    for c in clist:
        if c['checksum'] == mc_checksum:
            annlist = [ann for ann in c['annotations'] if ann.keys()[0] not in bad_keys]
            for ann in annlist:
                if 'ir' in ann.keys():
                    # Find the index k that this corresponds to
                    try:
                        xmax_checksum = round(sum([float(ann['radio'][a]['xmax']) for a in ann['radio']]),3)
                    except TypeError:
                        xmax_checksum = -99
                    try:
                        k = answer[xmax_checksum]['ind']
                        if ann['ir'] == 'No Sources':
                            ir_x[k].append(-99)
                            ir_y[k].append(-99)
                        else:
                            # Only takes the first IR source right now; NEEDS TO BE MODIFIED.
                            ir_x[k].append(float(ann['ir']['0']['x']))
                            ir_y[k].append(float(ann['ir']['0']['y']))
                    except KeyError:
                        print '"No radio" still appearing as valid consensus option.'
    # Perform a kernel density estimate on the data for each galaxy
    scale_ir = IMG_HEIGHT_NEW/IMG_HEIGHT_OLD
    peak_data = []
    # Remove empty IR peaks if they exist
    for (xk,xv),(yk,yv) in zip(ir_x.iteritems(),ir_y.iteritems()):
        if len(xv) == 0:
            ir_x.pop(xk)
        if len(yv) == 0:
            ir_y.pop(yk)
    assert len(ir_x) == len(ir_y),'Lengths of ir_x (%i) and ir_y (%i) are not the same' % (len(ir_x),len(ir_y))
    for (xk,xv),(yk,yv) in zip(ir_x.iteritems(),ir_y.iteritems()):
        if len(xv) == 0:
            # NOTE(review): bare name `irx` raises NameError if this branch
            # is ever reached; empty entries were popped above, so this looks
            # like dead code, but it should be removed or turned into an
            # explicit `continue`/exception. TODO confirm intent.
            irx
        pd = {}
        # -99 sentinels mean "No Sources"; filter them out of the KDE input
        x_exists = [xt * scale_ir for xt in xv if xt != -99.0]
        y_exists = [yt * scale_ir for yt in yv if yt != -99.0]
        x_all = [xt * scale_ir for xt in xv]
        y_all = [yt * scale_ir for yt in yv]
        coords_all = [(xx,yy) for xx,yy in zip(x_all,y_all)]
        ir_Counter = Counter(coords_all)
        most_common_ir = ir_Counter.most_common(1)[0][0]
        # KDE needs at least 3 distinct x and y values and a real consensus
        if len(Counter(x_exists)) > 2 and len(Counter(y_exists)) > 2 and most_common_ir != (-99,-99):
            # X,Y = grid of uniform coordinates over the IR pixel plane
            X, Y = np.mgrid[xmin:xmax, ymin:ymax]
            positions = np.vstack([X.ravel(), Y.ravel()])
            try:
                values = np.vstack([x_exists, y_exists])
            except ValueError:
                # Breaks on the tutorial subject. Find out why len(x) != len(y)
                print zid
                print 'Length of IR x array: %i; Length of IR y array: %i' % (len(x_exists),len(y_exists))
            try:
                kernel = stats.gaussian_kde(values)
            except LinAlgError:
                print 'LinAlgError in KD estimation for %s' % zid,x_exists,y_exists
                continue
            # Even if there are more than 2 sets of points, if they are mutually co-linear,
            # matrix can't invert and kernel returns NaNs.
            kp = kernel(positions)
            if np.isnan(kp).sum() > 0:
                # KDE failed; fall back to the mean of the clicked positions
                acp = collinearity.collinear(x_exists,y_exists)
                if len(acp) > 0:
                    print 'There are %i unique points for %s (source no. %i in the field), but all are co-linear; KDE estimate does not work.' % (len(Counter(x_exists)),zid,xk)
                else:
                    print 'There are NaNs in the KDE for %s (source no. %i in the field), but points are not co-linear.' % (zid,xk)
                for k,v in answer.iteritems():
                    if v['ind'] == xk:
                        answer[k]['ir'] = (np.mean(x_exists),np.mean(y_exists))
            else:
                Z = np.reshape(kp.T, X.shape)
                # Find the number of peaks
                # http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array
                neighborhood = np.ones((10,10))
                local_max = maximum_filter(Z, footprint=neighborhood)==Z
                background = (Z==0)
                eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)
                detected_peaks = local_max ^ eroded_background
                npeaks = detected_peaks.sum()
                #return X,Y,Z,npeaks
                pd['X'] = X
                pd['Y'] = Y
                pd['Z'] = Z
                pd['npeaks'] = npeaks
                try:
                    xpeak = float(pd['X'][pd['Z']==pd['Z'].max()][0])
                    ypeak = float(pd['Y'][pd['Z']==pd['Z'].max()][0])
                except IndexError:
                    print pd
                    print zid, clist
                for k,v in answer.iteritems():
                    if v['ind'] == xk:
                        answer[k]['ir_peak'] = (xpeak,ypeak)
                        # Don't write to consensus for serializable JSON object
                        if write_peak_data:
                            answer[k]['peak_data'] = pd
                            answer[k]['ir_x'] = x_exists
                            answer[k]['ir_y'] = y_exists
        else:
            # Note: need to actually put a limit in if less than half of users selected IR counterpart.
            # Right now it still IDs a sources even if only 1/10 users said it was there.
            for k,v in answer.iteritems():
                if v['ind'] == xk:
                    # Case 1: multiple users selected IR source, but not enough unique points to pinpoint peak
                    if most_common_ir != (-99,-99) and len(x_exists) > 0 and len(y_exists) > 0:
                        answer[k]['ir'] = (x_exists[0],y_exists[0])
                    # Case 2: most users have selected No Sources
                    else:
                        answer[k]['ir'] = (-99,-99)
    return cons
# Run the consensus pipeline on the default subject when executed directly.
if __name__ == "__main__":
    checksum()
| mit |
appapantula/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is a list of connection. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The output of the 3 models are combined in a 2D graph where nodes
represents the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)

# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause

import datetime

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection

from sklearn import cluster, covariance, manifold

###############################################################################
# Retrieve the data from Internet

# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)

# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
    'TOT': 'Total',
    'XOM': 'Exxon',
    'CVX': 'Chevron',
    'COP': 'ConocoPhillips',
    'VLO': 'Valero Energy',
    'MSFT': 'Microsoft',
    'IBM': 'IBM',
    'TWX': 'Time Warner',
    'CMCSA': 'Comcast',
    'CVC': 'Cablevision',
    'YHOO': 'Yahoo',
    'DELL': 'Dell',
    'HPQ': 'HP',
    'AMZN': 'Amazon',
    'TM': 'Toyota',
    'CAJ': 'Canon',
    'MTU': 'Mitsubishi',
    'SNE': 'Sony',
    'F': 'Ford',
    'HMC': 'Honda',
    'NAV': 'Navistar',
    'NOC': 'Northrop Grumman',
    'BA': 'Boeing',
    'KO': 'Coca Cola',
    'MMM': '3M',
    'MCD': 'Mc Donalds',
    'PEP': 'Pepsi',
    'MDLZ': 'Kraft Foods',
    'K': 'Kellogg',
    'UN': 'Unilever',
    'MAR': 'Marriott',
    'PG': 'Procter Gamble',
    'CL': 'Colgate-Palmolive',
    'GE': 'General Electrics',
    'WFC': 'Wells Fargo',
    'JPM': 'JPMorgan Chase',
    'AIG': 'AIG',
    'AXP': 'American express',
    'BAC': 'Bank of America',
    'GS': 'Goldman Sachs',
    'AAPL': 'Apple',
    'SAP': 'SAP',
    'CSCO': 'Cisco',
    'TXN': 'Texas instruments',
    'XRX': 'Xerox',
    'LMT': 'Lookheed Martin',
    'WMT': 'Wal-Mart',
    'WBA': 'Walgreen',
    'HD': 'Home Depot',
    'GSK': 'GlaxoSmithKline',
    'PFE': 'Pfizer',
    'SNY': 'Sanofi-Aventis',
    'NVS': 'Novartis',
    'KMB': 'Kimberly-Clark',
    'R': 'Ryder',
    'GD': 'General Dynamics',
    'RTN': 'Raytheon',
    'CVS': 'CVS',
    'CAT': 'Caterpillar',
    'DD': 'DuPont de Nemours'}

symbols, names = np.array(list(symbol_dict.items())).T

quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
          for symbol in symbols]

# Explicit names instead of ``open``/``close``, which shadowed the builtin
# ``open``; ``float`` replaces the deprecated ``np.float`` alias (same type).
open_prices = np.array([q.open for q in quotes]).astype(float)
close_prices = np.array([q.close for q in quotes]).astype(float)

# The daily variations of the quotes are what carry most information
variation = close_prices - open_prices

###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()

# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)

###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()

for i in range(n_labels + 1):
    print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))

###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane

# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
    n_components=2, eigen_solver='dense', n_neighbors=6)

embedding = node_position_model.fit_transform(X.T).T

###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')

# Display a graph of the partial correlations: rescale the precision
# matrix so off-diagonal entries are partial correlation coefficients
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)

# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
            cmap=plt.cm.spectral)

# Plot the edges
start_idx, end_idx = np.where(non_zero)
# a sequence of (*line0*, *line1*, *line2*), where::
#    linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
            for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
                    zorder=0, cmap=plt.cm.hot_r,
                    norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)

# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels: nudge each
# label away from its nearest neighbor along each axis.
for index, (name, label, (x, y)) in enumerate(
        zip(names, labels, embedding.T)):

    dx = x - embedding[0]
    dx[index] = 1
    dy = y - embedding[1]
    dy[index] = 1
    this_dx = dx[np.argmin(np.abs(dy))]
    this_dy = dy[np.argmin(np.abs(dx))]
    if this_dx > 0:
        horizontalalignment = 'left'
        x = x + .002
    else:
        horizontalalignment = 'right'
        x = x - .002
    if this_dy > 0:
        verticalalignment = 'bottom'
        y = y + .002
    else:
        verticalalignment = 'top'
        y = y - .002
    plt.text(x, y, name, size=10,
             horizontalalignment=horizontalalignment,
             verticalalignment=verticalalignment,
             bbox=dict(facecolor='w',
                       edgecolor=plt.cm.spectral(label / float(n_labels)),
                       alpha=.6))

plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
         embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
         embedding[1].max() + .03 * embedding[1].ptp())

plt.show()
| bsd-3-clause |
wzbozon/statsmodels | statsmodels/genmod/tests/test_glm.py | 19 | 37824 | """
Test functions for models.GLM
"""
from statsmodels.compat import range
import os
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_raises,
assert_allclose, assert_, assert_array_less, dec)
from scipy import stats
import statsmodels.api as sm
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.tools.tools import add_constant
from statsmodels.tools.sm_exceptions import PerfectSeparationError
from statsmodels.discrete import discrete_model as discrete
from nose import SkipTest
import warnings
# Test Precisions: decimal places used by assert_almost_equal in the
# comparison mixins below (smaller value = looser comparison).
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_0 = 0

try:
    import matplotlib.pyplot as plt  # makes plt available for test functions
    have_matplotlib = True
except Exception:
    # was a bare ``except:``, which also swallowed KeyboardInterrupt and
    # SystemExit; any failure importing matplotlib still disables the
    # plotting tests (pyplot import can fail with more than ImportError,
    # e.g. backend problems)
    have_matplotlib = False

# Set pdf_output to True to collect the figures produced by the plotting
# tests in a single PDF file for manual inspection.
pdf_output = False

if pdf_output:
    from matplotlib.backends.backend_pdf import PdfPages
    pdf = PdfPages("test_glm.pdf")
else:
    pdf = None
def close_or_save(pdf, fig):
    # Save ``fig`` into the module-level PDF report when pdf_output is
    # enabled, then close it so figures do not accumulate across tests.
    if pdf_output:
        pdf.savefig(fig)
    plt.close(fig)
def teardown_module():
    # nose module-level teardown: close any remaining figures and, when
    # PDF output was requested, finalize the report file.
    if have_matplotlib:
        plt.close('all')
        if pdf_output:
            pdf.close()
class CheckModelResultsMixin(object):
    '''
    Compare a fitted GLM in ``self.res1`` against reference results in
    ``self.res2``.

    res2 should be either the results from RModelWrap
    or the results as defined in model_results_data

    Subclasses override the ``decimal_*`` class attributes to loosen
    individual comparisons where the reference values are known to agree
    to fewer digits.
    '''
    decimal_params = DECIMAL_4

    def test_params(self):
        assert_almost_equal(self.res1.params, self.res2.params,
                            self.decimal_params)

    decimal_bse = DECIMAL_4

    def test_standard_errors(self):
        assert_almost_equal(self.res1.bse, self.res2.bse, self.decimal_bse)

    decimal_resids = DECIMAL_4

    def test_residuals(self):
        # column order matches the reference ``resids`` array:
        # pearson, deviance, working, anscombe, response
        resids = np.column_stack((self.res1.resid_pearson,
                                  self.res1.resid_deviance,
                                  self.res1.resid_working,
                                  self.res1.resid_anscombe,
                                  self.res1.resid_response))
        assert_almost_equal(resids, self.res2.resids, self.decimal_resids)

    decimal_aic_R = DECIMAL_4

    def test_aic_R(self):
        # R includes the estimation of the scale as a lost dof
        # Doesn't with Gamma though
        if self.res1.scale != 1:
            dof = 2
        else:
            dof = 0
        assert_almost_equal(self.res1.aic+dof, self.res2.aic_R,
                            self.decimal_aic_R)

    decimal_aic_Stata = DECIMAL_4

    def test_aic_Stata(self):
        # Stata uses the below llf for aic definition for these families
        if isinstance(self.res1.model.family, (sm.families.Gamma,
                                               sm.families.InverseGaussian)):
            llf = self.res1.model.family.loglike(self.res1.model.endog,
                                                 self.res1.mu, scale=1)
            aic = (-2*llf+2*(self.res1.df_model+1))/self.res1.nobs
        else:
            aic = self.res1.aic/self.res1.nobs
        assert_almost_equal(aic, self.res2.aic_Stata, self.decimal_aic_Stata)

    decimal_deviance = DECIMAL_4

    def test_deviance(self):
        assert_almost_equal(self.res1.deviance, self.res2.deviance,
                            self.decimal_deviance)

    decimal_scale = DECIMAL_4

    def test_scale(self):
        assert_almost_equal(self.res1.scale, self.res2.scale,
                            self.decimal_scale)

    decimal_loglike = DECIMAL_4

    def test_loglike(self):
        # Stata uses the below llf for these families
        # We differ with R for them
        if isinstance(self.res1.model.family, (sm.families.Gamma,
                                               sm.families.InverseGaussian)):
            llf = self.res1.model.family.loglike(self.res1.model.endog,
                                                 self.res1.mu, scale=1)
        else:
            llf = self.res1.llf
        assert_almost_equal(llf, self.res2.llf, self.decimal_loglike)

    decimal_null_deviance = DECIMAL_4

    def test_null_deviance(self):
        assert_almost_equal(self.res1.null_deviance, self.res2.null_deviance,
                            self.decimal_null_deviance)

    decimal_bic = DECIMAL_4

    def test_bic(self):
        # note: the reference value is Stata's BIC
        assert_almost_equal(self.res1.bic, self.res2.bic_Stata,
                            self.decimal_bic)

    def test_degrees(self):
        assert_equal(self.res1.model.df_resid, self.res2.df_resid)

    decimal_fittedvalues = DECIMAL_4

    def test_fittedvalues(self):
        assert_almost_equal(self.res1.fittedvalues, self.res2.fittedvalues,
                            self.decimal_fittedvalues)

    def test_tpvalues(self):
        # test comparing tvalues and pvalues with normal implementation
        # make sure they use normal distribution (inherited in results class)
        params = self.res1.params
        tvalues = params / self.res1.bse
        pvalues = stats.norm.sf(np.abs(tvalues)) * 2
        half_width = stats.norm.isf(0.025) * self.res1.bse
        conf_int = np.column_stack((params - half_width, params + half_width))
        assert_almost_equal(self.res1.tvalues, tvalues)
        assert_almost_equal(self.res1.pvalues, pvalues)
        assert_almost_equal(self.res1.conf_int(), conf_int)

    def test_summary(self):
        # SMOKE test: both summary flavors must run without error
        self.res1.summary()
        self.res1.summary2()
class CheckComparisonMixin(object):
    '''
    Compare a GLM fit (``self.res1``) against the fit of the equivalent
    model from ``statsmodels.discrete`` (``self.resd``): log-likelihood,
    score, and hessian must agree between the two implementations.
    '''

    def test_compare_discrete(self):
        res1 = self.res1
        resd = self.resd
        assert_allclose(res1.llf, resd.llf, rtol=1e-10)
        score_obs1 = res1.model.score_obs(res1.params)
        score_obsd = resd.model.score_obs(resd.params)
        assert_allclose(score_obs1, score_obsd, rtol=1e-10)

        # score: must equal the sum of score_obs and be ~0 at the optimum
        score1 = res1.model.score(res1.params)
        assert_allclose(score1, score_obs1.sum(0), atol=1e-20)
        assert_allclose(score1, np.zeros(score_obs1.shape[1]), atol=1e-7)

        hessian1 = res1.model.hessian(res1.params, observed=False)
        hessiand = resd.model.hessian(resd.params)
        assert_allclose(hessian1, hessiand, rtol=1e-10)

        hessian1 = res1.model.hessian(res1.params, observed=True)
        hessiand = resd.model.hessian(resd.params)
        assert_allclose(hessian1, hessiand, rtol=1e-9)

    def test_score_test(self):
        res1 = self.res1
        # fake example, should be zero, k_constraint should be 0
        st, pv, df = res1.model.score_test(res1.params, k_constraints=1)
        assert_allclose(st, 0, atol=1e-20)
        assert_allclose(pv, 1, atol=1e-10)
        assert_equal(df, 1)

        st, pv, df = res1.model.score_test(res1.params, k_constraints=0)
        assert_allclose(st, 0, atol=1e-20)
        assert_(np.isnan(pv), msg=repr(pv))
        assert_equal(df, 0)

        # TODO: no verified numbers largely SMOKE test
        exog_extra = res1.model.exog[:, 1]**2
        st, pv, df = res1.model.score_test(res1.params, exog_extra=exog_extra)
        assert_array_less(0.1, st)
        assert_array_less(0.1, pv)
        assert_equal(df, 1)
class TestGlmGaussian(CheckModelResultsMixin):
    def __init__(self):
        '''
        Test Gaussian family with canonical identity link
        '''
        # Test Precisions
        self.decimal_resids = DECIMAL_3
        self.decimal_params = DECIMAL_2
        self.decimal_bic = DECIMAL_0
        self.decimal_bse = DECIMAL_3

        from statsmodels.datasets.longley import load
        self.data = load()
        self.data.exog = add_constant(self.data.exog, prepend=False)
        self.res1 = GLM(self.data.endog, self.data.exog,
                        family=sm.families.Gaussian()).fit()
        from .results.results_glm import Longley
        self.res2 = Longley()

    def test_compare_OLS(self):
        # Gaussian GLM with identity link is numerically equivalent to
        # OLS: compare llf, score_obs and hessian across implementations.
        res1 = self.res1
        # OLS doesn't define score_obs
        from statsmodels.regression.linear_model import OLS
        resd = OLS(self.data.endog, self.data.exog).fit()
        self.resd = resd  # attach to access from the outside

        assert_allclose(res1.llf, resd.llf, rtol=1e-10)
        score_obs1 = res1.model.score_obs(res1.params, scale=None)
        score_obsd = resd.resid[:, None] / resd.scale * resd.model.exog
        # low precision because of badly scaled exog
        assert_allclose(score_obs1, score_obsd, rtol=1e-8)

        score_obs1 = res1.model.score_obs(res1.params, scale=1)
        score_obsd = resd.resid[:, None] * resd.model.exog
        assert_allclose(score_obs1, score_obsd, rtol=1e-8)

        hess_obs1 = res1.model.hessian(res1.params, scale=None)
        hess_obsd = -1. / resd.scale * resd.model.exog.T.dot(resd.model.exog)
        # low precision because of badly scaled exog
        assert_allclose(hess_obs1, hess_obsd, rtol=1e-8)

    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed."
    #     Gauss = r.gaussian
    #     self.res2 = RModel(self.data.endog, self.data.exog, r.glm, family=Gauss)
    #     self.res2.resids = np.array(self.res2.resid)[:,None]*np.ones((1,5))
    #     self.res2.null_deviance = 185008826  # taken from R. Rpy bug?
class TestGaussianLog(CheckModelResultsMixin):
    def __init__(self):
        '''
        Test Gaussian family with a (non-canonical) log link on
        simulated data.
        '''
        # Test Precision
        self.decimal_aic_R = DECIMAL_0
        self.decimal_aic_Stata = DECIMAL_2
        self.decimal_loglike = DECIMAL_0
        self.decimal_null_deviance = DECIMAL_1

        nobs = 100
        x = np.arange(nobs)
        np.random.seed(54321)
        # y = 1.0 - .02*x - .001*x**2 + 0.001 * np.random.randn(nobs)
        self.X = np.c_[np.ones((nobs, 1)), x, x**2]
        self.lny = np.exp(-(-1.0 + 0.02*x + 0.0001*x**2)) +\
            0.001 * np.random.randn(nobs)

        GaussLog_Model = GLM(self.lny, self.X,
                             family=sm.families.Gaussian(sm.families.links.log))
        self.res1 = GaussLog_Model.fit()
        from .results.results_glm import GaussianLog
        self.res2 = GaussianLog()

    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed"
    #     GaussLogLink = r.gaussian(link = "log")
    #     GaussLog_Res_R = RModel(self.lny, self.X, r.glm, family=GaussLogLink)
    #     self.res2 = GaussLog_Res_R
class TestGaussianInverse(CheckModelResultsMixin):
    def __init__(self):
        '''
        Test Gaussian family with a (non-canonical) inverse-power link
        on simulated data.
        '''
        # Test Precisions
        self.decimal_bic = DECIMAL_1
        self.decimal_aic_R = DECIMAL_1
        self.decimal_aic_Stata = DECIMAL_3
        self.decimal_loglike = DECIMAL_1
        self.decimal_resids = DECIMAL_3

        nobs = 100
        x = np.arange(nobs)
        np.random.seed(54321)
        # NOTE: ``y`` is never used, but its randn draw advances the RNG
        # state, so removing it would change ``y_inv`` below.
        y = 1.0 + 2.0 * x + x**2 + 0.1 * np.random.randn(nobs)
        self.X = np.c_[np.ones((nobs, 1)), x, x**2]
        self.y_inv = (1. + .02*x + .001*x**2)**-1 + .001 * np.random.randn(nobs)
        InverseLink_Model = GLM(self.y_inv, self.X,
                                family=sm.families.Gaussian(sm.families.links.inverse_power))
        InverseLink_Res = InverseLink_Model.fit()
        self.res1 = InverseLink_Res
        from .results.results_glm import GaussianInverse
        self.res2 = GaussianInverse()

    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed."
    #     InverseLink = r.gaussian(link = "inverse")
    #     InverseLink_Res_R = RModel(self.y_inv, self.X, r.glm, family=InverseLink)
    #     self.res2 = InverseLink_Res_R
class TestGlmBinomial(CheckModelResultsMixin):
    def __init__(self):
        '''
        Test Binomial family with canonical logit link using star98 dataset.
        '''
        self.decimal_resids = DECIMAL_1
        self.decimal_bic = DECIMAL_2

        from statsmodels.datasets.star98 import load
        from .results.results_glm import Star98
        data = load()
        data.exog = add_constant(data.exog, prepend=False)
        self.res1 = GLM(data.endog, data.exog,
                        family=sm.families.Binomial()).fit()
        # NOTE: if you want to replicate with RModel
        # res2 = RModel(data.endog[:,0]/trials, data.exog, r.glm,
        #               family=r.binomial, weights=trials)
        self.res2 = Star98()

# TODO:
# Non-Canonical Links for the Binomial family require the algorithm to be
# slightly changed

# class TestGlmBinomialLog(CheckModelResultsMixin):
#     pass

# class TestGlmBinomialLogit(CheckModelResultsMixin):
#     pass

# class TestGlmBinomialProbit(CheckModelResultsMixin):
#     pass

# class TestGlmBinomialCloglog(CheckModelResultsMixin):
#     pass

# class TestGlmBinomialPower(CheckModelResultsMixin):
#     pass

# class TestGlmBinomialLoglog(CheckModelResultsMixin):
#     pass

# class TestGlmBinomialLogc(CheckModelResultsMixin):
#     # TODO: need include logc link
#     pass
class TestGlmBernoulli(CheckModelResultsMixin, CheckComparisonMixin):
    def __init__(self):
        '''
        Bernoulli (binary Binomial) GLM on the Lbw data, also compared
        against ``discrete.Logit`` via CheckComparisonMixin.
        '''
        from .results.results_glm import Lbw
        self.res2 = Lbw()
        self.res1 = GLM(self.res2.endog, self.res2.exog,
                        family=sm.families.Binomial()).fit()

        # fit the equivalent discrete Logit; start close to save time
        modd = discrete.Logit(self.res2.endog, self.res2.exog)
        self.resd = modd.fit(start_params=self.res1.params * 0.9, disp=False)

    def score_test_r(self):
        # NOTE: no ``test_`` prefix, so this is not collected automatically.
        # Reference numbers come from R's statmod::glm.scoretest, see the
        # ``cmd_r`` script below.
        res1 = self.res1
        res2 = self.res2
        st, pv, df = res1.model.score_test(res1.params,
                                           exog_extra=res1.model.exog[:, 1]**2)
        st_res = 0.2837680293459376  # (-0.5326988167303712)**2
        assert_allclose(st, st_res, rtol=1e-4)

        st, pv, df = res1.model.score_test(res1.params,
                                           exog_extra=res1.model.exog[:, 0]**2)
        st_res = 0.6713492821514992  # (-0.8193590679009413)**2
        assert_allclose(st, st_res, rtol=1e-4)

        select = list(range(9))
        select.pop(7)
        res1b = GLM(res2.endog, res2.exog[:, select],
                    family=sm.families.Binomial()).fit()
        tres = res1b.model.score_test(res1b.params,
                                      exog_extra=res1.model.exog[:, -2])
        tres = np.asarray(tres[:2]).ravel()
        tres_r = (2.7864148487452, 0.0950667)
        assert_allclose(tres, tres_r, rtol=1e-4)

        # R script used to produce the reference values above
        cmd_r = """\
data = read.csv("...statsmodels\\statsmodels\\genmod\\tests\\results\\stata_lbw_glm.csv")
data["race_black"] = data["race"] == "black"
data["race_other"] = data["race"] == "other"
mod = glm(low ~ age + lwt + race_black + race_other + smoke + ptl + ht + ui, family=binomial, data=data)
options(digits=16)
anova(mod, test="Rao")
library(statmod)
s = glm.scoretest(mod, data["age"]**2)
s**2
s = glm.scoretest(mod, data["lwt"]**2)
s**2
"""

# class TestGlmBernoulliIdentity(CheckModelResultsMixin):
#     pass

# class TestGlmBernoulliLog(CheckModelResultsMixin):
#     pass

# class TestGlmBernoulliProbit(CheckModelResultsMixin):
#     pass

# class TestGlmBernoulliCloglog(CheckModelResultsMixin):
#     pass

# class TestGlmBernoulliPower(CheckModelResultsMixin):
#     pass

# class TestGlmBernoulliLoglog(CheckModelResultsMixin):
#     pass

# class test_glm_bernoulli_logc(CheckModelResultsMixin):
#     pass
class TestGlmGamma(CheckModelResultsMixin):
    def __init__(self):
        '''
        Tests Gamma family with canonical inverse link (power -1)
        '''
        # Test Precisions
        self.decimal_aic_R = -1  # TODO: off by about 1, we are right with Stata
        self.decimal_resids = DECIMAL_2

        from statsmodels.datasets.scotland import load
        from .results.results_glm import Scotvote
        data = load()
        data.exog = add_constant(data.exog, prepend=False)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            res1 = GLM(data.endog, data.exog,
                       family=sm.families.Gamma()).fit()
        self.res1 = res1
        # res2 = RModel(data.endog, data.exog, r.glm, family=r.Gamma)
        res2 = Scotvote()
        res2.aic_R += 2  # R doesn't count degree of freedom for scale with gamma
        self.res2 = res2
class TestGlmGammaLog(CheckModelResultsMixin):
    def __init__(self):
        '''
        Tests Gamma family with a (non-canonical) log link.
        '''
        # Test Precisions
        self.decimal_resids = DECIMAL_3
        self.decimal_aic_R = DECIMAL_0
        self.decimal_fittedvalues = DECIMAL_3

        from .results.results_glm import CancerLog
        res2 = CancerLog()
        self.res1 = GLM(res2.endog, res2.exog,
                        family=sm.families.Gamma(link=sm.families.links.log)).fit()
        self.res2 = res2

    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed."
    #     self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
    #                        family=r.Gamma(link="log"))
    #     self.res2.null_deviance = 27.92207137420696  # From R (bug in rpy)
    #     self.res2.bic = -154.1582089453923  # from Stata
class TestGlmGammaIdentity(CheckModelResultsMixin):
    def __init__(self):
        '''
        Tests Gamma family with a (non-canonical) identity link.
        '''
        # Test Precisions
        self.decimal_resids = -100  # TODO Very off from Stata?
        self.decimal_params = DECIMAL_2
        self.decimal_aic_R = DECIMAL_0
        self.decimal_loglike = DECIMAL_1

        from .results.results_glm import CancerIdentity
        res2 = CancerIdentity()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.res1 = GLM(res2.endog, res2.exog,
                            family=sm.families.Gamma(link=sm.families.links.identity)).fit()
        self.res2 = res2

    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed."
    #     self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
    #                        family=r.Gamma(link="identity"))
    #     self.res2.null_deviance = 27.92207137420696  # from R, Rpy bug
class TestGlmPoisson(CheckModelResultsMixin, CheckComparisonMixin):
    def __init__(self):
        '''
        Tests Poisson family with canonical log link.

        Test results were obtained by R.
        '''
        from .results.results_glm import Cpunish
        from statsmodels.datasets.cpunish import load
        self.data = load()
        self.data.exog[:, 3] = np.log(self.data.exog[:, 3])
        self.data.exog = add_constant(self.data.exog, prepend=False)
        self.res1 = GLM(self.data.endog, self.data.exog,
                        family=sm.families.Poisson()).fit()
        self.res2 = Cpunish()
        # compare with discrete, start close to save time
        modd = discrete.Poisson(self.data.endog, self.data.exog)
        self.resd = modd.fit(start_params=self.res1.params * 0.9, disp=False)

# class TestGlmPoissonIdentity(CheckModelResultsMixin):
#     pass

# class TestGlmPoissonPower(CheckModelResultsMixin):
#     pass
class TestGlmInvgauss(CheckModelResultsMixin):
    def __init__(self):
        '''
        Tests the Inverse Gaussian family in GLM.

        Notes
        -----
        Used the rndivgx.ado file provided by Hardin and Hilbe to
        generate the data.  Results are read from model_results, which
        were obtained by running R_ig.s
        '''
        # Test Precisions
        self.decimal_aic_R = DECIMAL_0
        self.decimal_loglike = DECIMAL_0

        from .results.results_glm import InvGauss
        res2 = InvGauss()
        res1 = GLM(res2.endog, res2.exog,
                   family=sm.families.InverseGaussian()).fit()
        self.res1 = res1
        self.res2 = res2
class TestGlmInvgaussLog(CheckModelResultsMixin):
    def __init__(self):
        '''
        Inverse Gaussian family with a (non-canonical) log link.
        '''
        # Test Precisions
        self.decimal_aic_R = -10  # Big difference vs R.
        self.decimal_resids = DECIMAL_3

        from .results.results_glm import InvGaussLog
        res2 = InvGaussLog()
        self.res1 = GLM(res2.endog, res2.exog,
                        family=sm.families.InverseGaussian(
                            link=sm.families.links.log)).fit()
        self.res2 = res2

    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed."
    #     self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
    #                        family=r.inverse_gaussian(link="log"))
    #     self.res2.null_deviance = 335.1539777981053  # from R, Rpy bug
    #     self.res2.llf = -12162.72308  # from Stata, R's has big rounding diff
class TestGlmInvgaussIdentity(CheckModelResultsMixin):
    def __init__(self):
        '''
        Inverse Gaussian family with a (non-canonical) identity link.
        '''
        # Test Precisions
        self.decimal_aic_R = -10  # TODO: Big difference vs R
        self.decimal_fittedvalues = DECIMAL_3
        self.decimal_params = DECIMAL_3

        from .results.results_glm import Medpar1
        data = Medpar1()
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            self.res1 = GLM(data.endog, data.exog,
                            family=sm.families.InverseGaussian(
                                link=sm.families.links.identity)).fit()
        from .results.results_glm import InvGaussIdentity
        self.res2 = InvGaussIdentity()

    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed."
    #     self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
    #                        family=r.inverse_gaussian(link="identity"))
    #     self.res2.null_deviance = 335.1539777981053  # from R, Rpy bug
    #     self.res2.llf = -12163.25545  # from Stata, big diff with R
class TestGlmNegbinomial(CheckModelResultsMixin):
    def __init__(self):
        '''
        Test Negative Binomial family with canonical log link
        '''
        # Test Precision
        # NOTE(review): ``decimal_resid`` (singular) is never read by the
        # mixin, which uses ``decimal_resids`` -- looks like a typo; confirm.
        self.decimal_resid = DECIMAL_1
        self.decimal_params = DECIMAL_3
        self.decimal_resids = -1  # 1 % mismatch at 0
        self.decimal_fittedvalues = DECIMAL_1

        from statsmodels.datasets.committee import load
        self.data = load()
        self.data.exog[:, 2] = np.log(self.data.exog[:, 2])
        interaction = self.data.exog[:, 2]*self.data.exog[:, 1]
        self.data.exog = np.column_stack((self.data.exog, interaction))
        self.data.exog = add_constant(self.data.exog, prepend=False)
        self.res1 = GLM(self.data.endog, self.data.exog,
                        family=sm.families.NegativeBinomial()).fit()
        from .results.results_glm import Committee
        res2 = Committee()
        res2.aic_R += 2  # They don't count a degree of freedom for the scale
        self.res2 = res2

    # def setup(self):
    #     if skipR:
    #         raise SkipTest, "Rpy not installed"
    #     r.library('MASS')  # this doesn't work when done in rmodelwrap?
    #     self.res2 = RModel(self.data.endog, self.data.exog, r.glm,
    #                        family=r.negative_binomial(1))
    #     self.res2.null_deviance = 27.8110469364343

# class TestGlmNegbinomial_log(CheckModelResultsMixin):
#     pass

# class TestGlmNegbinomial_power(CheckModelResultsMixin):
#     pass

# class TestGlmNegbinomial_nbinom(CheckModelResultsMixin):
#     pass
# NOTE: hacked together version to test poisson offset
class TestGlmPoissonOffset(CheckModelResultsMixin):
    @classmethod
    def setupClass(cls):
        # Same Cpunish setup as TestGlmPoisson, but fit with a constant
        # exposure of 100 per observation; the exposure's effect is then
        # folded back into the constant so the reference results match.
        from .results.results_glm import Cpunish
        from statsmodels.datasets.cpunish import load
        data = load()
        data.exog[:, 3] = np.log(data.exog[:, 3])
        data.exog = add_constant(data.exog, prepend=False)
        exposure = [100] * len(data.endog)
        cls.data = data
        cls.exposure = exposure
        cls.res1 = GLM(data.endog, data.exog, family=sm.families.Poisson(),
                       exposure=exposure).fit()
        cls.res1.params[-1] += np.log(100)  # add exposure back in to param
                                            # to make the results the same
        cls.res2 = Cpunish()

    def test_missing(self):
        # make sure offset is dropped correctly
        endog = self.data.endog.copy()
        endog[[2, 4, 6, 8]] = np.nan
        mod = GLM(endog, self.data.exog, family=sm.families.Poisson(),
                  exposure=self.exposure, missing='drop')
        assert_equal(mod.exposure.shape[0], 13)

    def test_offset_exposure(self):
        # exposure=x and offset=log(x) should have the same effect
        np.random.seed(382304)
        endog = np.random.randint(0, 10, 100)
        exog = np.random.normal(size=(100, 3))
        exposure = np.random.uniform(1, 2, 100)
        offset = np.random.uniform(1, 2, 100)
        mod1 = GLM(endog, exog, family=sm.families.Poisson(),
                   offset=offset, exposure=exposure).fit()
        offset2 = offset + np.log(exposure)
        mod2 = GLM(endog, exog, family=sm.families.Poisson(),
                   offset=offset2).fit()
        assert_almost_equal(mod1.params, mod2.params)

        # test recreating model
        mod1_ = mod1.model
        kwds = mod1_._get_init_kwds()
        assert_allclose(kwds['exposure'], exposure, rtol=1e-14)
        assert_allclose(kwds['offset'], mod1_.offset, rtol=1e-14)
        mod3 = mod1_.__class__(mod1_.endog, mod1_.exog, **kwds)
        assert_allclose(mod3.exposure, mod1_.exposure, rtol=1e-14)
        assert_allclose(mod3.offset, mod1_.offset, rtol=1e-14)

    def test_predict(self):
        np.random.seed(382304)
        endog = np.random.randint(0, 10, 100)
        exog = np.random.normal(size=(100, 3))
        exposure = np.random.uniform(1, 2, 100)
        mod1 = GLM(endog, exog, family=sm.families.Poisson(),
                   exposure=exposure).fit()
        exog1 = np.random.normal(size=(10, 3))
        exposure1 = np.random.uniform(1, 2, 10)

        # Doubling exposure time should double expected response
        pred1 = mod1.predict(exog=exog1, exposure=exposure1)
        pred2 = mod1.predict(exog=exog1, exposure=2*exposure1)
        assert_almost_equal(pred2, 2*pred1)

        # Check exposure defaults
        pred3 = mod1.predict()
        pred4 = mod1.predict(exposure=exposure)
        pred5 = mod1.predict(exog=exog, exposure=exposure)
        assert_almost_equal(pred3, pred4)
        assert_almost_equal(pred4, pred5)

        # Check offset defaults
        offset = np.random.uniform(1, 2, 100)
        mod2 = GLM(endog, exog, offset=offset, family=sm.families.Poisson()).fit()
        pred1 = mod2.predict()
        pred2 = mod2.predict(offset=offset)
        pred3 = mod2.predict(exog=exog, offset=offset)
        assert_almost_equal(pred1, pred2)
        assert_almost_equal(pred2, pred3)

        # Check that offset shifts the linear predictor
        mod3 = GLM(endog, exog, family=sm.families.Poisson()).fit()
        offset = np.random.uniform(1, 2, 10)
        pred1 = mod3.predict(exog=exog1, offset=offset, linear=True)
        pred2 = mod3.predict(exog=exog1, offset=2*offset, linear=True)
        assert_almost_equal(pred2, pred1+offset)
def test_prefect_pred():
    """A perfectly separable binary problem must raise PerfectSeparationError.

    (The "prefect" typo is kept: the function name is the public test id.)
    """
    results_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                               'results')
    iris = np.genfromtxt(os.path.join(results_dir, 'iris.csv'),
                         delimiter=",", skip_header=1)
    labels = iris[:, -1]
    features = iris[:, :-1]
    # keep only two of the three classes -> perfectly separable
    keep = labels != 2
    features = add_constant(features[keep], prepend=True)
    model = GLM(labels[keep], features, family=sm.families.Binomial())
    assert_raises(PerfectSeparationError, model.fit)
def test_score_test_OLS():
    """GLM score test must match OLS's LM test up to a df factor (see #1786)."""
    # nicer example than Longley
    from statsmodels.regression.linear_model import OLS

    np.random.seed(5)
    n_obs = 100
    noise_sd = 0.5
    design = np.random.uniform(0, 1, size=(n_obs, 5))
    design[:, 0] = 1  # constant column
    coefs = 1. / np.arange(1., design.shape[1] + 1)
    endog = design.dot(coefs) + noise_sd * np.random.randn(n_obs)

    fit_full = OLS(endog, design).fit()
    fit_restricted = OLS(endog, design[:, :-2]).fit()
    lm_result = fit_full.compare_lm_test(fit_restricted, demean=False)

    fit_glm = GLM(endog, design[:, :-2], family=sm.families.Gaussian()).fit()
    score_result = fit_glm.model.score_test(fit_glm.params,
                                            exog_extra=design[:, -2:])
    # difference in df_resid versus nobs in scale see #1786
    assert_allclose(lm_result[0] * 97 / 100., score_result[0], rtol=1e-13)
def test_attribute_writable_resettable():
    """Mutating one model's family.link must not leak into new instances.

    Regression test for mutables and class constructors.
    """
    data = sm.datasets.longley.load()
    endog, exog = data.endog, data.exog

    first_model = sm.GLM(endog, exog)
    assert_equal(first_model.family.link.power, 1.0)
    first_model.family.link.power = 2.
    assert_equal(first_model.family.link.power, 2.0)

    # a freshly constructed model must see the default again
    second_model = sm.GLM(endog, exog)
    assert_equal(second_model.family.link.power, 1.0)
class Test_start_params(CheckModelResultsMixin):
    def __init__(self):
        '''
        Test Gaussian family with canonical identity link
        '''
        # Same setup as TestGlmGaussian, but the fit is started from the
        # OLS solution to exercise the ``start_params`` code path.
        # Test Precisions
        self.decimal_resids = DECIMAL_3
        self.decimal_params = DECIMAL_2
        self.decimal_bic = DECIMAL_0
        self.decimal_bse = DECIMAL_3

        from statsmodels.datasets.longley import load
        self.data = load()
        self.data.exog = add_constant(self.data.exog, prepend=False)
        params = sm.OLS(self.data.endog, self.data.exog).fit().params
        self.res1 = GLM(self.data.endog, self.data.exog,
                        family=sm.families.Gaussian()).fit(start_params=params)
        from .results.results_glm import Longley
        self.res2 = Longley()
def test_glm_start_params():
    """Binomial fit converges from user-supplied start_params (see gh-1604)."""
    outcomes = np.array('0 1 0 0 0 1'.split(), int)
    counts = np.array([50, 1, 50, 1, 5, 10])
    # expand the frequency-weighted data into individual observations
    y2 = np.repeat(outcomes, counts)
    x2 = np.repeat([0, 0, 0.001, 100, -1, -1], counts)
    model = sm.GLM(y2, sm.add_constant(x2), family=sm.families.Binomial())
    result = model.fit(start_params=[-4, -5])
    np.testing.assert_almost_equal(result.params, [-4.60305022, -5.29634545], 6)
def test_loglike_no_opt():
    """llf at fixed start_params (maxiter=0) matches a hand-coded logit
    log-likelihood (see gh-1728)."""
    y = np.asarray([0, 1, 0, 0, 1, 1, 0, 1, 1, 1])
    x = np.arange(10, dtype=np.float64)

    def llf(params):
        # Bernoulli log-likelihood of a logit model evaluated at ``params``
        lin_pred = params[0] + params[1]*x
        pr = 1 / (1 + np.exp(-lin_pred))
        return np.sum(y*np.log(pr) + (1-y)*np.log(1-pr))

    for start in ([0, 0], [0, 1], [0.5, 0.5]):
        model = sm.GLM(y, sm.add_constant(x), family=sm.families.Binomial())
        result = model.fit(start_params=start, maxiter=0)
        assert_almost_equal(llf(start), result.llf)
def test_formula_missing_exposure():
    # see 2083: exposure handling in the formula interface with missing data
    import statsmodels.formula.api as smf
    import pandas as pd

    d = {'Foo': [1, 2, 10, 149], 'Bar': [1, 2, 3, np.nan],
         'constant': [1] * 4, 'exposure': np.random.uniform(size=4),
         'x': [1, 3, 2, 1.5]}
    df = pd.DataFrame(d)

    family = sm.families.Gaussian(link=sm.families.links.log)

    mod = smf.glm("Foo ~ Bar", data=df, exposure=df.exposure,
                  family=family)
    assert_(type(mod.exposure) is np.ndarray, msg='Exposure is not ndarray')

    # a length-5 exposure cannot be aligned with the 4-row frame
    exposure = pd.Series(np.random.uniform(size=5))
    assert_raises(ValueError, smf.glm, "Foo ~ Bar", data=df,
                  exposure=exposure, family=family)
    assert_raises(ValueError, GLM, df.Foo, df[['constant', 'Bar']],
                  exposure=exposure, family=family)
@dec.skipif(not have_matplotlib)
def test_plots():
    # Smoke test the regression diagnostic plots (added-variable, partial
    # residual, CERES) through both the array and the formula interfaces.
    np.random.seed(378)
    n = 200
    exog = np.random.normal(size=(n, 2))
    lin_pred = exog[:, 0] + exog[:, 1]**2
    prob = 1 / (1 + np.exp(-lin_pred))
    endog = 1 * (np.random.uniform(size=n) < prob)

    model = sm.GLM(endog, exog, family=sm.families.Binomial())
    result = model.fit()

    import matplotlib.pyplot as plt
    import pandas as pd
    from statsmodels.graphics.regressionplots import add_lowess

    # array interface
    for j in 0, 1:
        fig = result.plot_added_variable(j)
        add_lowess(fig.axes[0], frac=0.5)
        close_or_save(pdf, fig)
        fig = result.plot_partial_residuals(j)
        add_lowess(fig.axes[0], frac=0.5)
        close_or_save(pdf, fig)
        fig = result.plot_ceres_residuals(j)
        add_lowess(fig.axes[0], frac=0.5)
        close_or_save(pdf, fig)

    # formula interface
    data = pd.DataFrame({"y": endog, "x1": exog[:, 0], "x2": exog[:, 1]})
    model = sm.GLM.from_formula("y ~ x1 + x2", data, family=sm.families.Binomial())
    result = model.fit()
    for j in 0, 1:
        xname = ["x1", "x2"][j]
        fig = result.plot_added_variable(xname)
        add_lowess(fig.axes[0], frac=0.5)
        close_or_save(pdf, fig)
        fig = result.plot_partial_residuals(xname)
        add_lowess(fig.axes[0], frac=0.5)
        close_or_save(pdf, fig)
        fig = result.plot_ceres_residuals(xname)
        add_lowess(fig.axes[0], frac=0.5)
        close_or_save(pdf, fig)
def gen_endog(lin_pred, family_class, link, binom_version=0):
    """Simulate a response vector for a given GLM family and link.

    Parameters
    ----------
    lin_pred : ndarray
        Linear predictor; the mean is obtained through the inverse link.
    family_class : class
        One of the ``sm.families`` family classes (not an instance).
    link : class
        Link class; ``link().inverse`` maps the linear predictor to the mean.
    binom_version : int
        Binomial family only: 0 returns 0/1 outcomes, any other value
        returns a two-column (successes, failures) count array.

    Returns
    -------
    ndarray
        Simulated endog suitable for fitting with the given family.

    Raises
    ------
    ValueError
        If ``family_class`` is not one of the supported families.
    """
    np.random.seed(872)
    fam = sm.families
    mu = link().inverse(lin_pred)
    if family_class == fam.Binomial:
        if binom_version == 0:
            endog = 1 * (np.random.uniform(size=len(lin_pred)) < mu)
        else:
            # Grouped binomial data: column 0 holds successes out of n
            # Bernoulli trials, column 1 the complementary failure counts.
            endog = np.empty((len(lin_pred), 2))
            n = 10
            endog[:, 0] = (np.random.uniform(size=(len(lin_pred), n)) < mu[:, None]).sum(1)
            endog[:, 1] = n - endog[:, 0]
    elif family_class == fam.Poisson:
        endog = np.random.poisson(mu)
    elif family_class == fam.Gamma:
        endog = np.random.gamma(2, mu)
    elif family_class == fam.Gaussian:
        endog = mu + np.random.normal(size=len(lin_pred))
    elif family_class == fam.NegativeBinomial:
        from scipy.stats.distributions import nbinom
        endog = nbinom.rvs(mu, 0.5)
    elif family_class == fam.InverseGaussian:
        from scipy.stats.distributions import invgauss
        endog = invgauss.rvs(mu)
    else:
        # Previously a bare ValueError with no message; name the offender.
        raise ValueError("unsupported family class: %r" % (family_class,))
    return endog
def test_summary():
    """Smoke test: GLM.fit(...).summary() runs for both solvers."""
    np.random.seed(4323)
    nobs = 100
    exog = np.random.normal(size=(nobs, 2))
    exog[:, 0] = 1  # intercept column
    endog = np.random.normal(size=nobs)
    for method in ("irls", "cg"):
        model = sm.GLM(endog, exog, family=sm.families.Gaussian())
        rslt = model.fit(method=method)
        rslt.summary()
def test_gradient_irls():
    """
    Compare the results when using gradient optimization and IRLS.
    """
    # TODO: Find working examples for inverse_squared link
    np.random.seed(87342)

    fam = sm.families
    lnk = sm.families.links
    families = [(fam.Binomial, [lnk.logit, lnk.probit, lnk.cloglog, lnk.log, lnk.cauchy]),
                (fam.Poisson, [lnk.log, lnk.identity, lnk.sqrt]),
                (fam.Gamma, [lnk.log, lnk.identity, lnk.inverse_power]),
                (fam.Gaussian, [lnk.identity, lnk.log, lnk.inverse_power]),
                (fam.InverseGaussian, [lnk.log, lnk.identity, lnk.inverse_power, lnk.inverse_squared]),
                (fam.NegativeBinomial, [lnk.log, lnk.inverse_power, lnk.inverse_squared, lnk.identity])]

    n = 100
    p = 3
    exog = np.random.normal(size=(n, p))
    exog[:, 0] = 1

    for family_class, family_links in families:
        for link in family_links:
            for binom_version in 0,1:
                # The two-column (successes/failures) endog layout only
                # applies to the Binomial family.
                if family_class != fam.Binomial and binom_version == 1:
                    continue

                # Choose a linear predictor that keeps the implied mean
                # inside the family/link's valid domain; a few
                # combinations are skipped outright because neither
                # optimizer converges on them.
                if (family_class, link) == (fam.Poisson, lnk.identity):
                    lin_pred = 20 + exog.sum(1)
                elif (family_class, link) == (fam.Binomial, lnk.log):
                    lin_pred = -1 + exog.sum(1) / 8
                elif (family_class, link) == (fam.Poisson, lnk.sqrt):
                    lin_pred = 2 + exog.sum(1)
                elif (family_class, link) == (fam.InverseGaussian, lnk.log):
                    lin_pred = -1 + exog.sum(1)
                elif (family_class, link) == (fam.InverseGaussian, lnk.identity):
                    lin_pred = 20 + 5*exog.sum(1)
                    lin_pred = np.clip(lin_pred, 1e-4, np.inf)
                elif (family_class, link) == (fam.InverseGaussian, lnk.inverse_squared):
                    lin_pred = 0.5 + exog.sum(1) / 5
                    continue # skip due to non-convergence
                elif (family_class, link) == (fam.InverseGaussian, lnk.inverse_power):
                    lin_pred = 1 + exog.sum(1) / 5
                elif (family_class, link) == (fam.NegativeBinomial, lnk.identity):
                    lin_pred = 20 + 5*exog.sum(1)
                    lin_pred = np.clip(lin_pred, 1e-4, np.inf)
                elif (family_class, link) == (fam.NegativeBinomial, lnk.inverse_squared):
                    lin_pred = 0.1 + np.random.uniform(size=exog.shape[0])
                    continue # skip due to non-convergence
                elif (family_class, link) == (fam.NegativeBinomial, lnk.inverse_power):
                    lin_pred = 1 + exog.sum(1) / 5
                else:
                    lin_pred = np.random.uniform(size=exog.shape[0])

                endog = gen_endog(lin_pred, family_class, link, binom_version)

                # Reference fit: iteratively reweighted least squares.
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore")
                    mod_irls = sm.GLM(endog, exog, family=family_class(link=link))
                rslt_irls = mod_irls.fit(method="IRLS")

                # Try with and without starting values.
                for max_start_irls, start_params in (0, rslt_irls.params), (1, None):
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore")
                        mod_gradient = sm.GLM(endog, exog, family=family_class(link=link))
                    rslt_gradient = mod_gradient.fit(max_start_irls=max_start_irls,
                                                     start_params=start_params,
                                                     method="newton")

                    # Both optimizers must agree on the estimates, the
                    # log-likelihood and the scale.
                    assert_allclose(rslt_gradient.params,
                                    rslt_irls.params, rtol=1e-6, atol=1e-6)

                    assert_allclose(rslt_gradient.llf, rslt_irls.llf,
                                    rtol=1e-6, atol=1e-6)

                    assert_allclose(rslt_gradient.scale, rslt_irls.scale,
                                    rtol=1e-6, atol=1e-6)

                    # Get the standard errors using expected information.
                    gradient_bse = rslt_gradient.bse
                    ehess = mod_gradient.hessian(rslt_gradient.params, observed=False)
                    gradient_bse = np.sqrt(-np.diag(np.linalg.inv(ehess)))

                    assert_allclose(gradient_bse, rslt_irls.bse, rtol=1e-6, atol=1e-6)
if __name__=="__main__":
    # Run this test module directly under nose; verbose, stop on first
    # failure, and drop into pdb when a test errors.
    #run_module_suite()
    #taken from Fernando Perez:
    import nose
    nose.runmodule(argv=[__file__,'-vvs','-x','--pdb'],
                   exit=False)
| bsd-3-clause |
TNick/pylearn2 | pylearn2/cross_validation/dataset_iterators.py | 29 | 19389 | """
Cross-validation dataset iterators.
"""
__author__ = "Steven Kearnes"
__copyright__ = "Copyright 2014, Stanford University"
__license__ = "3-clause BSD"
import numpy as np
import warnings
try:
from sklearn.cross_validation import (KFold, StratifiedKFold, ShuffleSplit,
StratifiedShuffleSplit)
except ImportError:
warnings.warn("Could not import from sklearn.")
from pylearn2.compat import OrderedDict
from pylearn2.cross_validation.blocks import StackedBlocksCV
from pylearn2.cross_validation.subset_iterators import (
ValidationKFold, StratifiedValidationKFold, ValidationShuffleSplit,
StratifiedValidationShuffleSplit)
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.datasets.transformer_dataset import TransformerDataset
class DatasetCV(object):
    """
    Construct a new DenseDesignMatrix for each subset.

    Parameters
    ----------
    dataset : object
        Full dataset for use in cross validation.
    subset_iterator : iterable
        Iterable that returns (train, test) or (train, valid, test) indices
        for partitioning the dataset during cross-validation.
    preprocessor : Preprocessor or None
        Preprocessor to apply to child datasets.
    fit_preprocessor : bool
        Whether preprocessor can fit parameters when applied to training
        data.
    which_set : str, list or None
        If None, return all subset datasets. If one or more of 'train',
        'valid', or 'test', return only the dataset(s) corresponding to the
        given subset(s).
    return_dict : bool
        Whether to return subset datasets as a dictionary. If True,
        returns a dict with keys 'train', 'valid', and/or 'test' (if
        subset_iterator returns two subsets per partition, 'train' and
        'test' are used, and if subset_iterator returns three subsets per
        partition, 'train', 'valid', and 'test' are used). If False,
        returns a list of datasets matching the subset order given by
        subset_iterator.
    """
    def __init__(self, dataset, subset_iterator, preprocessor=None,
                 fit_preprocessor=False, which_set=None, return_dict=True):
        self.dataset = dataset
        self.subset_iterator = list(subset_iterator)  # allow generator reuse
        # Pull the entire dataset out in a single sequential batch so the
        # subsets can later be selected with plain index arrays.
        dataset_iterator = dataset.iterator(mode='sequential', num_batches=1,
                                            data_specs=dataset.data_specs,
                                            return_tuple=True)
        self._data = dataset_iterator.next()
        self.preprocessor = preprocessor
        self.fit_preprocessor = fit_preprocessor
        self.which_set = which_set
        if which_set is not None:
            # Normalize a single label to a 1-element array and validate.
            which_set = np.atleast_1d(which_set)
            assert len(which_set)
            for label in which_set:
                if label not in ['train', 'valid', 'test']:
                    raise ValueError("Unrecognized subset '{}'".format(label))
            self.which_set = which_set
        self.return_dict = return_dict

    def get_data_subsets(self):
        """
        Partition the dataset according to cross-validation subsets and
        return the raw data in each subset.
        """
        for subsets in self.subset_iterator:
            labels = None
            if len(subsets) == 3:
                labels = ['train', 'valid', 'test']
            elif len(subsets) == 2:
                labels = ['train', 'test']
            # data_subsets is an OrderedDict to maintain label order
            data_subsets = OrderedDict()
            for i, subset in enumerate(subsets):
                # Slice every data tensor (X, and y if present) with the
                # same index array for this subset.
                subset_data = tuple(data[subset] for data in self._data)
                if len(subset_data) == 2:
                    X, y = subset_data
                else:
                    X, = subset_data
                    y = None
                data_subsets[labels[i]] = (X, y)
            yield data_subsets

    def __iter__(self):
        """
        Create a DenseDesignMatrix for each dataset subset and apply any
        preprocessing to the child datasets.
        """
        for data_subsets in self.get_data_subsets():
            datasets = {}
            for label, data in data_subsets.items():
                X, y = data
                datasets[label] = DenseDesignMatrix(X=X, y=y)

            # preprocessing: only the training set may fit parameters;
            # the fitted preprocessor is then applied to the others.
            if self.preprocessor is not None:
                self.preprocessor.apply(datasets['train'],
                                        can_fit=self.fit_preprocessor)
                for label, dataset in datasets.items():
                    if label == 'train':
                        continue
                    self.preprocessor.apply(dataset, can_fit=False)

            # which_set: drop subsets the caller did not ask for.
            # Iterate over a copy since entries are deleted in the loop.
            if self.which_set is not None:
                for label, dataset in list(datasets.items()):
                    if label not in self.which_set:
                        del datasets[label]
                        del data_subsets[label]
                if not len(datasets):
                    raise ValueError("No matching dataset(s) for " +
                                     "{}".format(self.which_set))

            if not self.return_dict:
                # data_subsets is an OrderedDict to maintain label order
                datasets = list(datasets[label]
                                for label in data_subsets.keys())
                # A single surviving subset is yielded bare, not in a list.
                if len(datasets) == 1:
                    datasets, = datasets
            yield datasets
class StratifiedDatasetCV(DatasetCV):
    """
    DatasetCV variant for stratified experiments: each partition keeps
    the relative class proportions of the full dataset.

    Parameters
    ----------
    dataset : object
        Dataset to use in cross validation.
    subset_iterator : iterable
        Iterable yielding train/test or train/valid/test splits used to
        partition the dataset.
    preprocessor : Preprocessor or None
        Preprocessor to apply to child datasets.
    fit_preprocessor : bool
        Whether the preprocessor may fit parameters on training data.
    which_set : str, list or None
        If None, yield all subset datasets. Otherwise yield only the
        dataset(s) whose label(s) ('train', 'valid', 'test') are given.
    return_dict : bool
        If True, yield subsets as a dict keyed by 'train'/'valid'/'test'
        (two-way splits use 'train' and 'test'); if False, yield a list
        ordered as produced by subset_iterator.
    """
    @staticmethod
    def get_y(dataset):
        """
        Extract per-example target values for stratification, flattening
        a one-hot label matrix to a 1D array of class indices when
        necessary.

        Parameters
        ----------
        dataset : object
            Dataset containing target values for examples.
        """
        targets = np.asarray(dataset.y)
        if targets.ndim > 1:
            # Only binary indicator (one-hot) matrices are supported.
            assert np.array_equal(np.unique(targets), [0, 1])
            targets = np.argmax(targets, axis=1)
        return targets
class TransformerDatasetCV(object):
    """
    Cross-validation over datasets that are passed through one or more
    pretrained transformer models before being returned.

    Parameters
    ----------
    dataset_iterator : DatasetCV
        Cross-validation dataset iterator providing train/test or
        train/valid/test datasets.
    transformers : Model or iterable
        Transformer model(s) used to transform each fold's datasets.
    """
    def __init__(self, dataset_iterator, transformers):
        self.dataset_iterator = dataset_iterator
        self.transformers = transformers

    def __iter__(self):
        """
        Yield each partition with every dataset wrapped in a
        TransformerDataset.
        """
        for fold_index, datasets in enumerate(self.dataset_iterator):
            # Select the transformer belonging to this fold: either one
            # per fold (list / StackedBlocksCV) or a single shared model.
            if isinstance(self.transformers, list):
                transformer = self.transformers[fold_index]
            elif isinstance(self.transformers, StackedBlocksCV):
                transformer = self.transformers.select_fold(fold_index)
            else:
                transformer = self.transformers

            # Wrap in place, preserving the container type we received.
            if isinstance(datasets, list):
                for position, dataset in enumerate(datasets):
                    datasets[position] = TransformerDataset(dataset,
                                                            transformer)
            else:
                for label in list(datasets):
                    datasets[label] = TransformerDataset(datasets[label],
                                                         transformer)
            yield datasets
class DatasetKFold(DatasetCV):
    """
    K-fold cross-validation over a dataset.

    Parameters
    ----------
    dataset : object
        Dataset to use for cross-validation.
    n_folds : int
        Number of cross-validation folds.
    shuffle : bool
        Whether to shuffle the dataset before partitioning.
    random_state : int or RandomState
        Random number generator used for shuffling.
    kwargs : dict
        Extra keyword arguments forwarded to DatasetCV.
    """
    def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None,
                 **kwargs):
        n_examples = dataset.X.shape[0]
        fold_iter = KFold(n_examples, n_folds=n_folds, shuffle=shuffle,
                          random_state=random_state)
        super(DatasetKFold, self).__init__(dataset, fold_iter, **kwargs)
class StratifiedDatasetKFold(StratifiedDatasetCV):
    """
    Stratified K-fold cross-validation over a dataset.

    Parameters
    ----------
    dataset : object
        Dataset to use for cross-validation.
    n_folds : int
        Number of cross-validation folds.
    shuffle : bool
        Whether to shuffle the dataset before partitioning.
    random_state : int or RandomState
        Random number generator used for shuffling.
    kwargs : dict
        Extra keyword arguments forwarded to DatasetCV.
    """
    def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None,
                 **kwargs):
        targets = self.get_y(dataset)
        try:
            fold_iter = StratifiedKFold(targets, n_folds=n_folds,
                                        shuffle=shuffle,
                                        random_state=random_state)
        except TypeError:
            # Older sklearn releases: StratifiedKFold takes neither
            # 'shuffle' nor 'random_state'.
            assert not shuffle and not random_state, (
                "The 'shuffle' and 'random_state' arguments are not " +
                "supported by this version of sklearn. See "
                "http://scikit-learn.org/stable/developers/index.html" +
                "#git-repo for details on installing the development version.")
            fold_iter = StratifiedKFold(targets, n_folds=n_folds)
        super(StratifiedDatasetKFold, self).__init__(dataset, fold_iter,
                                                     **kwargs)
class DatasetShuffleSplit(DatasetCV):
    """
    Shuffle-split cross-validation over a dataset.

    Parameters
    ----------
    dataset : object
        Dataset to use for cross-validation.
    n_iter : int
        Number of shuffle-split iterations.
    test_size : float, int, or None
        Fraction (float) or absolute count (int) of test examples; if
        None, set to the complement of train_size.
    train_size : float, int, or None
        Fraction (float) or absolute count (int) of training examples;
        if None, set to the complement of test_size.
    random_state : int or RandomState
        Random number generator used for shuffling.
    kwargs : dict
        Extra keyword arguments forwarded to DatasetCV.
    """
    def __init__(self, dataset, n_iter=10, test_size=0.1, train_size=None,
                 random_state=None, **kwargs):
        n_examples = dataset.X.shape[0]
        split_iter = ShuffleSplit(n_examples, n_iter=n_iter,
                                  test_size=test_size,
                                  train_size=train_size,
                                  random_state=random_state)
        super(DatasetShuffleSplit, self).__init__(dataset, split_iter,
                                                  **kwargs)
class StratifiedDatasetShuffleSplit(StratifiedDatasetCV):
    """
    Stratified shuffle-split cross-validation over a dataset.

    Parameters
    ----------
    dataset : object
        Dataset to use for cross-validation.
    n_iter : int
        Number of shuffle-split iterations.
    test_size : float, int, or None
        Fraction (float) or absolute count (int) of test examples; if
        None, set to the complement of train_size.
    train_size : float, int, or None
        Fraction (float) or absolute count (int) of training examples;
        if None, set to the complement of test_size.
    random_state : int or RandomState
        Random number generator used for shuffling.
    kwargs : dict
        Extra keyword arguments forwarded to DatasetCV.
    """
    def __init__(self, dataset, n_iter=10, test_size=0.1, train_size=None,
                 random_state=None, **kwargs):
        targets = self.get_y(dataset)
        split_iter = StratifiedShuffleSplit(targets, n_iter=n_iter,
                                            test_size=test_size,
                                            train_size=train_size,
                                            random_state=random_state)
        super(StratifiedDatasetShuffleSplit, self).__init__(dataset,
                                                            split_iter,
                                                            **kwargs)
class DatasetValidationKFold(DatasetCV):
    """
    K-fold cross-validation yielding train/valid/test subsets.

    Parameters
    ----------
    dataset : object
        Dataset to use for cross-validation.
    n_folds : int
        Number of cross-validation folds. Must be at least 3.
    shuffle : bool
        Whether to shuffle the data before splitting.
    random_state : int, RandomState, or None
        Pseudorandom number seed or generator to use for shuffling.
    kwargs : dict
        Extra keyword arguments forwarded to DatasetCV.
    """
    def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None,
                 **kwargs):
        n_examples = dataset.X.shape[0]
        fold_iter = ValidationKFold(n_examples, n_folds, shuffle,
                                    random_state)
        super(DatasetValidationKFold, self).__init__(dataset, fold_iter,
                                                     **kwargs)
class StratifiedDatasetValidationKFold(StratifiedDatasetCV):
    """
    Stratified K-fold cross-validation yielding train/valid/test
    subsets.

    Parameters
    ----------
    dataset : object
        Dataset to use for cross-validation.
    n_folds : int
        Number of cross-validation folds. Must be at least 3.
    shuffle : bool
        Whether to shuffle the data before splitting.
    random_state : int, RandomState, or None
        Pseudorandom number seed or generator to use for shuffling.
    kwargs : dict
        Extra keyword arguments forwarded to DatasetCV.
    """
    def __init__(self, dataset, n_folds=3, shuffle=False, random_state=None,
                 **kwargs):
        targets = self.get_y(dataset)
        fold_iter = StratifiedValidationKFold(targets, n_folds, shuffle,
                                              random_state)
        super(StratifiedDatasetValidationKFold, self).__init__(dataset,
                                                               fold_iter,
                                                               **kwargs)
class DatasetValidationShuffleSplit(DatasetCV):
    """
    Shuffle-split cross-validation yielding train/valid/test subsets.

    Parameters
    ----------
    dataset : object
        Dataset to use for cross-validation.
    n_iter : int
        Number of shuffle/split iterations.
    test_size : float, int, or None
        Fraction (float in [0, 1]) or absolute count (int) of test
        examples; if None, set to the complement of
        train_size + valid_size.
    valid_size : float, int, or None
        Fraction (float in [0, 1]) or absolute count (int) of validation
        examples; if None, set to match test_size.
    train_size : float, int, or None
        Fraction (float in [0, 1]) or absolute count (int) of training
        examples; if None, set to the complement of
        valid_size + test_size.
    random_state : int, RandomState, or None
        Pseudorandom number seed or generator to use for shuffling.
    kwargs : dict
        Extra keyword arguments forwarded to DatasetCV.
    """
    def __init__(self, dataset, n_iter=10, test_size=0.1, valid_size=None,
                 train_size=None, random_state=None, **kwargs):
        n_examples = dataset.X.shape[0]
        split_iter = ValidationShuffleSplit(n_examples, n_iter, test_size,
                                            valid_size, train_size,
                                            random_state)
        super(DatasetValidationShuffleSplit, self).__init__(dataset,
                                                            split_iter,
                                                            **kwargs)
class StratifiedDatasetValidationShuffleSplit(StratifiedDatasetCV):
    """
    Stratified shuffle-split cross-validation yielding train/valid/test
    subsets.

    Parameters
    ----------
    dataset : object
        Dataset to use for cross-validation.
    n_iter : int
        Number of shuffle/split iterations.
    test_size : float, int, or None
        Fraction (float in [0, 1]) or absolute count (int) of test
        examples; if None, set to the complement of
        train_size + valid_size.
    valid_size : float, int, or None
        Fraction (float in [0, 1]) or absolute count (int) of validation
        examples; if None, set to match test_size.
    train_size : float, int, or None
        Fraction (float in [0, 1]) or absolute count (int) of training
        examples; if None, set to the complement of
        valid_size + test_size.
    random_state : int, RandomState, or None
        Pseudorandom number seed or generator to use for shuffling.
    kwargs : dict
        Extra keyword arguments forwarded to DatasetCV.
    """
    def __init__(self, dataset, n_iter=10, test_size=0.1, valid_size=None,
                 train_size=None, random_state=None, **kwargs):
        targets = self.get_y(dataset)
        split_iter = StratifiedValidationShuffleSplit(targets, n_iter,
                                                      test_size, valid_size,
                                                      train_size,
                                                      random_state)
        super(StratifiedDatasetValidationShuffleSplit, self).__init__(
            dataset, split_iter, **kwargs)
| bsd-3-clause |
acapet/GHER-POSTPROC | Examples/Second.py | 1 | 1944 | import numpy as np
import numpy.ma as ma
from netCDF4 import Dataset
#from mpl_toolkits.basemap import Basemap
#from multiprocessing import Pool
#import gsw
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime as dt
import sys
import os
import G3D_class
# Load one year of model output through the G3D wrapper.
G=G3D_class.G3D('../data/CART1CLIP/1980.nc')
# Horizontally averaged temperature profile on an explicit depth grid...
t1,zforplot1=G.avgprofile('TEM',ztab=-np.array([0,10,20,30,50,100,200,300,400,500,700,1000]))
# ...and on the variable's default vertical grid.
t2,zforplot2=G.avgprofile('TEM')
# G.time appears to hold days since 1858-11-17 (the modified-Julian-day
# epoch) -- TODO confirm against the model's time convention.
dates = [dt.datetime(1858,11,17)+dt.timedelta(days=int(t)) for t in G.time]
fig=plt.figure(figsize=(10, 8))
####################
# 1st figure : Age profile
####################
# Left panel: profile on the explicit depth grid.
ax=plt.subplot(1, 2, 1)
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m/%Y'))
plt.pcolor(dates,zforplot1,t1.T)
plt.title('Temp')
plt.ylabel('depth - [m]')
plt.clim([7,12])
plt.ylim([-500,0])
plt.colorbar()
# Right panel: same field on the default grid, for comparison.
ax=plt.subplot(1, 2, 2)
ax.xaxis_date()
ax.xaxis.set_major_formatter(mdates.DateFormatter('%m/%Y'))
plt.pcolor(dates,zforplot2,t2.T)
plt.title('Temp')
plt.ylabel('depth - [m]')
plt.clim([7,12])
plt.ylim([-500,0])
plt.colorbar()
fig.savefig('../MeanTemProfile.png')
| gpl-3.0 |
cauchycui/scikit-learn | sklearn/cross_decomposition/cca_.py | 209 | 3150 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
    """CCA Canonical Correlation Analysis.

    CCA inherits from PLS with mode="B" and deflation_mode="canonical".

    Read more in the :ref:`User Guide <cross_decomposition>`.

    Parameters
    ----------
    n_components : int, (default 2).
        number of components to keep.

    scale : boolean, (default True)
        whether to scale the data?

    max_iter : an integer, (default 500)
        the maximum number of iterations of the NIPALS inner loop

    tol : non-negative real, default 1e-06.
        the tolerance used in the iterative algorithm

    copy : boolean
        Whether the deflation be done on a copy. Let the default value
        to True unless you don't care about side effects

    Attributes
    ----------
    x_weights_ : array, [p, n_components]
        X block weights vectors.

    y_weights_ : array, [q, n_components]
        Y block weights vectors.

    x_loadings_ : array, [p, n_components]
        X block loadings vectors.

    y_loadings_ : array, [q, n_components]
        Y block loadings vectors.

    x_scores_ : array, [n_samples, n_components]
        X scores.

    y_scores_ : array, [n_samples, n_components]
        Y scores.

    x_rotations_ : array, [p, n_components]
        X block to latents rotations.

    y_rotations_ : array, [q, n_components]
        Y block to latents rotations.

    n_iter_ : array-like
        Number of iterations of the NIPALS inner loop for each
        component.

    Notes
    -----
    For each component k, find the weights u, v that maximizes
    max corr(Xk u, Yk v), such that ``|u| = |v| = 1``

    Note that it maximizes only the correlations between the scores.

    The residual matrix of X (Xk+1) block is obtained by the deflation on the
    current X score: x_score.

    The residual matrix of Y (Yk+1) block is obtained by deflation on the
    current Y score.

    Examples
    --------
    >>> from sklearn.cross_decomposition import CCA
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
    >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> cca = CCA(n_components=1)
    >>> cca.fit(X, Y)
    ... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
    CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
    >>> X_c, Y_c = cca.transform(X, Y)

    References
    ----------
    Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
    emphasis on the two-block case. Technical Report 371, Department of
    Statistics, University of Washington, Seattle, 2000.

    In french but still a reference:
    Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
    Editions Technic.

    See also
    --------
    PLSCanonical
    PLSSVD
    """
    def __init__(self, n_components=2, scale=True,
                 max_iter=500, tol=1e-06, copy=True):
        # CCA is entirely implemented by _PLS; this class only pins the
        # configuration that defines canonical correlation analysis
        # (mode="B", canonical deflation, normalized Y weights, NIPALS).
        _PLS.__init__(self, n_components=n_components, scale=scale,
                      deflation_mode="canonical", mode="B",
                      norm_y_weights=True, algorithm="nipals",
                      max_iter=max_iter, tol=tol, copy=copy)
Obus/scikit-learn | examples/calibration/plot_calibration.py | 225 | 4795 | """
======================================
Probability calibration of classifiers
======================================
When performing classification you often want to predict not only
the class label, but also the associated probability. This probability
gives you some kind of confidence on the prediction. However, not all
classifiers provide well-calibrated probabilities, some being over-confident
while others being under-confident. Thus, a separate calibration of predicted
probabilities is often desirable as a postprocessing. This example illustrates
two different methods for this calibration and evaluates the quality of the
returned probabilities using Brier's score
(see http://en.wikipedia.org/wiki/Brier_score).
Compared are the estimated probability using a Gaussian naive Bayes classifier
without calibration, with a sigmoid calibration, and with a non-parametric
isotonic calibration. One can observe that only the non-parametric model is able
to provide a probability calibration that returns probabilities close to the
expected 0.5 for most of the samples belonging to the middle cluster with
heterogeneous labels. This results in a significantly improved Brier score.
"""
print(__doc__)
# Author: Mathieu Blondel <mathieu@mblondel.org>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Balazs Kegl <balazs.kegl@gmail.com>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from sklearn.datasets import make_blobs
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.cross_validation import train_test_split
n_samples = 50000
n_bins = 3  # use 3 bins for calibration_curve as we have 3 clusters here

# Generate 3 blobs with 2 classes where the second blob contains
# half positive samples and half negative samples. Probability in this
# blob is therefore 0.5.
centers = [(-5, -5), (0, 0), (5, 5)]
X, y = make_blobs(n_samples=n_samples, n_features=2, cluster_std=1.0,
                  centers=centers, shuffle=False, random_state=42)

y[:n_samples // 2] = 0
y[n_samples // 2:] = 1
sample_weight = np.random.RandomState(42).rand(y.shape[0])

# split train, test for calibration
X_train, X_test, y_train, y_test, sw_train, sw_test = \
    train_test_split(X, y, sample_weight, test_size=0.9, random_state=42)

# Gaussian Naive-Bayes with no calibration
clf = GaussianNB()
clf.fit(X_train, y_train)  # GaussianNB itself does not support sample-weights
prob_pos_clf = clf.predict_proba(X_test)[:, 1]

# Gaussian Naive-Bayes with isotonic calibration
clf_isotonic = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_isotonic.fit(X_train, y_train, sw_train)
prob_pos_isotonic = clf_isotonic.predict_proba(X_test)[:, 1]

# Gaussian Naive-Bayes with sigmoid calibration
clf_sigmoid = CalibratedClassifierCV(clf, cv=2, method='sigmoid')
clf_sigmoid.fit(X_train, y_train, sw_train)
prob_pos_sigmoid = clf_sigmoid.predict_proba(X_test)[:, 1]

# Compare the three variants with the (sample-weighted) Brier score;
# lower is better.
print("Brier scores: (the smaller the better)")

clf_score = brier_score_loss(y_test, prob_pos_clf, sw_test)
print("No calibration: %1.3f" % clf_score)

clf_isotonic_score = brier_score_loss(y_test, prob_pos_isotonic, sw_test)
print("With isotonic calibration: %1.3f" % clf_isotonic_score)

clf_sigmoid_score = brier_score_loss(y_test, prob_pos_sigmoid, sw_test)
print("With sigmoid calibration: %1.3f" % clf_sigmoid_score)

###############################################################################
# Plot the data and the predicted probabilities
plt.figure()
y_unique = np.unique(y)
colors = cm.rainbow(np.linspace(0.0, 1.0, y_unique.size))
for this_y, color in zip(y_unique, colors):
    this_X = X_train[y_train == this_y]
    this_sw = sw_train[y_train == this_y]
    plt.scatter(this_X[:, 0], this_X[:, 1], s=this_sw * 50, c=color, alpha=0.5,
                label="Class %s" % this_y)
plt.legend(loc="best")
plt.title("Data")

# Second figure: predicted probabilities sorted by the uncalibrated score,
# with the empirical class frequency overlaid in black.
plt.figure()
order = np.lexsort((prob_pos_clf, ))
plt.plot(prob_pos_clf[order], 'r', label='No calibration (%1.3f)' % clf_score)
plt.plot(prob_pos_isotonic[order], 'g', linewidth=3,
         label='Isotonic calibration (%1.3f)' % clf_isotonic_score)
plt.plot(prob_pos_sigmoid[order], 'b', linewidth=3,
         label='Sigmoid calibration (%1.3f)' % clf_sigmoid_score)
plt.plot(np.linspace(0, y_test.size, 51)[1::2],
         y_test[order].reshape(25, -1).mean(1),
         'k', linewidth=3, label=r'Empirical')
plt.ylim([-0.05, 1.05])
plt.xlabel("Instances sorted according to predicted probability "
           "(uncalibrated GNB)")
plt.ylabel("P(y=1)")
plt.legend(loc="upper left")
plt.title("Gaussian naive Bayes probabilities")

plt.show()
rousseab/pymatgen | pymatgen/analysis/diffraction/xrd.py | 2 | 14724 | # coding: utf-8
from __future__ import division, unicode_literals
"""
This module implements an XRD pattern calculator.
"""
from six.moves import filter
from six.moves import map
from six.moves import zip
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "5/22/14"
from math import sin, cos, asin, pi, degrees, radians
import os
import numpy as np
import json
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
# XRD wavelengths in angstroms, keyed by anode material and emission line
# (e.g. "CuKa1" is the copper K-alpha-1 line).
WAVELENGTHS = {
    "CuKa": 1.54184,
    "CuKa2": 1.54439,
    "CuKa1": 1.54056,
    "CuKb1": 1.39222,
    "MoKa": 0.71073,
    "MoKa2": 0.71359,
    "MoKa1": 0.70930,
    "MoKb1": 0.63229,
    "CrKa": 2.29100,
    "CrKa2": 2.29361,
    "CrKa1": 2.28970,
    "CrKb1": 2.08487,
    "FeKa": 1.93735,
    "FeKa2": 1.93998,
    "FeKa1": 1.93604,
    "FeKb1": 1.75661,
    "CoKa": 1.79026,
    "CoKa2": 1.79285,
    "CoKa1": 1.78896,
    "CoKb1": 1.63079,
    "AgKa": 0.560885,
    "AgKa2": 0.563813,
    "AgKa1": 0.559421,
    "AgKb1": 0.497082,
}

# Fitted atomic scattering parameters, loaded once at import time from the
# JSON data file shipped next to this module.
with open(os.path.join(os.path.dirname(__file__),
                       "atomic_scattering_params.json")) as f:
    ATOMIC_SCATTERING_PARAMS = json.load(f)
class XRDCalculator(object):
    """
    Computes the XRD pattern of a crystal structure.

    This code is implemented by Shyue Ping Ong as part of UCSD's NANO106 -
    Crystallography of Materials. The formalism for this code is based on
    that given in Chapters 11 and 12 of Structure of Materials by Marc De
    Graef and Michael E. McHenry. This takes into account the atomic
    scattering factors and the Lorentz polarization factor, but not
    the Debye-Waller (temperature) factor (for which data is typically not
    available). Note that the multiplicity correction is not needed since
    this code simply goes through all reciprocal points within the limiting
    sphere, which includes all symmetrically equivalent planes. The
    algorithm is as follows:

    1. Calculate reciprocal lattice of structure. Find all reciprocal points
       within the limiting sphere given by :math:`\\frac{2}{\\lambda}`.
    2. For each reciprocal point :math:`\\mathbf{g_{hkl}}` corresponding to
       lattice plane :math:`(hkl)`, compute the Bragg condition
       :math:`\\sin(\\theta) = \\frac{\\lambda}{2d_{hkl}}`
    3. Compute the structure factor as the sum of the atomic scattering
       factors. The atomic scattering factors are given by

       .. math::

           f(s) = Z - 41.78214 \\times s^2 \\times
           \\sum\\limits_{i=1}^n a_i \\exp(-b_is^2)

       where :math:`s = \\frac{\\sin(\\theta)}{\\lambda}` and :math:`a_i`
       and :math:`b_i` are the fitted parameters for each element. The
       structure factor is then given by

       .. math::

           F_{hkl} = \\sum\\limits_{j=1}^N f_j \\exp(2\\pi i
           \\mathbf{g_{hkl}} \\cdot \\mathbf{r})

    4. The intensity is then given by the modulus square of the structure
       factor.

       .. math::

           I_{hkl} = F_{hkl}F_{hkl}^*

    5. Finally, the Lorentz polarization correction factor is applied. This
       factor is given by:

       .. math::

           P(\\theta) = \\frac{1 + \\cos^2(2\\theta)}
           {\\sin^2(\\theta)\\cos(\\theta)}
    """

    # Tuple of available radiation keywords.
    AVAILABLE_RADIATION = tuple(WAVELENGTHS.keys())

    # Tolerance in which to treat two peaks as having the same two theta.
    TWO_THETA_TOL = 1e-5

    # Tolerance in which to treat a peak as effectively 0 if the scaled
    # intensity is less than this number. Since the max intensity is 100,
    # this means the peak must be less than 1e-5 of the peak intensity to be
    # considered as zero. This deals with numerical issues where systematic
    # absences do not cancel exactly to zero.
    SCALED_INTENSITY_TOL = 1e-3

    def __init__(self, wavelength="CuKa", symprec=0,
                 debye_waller_factors=None):
        """
        Initializes the XRD calculator with a given radiation.

        Args:
            wavelength (str/float): The wavelength can be specified as
                either a float or a string. If it is a string, it must be
                one of the supported definitions in the AVAILABLE_RADIATION
                class variable, which provides useful commonly used
                wavelengths. If it is a number, it is interpreted as a
                wavelength in angstroms. Defaults to "CuKa", i.e,
                Cu K_alpha radiation.
            symprec (float): Symmetry precision for structure refinement. If
                set to 0, no refinement is done. Otherwise, refinement is
                performed using spglib with provided precision.
            debye_waller_factors ({element symbol: float}): Allows the
                specification of Debye-Waller factors. Note that these
                factors are temperature dependent.
        """
        # Generalized: accept ints as well as floats for a numeric
        # wavelength (previously an int fell through to the keyword lookup
        # and raised KeyError).
        if isinstance(wavelength, (float, int)):
            self.wavelength = wavelength
        else:
            # A radiation keyword such as "CuKa"; remember it and look up
            # the corresponding wavelength.
            self.radiation = wavelength
            self.wavelength = WAVELENGTHS[wavelength]
        self.symprec = symprec
        self.debye_waller_factors = debye_waller_factors or {}

    def get_xrd_data(self, structure, scaled=True, two_theta_range=(0, 90)):
        """
        Calculates the XRD data for a structure.

        Args:
            structure (Structure): Input structure
            scaled (bool): Whether to return scaled intensities. The maximum
                peak is set to a value of 100. Defaults to True. Use False
                if you need the absolute values to combine XRD plots.
            two_theta_range ([float of length 2]): Tuple for range of
                two_thetas to calculate in degrees. Defaults to (0, 90). Set
                to None if you want all diffracted beams within the limiting
                sphere of radius 2 / wavelength.

        Returns:
            (XRD pattern) in the form of
            [[two_theta, intensity, {(h, k, l): mult}, d_hkl], ...]
            Two_theta is in degrees. Intensity is in arbitrary units and if
            scaled (the default), has a maximum value of 100 for the highest
            peak. {(h, k, l): mult} is a dict of Miller indices for all
            diffracted lattice planes contributing to that intensity and
            their multiplicities. d_hkl is the interplanar spacing.
        """
        if self.symprec:
            finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
            structure = finder.get_refined_structure()

        wavelength = self.wavelength
        latt = structure.lattice
        is_hex = latt.is_hexagonal()

        # Obtained from Bragg condition. Note that reciprocal lattice
        # vector length is 1 / d_hkl.
        min_r, max_r = (0, 2 / wavelength) if two_theta_range is None else \
            [2 * sin(radians(t / 2)) / wavelength for t in two_theta_range]

        # Obtain crystallographic reciprocal lattice points within range.
        recip_latt = latt.reciprocal_lattice_crystallographic
        recip_pts = recip_latt.get_points_in_sphere(
            [[0, 0, 0]], [0, 0, 0], max_r)
        if min_r:
            recip_pts = filter(lambda d: d[1] >= min_r, recip_pts)

        # Create a flattened array of zs, coeffs, fcoords and occus. This is
        # used to perform vectorized computation of atomic scattering
        # factors later. Note that these are not necessarily the same size
        # as the structure as each partially occupied specie occupies its
        # own position in the flattened array.
        zs = []
        coeffs = []
        fcoords = []
        occus = []
        dwfactors = []
        for site in structure:
            for sp, occu in site.species_and_occu.items():
                zs.append(sp.Z)
                try:
                    c = ATOMIC_SCATTERING_PARAMS[sp.symbol]
                except KeyError:
                    raise ValueError("Unable to calculate XRD pattern as "
                                     "there is no scattering coefficients for"
                                     " %s." % sp.symbol)
                coeffs.append(c)
                dwfactors.append(self.debye_waller_factors.get(sp.symbol, 0))
                fcoords.append(site.frac_coords)
                occus.append(occu)

        zs = np.array(zs)
        coeffs = np.array(coeffs)
        fcoords = np.array(fcoords)
        occus = np.array(occus)
        dwfactors = np.array(dwfactors)
        peaks = {}
        two_thetas = []

        # Sort by |g| (i.e. increasing two-theta), tie-broken on hkl so the
        # output ordering is deterministic.
        for hkl, g_hkl, ind in sorted(
                recip_pts,
                key=lambda i: (i[1], -i[0][0], -i[0][1], -i[0][2])):
            if g_hkl != 0:
                d_hkl = 1 / g_hkl

                # Bragg condition.
                theta = asin(wavelength * g_hkl / 2)

                # s = sin(theta) / wavelength = 1 / 2d = |ghkl| / 2
                # (d = 1 / |ghkl|)
                s = g_hkl / 2

                # Store s^2 since we are using it a few times.
                s2 = s ** 2

                # Vectorized computation of g.r for all fractional coords
                # and hkl.
                g_dot_r = np.dot(fcoords, np.transpose([hkl])).T[0]

                # Highly vectorized computation of atomic scattering
                # factors. Equivalent non-vectorized code is::
                #
                #   for site in structure:
                #       el = site.specie
                #       coeff = ATOMIC_SCATTERING_PARAMS[el.symbol]
                #       fs = el.Z - 41.78214 * s2 * sum(
                #           [d[0] * exp(-d[1] * s2) for d in coeff])
                fs = zs - 41.78214 * s2 * np.sum(
                    coeffs[:, :, 0] * np.exp(-coeffs[:, :, 1] * s2), axis=1)

                dw_correction = np.exp(-dwfactors * s2)

                # Structure factor = sum of atomic scattering factors (with
                # position factor exp(2j * pi * g.r) and occupancies).
                # Vectorized computation.
                f_hkl = np.sum(fs * occus * np.exp(2j * pi * g_dot_r)
                               * dw_correction)

                # Lorentz polarization correction for hkl.
                lorentz_factor = (1 + cos(2 * theta) ** 2) / \
                    (sin(theta) ** 2 * cos(theta))

                # Intensity for hkl is modulus square of structure factor.
                i_hkl = (f_hkl * f_hkl.conjugate()).real

                two_theta = degrees(2 * theta)

                if is_hex:
                    # Use Miller-Bravais indices for hexagonal lattices.
                    hkl = (hkl[0], hkl[1], - hkl[0] - hkl[1], hkl[2])

                # Deal with floating point precision issues: merge peaks
                # whose two-theta agree within TWO_THETA_TOL.
                ind = np.where(np.abs(np.subtract(two_thetas, two_theta)) <
                               XRDCalculator.TWO_THETA_TOL)
                if len(ind[0]) > 0:
                    # BUG FIX: ``ind[0]`` is an index *array*; indexing the
                    # Python list ``two_thetas`` with it relied on a
                    # long-deprecated NumPy behavior and fails on modern
                    # NumPy. Use the first matching scalar index instead.
                    peaks[two_thetas[ind[0][0]]][0] += i_hkl * lorentz_factor
                    peaks[two_thetas[ind[0][0]]][1].append(tuple(hkl))
                else:
                    peaks[two_theta] = [i_hkl * lorentz_factor, [tuple(hkl)],
                                        d_hkl]
                    two_thetas.append(two_theta)

        # Scale intensities so that the max intensity is 100.
        max_intensity = max([v[0] for v in peaks.values()])
        data = []
        for k in sorted(peaks.keys()):
            v = peaks[k]
            scaled_intensity = v[0] / max_intensity * 100 if scaled else v[0]
            fam = get_unique_families(v[1])
            if scaled_intensity > XRDCalculator.SCALED_INTENSITY_TOL:
                data.append([k, scaled_intensity, fam, v[2]])
        return data

    def get_xrd_plot(self, structure, two_theta_range=(0, 90),
                     annotate_peaks=True):
        """
        Returns the XRD plot as a matplotlib.pyplot.

        Args:
            structure: Input structure
            two_theta_range ([float of length 2]): Tuple for range of
                two_thetas to calculate in degrees. Defaults to (0, 90). Set
                to None if you want all diffracted beams within the limiting
                sphere of radius 2 / wavelength.
            annotate_peaks: Whether to annotate the peaks with plane
                information.

        Returns:
            (matplotlib.pyplot)
        """
        from pymatgen.util.plotting_utils import get_publication_quality_plot
        plt = get_publication_quality_plot(16, 10)
        for two_theta, i, hkls, d_hkl in self.get_xrd_data(
                structure, two_theta_range=two_theta_range):
            # BUG FIX: when two_theta_range is None (all beams requested),
            # the old range check raised TypeError (``None[0]``). Plot all
            # peaks in that case.
            if two_theta_range is None or \
                    two_theta_range[0] <= two_theta <= two_theta_range[1]:
                label = ", ".join([str(hkl) for hkl in hkls.keys()])
                plt.plot([two_theta, two_theta], [0, i], color='k',
                         linewidth=3, label=label)
                if annotate_peaks:
                    plt.annotate(label, xy=[two_theta, i],
                                 xytext=[two_theta, i], fontsize=16)
        plt.xlabel(r"$2\theta$ ($^\circ$)")
        plt.ylabel("Intensities (scaled)")
        plt.tight_layout()
        return plt

    def show_xrd_plot(self, structure, two_theta_range=(0, 90),
                      annotate_peaks=True):
        """
        Shows the XRD plot.

        Args:
            structure (Structure): Input structure
            two_theta_range ([float of length 2]): Tuple for range of
                two_thetas to calculate in degrees. Defaults to (0, 90). Set
                to None if you want all diffracted beams within the limiting
                sphere of radius 2 / wavelength.
            annotate_peaks (bool): Whether to annotate the peaks with plane
                information.
        """
        self.get_xrd_plot(structure, two_theta_range=two_theta_range,
                          annotate_peaks=annotate_peaks).show()
def get_unique_families(hkls):
    """
    Returns unique families of Miller indices. Families must be permutations
    of each other.

    Args:
        hkls ([h, k, l]): List of (hashable) Miller indices.

    Returns:
        {hkl: multiplicity}: A dict mapping the first-seen representative of
        each family to the number of family members in ``hkls``.
    """
    # Group by the sorted tuple of absolute values, which is invariant
    # under permutation and sign change. This is O(n) instead of the
    # previous O(n^2) pairwise comparison (resolving the old TODO), and it
    # also fixes a latent bug: the old zip()-based comparison silently
    # truncated, so indices of different lengths could be treated as
    # permutations of each other.
    unique = {}
    representative = {}  # canonical key -> first-seen hkl of that family
    for hkl in hkls:
        key = tuple(sorted(abs(i) for i in hkl))
        rep = representative.get(key)
        if rep is None:
            representative[key] = hkl
            unique[hkl] = 1
        else:
            unique[rep] += 1
    return unique
| mit |
kiyoto/statsmodels | statsmodels/examples/l1_demo/short_demo.py | 33 | 3737 | """
You can fit your LikelihoodModel using l1 regularization by changing
the method argument and adding an argument alpha. See code for
details.
The Story
---------
The maximum likelihood (ML) solution works well when the number of data
points is large and the noise is small. When the ML solution starts
"breaking", the regularized solution should do better.
The l1 Solvers
--------------
The standard l1 solver is fmin_slsqp and is included with scipy. It
sometimes has trouble verifying convergence when the data size is
large.
The l1_cvxopt_cp solver is part of CVXOPT and this package needs to be
installed separately. It works well even for larger data sizes.
"""
from __future__ import print_function
from statsmodels.compat.python import range
import statsmodels.api as sm
import matplotlib.pyplot as plt
import numpy as np
import pdb # pdb.set_trace()
## Load the data from Spector and Mazzeo (1980)
spector_data = sm.datasets.spector.load()
spector_data.exog = sm.add_constant(spector_data.exog)
N = len(spector_data.endog)
K = spector_data.exog.shape[1]

### Logit Model
logit_mod = sm.Logit(spector_data.endog, spector_data.exog)
## Standard logistic regression (maximum likelihood, no penalty)
logit_res = logit_mod.fit()

## Regularized regression
# Set the regularization parameter to something reasonable
# (one penalty weight per coefficient, scaled by the sample size).
alpha = 0.05 * N * np.ones(K)
# Use l1, which solves via a built-in (scipy.optimize) solver
logit_l1_res = logit_mod.fit_regularized(method='l1', alpha=alpha, acc=1e-6)
# Use l1_cvxopt_cp, which solves with a CVXOPT solver
logit_l1_cvxopt_res = logit_mod.fit_regularized(
    method='l1_cvxopt_cp', alpha=alpha)

## Print results
print("============ Results for Logit =================")
print("ML results")
print(logit_res.summary())
print("l1 results")
print(logit_l1_res.summary())
print(logit_l1_cvxopt_res.summary())

### Multinomial Logit Example using American National Election Studies Data
anes_data = sm.datasets.anes96.load()
anes_exog = anes_data.exog
anes_exog = sm.add_constant(anes_exog, prepend=False)
mlogit_mod = sm.MNLogit(anes_data.endog, anes_exog)
mlogit_res = mlogit_mod.fit()

## Set the regularization parameter (one weight per coefficient of each
## of the J - 1 non-reference outcome equations).
alpha = 10 * np.ones((mlogit_mod.J - 1, mlogit_mod.K))
# Don't regularize the constant (last row, since the constant was appended)
alpha[-1, :] = 0
mlogit_l1_res = mlogit_mod.fit_regularized(method='l1', alpha=alpha)
print(mlogit_l1_res.params)
# mlogit_l1_res = mlogit_mod.fit_regularized(
#     method='l1_cvxopt_cp', alpha=alpha, abstol=1e-10, trim_tol=1e-6)
# print mlogit_l1_res.params

## Print results
print("============ Results for MNLogit =================")
print("ML results")
print(mlogit_res.summary())
print("l1 results")
print(mlogit_l1_res.summary())

#### Logit example with many params, sweeping alpha
spector_data = sm.datasets.spector.load()
X = spector_data.exog
Y = spector_data.endog

## Fit
N = 50  # number of points to solve at
K = X.shape[1]
logit_mod = sm.Logit(Y, X)
coeff = np.zeros((N, K))  # Holds the coefficients
alphas = 1 / np.logspace(-0.5, 2, N)

## Sweep alpha and store the coefficients
# QC check doesn't always pass with the default options.
# Use the options QC_verbose=True and disp=True
# to see what is happening. It just barely doesn't pass, so I decreased
# acc and increased QC_tol to make it pass
for n, alpha in enumerate(alphas):
    logit_res = logit_mod.fit_regularized(
        method='l1', alpha=alpha, trim_mode='off', QC_tol=0.1, disp=False,
        QC_verbose=True, acc=1e-15)
    coeff[n, :] = logit_res.params

## Plot the regularization path: one line per coefficient vs. alpha
plt.figure(1);plt.clf();plt.grid()
plt.title('Regularization Path');
plt.xlabel('alpha');
plt.ylabel('Parameter value');
for i in range(K):
    plt.plot(alphas, coeff[:, i], label='X' + str(i), lw=3)
plt.legend(loc='best')
plt.show()
| bsd-3-clause |
tttor/csipb-jamu-prj | predictor/connectivity/similarity/protein-kernel/gene-ontology/combine_go_sim.py | 1 | 2248 | import csv
import sys
import math
import time
import numpy as np
from sklearn.preprocessing import MinMaxScaler
def main():
    """Combine BP, MF and CC GO-term similarity matrices into one.

    Expects four command-line arguments: the BP, MF and CC similarity CSV
    files and the output path. The three matrices are averaged entry-wise,
    cosine-normalized using the diagonal, and written out as CSV together
    with a "meta_" file listing the protein identifiers.
    """
    if len(sys.argv) != 5:
        # BUG FIX: previously only the usage line was printed and execution
        # continued, crashing on sys.argv indexing below. Exit cleanly with
        # a non-zero status instead.
        print("Usage: python combine_go_sim.py [BP] [MF] [CC] [Output]")
        sys.exit(1)

    BPDataDir = sys.argv[1]
    MFDataDir = sys.argv[2]
    CCDataDir = sys.argv[3]
    outDir = sys.argv[4]

    print("Parsing Data")
    BPSimMat, protMeta = fileToMatrix(BPDataDir)
    MFSimMat, _ = fileToMatrix(MFDataDir)
    CCSimMat, _ = fileToMatrix(CCDataDir)

    print("Combine Matrix")
    # Entry-wise average of the three ontologies (upper triangle only;
    # the normalization pass below mirrors it to the lower triangle).
    combGOSimMat = np.zeros((3334, 3334), dtype=float)
    m, n = combGOSimMat.shape
    diagSim = []
    for i in range(m):
        for j in range(i, n):
            combGOSimMat[i][j] = (BPSimMat[i][j] + MFSimMat[i][j]
                                  + CCSimMat[i][j]) / 3
            if i == j:
                diagSim.append(combGOSimMat[i][j])

    print("Normalizing Value")
    # Cosine (kernel) normalization, based on
    # stats.stackexchange.com/questions/23397/kernel-matrix-normalisation
    for i in range(m):
        for j in range(i, n):
            combGOSimMat[i][j] = ((combGOSimMat[i][j] / math.sqrt(diagSim[i]))
                                  / math.sqrt(diagSim[j]))
            combGOSimMat[j][i] = combGOSimMat[i][j]

    print("Writing file")
    with open(outDir, 'w') as f:
        for i in range(m):
            for j in range(n):
                if j > 0:
                    f.write(',')
                f.write(str(combGOSimMat[i][j]))
            f.write('\n')
    with open("meta_" + outDir, 'w') as f:
        for prot in protMeta:
            f.write(prot)
            f.write('\n')
def fileToMatrix(directory, size=3334):
    """Parse a GO similarity CSV file into a dense square matrix.

    The file is expected to have one header row; every subsequent row
    starts with a protein identifier followed by similarity values ("NA"
    entries are left as 0.0).

    Args:
        directory (str): Path of the CSV file to parse.
        size (int): Dimension of the (square) similarity matrix. Defaults
            to 3334 for backward compatibility with the original
            hard-coded dataset size.

    Returns:
        (np.ndarray, list): The size x size similarity matrix and the list
        of protein identifiers in row order.
    """
    print("Parsing " + directory)
    goSimMat = np.zeros((size, size), dtype=float)
    metaList = []
    with open(directory, 'r') as f:
        fileContent = csv.reader(f, delimiter=',', quotechar='\"')
        for i, row in enumerate(fileContent):
            if i > 0:  # skip the header row
                for j, col in enumerate(row):
                    if j == 0:
                        # First column holds the protein identifier.
                        metaList.append(col)
                    elif col != "NA":
                        # Data columns are offset by one (identifier col).
                        goSimMat[i - 1][j - 1] = float(col)
    return goSimMat, metaList
if __name__ == '__main__':
    start_time = time.time()
    main()
    # Python-2 print statement converted to the print() function, which
    # behaves identically here on both Python 2 and 3.
    print("Program is running for " + str(time.time() - start_time))
| mit |
aliparsai/LittleDarwin | utils/HigherOrderExperiment/FormulaCalculator.py | 1 | 3395 | import math
from mpl_toolkits.mplot3d import axes3d
from itertools import izip
import matplotlib.pyplot as plt
import numpy as np
def calculate_formula(m, n, t):
    """Evaluate the combinatorial formula for (m, n, t).

    Returns 0 whenever any factorial argument is negative, i.e. the
    combination (m, n, t) is impossible.
    """
    two_exp = 1 - m + ((n + 1) // 2)
    try:
        numerator = (math.factorial(t)
                     * math.factorial(m)
                     * math.factorial(n - t))
        denominator = (math.factorial(t - m)
                       * math.factorial(((n + 1) // 2) - m)
                       * math.factorial((2 * m) - t))
    except ValueError:
        # A negative factorial argument means the term does not exist.
        return 0
    # Fold the power-of-two factor into whichever side keeps it positive.
    if two_exp < 0:
        numerator *= 2 ** (-two_exp)
    elif two_exp > 0:
        denominator *= 2 ** two_exp
    return numerator / denominator
def calculate_probablistic(n, t):
    """Probabilistic estimate: n * (1 - (1 - t/n)^2) / 2, truncated to int."""
    survival_ratio = 1 - (float(t) / n)
    return int(n * (1 - survival_ratio ** 2) / 2)
def plot_formula():
    """Render a 3D wireframe of the formula's percentage distribution.

    For n = 100, computes calculate_formula(m, n, t) over the grid
    m, t in [1, 100), normalizes each t-row to integer percentages, and
    shows the surface.
    """
    n = 100
    percentages = []
    for t in range(1, 100):
        row = [calculate_formula(m, n, t) for m in range(1, 100)]
        row_total = sum(row)
        percentages.append([int(value * 100 / row_total) for value in row])

    fig = plt.figure()
    axes = fig.add_subplot(111, projection='3d')
    ticks = np.arange(1, 100, 1)
    grid_x, grid_y = np.meshgrid(ticks, ticks)
    axes.plot_wireframe(grid_x, grid_y, np.array(percentages),
                        rstride=1, cstride=1)
    plt.show()
def check_for_n(m, t, nl, nu):
    """Print the formula value for each n in [nl, nu), scaling m and t to n."""
    for n in range(nl, nu):
        scaled_m = int(n * m / 200)
        scaled_t = int(n * t / 100)
        print(calculate_formula(scaled_m, n, scaled_t))
def get_most_likely_m(n, t):
    """Return (m, percent) for the most probable m in [1, n) given n and t.

    Also prints the (unnormalized) sum of all formula values, matching the
    original diagnostic output.
    """
    values = {m: calculate_formula(m, n, t) for m in range(1, n)}
    total = sum(values.values())
    print(total)

    best_m = None
    best_pct = 0
    for m, value in values.items():
        pct = int(value * 100 / total)
        # Strict '>' keeps the first (smallest) m on ties.
        if pct > best_pct:
            best_m = m
            best_pct = pct
    return best_m, best_pct
def read_from_csv(rawline):
    """Split one CSV line into (name, killed, total) from columns 0, 2, 3."""
    fields = rawline.split(",")
    name = fields[0]
    killed = int(fields[2])
    total = int(fields[3])
    return name, killed, total
def write_to_csv(handle, name, killedfom, totalfom, killedhom,
                 predictedkilledhom, confidence, probablistickilledhom):
    """Append one comma-separated result row to an open, writable handle.

    Columns: name, killed FOM, total FOM, killed HOM, predicted killed HOM,
    confidence (rendered with a trailing '%'), probabilistic estimate.

    BUG FIX: the former ``assert isinstance(handle, file)`` check was
    removed -- ``file`` is a Python-2-only builtin (NameError on Python 3),
    and using ``assert`` for input validation is stripped under ``-O``.
    Any object with a ``write`` method is acceptable.
    """
    handle.write(
        ",".join([name, str(killedfom), str(totalfom), str(killedhom),
                  str(predictedkilledhom), str(confidence) + "%",
                  str(probablistickilledhom)]))
    handle.write("\n")
"""
focsv = open("./firstorder.csv", "r")
hocsv = open("./secondorder.csv", "r")
resultcsv = open("./result.csv", "w")
for fline, hline in izip(focsv, hocsv):
name, killedfom, totalfom = read_from_csv(fline.strip())
dummy, dummy2, killedhom = read_from_csv(hline.strip())
m, c = get_most_likely_m(totalfom, killedfom)
p = calculate_probablistic(totalfom, killedfom)
write_to_csv(resultcsv, name, killedfom, totalfom, killedhom, m, c, p)
# mlist = list()
# tlist = range(1, 100)
# for t in range(1, 100):
# m, c = get_most_likely_m(100, t)
# mlist.append(2 * m)
# plt.plot(tlist, mlist)
# plt.show()
focsv.close()
hocsv.close()
resultcsv.close()
"""
# Sanity check: most likely m for n=100, t=50, then the total count
# 100! / (50! * 2^50).
get_most_likely_m(100, 50)
# BUG FIX: renamed from ``all``, which shadowed the ``all()`` builtin.
total_configurations = math.factorial(100)
total_configurations /= math.factorial(50)
total_configurations /= 2 ** 50
print(total_configurations)
| gpl-3.0 |
kjung/scikit-learn | examples/plot_kernel_approximation.py | 19 | 8004 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualized the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular note that
a datapoint (represented as a dot) does not necessarily be classified
into the region it is lying in, since it will not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)

# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
#         Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause

# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time

# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
                                          Nystroem)
from sklearn.decomposition import PCA

# The digits dataset
digits = datasets.load_digits(n_class=9)

# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)

# We learn the digits on the first half of the digits.
# BUG FIX: use floor division -- ``n_samples / 2`` is a float on Python 3
# and floats are not valid array indices.
data_train, targets_train = (data[:n_samples // 2],
                             digits.target[:n_samples // 2])

# Now predict the value of the digit on the second half:
data_test, targets_test = (data[n_samples // 2:],
                           digits.target[n_samples // 2:])
# data_test = scaler.transform(data_test)

# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()

# create pipeline from kernel approximation and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
                                        ("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
                                         ("svm", svm.LinearSVC())])

# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time

linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time

sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []

# Time and score both approximations over increasing n_components.
for D in sample_sizes:
    fourier_approx_svm.set_params(feature_map__n_components=D)
    nystroem_approx_svm.set_params(feature_map__n_components=D)
    start = time()
    nystroem_approx_svm.fit(data_train, targets_train)
    nystroem_times.append(time() - start)

    start = time()
    fourier_approx_svm.fit(data_train, targets_train)
    fourier_times.append(time() - start)

    fourier_score = fourier_approx_svm.score(data_test, targets_test)
    nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
    nystroem_scores.append(nystroem_score)
    fourier_scores.append(fourier_score)

# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second y axis for timings
timescale = plt.subplot(212)

accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
               label='Nystroem approx. kernel')

accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
               label='Fourier approx. kernel')

# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
              [linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
               [linear_svm_time, linear_svm_time], '--', label='linear svm')

accuracy.plot([sample_sizes[0], sample_sizes[-1]],
              [kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
               [kernel_svm_time, kernel_svm_time], '--', label='rbf svm')

# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")

# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')

# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)

X = pca.transform(data_train)

# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])

# title for the plots
titles = ['SVC with rbf kernel',
          'SVC (linear kernel)\n with Fourier rbf feature map\n'
          'n_components=100',
          'SVC (linear kernel)\n with Nystroem rbf feature map\n'
          'n_components=100']

plt.tight_layout()
plt.figure(figsize=(12, 5))

# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
                         fourier_approx_svm)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    plt.subplot(1, 3, i + 1)
    Z = clf.predict(flat_grid)

    # Put the result into a color plot
    Z = Z.reshape(grid.shape[:-1])
    plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
    plt.axis('off')

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
    plt.title(titles[i])

plt.tight_layout()
plt.show()
| bsd-3-clause |
Obus/scikit-learn | sklearn/utils/random.py | 234 | 10510 | # Author: Hamzeh Alsalhi <ha258@cornell.edu>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
    """
    choice(a, size=None, replace=True, p=None)

    Generates a random sample from a given 1-D array

    .. versionadded:: 1.7.0

    Parameters
    -----------
    a : 1-D array-like or int
        If an ndarray, a random sample is generated from its elements.
        If an int, the random sample is generated as if a was np.arange(n)

    size : int or tuple of ints, optional
        Output shape. Default is None, in which case a single value is
        returned.

    replace : boolean, optional
        Whether the sample is with or without replacement.

    p : 1-D array-like, optional
        The probabilities associated with each entry in a.
        If not given the sample assumes a uniform distribution over all
        entries in a.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    --------
    samples : 1-D ndarray, shape (size,)
        The generated random samples

    Raises
    -------
    ValueError
        If a is an int and less than zero, if a or p are not 1-dimensional,
        if a is an array-like of size 0, if p is not a vector of
        probabilities, if a and p have different lengths, or if
        replace=False and the sample size is greater than the population
        size

    See Also
    ---------
    randint, shuffle, permutation

    Examples
    ---------
    Generate a uniform random sample from np.arange(5) of size 3:

    >>> np.random.choice(5, 3)  # doctest: +SKIP
    array([0, 3, 4])
    >>> #This is equivalent to np.random.randint(0,5,3)

    Generate a non-uniform random sample from np.arange(5) of size 3:

    >>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0])  # doctest: +SKIP
    array([3, 3, 0])

    Generate a uniform random sample from np.arange(5) of size 3 without
    replacement:

    >>> np.random.choice(5, 3, replace=False)  # doctest: +SKIP
    array([3,1,0])
    >>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]

    Generate a non-uniform random sample from np.arange(5) of size
    3 without replacement:

    >>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
    ... # doctest: +SKIP
    array([2, 3, 0])

    Any of the above can be repeated with an arbitrary array-like
    instead of just integers. For instance:

    >>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
    >>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
    ... # doctest: +SKIP
    array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
          dtype='|S11')
    """
    random_state = check_random_state(random_state)

    # Format and Verify input
    a = np.array(a, copy=False)
    if a.ndim == 0:
        try:
            # __index__ must return an integer by python rules.
            pop_size = operator.index(a.item())
        except TypeError:
            raise ValueError("a must be 1-dimensional or an integer")
        if pop_size <= 0:
            raise ValueError("a must be greater than 0")
    elif a.ndim != 1:
        raise ValueError("a must be 1-dimensional")
    else:
        pop_size = a.shape[0]
        # BUG FIX: was ``if pop_size is 0`` -- identity comparison against
        # an int literal is implementation-dependent; use equality.
        if pop_size == 0:
            raise ValueError("a must be non-empty")

    # BUG FIX: was ``if None != p`` (here and below) -- use the idiomatic
    # identity test, which also avoids elementwise ``!=`` surprises.
    if p is not None:
        p = np.array(p, dtype=np.double, ndmin=1, copy=False)
        if p.ndim != 1:
            raise ValueError("p must be 1-dimensional")
        if p.size != pop_size:
            raise ValueError("a and p must have same size")
        if np.any(p < 0):
            raise ValueError("probabilities are not non-negative")
        if not np.allclose(p.sum(), 1):
            raise ValueError("probabilities do not sum to 1")

    shape = size
    if shape is not None:
        size = np.prod(shape, dtype=np.intp)
    else:
        size = 1

    # Actual sampling
    if replace:
        if p is not None:
            # Sample via the inverse CDF of p.
            cdf = p.cumsum()
            cdf /= cdf[-1]
            uniform_samples = random_state.random_sample(shape)
            idx = cdf.searchsorted(uniform_samples, side='right')
            # searchsorted returns a scalar
            idx = np.array(idx, copy=False)
        else:
            idx = random_state.randint(0, pop_size, size=shape)
    else:
        if size > pop_size:
            raise ValueError("Cannot take a larger sample than "
                             "population when 'replace=False'")

        if p is not None:
            if np.sum(p > 0) < size:
                raise ValueError("Fewer non-zero entries in p than size")
            n_uniq = 0
            p = p.copy()
            # BUG FIX: ``np.int`` was removed in NumPy 1.20; the builtin
            # int is the exact equivalent here.
            found = np.zeros(shape, dtype=int)
            flat_found = found.ravel()
            # Rejection-style loop: zero out already-drawn entries of p and
            # redraw until `size` distinct indices have been found.
            while n_uniq < size:
                x = random_state.rand(size - n_uniq)
                if n_uniq > 0:
                    p[flat_found[0:n_uniq]] = 0
                cdf = np.cumsum(p)
                cdf /= cdf[-1]
                new = cdf.searchsorted(x, side='right')
                _, unique_indices = np.unique(new, return_index=True)
                unique_indices.sort()
                new = new.take(unique_indices)
                flat_found[n_uniq:n_uniq + new.size] = new
                n_uniq += new.size
            idx = found
        else:
            idx = random_state.permutation(pop_size)[:size]

    if shape is not None:
        idx.shape = shape

    if shape is None and isinstance(idx, np.ndarray):
        # In most cases a scalar will have been made an array
        idx = idx.item(0)

    # Use samples as indices for a if a is array-like
    if a.ndim == 0:
        return idx

    if shape is not None and idx.ndim == 0:
        # If size == () then the user requested a 0-d array as opposed to
        # a scalar object when size is None. However a[idx] is always a
        # scalar and not an array. So this makes sure the result is an
        # array, taking into account that np.array(item) may not work
        # for object arrays.
        res = np.empty((), dtype=a.dtype)
        res[()] = a[idx]
        return res

    return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
                      random_state=None):
    """Generate a sparse random matrix given column class distributions

    Parameters
    ----------
    n_samples : int,
        Number of samples to draw in each column.

    classes : list of size n_outputs of arrays of size (n_classes,)
        List of classes for each column.

    class_probability : list of size n_outputs of arrays of size (n_classes,)
        Optional (default=None). Class distribution of each column. If None
        the uniform distribution is assumed.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    random_matrix : sparse csc matrix of size (n_samples, n_outputs)
    """
    data = array.array('i')
    indices = array.array('i')
    indptr = array.array('i', [0])

    for j in range(len(classes)):
        classes[j] = np.asarray(classes[j])
        if classes[j].dtype.kind != 'i':
            raise ValueError("class dtype %s is not supported" %
                             classes[j].dtype)
        classes[j] = astype(classes[j], np.int64, copy=False)

        # use uniform distribution if no class_probability is given
        if class_probability is None:
            class_prob_j = np.empty(shape=classes[j].shape[0])
            class_prob_j.fill(1 / classes[j].shape[0])
        else:
            class_prob_j = np.asarray(class_probability[j])

        # BUG FIX: exact float equality (``!= 1.0``) rejected probability
        # vectors that sum to 1 only up to rounding error (e.g.
        # [0.1] * 10); compare with a tolerance instead.
        if not np.isclose(np.sum(class_prob_j), 1.0):
            raise ValueError("Probability array at index {0} does not sum to "
                             "one".format(j))

        if class_prob_j.shape[0] != classes[j].shape[0]:
            raise ValueError("classes[{0}] (length {1}) and "
                             "class_probability[{0}] (length {2}) have "
                             "different length.".format(j,
                                                        classes[j].shape[0],
                                                        class_prob_j.shape[0]))

        # If 0 is not present in the classes insert it with a probability 0.0
        if 0 not in classes[j]:
            classes[j] = np.insert(classes[j], 0, 0)
            class_prob_j = np.insert(class_prob_j, 0, 0.0)

        # If there are nonzero classes choose randomly using
        # class_probability; only the nonzero entries are stored explicitly
        # in the CSC arrays.
        rng = check_random_state(random_state)
        if classes[j].shape[0] > 1:
            p_nonzero = 1 - class_prob_j[classes[j] == 0]
            nnz = int(n_samples * p_nonzero)
            ind_sample = sample_without_replacement(n_population=n_samples,
                                                    n_samples=nnz,
                                                    random_state=random_state)
            indices.extend(ind_sample)

            # Normalize probabilities for the nonzero elements
            classes_j_nonzero = classes[j] != 0
            class_probability_nz = class_prob_j[classes_j_nonzero]
            class_probability_nz_norm = (class_probability_nz /
                                         np.sum(class_probability_nz))
            classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
                                          rng.rand(nnz))
            data.extend(classes[j][classes_j_nonzero][classes_ind])
        indptr.append(len(indices))

    return sp.csc_matrix((data, indices, indptr),
                         (n_samples, len(classes)),
                         dtype=int)
| bsd-3-clause |
centrofermi/e3sim | plot/plot_energy_cpu.py | 1 | 1300 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on 21/04/2015

@author: Fabrizio Coccetti (fabrizio.coccetti@centrofermi.it) [www.fc8.net]

Plot the CPU time of each simulated shower against the energy of the
primary particle; the result-file location is read from the packaged
files_location.ini for the current machine.
"""
import matplotlib.pyplot as plt
import numpy as np
import os
import pkg_resources
from e3sim.config.specific_machine import machine
try:
    # Python 3: the module is called ``configparser``.
    import configparser
except ImportError:
    # Python 2 fallback: alias the old name so the rest of the script is
    # version independent (this removes the need for a second, bare
    # try/except when the parser is instantiated).
    import ConfigParser as configparser
# Reading files_location.ini shipped inside the e3sim package.
parser = configparser.ConfigParser()
parser.read(
    pkg_resources.resource_filename(
        'e3sim',
        os.path.join('config', 'files_location.ini')))
outputDir = parser.get(machine, 'outputDir')
resultFile = parser.get(machine, 'resultFile')
# Column 0: primary energy (GeV); column 1: CPU time (s); first row is a header.
data = np.loadtxt(os.path.join(outputDir, resultFile), skiprows=1)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(data[:, 0], data[:, 1], 'ro')
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim(-1.E2, 1.E9)
ax.set_ylim(-100, 1.E6)
ax.grid(True)
ax.axhline(0, color='black', lw=2)
# NOTE: fixed typo in the original title ("Coriska" -> "Corsika").
ax.set_title('Corsika Simulation CPU time')
ax.set_xlabel('Energy of the Primary (GeV)')
ax.set_ylabel('CPU time (s)')
plt.show()
| gpl-3.0 |
MechCoder/scikit-learn | examples/manifold/plot_mds.py | 88 | 2731 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# License: BSD
print(__doc__)
import numpy as np

from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection

from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA

n_samples = 20
seed = np.random.RandomState(seed=3)
# ``np.float`` was removed in NumPy 1.24; the builtin ``float`` is equivalent.
X_true = seed.randint(0, 20, 2 * n_samples).astype(float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()

similarities = euclidean_distances(X_true)

# Add symmetric noise to the similarities (the diagonal stays exactly zero).
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise

mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
                   dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_

nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
                    dissimilarity="precomputed", random_state=seed, n_jobs=1,
                    n_init=1)
npos = nmds.fit_transform(similarities, init=pos)

# Rescale the data so both embeddings share the scale of the true points.
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())

# Rotate the data into a common orientation via PCA.
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)

fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color='navy', s=s, lw=0,
            label='True Position')
plt.scatter(pos[:, 0], pos[:, 1], color='turquoise', s=s, lw=0, label='MDS')
plt.scatter(npos[:, 0], npos[:, 1], color='darkorange', s=s, lw=0, label='NMDS')
plt.legend(scatterpoints=1, loc='best', shadow=False)

similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0

# Plot the edges: one segment per ordered pair of true points, colored by
# the (rescaled) pairwise similarity.
# a sequence of (*line0*, *line1*, *line2*), where::
#            linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
            for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
                    zorder=0, cmap=plt.cm.Blues,
                    norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)

plt.show()
| bsd-3-clause |
suchyta1/BalrogReconstruction | functions2.py | 1 | 15679 | #!/usr/bin/env python
import desdb
import numpy as np
import esutil
import pyfits
import sys
import healpy as hp
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
#import seaborn as sns
def CatMatch(c1, c2, band1, band2):
    """Positionally match two catalogs within 1 arcsec.

    The HTM match is performed in both directions and only mutual
    (symmetric) pairs are kept; the matched rows of each catalog are
    returned in corresponding order.
    """
    radius = 1/3600.0
    ra1 = c1['ra_%s' % (band1)]
    dec1 = c1['dec_%s' % (band1)]
    ra2 = c2['ra_%s' % (band2)]
    dec2 = c2['dec_%s' % (band2)]
    fwd1, fwd2, _ = esutil.htm.HTM().match(ra1, dec1, ra2, dec2, radius)
    rev2, rev1, _ = esutil.htm.HTM().match(ra2, dec2, ra1, dec1, radius)
    # Pack each directed match list into a structured array so that
    # ``np.in1d`` can compare whole (index1, index2) pairs at once.
    pair_dtype = [('1', np.float32), ('2', np.float32)]
    forward = np.empty(len(fwd1), dtype=pair_dtype)
    forward['1'] = fwd1
    forward['2'] = fwd2
    backward = np.empty(len(rev1), dtype=pair_dtype)
    backward['1'] = rev1
    backward['2'] = rev2
    mutual = np.in1d(forward, backward)
    return c1[fwd1[mutual]], c2[fwd2[mutual]]
def GetDepthMap(depth_file):
    """Load a HEALPix depth map (NESTED ordering) and return ``(map, nside)``."""
    depth_map = hp.read_map(depth_file, nest=True)
    nside = hp.npix2nside(depth_map.size)
    return depth_map, nside
def GetPhi(ra):
    """Convert right ascension in degrees to the spherical azimuth phi (radians)."""
    phi = ra * np.pi / 180.0
    return phi
def GetRa(phi):
    """Convert the spherical azimuth phi (radians) to right ascension in degrees."""
    ra = phi*180.0/np.pi
    return ra
def GetTheta(dec):
    """Convert declination in degrees to the spherical polar angle theta (radians)."""
    theta = (90.0 - dec) * np.pi / 180.0
    return theta
def GetDec(theta):
    """Convert the spherical polar angle theta (radians) to declination in degrees."""
    dec = 90.0 - theta*180.0/np.pi
    return dec
def GetRaDec(theta, phi):
    """Convert spherical ``(theta, phi)`` in radians to ``[ra, dec]`` in degrees."""
    ra = GetRa(phi)
    dec = GetDec(theta)
    return [ra, dec]
def GetPix(nside, ra, dec, nest=True):
    """Map ``(ra, dec)`` in degrees to HEALPix pixel indices for ``nside``."""
    theta = GetTheta(dec)
    phi = GetPhi(ra)
    return hp.ang2pix(nside, theta, phi, nest=nest)
def GetDepthCut(map, nside, ra, dec, depth = 50.0):
    """Boolean mask of positions whose depth-map value lies in (0, depth)."""
    pixels = GetPix(nside, ra, dec)
    depth_values = map[pixels]
    # Non-positive map values mark pixels outside the survey footprint.
    return (depth_values > 0) & (depth_values < depth)
def ValidDepth(map, nside, arr, rakey='ra', deckey='dec', depth = 50.0):
    """Return only the rows of ``arr`` that pass the depth-map cut."""
    good = GetDepthCut(map, nside, arr[rakey], arr[deckey], depth = depth)
    return arr[good]
def InSurvey(map, nside, ra, dec):
    """Boolean mask: positions with a valid depth-map value (default depth cut)."""
    return GetDepthCut(map, nside, ra, dec)
def InTile(data, ura, udec, rakey='ra', deckey='dec'):
    """Elementwise mask: rows whose (ra, dec) fall strictly inside the bounds."""
    ra = data[rakey]
    dec = data[deckey]
    ra_ok = (ra > ura[0]) & (ra < ura[1])
    dec_ok = (dec > udec[0]) & (dec < udec[1])
    return ra_ok & dec_ok
def RemoveTileOverlap(tilestuff, data, col='tilename', rakey='ra', deckey='dec'):
    """Keep only objects inside the *unique* area of their own tile.

    Tiles overlap at the edges; restricting each object to its tile's
    unique (urall/uraur, udecll/udecur) box removes the duplicates.
    """
    tile_of_obj = data[col]
    keep = np.zeros(len(data), dtype=np.bool_)
    for tilename in np.unique(tile_of_obj):
        sel = (tile_of_obj == tilename)
        info = tilestuff[tilename]
        ra_bounds = (info['urall'], info['uraur'])
        dec_bounds = (info['udecll'], info['udecur'])
        keep[sel] = InTile(data[sel], ra_bounds, dec_bounds,
                           rakey=rakey, deckey=deckey)
    return data[keep]
def in_tile(ra, dec, ura, udec):
    """Elementwise test that each (ra, dec) lies strictly inside the tile bounds."""
    ra_ok = (ra > ura[0]) & (ra < ura[1])
    dec_ok = (dec > udec[0]) & (dec < udec[1])
    return ra_ok & dec_ok
def hpInTiles(tiles, tileinfo, data, depthmap, depth_nside, max, out_nside):
    """For each (ra, dec) sample set in ``data``, count samples inside any
    of ``tiles`` AND inside the survey depth map, and scale the hit
    fraction by the output healpix pixel area (deg^2).

    NOTE(review): the parameter ``max`` shadows the builtin; presumably it
    is the number of samples thrown per pixel -- confirm with the caller.
    """
    lims = []
    num = np.empty(len(data))
    for i in range(len(data)):
        ra = data[i][0]
        dec = data[i][1]
        found = np.zeros(len(ra), dtype=np.bool_)
        for tile in tiles:
            entry = tileinfo[tile]
            ura = (entry['urall'], entry['uraur'])
            udec = (entry['udecll'], entry['udecur'])
            # A sample counts when it is inside this tile's unique area and
            # has a valid depth-map value.
            inc = (in_tile(ra,dec, ura,udec) & InSurvey(depthmap, depth_nside, ra, dec))
            found = (found | inc )
            # Tile bounds are identical for every i; record them only once.
            if i==0:
                lims.append( [ura, udec] )
        # Hit fraction times the area of one output pixel.
        num[i] = np.sum(found) / max * hp.nside2pixarea(out_nside, degrees=True)
    return num, lims
def EqualNumBinning(arr, num=5000, join='max'):
    """Build (approximately) equal-occupancy histogram bins over ``arr``.

    The sorted data are split into chunks of ``num`` entries; a short
    remainder chunk is merged into its neighbour at the ``join`` end
    ('max' merges at the high end, 'min' at the low end).  Bin edges are
    the midpoints between adjacent chunks.

    Returns (p, bins): the normalized density per bin (fraction of
    samples per unit bin width) and the bin-edge array.
    """
    a = np.sort(arr)
    size = len(a)
    r = size % num
    # BUG FIX: ``size / num`` was Python-2 integer division; under
    # Python 3 it yields a float, which np.array_split rejects.  ``//``
    # is equivalent on both versions.
    n = size // num
    if r != 0:
        n += 1
    if join == 'min' and r != 0:
        a = a[::-1]
    a = np.array_split(a, n)
    # Merge the short remainder chunk into its neighbour -- but only when
    # there are at least two chunks (``size < num`` would otherwise crash
    # on the a[-2] access).
    if r != 0 and len(a) > 1:
        a[-2] = np.append(a[-2], a[-1])
        a = a[:-1]
    if join == 'min' and r != 0:
        # Chunks were built on the reversed array; flip back the chunk
        # order and index from the opposite ends.
        a = a[::-1]
        first = -1
        last = 0
    else:
        first = 0
        last = -1
    bins = [a[0][first]]
    nums = [len(a[0])]
    for i in range(len(a) - 1):
        # Edge between two chunks: midpoint of the facing extreme values.
        btwn = (a[i][last] + a[i + 1][first]) / 2.0
        bins.append(btwn)
        nums.append(len(a[i + 1]))
    bins.append(a[-1][last])
    bins = np.array(bins)
    nums = np.array(nums)
    db = np.diff(bins)
    p = nums / (len(arr) * db)
    return p, bins
def CorrectColorDistribution(balrog_truth, balrog_sim, des):
    '''
    sim_pdf, sim_bins = EqualNumBinning(sim['gr'])
    sim_c = (sim_bins[1:] + sim_bins[:-1]) / 2.0
    plt.figure(2)
    #plt.plot(sim_c, sim_pdf)
    d = np.diff(sim_bins)
    plt.bar(sim_bins[:-1], sim_pdf, width=d)
    '''
    # Plot three log-scaled 2D color-color (i-z vs g-r) histograms side by
    # side: DES observed, Balrog observed, and Balrog truth.  Python 2 only
    # (uses a print *statement* below).
    gr_des_bins = np.linspace(-5.5, 0.5, num=50, endpoint=True)
    iz_des_bins = np.linspace(0.1, 5.5, num=50, endpoint=True)
    fig = plt.figure(2, figsize=(12,4))
    ax = fig.add_subplot(1,3, 1)
    des_hist, xbins, ybins = np.histogram2d(des['iz'], des['gr'], bins=[iz_des_bins, gr_des_bins])
    ax.imshow(np.log10(des_hist.transpose()), extent=[0.1,5.5, -5.5,0.5], origin='lower', interpolation='nearest')
    ax.set_title(r'DES Observed')
    ax.set_xlabel(r'i-z')
    ax.set_ylabel(r'g-r')
    ax.plot( [0.0, 2.8], [-2.05, 0.5], color='black')
    ax.set_xlim([0.1,2.9])
    ax.set_ylim([-2.1,0.5])
    ax = fig.add_subplot(1,3, 2)
    sim_hist, xbins, ybins = np.histogram2d(balrog_sim['iz'], balrog_sim['gr'], bins=[iz_des_bins, gr_des_bins])
    ax.imshow(np.log10(sim_hist.transpose()), extent=[0.1,5.5, -5.5,0.5], origin='lower', interpolation='nearest')
    ax.set_title(r'Balrog Observed')
    ax.set_xlabel(r'i-z')
    ax.set_ylabel(r'g-r')
    ax.plot( [0.0, 2.8], [-2.05, 0.5], color='black')
    ax.set_xlim([0.1,2.9])
    ax.set_ylim([-2.1,0.5])
    # Diagnostic: report cells where DES has objects but Balrog has none
    # (those cells cannot be reweighted from the simulation).
    for i in range(len(sim_hist)):
        for j in range(len(sim_hist[i])):
            if sim_hist[i][j]==0 and des_hist[i][j]!=0:
                print sim_hist[i][j], des_hist[i][j]
    gr_truth_bins = np.linspace(-1, 4, num=80)
    iz_truth_bins = np.linspace(-1, 2, num=80)
    ax = fig.add_subplot(1,3, 3)
    truth_hist, xbins, ybins = np.histogram2d(balrog_sim['truth_iz'], balrog_sim['truth_gr'], bins=[iz_truth_bins, gr_truth_bins])
    ax.imshow(np.log10(truth_hist.transpose()), extent=[-1,2, -1,4], origin='lower', interpolation='nearest')
    ax.set_title(r'Balrog Truth')
    ax.set_xlabel(r'i-z')
    ax.set_ylabel(r'g-r')
    #ax.set_xticks(np.arange(-1,2,1))
    majorLocator = MultipleLocator(1)
    minorLocator = MultipleLocator(0.2)
    ax.xaxis.set_major_locator(majorLocator)
    ax.xaxis.set_minor_locator(minorLocator)
    ax.set_ylim([-1, 2.5])
    plt.tight_layout()
def CorrectMags(des_info, balrog_info, truth_info, inds, band='i'):
    """Correct DES number counts per region using the Balrog transfer.

    For each region k: redistribute the observed DES magnitude histogram
    into truth-magnitude bins via the Balrog observed->truth mapping, then
    divide by the Balrog-derived completeness.  Returns the corrected
    counts and their propagated errors, one value per region.
    Python 2 only (uses a print *statement* below).
    """
    mag_bins = np.arange(16,27, 0.1)
    tmag_bins = np.arange(18,24, 0.1)
    des_corr = []
    err_des_corr = []
    for k in range(len(des_info)):
        des = des_info[k]
        balrog = balrog_info[k]
        truth = truth_info[k]
        #m = 'mag_%s' %(band)
        #tm = 'truth_mag_%s' %(band)
        m = 'mag'
        tm = 'truth_mag'
        des_hist_corr = np.zeros( len(tmag_bins)-1 )
        err_des_hist_corr = np.zeros( len(tmag_bins)-1 )
        des_hist, mag_bins = np.histogram(des[m], bins=mag_bins)
        balrog_hist, mag_bins = np.histogram(balrog[m],bins=mag_bins)
        tbalrog_hist, tmag_bins = np.histogram(balrog[tm], bins=tmag_bins)
        for j in range(len(mag_bins)-1):
            des_num = des_hist[j]
            cut = np.zeros(len(balrog), dtype=np.bool_)
            f = 0
            # Widen the observed-magnitude window until at least one
            # Balrog object falls inside it.
            while np.sum(cut)==0:
                dm = f * (mag_bins[j+1] - mag_bins[j])
                cut = (balrog[m] > (mag_bins[j] - dm)) & (balrog[m] < (mag_bins[j+1] + dm))
                f += 1
            balrog_inbin = balrog[cut]
            truth_hist, tmag_bins = np.histogram(balrog_inbin[tm], bins=tmag_bins)
            size = float(len(balrog_inbin))
            # Spread the DES count of this observed bin over truth bins in
            # proportion to the Balrog truth distribution.
            des_hist_corr = des_hist_corr + (truth_hist / size) * des_num
            if des_num > 0:
                frac = np.sqrt( 1.0/des_num + 1.0/size )
            else:
                frac = 0
            err_des_hist_corr = err_des_hist_corr + truth_hist * (frac / size * des_num)
        completeness = np.zeros( des_hist_corr.shape )
        err_completeness = np.zeros( err_des_hist_corr.shape )
        p = False
        for j in range(len(tmag_bins)-1):
            f = 0
            cut = np.zeros(len(truth), dtype=np.bool_)
            # Widen the truth-magnitude window similarly; give up widening
            # past mag 21 (faint bins may legitimately be empty).
            while np.sum(cut)==0:
                dm = f * (tmag_bins[j+1] - tmag_bins[j])
                cut = (truth[tm] > (tmag_bins[j]-dm)) & (truth[tm] < (tmag_bins[j+1]+dm))
                f += 1
                if tmag_bins[j] > 21:
                    break
            den = float( np.sum(cut) )
            # Completeness = fraction of injected truth objects recovered.
            n = np.in1d(balrog['balrog_index'], truth[cut]['balrog_index'])
            num = np.sum(n)
            if den > 0:
                comp = num / den
                completeness[j] = comp
                err_completeness[j] = np.sqrt(num)/den
            # Warn (once per region) about zero completeness at faint mags.
            if (completeness[j]==0) and (not p) and tmag_bins[j]>21:
                print k, inds[k], tmag_bins[j], len(des)
                p = True
        corr = np.zeros( des_hist_corr.shape )
        err_corr = np.zeros( des_hist_corr.shape )
        cut = (completeness > 0)
        corr[cut] = des_hist_corr[cut]/completeness[cut]
        cut = (completeness > 0) & (des_hist_corr > 0)
        t1 = err_des_hist_corr[cut]/des_hist_corr[cut]
        t2 = err_completeness[cut]/completeness[cut]
        frac = np.sqrt( t1*t1 + t2*t2 )
        err_corr[cut] = frac * corr[cut]
        des_corr.append(np.sum(corr))
        err_des_corr.append( np.sqrt(sum(err_corr*err_corr)) )
        # Debug plots for one hard-coded region.
        if k==40:
            plt.figure(10)
            plt.plot(tmag_bins[:-1], des_hist_corr, color='blue')
            plt.plot(mag_bins[:-1], des_hist, color='red')
            plt.plot(mag_bins[:-1], balrog_hist, color='green')
            plt.plot(tmag_bins[:-1], corr, color='cyan')
            plt.figure(11)
            plt.plot(tmag_bins[:-1], completeness, color='blue')
    return des_corr, err_des_corr
def CorrectColors(des_info, balrog_info, truth_info):
    """2-D (color-color) analogue of CorrectMags.

    For each region: map the DES observed (i-z, g-r) histogram into truth
    color bins using the Balrog observed->truth transfer, divide by the
    Balrog completeness in truth-color space, and return the summed
    corrected count per region.
    """
    gr_o_bins = np.linspace(-5.5, 0.5, num=50, endpoint=True)
    iz_o_bins = np.linspace(0.1, 5.5, num=50, endpoint=True)
    gr_t_bins = np.linspace(-1, 4, num=80)
    iz_t_bins = np.linspace(-1, 2, num=80)
    des_corr = []
    #plus = 51
    #for k in range(len(des_info[plus:(plus+1)])):
    for k in range(len(des_info)):
        des = des_info[k]
        balrog = balrog_info[k]
        truth = truth_info[k]
        des_hist_corr = np.zeros( (len(iz_t_bins)-1, len(gr_t_bins)-1) )
        des_hist, iz_o_bins, gr_o_bins = np.histogram2d(des['iz'], des['gr'], bins=[iz_o_bins, gr_o_bins])
        balrog_hist, iz_o_bins, gr_o_bins = np.histogram2d(balrog['iz'], balrog['gr'], bins=[iz_o_bins, gr_o_bins])
        tbalrog_hist, iz_t_bins, gr_t_bins = np.histogram2d(balrog['truth_iz'], balrog['truth_gr'], bins=[iz_t_bins, gr_t_bins])
        for j in range(len(iz_o_bins)-1):
            for i in range(len(gr_o_bins)-1):
                des_num = des_hist[j][i]
                cut = np.zeros(len(balrog), dtype=np.bool_)
                f = 0
                # Widen the observed color cell until it contains at least
                # one Balrog object.
                while np.sum(cut)==0:
                    d_gr = f * (gr_o_bins[i+1] - gr_o_bins[i])
                    d_iz = f * (iz_o_bins[j+1] - iz_o_bins[j])
                    cut = ( (balrog['gr'] > (gr_o_bins[i] - d_gr)) & (balrog['gr'] < (gr_o_bins[i+1] + d_gr)) & (balrog['iz'] > (iz_o_bins[j] - d_iz)) & (balrog['iz'] < (iz_o_bins[j+1] + d_iz)) )
                    f += 1
                balrog_inbin = balrog[cut]
                truth_hist, iz_t_bins, gr_t_bins = np.histogram2d(balrog_inbin['truth_iz'], balrog_inbin['truth_gr'], bins=[iz_t_bins,gr_t_bins])
                dt_iz = np.diff(iz_t_bins)
                dt_gr = np.diff(gr_t_bins)
                # Redistribute the DES count of this observed cell over the
                # truth-color plane according to the Balrog transfer.
                des_hist_corr = des_hist_corr + (truth_hist / float(len(balrog_inbin))) * des_num
        completeness = np.zeros( des_hist_corr.shape )
        for j in range(len(iz_t_bins)-1):
            for i in range(len(gr_t_bins)-1):
                cut = (truth['truth_gr'] > gr_t_bins[i]) & (truth['truth_gr'] < gr_t_bins[i+1]) & (truth['truth_iz'] > iz_t_bins[j]) & (truth['truth_iz'] < iz_t_bins[j+1])
                den = float( np.sum(cut) )
                n = np.in1d(balrog['balrog_index'], truth[cut]['balrog_index'])
                num = np.sum(n)
                if den > 0:
                    comp = num / den
                    completeness[j][i] = comp
        corr = np.zeros( des_hist_corr.shape )
        cut = (completeness > 0)
        corr[cut] = des_hist_corr[cut]/completeness[cut]
        des_corr.append(np.sum(corr))
        #des_hist_corr
        """
        fig = plt.figure(3, figsize=(16,8))
        ax = fig.add_subplot(2,3, 1)
        cax = ax.imshow(np.log10(des_hist.transpose()), extent=[0.1,5.5, -5.5,0.5], origin='lower', interpolation='nearest', cmap=mpl.cm.binary)
        cbar = fig.colorbar(cax)
        ax.set_title(r'DES Observed')
        ax.set_xlabel(r'i-z')
        ax.set_ylabel(r'g-r')
        ax.set_xlim([0.1,2.9])
        ax.set_ylim([-2.1,0.5])
        ax = fig.add_subplot(2,3, 2)
        cax = ax.imshow(np.log10(des_hist_corr.transpose()), extent=[-1,2, -1,4], origin='lower', interpolation='nearest', cmap=mpl.cm.binary)
        ax.set_title(r'DES, color corrected')
        ax.set_xlabel(r'i-z')
        ax.set_ylabel(r'g-r')
        majorLocator = MultipleLocator(1)
        minorLocator = MultipleLocator(0.2)
        ax.xaxis.set_major_locator(majorLocator)
        ax.xaxis.set_minor_locator(minorLocator)
        ax.set_ylim([-1, 2.5])
        cbar = fig.colorbar(cax)
        ax = fig.add_subplot(2,3, 3)
        cax = ax.imshow(np.log10(corr.transpose()), extent=[-1,2, -1,4], origin='lower', interpolation='nearest', cmap=mpl.cm.binary)
        ax.set_title(r'DES, color/comp. corrected')
        ax.set_xlabel(r'i-z')
        ax.set_ylabel(r'g-r')
        majorLocator = MultipleLocator(1)
        minorLocator = MultipleLocator(0.2)
        ax.xaxis.set_major_locator(majorLocator)
        ax.xaxis.set_minor_locator(minorLocator)
        ax.set_ylim([-1, 2.5])
        cbar = fig.colorbar(cax)
        ax = fig.add_subplot(2,3, 4)
        cax = ax.imshow(np.log10(balrog_hist.transpose()), extent=[0.1,5.5, -5.5,0.5], origin='lower', interpolation='nearest', cmap=mpl.cm.binary)
        ax.set_title(r'Balrog Observed')
        ax.set_xlabel(r'i-z')
        ax.set_ylabel(r'g-r')
        ax.set_xlim([0.1,2.9])
        ax.set_ylim([-2.1,0.5])
        cbar = fig.colorbar(cax)
        ax = fig.add_subplot(2,3, 5)
        cax = ax.imshow(np.log10(tbalrog_hist.transpose()), extent=[-1,2, -1,4], origin='lower', interpolation='nearest', cmap=mpl.cm.binary)
        ax.set_title(r'Balrog Truth')
        ax.set_xlabel(r'i-z')
        ax.set_ylabel(r'g-r')
        majorLocator = MultipleLocator(1)
        minorLocator = MultipleLocator(0.2)
        ax.xaxis.set_major_locator(majorLocator)
        ax.xaxis.set_minor_locator(minorLocator)
        ax.set_ylim([-1, 2.5])
        cbar = fig.colorbar(cax)
        ax = fig.add_subplot(2,3, 6)
        cax = ax.imshow(np.log10(completeness.transpose()), extent=[-1,2, -1,4], origin='lower', interpolation='nearest', cmap=mpl.cm.binary)
        ax.set_title(r'Completeness')
        ax.set_xlabel(r'i-z')
        ax.set_ylabel(r'g-r')
        majorLocator = MultipleLocator(1)
        minorLocator = MultipleLocator(0.2)
        ax.xaxis.set_major_locator(majorLocator)
        ax.xaxis.set_minor_locator(minorLocator)
        ax.set_ylim([-1, 2.5])
        cbar = fig.colorbar(cax)
        plt.tight_layout()
        """
    return des_corr
| mit |
Tong-Chen/scikit-learn | sklearn/tests/test_common.py | 1 | 44279 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <amueller@ais.uni-bonn.de>
# Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import traceback
import inspect
import pickle
import pkgutil
import numpy as np
from scipy import sparse
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import meta_estimators
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
import sklearn
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin)
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import (load_iris, load_boston, make_blobs,
make_classification)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.svm.base import BaseLibSVM
from sklearn.cross_validation import train_test_split
from sklearn.utils.validation import DataConversionWarning
# Estimators known not to satisfy the common checks below; every test in
# this module skips them by name.
dont_test = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
             'LabelBinarizer', 'LabelEncoder', 'TfidfTransformer',
             'IsotonicRegression', 'OneHotEncoder',
             'RandomTreesEmbedding', 'FeatureHasher', 'DummyClassifier',
             'DummyRegressor', 'TruncatedSVD', 'PolynomialFeatures']
def test_all_estimators():
    """Every estimator must be default-constructible, clonable, have a
    working repr, a fluent ``set_params``, and an ``__init__`` that only
    stores its parameters."""
    # Test that estimators are default-constructible, clonable
    # and have working repr.
    estimators = all_estimators(include_meta_estimators=True)
    # Meta-estimators need a sub-estimator; any classifier will do.
    classifier = LDA()
    # Meta sanity-check to make sure that the estimator introspection runs
    # properly
    assert_greater(len(estimators), 0)
    for name, Estimator in estimators:
        # some can just not be sensibly default constructed
        if name in dont_test:
            continue
        # test default-constructibility
        # get rid of deprecation warnings
        with warnings.catch_warnings(record=True):
            if name in meta_estimators:
                estimator = Estimator(classifier)
            else:
                estimator = Estimator()
            # test cloning
            clone(estimator)
            # test __repr__
            repr(estimator)
            # test that set_params returns self
            assert_true(isinstance(estimator.set_params(), Estimator))
            # test if init does nothing but set parameters
            # this is important for grid_search etc.
            # We get the default parameters from init and then
            # compare these against the actual values of the attributes.
            # this comes from getattr. Gets rid of deprecation decorator.
            init = getattr(estimator.__init__, 'deprecated_original',
                           estimator.__init__)
            try:
                args, varargs, kws, defaults = inspect.getargspec(init)
            except TypeError:
                # init is not a python function.
                # true for mixins
                continue
            params = estimator.get_params()
            if name in meta_estimators:
                # they need a non-default argument
                args = args[2:]
            else:
                args = args[1:]
            if args:
                # non-empty list
                assert_equal(len(args), len(defaults))
            else:
                continue
            for arg, default in zip(args, defaults):
                if arg not in params.keys():
                    # deprecated parameter, not in get_params
                    assert_true(default is None)
                    continue
                if isinstance(params[arg], np.ndarray):
                    assert_array_equal(params[arg], default)
                else:
                    assert_equal(params[arg], default)
def test_all_estimator_no_base_class():
    """No estimator discovered by ``all_estimators`` may be an abstract
    base class (its name must not start with 'Base')."""
    for name, _ in all_estimators():
        complaint = ("Base estimators such as {0} should not be included"
                     " in all_estimators").format(name)
        assert_false(name.lower().startswith('base'), msg=complaint)
def test_estimators_sparse_data():
    """Estimators must either accept sparse input or raise a TypeError
    whose message mentions 'sparse'."""
    # All estimators should either deal with sparse data or raise an
    # exception with type TypeError and an intelligible error message
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10)
    X[X < .8] = 0
    X = sparse.csr_matrix(X)
    # BUG FIX: ``np.int`` was removed in NumPy 1.24; the builtin ``int``
    # is the documented replacement.
    y = (4 * rng.rand(40)).astype(int)
    estimators = all_estimators()
    estimators = [(name, Estimator) for name, Estimator in estimators
                  if issubclass(Estimator, (ClassifierMixin, RegressorMixin))]
    for name, Classifier in estimators:
        if name in dont_test:
            continue
        # catch deprecation warnings
        with warnings.catch_warnings():
            classifier = Classifier()
        # fit and predict
        try:
            classifier.fit(X, y)
            classifier.predict(X)
            if hasattr(classifier, 'predict_proba'):
                try:
                    classifier.predict_proba(X)
                except NotImplementedError:
                    pass
        except TypeError as e:
            if not 'sparse' in repr(e):
                print("Estimator %s doesn't seem to fail gracefully on "
                      "sparse data: error message state explicitly that "
                      "sparse input is not supported if this is not the case."
                      % name)
                raise
        except Exception:
            print("Estimator %s doesn't seem to fail gracefully on "
                  "sparse data: it should raise a TypeError if sparse input "
                  "is explicitly not supported." % name)
            raise
def test_transformers():
    """Transformers must fit on a small blob dataset, produce outputs with
    one row per sample, give consistent ``transform``/``fit_transform``
    results, and reject malformed input."""
    # test if transformers do something sensible on training set
    # also test all shapes / shape errors
    transformers = all_estimators(type_filter='transformer')
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    n_samples, n_features = X.shape
    X = StandardScaler().fit_transform(X)
    # Shift to non-negative values (some transformers require them).
    X -= X.min()
    succeeded = True
    for name, Transformer in transformers:
        if name in dont_test:
            continue
        # these don't actually fit the data:
        if name in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer']:
            continue
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            transformer = Transformer()
        set_random_state(transformer)
        if hasattr(transformer, 'compute_importances'):
            transformer.compute_importances = True
        if name == 'SelectKBest':
            # SelectKBest has a default of k=10
            # which is more feature than we have.
            transformer.k = 1
        elif name in ['GaussianRandomProjection',
                      'SparseRandomProjection']:
            # Due to the jl lemma and very few samples, the number
            # of components of the random matrix projection will be greater
            # than the number of features.
            # So we impose a smaller number (avoid "auto" mode)
            transformer.n_components = 1
        elif name == "MiniBatchDictionaryLearning":
            transformer.set_params(n_iter=5)  # default = 1000
        elif name == "KernelPCA":
            transformer.remove_zero_eig = False
        # fit
        if name in ('PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD'):
            # Cross-decomposition methods need a 2-D multi-output y.
            y_ = np.c_[y, y]
            y_[::2, 1] *= 2
        else:
            y_ = y
        try:
            transformer.fit(X, y_)
            X_pred = transformer.fit_transform(X, y=y_)
            if isinstance(X_pred, tuple):
                for x_pred in X_pred:
                    assert_equal(x_pred.shape[0], n_samples)
            else:
                assert_equal(X_pred.shape[0], n_samples)
        except Exception as e:
            # Record the failure but keep checking the other transformers.
            print(transformer)
            print(e)
            print()
            succeeded = False
            continue
        if hasattr(transformer, 'transform'):
            if name in ('PLSCanonical', 'PLSRegression', 'CCA',
                        'PLSSVD'):
                X_pred2 = transformer.transform(X, y_)
                X_pred3 = transformer.fit_transform(X, y=y_)
            else:
                X_pred2 = transformer.transform(X)
                X_pred3 = transformer.fit_transform(X, y=y_)
            if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
                for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
                    assert_array_almost_equal(
                        x_pred, x_pred2, 2,
                        "fit_transform not correct in %s" % Transformer)
                    assert_array_almost_equal(
                        x_pred3, x_pred2, 2,
                        "fit_transform not correct in %s" % Transformer)
            else:
                assert_array_almost_equal(
                    X_pred, X_pred2, 2,
                    "fit_transform not correct in %s" % Transformer)
                assert_array_almost_equal(
                    X_pred3, X_pred2, 2,
                    "fit_transform not correct in %s" % Transformer)
            # raises error on malformed input for transform
            assert_raises(ValueError, transformer.transform, X.T)
    assert_true(succeeded)
def test_transformers_sparse_data():
    """Transformers must either accept sparse input or raise a TypeError
    whose message mentions 'sparse'."""
    # All transformers should either deal with sparse data or raise an
    # exception with type TypeError and an intelligible error message
    rng = np.random.RandomState(0)
    X = rng.rand(40, 10)
    X[X < .8] = 0
    X = sparse.csr_matrix(X)
    # BUG FIX: ``np.int`` was removed in NumPy 1.24; use the builtin.
    y = (4 * rng.rand(40)).astype(int)
    estimators = all_estimators(type_filter='transformer')
    for name, Transformer in estimators:
        if name in dont_test:
            continue
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            if name in ['Scaler', 'StandardScaler']:
                # Centering a sparse matrix would densify it.
                transformer = Transformer(with_mean=False)
            elif name in ['GaussianRandomProjection',
                          'SparseRandomProjection']:
                # Due to the jl lemma and very few samples, the number
                # of components of the random matrix projection will be greater
                # than the number of features.
                # So we impose a smaller number (avoid "auto" mode)
                # (``//`` keeps the Python-2 integer-division behavior.)
                transformer = Transformer(n_components=X.shape[1] // 4)
            else:
                transformer = Transformer()
        # fit
        try:
            transformer.fit(X, y)
        except TypeError as e:
            if not 'sparse' in repr(e):
                print("Estimator %s doesn't seem to fail gracefully on "
                      "sparse data: error message state explicitly that "
                      "sparse input is not supported if this is not the case."
                      % name)
                raise
        except Exception:
            print("Estimator %s doesn't seem to fail gracefully on "
                  "sparse data: it should raise a TypeError if sparse input "
                  "is explicitly not supported." % name)
            raise
def test_estimators_nan_inf():
    """Fitting, predicting, or transforming data containing NaN or inf
    must raise a ValueError that mentions 'inf' or 'NaN'."""
    # Test that all estimators check their input for NaN's and infs
    rnd = np.random.RandomState(0)
    X_train_finite = rnd.uniform(size=(10, 3))
    X_train_nan = rnd.uniform(size=(10, 3))
    X_train_nan[0, 0] = np.nan
    X_train_inf = rnd.uniform(size=(10, 3))
    X_train_inf[0, 0] = np.inf
    y = np.ones(10)
    y[:5] = 0
    estimators = all_estimators()
    estimators = [(name, E) for name, E in estimators
                  if (issubclass(E, ClassifierMixin) or
                      issubclass(E, RegressorMixin) or
                      issubclass(E, TransformerMixin) or
                      issubclass(E, ClusterMixin))]
    error_string_fit = "Estimator doesn't check for NaN and inf in fit."
    error_string_predict = ("Estimator doesn't check for NaN and inf in"
                            " predict.")
    error_string_transform = ("Estimator doesn't check for NaN and inf in"
                              " transform.")
    for X_train in [X_train_nan, X_train_inf]:
        for name, Estimator in estimators:
            if name in dont_test:
                continue
            if name in ('PLSCanonical', 'PLSRegression', 'CCA',
                        'PLSSVD', 'Imputer'):  # Imputer accepts nan
                continue
            # catch deprecation warnings
            with warnings.catch_warnings(record=True):
                estimator = Estimator()
                if name in ['GaussianRandomProjection',
                            'SparseRandomProjection']:
                    # Due to the jl lemma and very few samples, the number
                    # of components of the random matrix projection will be
                    # greater
                    # than the number of features.
                    # So we impose a smaller number (avoid "auto" mode)
                    estimator = Estimator(n_components=1)
                set_random_state(estimator, 1)
                # try to fit
                # Fitting on non-finite data is EXPECTED to raise; the
                # ``else`` branch below flags estimators that accepted it.
                try:
                    if issubclass(Estimator, ClusterMixin):
                        estimator.fit(X_train)
                    else:
                        estimator.fit(X_train, y)
                except ValueError as e:
                    if not 'inf' in repr(e) and not 'NaN' in repr(e):
                        print(error_string_fit, Estimator, e)
                        traceback.print_exc(file=sys.stdout)
                        raise e
                except Exception as exc:
                    print(error_string_fit, Estimator, exc)
                    traceback.print_exc(file=sys.stdout)
                    raise exc
                else:
                    raise AssertionError(error_string_fit, Estimator)
                # actually fit
                if issubclass(Estimator, ClusterMixin):
                    # All estimators except clustering algorithm
                    # support fitting with (optional) y
                    estimator.fit(X_train_finite)
                else:
                    estimator.fit(X_train_finite, y)
                # predict
                if hasattr(estimator, "predict"):
                    try:
                        estimator.predict(X_train)
                    except ValueError as e:
                        if not 'inf' in repr(e) and not 'NaN' in repr(e):
                            print(error_string_predict, Estimator, e)
                            traceback.print_exc(file=sys.stdout)
                            raise e
                    except Exception as exc:
                        print(error_string_predict, Estimator, exc)
                        traceback.print_exc(file=sys.stdout)
                    else:
                        raise AssertionError(error_string_predict, Estimator)
                # transform
                if hasattr(estimator, "transform"):
                    try:
                        estimator.transform(X_train)
                    except ValueError as e:
                        if not 'inf' in repr(e) and not 'NaN' in repr(e):
                            print(error_string_transform, Estimator, e)
                            traceback.print_exc(file=sys.stdout)
                            raise e
                    except Exception as exc:
                        print(error_string_transform, Estimator, exc)
                        traceback.print_exc(file=sys.stdout)
                    else:
                        raise AssertionError(error_string_transform, Estimator)
def test_transformers_pickle():
    """A pickled-and-unpickled transformer must transform to the same
    values as the original fitted instance."""
    # test if transformers do something sensible on training set
    # also test all shapes / shape errors
    transformers = all_estimators(type_filter='transformer')
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    n_samples, n_features = X.shape
    X = StandardScaler().fit_transform(X)
    X -= X.min()
    succeeded = True
    for name, Transformer in transformers:
        if name in dont_test:
            continue
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            transformer = Transformer()
        if not hasattr(transformer, 'transform'):
            continue
        set_random_state(transformer)
        if hasattr(transformer, 'compute_importances'):
            transformer.compute_importances = True
        if name == "SelectKBest":
            # SelectKBest has a default of k=10
            # which is more feature than we have.
            transformer.k = 1
        elif name in ['GaussianRandomProjection', 'SparseRandomProjection']:
            # Due to the jl lemma and very few samples, the number
            # of components of the random matrix projection will be greater
            # than the number of features.
            # So we impose a smaller number (avoid "auto" mode)
            transformer.n_components = 1
        # fit
        if name in ('PLSCanonical', 'PLSRegression', 'CCA',
                    'PLSSVD'):
            # Cross-decomposition methods need a 2-D multi-output y.
            random_state = np.random.RandomState(seed=12345)
            y_ = np.vstack([y, 2 * y + random_state.randint(2, size=len(y))])
            y_ = y_.T
        else:
            y_ = y
        transformer.fit(X, y_)
        X_pred = transformer.fit(X, y_).transform(X)
        pickled_transformer = pickle.dumps(transformer)
        unpickled_transformer = pickle.loads(pickled_transformer)
        pickled_X_pred = unpickled_transformer.transform(X)
        try:
            assert_array_almost_equal(pickled_X_pred, X_pred)
        except Exception as exc:
            succeeded = False
            print ("Transformer %s doesn't predict the same value "
                   "after pickling" % name)
            raise exc
    assert_true(succeeded)
def test_classifiers_one_label():
    """A classifier trained on a single class must either predict that
    class everywhere or raise a ValueError mentioning 'class'."""
    # test classifiers trained on a single label always return this label
    # or raise an sensible error message
    rnd = np.random.RandomState(0)
    X_train = rnd.uniform(size=(10, 3))
    X_test = rnd.uniform(size=(10, 3))
    # All training targets share the single label 1.
    y = np.ones(10)
    classifiers = all_estimators(type_filter='classifier')
    error_string_fit = "Classifier can't train when only one class is present."
    error_string_predict = ("Classifier can't predict when only one class is "
                            "present.")
    for name, Classifier in classifiers:
        if name in dont_test:
            continue
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            classifier = Classifier()
            # try to fit
            try:
                classifier.fit(X_train, y)
            except ValueError as e:
                if not 'class' in repr(e):
                    print(error_string_fit, Classifier, e)
                    traceback.print_exc(file=sys.stdout)
                    raise e
                else:
                    # Refusing single-class training with a clear message
                    # is acceptable behavior.
                    continue
            except Exception as exc:
                print(error_string_fit, Classifier, exc)
                traceback.print_exc(file=sys.stdout)
                raise exc
            # predict
            try:
                assert_array_equal(classifier.predict(X_test), y)
            except Exception as exc:
                print(error_string_predict, Classifier, exc)
                traceback.print_exc(file=sys.stdout)
def test_clustering():
    """Smoke-test clustering algorithms on the iris data.

    Checks the shape of ``labels_``, a minimal adjusted Rand score
    against the true labels, and that ``fit_predict`` agrees with
    ``fit`` followed by reading ``labels_``.
    """
    clustering = all_estimators(type_filter='cluster')
    iris = load_iris()
    X, y = iris.data, iris.target
    X, y = shuffle(X, y, random_state=7)
    n_samples, n_features = X.shape
    X = StandardScaler().fit_transform(X)
    for name, Alg in clustering:
        if name == 'WardAgglomeration':
            # this is clustering on the features
            # let's not test that here.
            continue
        # catch deprecation and neighbors warnings
        with warnings.catch_warnings(record=True):
            alg = Alg()
            if hasattr(alg, "n_clusters"):
                alg.set_params(n_clusters=3)
            set_random_state(alg)
            if name == 'AffinityPropagation':
                alg.set_params(preference=-100)
            # fit
            alg.fit(X)
        assert_equal(alg.labels_.shape, (n_samples,))
        pred = alg.labels_
        assert_greater(adjusted_rand_score(pred, y), 0.4)
        # fit another time with ``fit_predict`` and compare results
        # (use '==', not 'is': string identity is an interning accident)
        if name == 'SpectralClustering':
            # there is no way to make Spectral clustering deterministic :(
            continue
        set_random_state(alg)
        with warnings.catch_warnings(record=True):
            pred2 = alg.fit_predict(X)
        assert_array_equal(pred, pred2)
def test_classifiers_train():
    """Check classifiers on their training set.

    Verifies prediction shape, minimal training accuracy, errors on
    malformed input, and agreement of ``decision_function`` and
    ``predict_proba`` with ``predict`` — once on a binary and once on a
    multi-class problem.
    """
    classifiers = all_estimators(type_filter='classifier')
    X_m, y_m = make_blobs(random_state=0)
    X_m, y_m = shuffle(X_m, y_m, random_state=7)
    X_m = StandardScaler().fit_transform(X_m)
    # generate binary problem from multi-class one
    y_b = y_m[y_m != 2]
    X_b = X_m[y_m != 2]
    for (X, y) in [(X_m, y_m), (X_b, y_b)]:
        # do it once with binary, once with multiclass
        classes = np.unique(y)
        n_classes = len(classes)
        n_samples, n_features = X.shape
        for name, Classifier in classifiers:
            if name in dont_test:
                continue
            if name in ['MultinomialNB', 'BernoulliNB']:
                # TODO also test these!
                continue
            # catch deprecation warnings
            with warnings.catch_warnings(record=True):
                classifier = Classifier()
            # raises error on malformed input for fit
            assert_raises(ValueError, classifier.fit, X, y[:-1])
            # fit
            classifier.fit(X, y)
            assert_true(hasattr(classifier, "classes_"))
            y_pred = classifier.predict(X)
            assert_equal(y_pred.shape, (n_samples,))
            # training set performance
            assert_greater(accuracy_score(y, y_pred), 0.85)
            # raises error on malformed input for predict
            assert_raises(ValueError, classifier.predict, X.T)
            if hasattr(classifier, "decision_function"):
                try:
                    # decision_function agrees with predict:
                    decision = classifier.decision_function(X)
                    # compare ints with '==', not 'is': small-int identity
                    # is a CPython implementation detail
                    if n_classes == 2:
                        assert_equal(decision.ravel().shape, (n_samples,))
                        dec_pred = (decision.ravel() > 0).astype(np.int)
                        assert_array_equal(dec_pred, y_pred)
                    if (n_classes == 3
                            and not isinstance(classifier, BaseLibSVM)):
                        # 1on1 of LibSVM works differently
                        assert_equal(decision.shape, (n_samples, n_classes))
                        assert_array_equal(np.argmax(decision, axis=1), y_pred)
                    # raises error on malformed input for decision_function
                    # (previously asserted twice; once is enough)
                    assert_raises(ValueError,
                                  classifier.decision_function, X.T)
                except NotImplementedError:
                    pass
            if hasattr(classifier, "predict_proba"):
                try:
                    # predict_proba agrees with predict:
                    y_prob = classifier.predict_proba(X)
                    assert_equal(y_prob.shape, (n_samples, n_classes))
                    assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
                    # check that probas for all classes sum to one
                    assert_array_almost_equal(
                        np.sum(y_prob, axis=1), np.ones(n_samples))
                    # raises error on malformed input for predict_proba
                    # (previously asserted twice; once is enough)
                    assert_raises(ValueError, classifier.predict_proba, X.T)
                except NotImplementedError:
                    pass
def test_classifiers_classes():
    """Check that classifiers cope with non-consecutive / string classes.

    Fits each classifier on iris with string labels (both native str and
    object dtype) and checks training accuracy and the set of predicted
    labels.
    """
    # test if classifiers can cope with non-consecutive classes
    classifiers = all_estimators(type_filter='classifier')
    iris = load_iris()
    X, y = iris.data, iris.target
    X, y = shuffle(X, y, random_state=1)
    X = StandardScaler().fit_transform(X)
    y_names = iris.target_names[y]
    # run once with str labels and once with object-dtype labels
    for y_names in [y_names, y_names.astype('O')]:
        for name, Classifier in classifiers:
            if name in dont_test:
                continue
            if name in ['MultinomialNB', 'BernoulliNB']:
                # TODO also test these!
                continue
            if name in ["LabelPropagation", "LabelSpreading"]:
                # TODO some complication with -1 label
                y_ = y
            else:
                y_ = y_names
            classes = np.unique(y_)
            # catch deprecation warnings
            with warnings.catch_warnings(record=True):
                classifier = Classifier()
            # fit
            try:
                classifier.fit(X, y_)
            except Exception as e:
                # NOTE(review): fit failures are only printed here, not
                # re-raised — predict below will then fail on an unfitted
                # estimator; confirm this best-effort behavior is intended
                print(e)
            y_pred = classifier.predict(X)
            # training set performance
            assert_array_equal(np.unique(y_), np.unique(y_pred))
            accuracy = accuracy_score(y_, y_pred)
            assert_greater(accuracy, 0.78,
                           "accuracy %f of %s not greater than 0.78"
                           % (accuracy, name))
            #assert_array_equal(
                #clf.classes_, classes,
                #"Unexpected classes_ attribute for %r" % clf)
            if np.any(classifier.classes_ != classes):
                # only reported, not asserted (see commented check above)
                print("Unexpected classes_ attribute for %r: "
                      "expected %s, got %s" %
                      (classifier, classes, classifier.classes_))
def test_classifiers_input_shapes():
    """Check that classifiers cope with a column target y of shape
    (n_samples, 1), emitting exactly one DataConversionWarning and
    predicting the same as with a 1-d y.
    """
    # test if classifiers can cope with y.shape = (n_samples, 1)
    classifiers = all_estimators(type_filter='classifier')
    iris = load_iris()
    X, y = iris.data, iris.target
    X, y = shuffle(X, y, random_state=1)
    X = StandardScaler().fit_transform(X)
    for name, Classifier in classifiers:
        if name in dont_test:
            continue
        if name in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]:
            # TODO some complication with -1 label
            continue
        if name in ["DecisionTreeClassifier", "ExtraTreeClassifier"]:
            # We don't raise a warning in these classifiers, as
            # the column y interface is used by the forests.
            continue
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            classifier = Classifier()
        # same seed for both fits so the predictions are comparable
        set_random_state(classifier)
        # fit
        classifier.fit(X, y)
        y_pred = classifier.predict(X)
        set_random_state(classifier)
        # Check that when a 2D y is given, a DataConversionWarning is
        # raised
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always", DataConversionWarning)
            classifier.fit(X, y[:, np.newaxis])
        try:
            assert_equal(len(w), 1)
            assert_array_equal(y_pred, classifier.predict(X))
        except Exception:
            # identify the offending classifier before failing
            print(classifier)
            raise
def test_classifiers_pickle():
    """Check that classifiers predict identically after a pickle
    round-trip, on a binary and a multi-class problem.
    """
    # test if classifiers do something sensible on training set
    # also test all shapes / shape errors
    classifiers = all_estimators(type_filter='classifier')
    X_m, y_m = make_blobs(random_state=0)
    X_m, y_m = shuffle(X_m, y_m, random_state=7)
    X_m = StandardScaler().fit_transform(X_m)
    # generate binary problem from multi-class one
    y_b = y_m[y_m != 2]
    X_b = X_m[y_m != 2]
    succeeded = True
    for (X, y) in [(X_m, y_m), (X_b, y_b)]:
        # do it once with binary, once with multiclass
        n_samples, n_features = X.shape
        for name, Classifier in classifiers:
            if name in dont_test:
                continue
            if name in ['MultinomialNB', 'BernoulliNB']:
                # TODO also test these!
                continue
            # catch deprecation warnings
            with warnings.catch_warnings(record=True):
                classifier = Classifier()
            # raises error on malformed input for fit
            assert_raises(ValueError, classifier.fit, X, y[:-1])
            # fit
            classifier.fit(X, y)
            y_pred = classifier.predict(X)
            # round-trip through pickle and compare predictions
            pickled_classifier = pickle.dumps(classifier)
            unpickled_classifier = pickle.loads(pickled_classifier)
            pickled_y_pred = unpickled_classifier.predict(X)
            try:
                assert_array_almost_equal(pickled_y_pred, y_pred)
            except Exception as exc:
                succeeded = False
                print("Estimator %s doesn't predict the same value "
                      "after pickling" % name)
                raise exc
    assert_true(succeeded)
# module-level cache for the boston subset, filled lazily by _boston_subset
BOSTON = None


def _boston_subset():
    """Return a cached, shuffled, standardized 200-sample slice of the
    boston dataset as an ``(X, y)`` tuple.
    """
    global BOSTON
    if BOSTON is None:
        # load and preprocess only on first use
        boston = load_boston()
        X, y = boston.data, boston.target
        X, y = shuffle(X, y, random_state=0)
        X, y = X[:200], y[:200]
        X = StandardScaler().fit_transform(X)
        BOSTON = X, y
    return BOSTON
def test_regressors_int():
    """Check that regressors handle integer targets (by casting to float).

    Fits one regressor on integer labels and a second on the same labels
    cast to float, and checks that the predictions agree.
    """
    regressors = all_estimators(type_filter='regressor')
    X, _ = _boston_subset()
    X = X[:50]
    rnd = np.random.RandomState(0)
    y = rnd.randint(3, size=X.shape[0])
    for name, Regressor in regressors:
        # note the trailing comma: ('CCA') is just the string 'CCA', and
        # `name in 'CCA'` would be a substring test matching e.g. 'C'
        if name in dont_test or name in ('CCA',):
            continue
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            # separate estimators to control random seeds
            regressor_1 = Regressor()
            regressor_2 = Regressor()
        set_random_state(regressor_1)
        set_random_state(regressor_2)
        if name in ('_PLS', 'PLSCanonical', 'PLSRegression'):
            # PLS-type models need a 2-d multi-target y
            y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
            y_ = y_.T
        else:
            y_ = y
        # fit
        regressor_1.fit(X, y_)
        pred1 = regressor_1.predict(X)
        regressor_2.fit(X, y_.astype(np.float))
        pred2 = regressor_2.predict(X)
        assert_array_almost_equal(pred1, pred2, 2, name)
def test_regressors_train():
    """Check regressors on the boston training set: malformed-input
    errors and a minimal R^2 training score.
    """
    regressors = all_estimators(type_filter='regressor')
    # TODO: test with intercept
    # TODO: test with multiple responses
    X, y = _boston_subset()
    y = StandardScaler().fit_transform(y)  # X is already scaled
    rnd = np.random.RandomState(0)
    succeeded = True
    for name, Regressor in regressors:
        if name in dont_test:
            continue
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            regressor = Regressor()
        if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
            # linear regressors need to set alpha, but not generalized CV ones
            regressor.alpha = 0.01
        # raises error on malformed input for fit
        assert_raises(ValueError, regressor.fit, X, y[:-1])
        # fit
        try:
            if name in ('PLSCanonical', 'PLSRegression', 'CCA'):
                # PLS-type models need a 2-d multi-target y
                y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
                y_ = y_.T
            else:
                y_ = y
            regressor.fit(X, y_)
            regressor.predict(X)
            # TODO: find out why PLS and CCA fail. RANSAC is random
            # and furthermore assumes the presence of outliers, hence
            # skipped
            if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
                assert_greater(regressor.score(X, y_), 0.5)
        except Exception as e:
            # record the failure but keep checking the other regressors
            print(regressor)
            print(e)
            print()
            succeeded = False
    assert_true(succeeded)
def test_regressor_pickle():
    # Test that estimators can be pickled, and once pickled
    # give the same answer as before.
    regressors = all_estimators(type_filter='regressor')
    X, y = _boston_subset()
    # TODO: test with intercept
    # TODO: test with multiple responses
    y = StandardScaler().fit_transform(y)  # X is already scaled
    rnd = np.random.RandomState(0)
    succeeded = True
    for name, Regressor in regressors:
        if name in dont_test:
            continue
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            regressor = Regressor()
        if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
            # linear regressors need to set alpha, but not generalized CV ones
            regressor.alpha = 0.01
        if name in ('PLSCanonical', 'PLSRegression', 'CCA'):
            # PLS-type models need a 2-d multi-target y
            y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
            y_ = y_.T
        else:
            y_ = y
        regressor.fit(X, y_)
        y_pred = regressor.predict(X)
        # store old predictions
        pickled_regressor = pickle.dumps(regressor)
        unpickled_regressor = pickle.loads(pickled_regressor)
        pickled_y_pred = unpickled_regressor.predict(X)
        try:
            assert_array_almost_equal(pickled_y_pred, y_pred)
        except Exception as exc:
            succeeded = False
            print("Estimator %s doesn't predict the same value "
                  "after pickling" % name)
            raise exc
    assert_true(succeeded)
def test_configure():
    """Smoke test the 'configure' step of setup.

    Runs ``setup.py config`` from the project root, which exercises all
    the ``configuration`` functions in the scikit's setup.py files.
    """
    cwd = os.getcwd()
    setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
    setup_filename = os.path.join(setup_path, 'setup.py')
    if not os.path.exists(setup_filename):
        # running from an installed package, not a source checkout
        return
    try:
        os.chdir(setup_path)
        old_argv = sys.argv
        sys.argv = ['setup.py', 'config']
        with warnings.catch_warnings():
            # The configuration spits out warnings when not finding
            # Blas/Atlas development headers
            warnings.simplefilter('ignore', UserWarning)
            if PY3:
                exec(open('setup.py').read(), dict(__name__='__main__'))
            else:
                execfile('setup.py', dict(__name__='__main__'))
    finally:
        # always restore argv and the working directory
        sys.argv = old_argv
        os.chdir(cwd)
def test_class_weight_classifiers():
    """Check class_weight semantics: a huge weight on class 0 must make
    classifiers predict (almost) only class 0 on a very noisy dataset.
    """
    # test that class_weight works and that the semantics are consistent
    classifiers = all_estimators(type_filter='classifier')
    with warnings.catch_warnings(record=True):
        # keep only estimators that expose a class_weight parameter
        classifiers = [c for c in classifiers
                       if 'class_weight' in c[1]().get_params().keys()]
    for n_centers in [2, 3]:
        # create a very noisy dataset
        X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                            random_state=0)
        for name, Classifier in classifiers:
            if name == "NuSVC":
                # the sparse version has a parameter that doesn't do anything
                continue
            if name.endswith("NB"):
                # NaiveBayes classifiers have a somewhat different interface.
                # FIXME SOON!
                continue
            # overwhelming weight on class 0
            if n_centers == 2:
                class_weight = {0: 1000, 1: 0.0001}
            else:
                class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
            with warnings.catch_warnings(record=True):
                classifier = Classifier(class_weight=class_weight)
            if hasattr(classifier, "n_iter"):
                classifier.set_params(n_iter=100)
            set_random_state(classifier)
            classifier.fit(X_train, y_train)
            y_pred = classifier.predict(X_test)
            assert_greater(np.mean(y_pred == 0), 0.9)
def test_class_weight_auto_classifies():
    """Check that class_weight='auto' improves the f1-score on
    unbalanced classification problems.
    """
    # test that class_weight="auto" improves f1-score
    classifiers = all_estimators(type_filter='classifier')
    with warnings.catch_warnings(record=True):
        # keep only estimators that expose a class_weight parameter
        classifiers = [c for c in classifiers
                       if 'class_weight' in c[1]().get_params().keys()]
    for n_classes, weights in zip([2, 3], [[.8, .2], [.8, .1, .1]]):
        # create unbalanced dataset
        X, y = make_classification(n_classes=n_classes, n_samples=200,
                                   n_features=10, weights=weights,
                                   random_state=0, n_informative=n_classes)
        X = StandardScaler().fit_transform(X)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
                                                            random_state=0)
        for name, Classifier in classifiers:
            if name == "NuSVC":
                # the sparse version has a parameter that doesn't do anything
                continue
            if name.startswith("RidgeClassifier"):
                # RidgeClassifier behaves unexpected
                # FIXME!
                continue
            if name.endswith("NB"):
                # NaiveBayes classifiers have a somewhat different interface.
                # FIXME SOON!
                continue
            with warnings.catch_warnings(record=True):
                classifier = Classifier()
            if hasattr(classifier, "n_iter"):
                classifier.set_params(n_iter=100)
            # baseline fit without class weighting
            set_random_state(classifier)
            classifier.fit(X_train, y_train)
            y_pred = classifier.predict(X_test)
            # refit with automatic class weighting and compare f1-scores
            classifier.set_params(class_weight='auto')
            classifier.fit(X_train, y_train)
            y_pred_auto = classifier.predict(X_test)
            assert_greater(f1_score(y_test, y_pred_auto),
                           f1_score(y_test, y_pred))
def test_estimators_overwrite_params():
    """Check that no estimator overwrites its ``__init__`` parameters
    during ``fit`` (get_params must be unchanged after fitting).
    """
    # test whether any classifier overwrites his init parameters during fit
    for est_type in ["classifier", "regressor", "transformer"]:
        estimators = all_estimators(type_filter=est_type)
        X, y = make_blobs(random_state=0, n_samples=9)
        # some want non-negative input
        X -= X.min()
        for name, Estimator in estimators:
            if (name in dont_test
                    or name in ['CCA', '_CCA', 'PLSCanonical',
                                'PLSRegression',
                                'PLSSVD', 'GaussianProcess']):
                # FIXME!
                # in particular GaussianProcess!
                continue
            with warnings.catch_warnings(record=True):
                # catch deprecation warnings
                estimator = Estimator()
            if hasattr(estimator, 'batch_size'):
                # FIXME
                # for MiniBatchDictLearning
                estimator.batch_size = 1
            if name in ['GaussianRandomProjection',
                        'SparseRandomProjection']:
                # Due to the jl lemma and very few samples, the number
                # of components of the random matrix projection will be
                # greater
                # than the number of features.
                # So we impose a smaller number (avoid "auto" mode)
                estimator = Estimator(n_components=1)
            set_random_state(estimator)
            # snapshot params before fit and compare element-wise after
            params = estimator.get_params()
            estimator.fit(X, y)
            new_params = estimator.get_params()
            for k, v in params.items():
                assert_false(np.any(new_params[k] != v),
                             "Estimator %s changes its parameter %s"
                             " from %s to %s during fit."
                             % (name, k, v, new_params[k]))
def test_cluster_overwrite_params():
    """Check that no clusterer overwrites its ``__init__`` parameters
    during ``fit`` (get_params must be unchanged after fitting).
    """
    clusterers = all_estimators(type_filter="cluster")
    X, y = make_blobs(random_state=0, n_samples=9)
    # some want non-negative input (the original bare `X` statement was a
    # no-op; shift like the sibling test_estimators_overwrite_params does)
    X -= X.min()
    for name, Clustering in clusterers:
        with warnings.catch_warnings(record=True):
            # catch deprecation warnings
            clustering = Clustering()
        # snapshot params before fit and compare element-wise after
        params = clustering.get_params()
        clustering.fit(X)
        new_params = clustering.get_params()
        for k, v in params.items():
            assert_false(np.any(new_params[k] != v),
                         "Estimator %s changes its parameter %s"
                         " from %s to %s during fit."
                         % (name, k, v, new_params[k]))
def test_import_all_consistency():
    """Smoke test: every name a module lists in ``__all__`` must actually
    resolve in that module's namespace.
    """
    walker = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
                                   onerror=lambda _: None)
    for _importer, modname, _ispkg in walker:
        # test modules are not part of the public API
        if ".tests." in modname:
            continue
        package = __import__(modname, fromlist="dummy")
        for name in getattr(package, '__all__', ()):
            if getattr(package, name, None) is None:
                raise AttributeError(
                    "Module '{}' has no attribute '{}'".format(
                        modname, name))
def test_sparsify_estimators():
    """Test if predict with sparsified estimators works.

    Tests regression, binary classification, and multi-class classification.
    """
    estimators = all_estimators()
    X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
    y = [1, 1, 1, 2, 2, 2]
    # test regression and binary classification
    for name, Estimator in estimators:
        try:
            Estimator.sparsify
        except AttributeError:
            # bare `except:` would also hide KeyboardInterrupt/SystemExit;
            # only "no sparsify method" should skip the estimator
            continue
        est = Estimator()
        est.fit(X, y)
        pred_orig = est.predict(X)
        # test sparsify with dense inputs
        est.sparsify()
        assert_true(sparse.issparse(est.coef_))
        pred = est.predict(X)
        assert_array_equal(pred, pred_orig)
        # pickle and unpickle with sparse coef_
        est = pickle.loads(pickle.dumps(est))
        assert_true(sparse.issparse(est.coef_))
        pred = est.predict(X)
        assert_array_equal(pred, pred_orig)
    # test multiclass classification
    classifiers = all_estimators(type_filter='classifier')
    y[-1] = 3  # make multi-class
    for name, Classifier in classifiers:
        try:
            Classifier.sparsify
        except AttributeError:
            continue
        est = Classifier()
        est.fit(X, y)
        pred_orig = est.predict(X)
        # test sparsify with dense inputs
        est.sparsify()
        assert_true(sparse.issparse(est.coef_))
        pred = est.predict(X)
        assert_array_equal(pred, pred_orig)
        # pickle and unpickle with sparse coef_
        est = pickle.loads(pickle.dumps(est))
        assert_true(sparse.issparse(est.coef_))
        pred = est.predict(X)
        assert_array_equal(pred, pred_orig)
| bsd-3-clause |
pprett/scikit-learn | sklearn/feature_selection/tests/test_from_model.py | 26 | 6935 | import numpy as np
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import skip_if_32bit
from sklearn import datasets
from sklearn.linear_model import LogisticRegression, SGDClassifier, Lasso
from sklearn.svm import LinearSVC
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.utils.fixes import norm
# Module-level fixtures shared by the tests below.
iris = datasets.load_iris()
data, y = iris.data, iris.target
rng = np.random.RandomState(0)  # NOTE(review): rng looks unused below — confirm
def test_invalid_input():
    """transform must raise ValueError for nonsense threshold strings."""
    clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
    for bad_threshold in ("gobbledigook", ".5 * gobbledigook"):
        selector = SelectFromModel(clf, threshold=bad_threshold)
        selector.fit(data, y)
        # the bad threshold is only evaluated at transform time
        assert_raises(ValueError, selector.transform, data)
def test_input_estimator_unchanged():
    """
    Test that SelectFromModel fits on a clone of the estimator.
    """
    forest = RandomForestClassifier()
    selector = SelectFromModel(estimator=forest)
    selector.fit(data, y)
    # the stored estimator must still be the very object that was passed in
    assert_true(selector.estimator is forest)
@skip_if_32bit
def test_feature_importances():
    """Check selection driven by feature_importances_ and coef_.

    Covers 'mean'/'median' thresholds, invariance under rescaled sample
    weights, and the 1e-5 default threshold for Lasso-type estimators.
    """
    X, y = datasets.make_classification(
        n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
        n_repeated=0, shuffle=False, random_state=0)
    est = RandomForestClassifier(n_estimators=50, random_state=0)
    for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
        transformer = SelectFromModel(estimator=est, threshold=threshold)
        transformer.fit(X, y)
        assert_true(hasattr(transformer.estimator_, 'feature_importances_'))
        X_new = transformer.transform(X)
        assert_less(X_new.shape[1], X.shape[1])
        # reproduce the selection mask by hand
        importances = transformer.estimator_.feature_importances_
        feature_mask = np.abs(importances) > func(importances)
        assert_array_almost_equal(X_new, X[:, feature_mask])
    # Check with sample weights
    sample_weight = np.ones(y.shape)
    sample_weight[y == 1] *= 100
    est = RandomForestClassifier(n_estimators=50, random_state=0)
    transformer = SelectFromModel(estimator=est)
    transformer.fit(X, y, sample_weight=sample_weight)
    importances = transformer.estimator_.feature_importances_
    # scaling all weights by a constant must not change importances
    transformer.fit(X, y, sample_weight=3 * sample_weight)
    importances_bis = transformer.estimator_.feature_importances_
    assert_almost_equal(importances, importances_bis)
    # For the Lasso and related models, the threshold defaults to 1e-5
    transformer = SelectFromModel(estimator=Lasso(alpha=0.1))
    transformer.fit(X, y)
    X_new = transformer.transform(X)
    mask = np.abs(transformer.estimator_.coef_) > 1e-5
    assert_array_equal(X_new, X[:, mask])
@skip_if_32bit
def test_feature_importances_2d_coef():
    """Check that a 2-d ``coef_`` (multi-class) is reduced to per-feature
    importances with the requested ``norm_order``.
    """
    X, y = datasets.make_classification(
        n_samples=1000, n_features=10, n_informative=3, n_redundant=0,
        n_repeated=0, shuffle=False, random_state=0, n_classes=4)
    est = LogisticRegression()
    for threshold, func in zip(["mean", "median"], [np.mean, np.median]):
        for order in [1, 2, np.inf]:
            # Fit SelectFromModel a multi-class problem
            transformer = SelectFromModel(estimator=LogisticRegression(),
                                          threshold=threshold,
                                          norm_order=order)
            transformer.fit(X, y)
            assert_true(hasattr(transformer.estimator_, 'coef_'))
            X_new = transformer.transform(X)
            assert_less(X_new.shape[1], X.shape[1])
            # Manually check that the norm is correctly performed
            est.fit(X, y)
            importances = norm(est.coef_, axis=0, ord=order)
            feature_mask = importances > func(importances)
            assert_array_equal(X_new, X[:, feature_mask])
def test_partial_fit():
    """Check that partial_fit updates the same inner estimator in place,
    while fit replaces it.
    """
    est = PassiveAggressiveClassifier(random_state=0, shuffle=False)
    transformer = SelectFromModel(estimator=est)
    transformer.partial_fit(data, y,
                            classes=np.unique(y))
    old_model = transformer.estimator_
    transformer.partial_fit(data, y,
                            classes=np.unique(y))
    new_model = transformer.estimator_
    # a second partial_fit must reuse the fitted estimator, not rebuild it
    assert_true(old_model is new_model)
    X_transform = transformer.transform(data)
    # doubling the data must not change the selected features here
    transformer.fit(np.vstack((data, data)), np.concatenate((y, y)))
    assert_array_equal(X_transform, transformer.transform(data))
def test_calling_fit_reinitializes():
    """Refitting after set_params must rebuild the inner estimator."""
    svc = LinearSVC(random_state=0)
    selector = SelectFromModel(estimator=svc)
    selector.fit(data, y)
    # change a nested parameter, then refit
    selector.set_params(estimator__C=100)
    selector.fit(data, y)
    # the refitted inner estimator must carry the new parameter value
    assert_equal(selector.estimator_.C, 100)
def test_prefit():
    """
    Test all possible combinations of the prefit parameter.
    """
    # Passing a prefit parameter with the selected model
    # and fitting a unfit model with prefit=False should give same results.
    clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
    model = SelectFromModel(clf)
    model.fit(data, y)
    X_transform = model.transform(data)
    # fit the estimator directly, then wrap it already-fitted
    clf.fit(data, y)
    model = SelectFromModel(clf, prefit=True)
    assert_array_equal(model.transform(data), X_transform)
    # Check that the model is rewritten if prefit=False and a fitted model is
    # passed
    model = SelectFromModel(clf, prefit=False)
    model.fit(data, y)
    assert_array_equal(model.transform(data), X_transform)
    # Check that prefit=True and calling fit raises a ValueError
    model = SelectFromModel(clf, prefit=True)
    assert_raises(ValueError, model.fit, data, y)
def test_threshold_string():
    """A "0.5*mean" threshold string must reproduce the hand-computed cutoff."""
    forest = RandomForestClassifier(n_estimators=50, random_state=0)
    selector = SelectFromModel(forest, threshold="0.5*mean")
    selector.fit(data, y)
    X_transform = selector.transform(data)
    # Recompute the cutoff directly from a fit of the bare estimator.
    forest.fit(data, y)
    cutoff = 0.5 * np.mean(forest.feature_importances_)
    keep = forest.feature_importances_ > cutoff
    assert_array_equal(X_transform, data[:, keep])
def test_threshold_without_refitting():
    """Test that the threshold can be set without refitting the model."""
    clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=0)
    model = SelectFromModel(clf, threshold=0.1)
    model.fit(data, y)
    X_transform = model.transform(data)
    # Set a higher threshold to filter out more features.
    model.threshold = 1.0
    # the raised threshold takes effect at transform time without a refit
    assert_greater(X_transform.shape[1], model.transform(data).shape[1])
| bsd-3-clause |
jaredweiss/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/pyplot.py | 69 | 77521 | import sys
import matplotlib
from matplotlib import _pylab_helpers, interactive
from matplotlib.cbook import dedent, silent_list, is_string_like, is_numlike
from matplotlib.figure import Figure, figaspect
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.image import imread as _imread
from matplotlib import rcParams, rcParamsDefault, get_backend
from matplotlib.rcsetup import interactive_bk as _interactive_bk
from matplotlib.artist import getp, get, Artist
from matplotlib.artist import setp as _setp
from matplotlib.axes import Axes
from matplotlib.projections import PolarAxes
from matplotlib import mlab # for csv2rec in plotfile
from matplotlib.scale import get_scale_docs, get_scale_names
from matplotlib import cm
from matplotlib.cm import get_cmap
# We may not need the following imports here:
from matplotlib.colors import Normalize, normalize # latter for backwards compat.
from matplotlib.lines import Line2D
from matplotlib.text import Text, Annotation
from matplotlib.patches import Polygon, Rectangle, Circle, Arrow
from matplotlib.widgets import SubplotTool, Button, Slider, Widget
from ticker import TickHelper, Formatter, FixedFormatter, NullFormatter,\
FuncFormatter, FormatStrFormatter, ScalarFormatter,\
LogFormatter, LogFormatterExponent, LogFormatterMathtext,\
Locator, IndexLocator, FixedLocator, NullLocator,\
LinearLocator, LogLocator, AutoLocator, MultipleLocator,\
MaxNLocator
## Backend detection ##
def _backend_selection():
    """ If rcParams['backend_fallback'] is true, check to see if the
    current backend is compatible with the current running event
    loop, and if not switches to a compatible one.
    """
    backend = rcParams['backend']
    if not rcParams['backend_fallback'] or \
            backend not in _interactive_bk:
        # fallback disabled, or a non-interactive backend: nothing to do
        return
    is_agg_backend = rcParams['backend'].endswith('Agg')
    # each branch below: if that GUI toolkit's mainloop is already running,
    # switch to the matching interactive backend
    if 'wx' in sys.modules and not backend in ('WX', 'WXAgg'):
        import wx
        if wx.App.IsMainLoopRunning():
            rcParams['backend'] = 'wx' + 'Agg' * is_agg_backend
    elif 'qt' in sys.modules and not backend == 'QtAgg':
        import qt
        if not qt.qApp.startingUp():
            # The mainloop is running.
            rcParams['backend'] = 'qtAgg'
    elif 'PyQt4.QtCore' in sys.modules and not backend == 'Qt4Agg':
        import PyQt4.QtGui
        if not PyQt4.QtGui.qApp.startingUp():
            # The mainloop is running.
            rcParams['backend'] = 'qt4Agg'
    elif 'gtk' in sys.modules and not backend in ('GTK', 'GTKAgg',
                                                  'GTKCairo'):
        import gobject
        if gobject.MainLoop().is_running():
            rcParams['backend'] = 'gtk' + 'Agg' * is_agg_backend
    elif 'Tkinter' in sys.modules and not backend == 'TkAgg':
        #import Tkinter
        pass #what if anything do we need to do for tkinter?
# pick an event-loop-compatible backend before any figure is created
_backend_selection()

## Global ##

from matplotlib.backends import pylab_setup
# backend-specific entry points used throughout this module
new_figure_manager, draw_if_interactive, show = pylab_setup()
def findobj(o=None, match=None):
    # search the given artist, defaulting to the current figure
    target = gcf() if o is None else o
    return target.findobj(match)
findobj.__doc__ = Artist.findobj.__doc__
def switch_backend(newbackend):
    """
    Switch the default backend to newbackend. This feature is
    **experimental**, and is only expected to work switching to an
    image backend. Eg, if you have a bunch of PostScript scripts that
    you want to run from an interactive ipython session, you may want
    to switch to the PS backend before running them to avoid having a
    bunch of GUI windows popup. If you try to interactively switch
    from one GUI backend to another, you will explode.

    Calling this command will close all open windows.
    """
    close('all')
    global new_figure_manager, draw_if_interactive, show
    matplotlib.use(newbackend, warn=False)
    # re-import the backends package so pylab_setup picks up the new backend
    reload(matplotlib.backends)
    from matplotlib.backends import pylab_setup
    # rebind the module-level backend entry points
    new_figure_manager, draw_if_interactive, show = pylab_setup()
def isinteractive():
    """Return the interactive status."""
    return matplotlib.is_interactive()
def ioff():
    """Turn interactive mode off."""
    matplotlib.interactive(False)
def ion():
    """Turn interactive mode on."""
    matplotlib.interactive(True)
def rc(*args, **kwargs):
    # thin wrapper around matplotlib.rc
    matplotlib.rc(*args, **kwargs)
# copy the docstring when available (py2exe -OO strips docstrings)
if matplotlib.rc.__doc__ is not None:
    rc.__doc__ = dedent(matplotlib.rc.__doc__)
def rcdefaults():
    # restore rc defaults, then refresh the display in interactive mode
    matplotlib.rcdefaults()
    draw_if_interactive()
# copy the docstring when available (py2exe -OO strips docstrings)
if matplotlib.rcdefaults.__doc__ is not None:
    rcdefaults.__doc__ = dedent(matplotlib.rcdefaults.__doc__)
# The current "image" (ScalarMappable) is tracked here on a
# per-pylab-session basis:
def gci():
    """
    Get the current :class:`~matplotlib.cm.ScalarMappable` instance
    (image or patch collection), or *None* if no images or patch
    collections have been defined. The commands
    :func:`~matplotlib.pyplot.imshow` and
    :func:`~matplotlib.pyplot.figimage` create
    :class:`~matplotlib.image.Image` instances, and the commands
    :func:`~matplotlib.pyplot.pcolor` and
    :func:`~matplotlib.pyplot.scatter` create
    :class:`~matplotlib.collections.Collection` instances.
    """
    return gci._current
# the current image is stored as an attribute on the function itself
gci._current = None
def sci(im):
    """
    Set the current image (target of colormap commands like
    :func:`~matplotlib.pyplot.jet`, :func:`~matplotlib.pyplot.hot` or
    :func:`~matplotlib.pyplot.clim`).
    """
    # the current image is tracked as an attribute of gci (see gci._current)
    gci._current = im
## Any Artist ##
# (getp is simply imported)
def setp(*args, **kwargs):
    # delegate to Artist setp, then refresh the display in interactive mode
    result = _setp(*args, **kwargs)
    draw_if_interactive()
    return result
# copy the docstring when available (py2exe -OO strips docstrings)
if _setp.__doc__ is not None:
    setp.__doc__ = _setp.__doc__
## Figures ##
def figure(num=None, # autoincrement if None, else integer from 1-N
           figsize = None, # defaults to rc figure.figsize
           dpi = None, # defaults to rc figure.dpi
           facecolor = None, # defaults to rc figure.facecolor
           edgecolor = None, # defaults to rc figure.edgecolor
           frameon = True,
           FigureClass = Figure,
           **kwargs
           ):
    """
    call signature::

      figure(num=None, figsize=(8, 6), dpi=80, facecolor='w', edgecolor='k')

    Create a new figure and return a :class:`matplotlib.figure.Figure`
    instance.  If *num* = *None*, the figure number will be incremented and
    a new figure will be created.  The returned figure objects have a
    *number* attribute holding this number.

    If *num* is an integer, and ``figure(num)`` already exists, make it
    active and return the handle to it.  If ``figure(num)`` does not exist
    it will be created.  Numbering starts at 1, matlab style::

      figure(1)

    If you are creating many figures, make sure you explicitly call "close"
    on the figures you are not using, because this will enable pylab
    to properly clean up the memory.

    Optional keyword arguments:

      =========   =======================================================
      Keyword     Description
      =========   =======================================================
      figsize     width x height in inches; defaults to rc figure.figsize
      dpi         resolution; defaults to rc figure.dpi
      facecolor   the background color; defaults to rc figure.facecolor
      edgecolor   the border color; defaults to rc figure.edgecolor
      =========   =======================================================

    rcParams defines the default values, which can be modified in the
    matplotlibrc file

    *FigureClass* is a :class:`~matplotlib.figure.Figure` or derived
    class that will be passed on to :meth:`new_figure_manager` in the
    backends which allows you to hook custom Figure classes into the
    pylab interface.  Additional kwargs will be passed on to your
    figure init function.
    """
    # fill unspecified appearance arguments from rcParams
    if figsize is None : figsize = rcParams['figure.figsize']
    if dpi is None : dpi = rcParams['figure.dpi']
    if facecolor is None : facecolor = rcParams['figure.facecolor']
    if edgecolor is None : edgecolor = rcParams['figure.edgecolor']
    if num is None:
        # autoincrement past the highest existing figure number
        allnums = [f.num for f in _pylab_helpers.Gcf.get_all_fig_managers()]
        if allnums:
            num = max(allnums) + 1
        else:
            num = 1
    else:
        num = int(num)  # crude validation of num argument
    figManager = _pylab_helpers.Gcf.get_fig_manager(num)
    if figManager is None:
        # no figure with this number yet: create one via the backend
        if get_backend().lower() == 'ps':  dpi = 72
        figManager = new_figure_manager(num, figsize=figsize,
                                             dpi=dpi,
                                             facecolor=facecolor,
                                             edgecolor=edgecolor,
                                             frameon=frameon,
                                             FigureClass=FigureClass,
                                             **kwargs)
        # make this figure current on button press event
        def make_active(event):
            _pylab_helpers.Gcf.set_active(figManager)
        cid = figManager.canvas.mpl_connect('button_press_event', make_active)
        figManager._cidgcf = cid
    _pylab_helpers.Gcf.set_active(figManager)
    figManager.canvas.figure.number = num
    draw_if_interactive()
    return figManager.canvas.figure
def gcf():
    """Return a handle to the current figure, creating one if needed."""
    manager = _pylab_helpers.Gcf.get_active()
    if manager is None:
        # no active figure yet -- make one with default settings
        return figure()
    return manager.canvas.figure
def get_current_fig_manager():
    """Return the figure manager of the current figure.

    If no figure is active, one is created (via :func:`gcf`) so that a
    manager always exists.
    """
    manager = _pylab_helpers.Gcf.get_active()
    if manager is None:
        gcf()  # creating a figure makes its manager the active one
        manager = _pylab_helpers.Gcf.get_active()
    return manager
# note we check for __doc__ is not None since py2exe optimize removes
# the docstrings
def connect(s, func):
    # delegate to the current canvas's event-connection machinery
    canvas = get_current_fig_manager().canvas
    return canvas.mpl_connect(s, func)
if FigureCanvasBase.mpl_connect.__doc__ is not None:
    connect.__doc__ = dedent(FigureCanvasBase.mpl_connect.__doc__)
def disconnect(cid):
    # delegate to the current canvas's event-disconnection machinery
    canvas = get_current_fig_manager().canvas
    return canvas.mpl_disconnect(cid)
if FigureCanvasBase.mpl_disconnect.__doc__ is not None:
    disconnect.__doc__ = dedent(FigureCanvasBase.mpl_disconnect.__doc__)
def close(*args):
    """
    Close a figure window

    ``close()`` by itself closes the current figure

    ``close(num)`` closes figure number *num*

    ``close(h)`` where *h* is a :class:`Figure` instance, closes that figure

    ``close('all')`` closes all the figure windows

    Raises TypeError for any other argument or for more than one argument.
    """
    def _destroy(manager):
        # Disconnect the make-active button-press callback before destroying
        # so the canvas callback registry does not keep the dead manager
        # (and its figure) alive.
        manager.canvas.mpl_disconnect(manager._cidgcf)
        _pylab_helpers.Gcf.destroy(manager.num)
    if len(args) == 0:
        figManager = _pylab_helpers.Gcf.get_active()
        if figManager is None:
            return
        _destroy(figManager)
    elif len(args) == 1:
        arg = args[0]
        if arg == 'all':
            for manager in _pylab_helpers.Gcf.get_all_fig_managers():
                _destroy(manager)
        elif isinstance(arg, int):
            # BUGFIX: previously this branch destroyed the manager without
            # disconnecting its _cidgcf callback (unlike every other branch),
            # leaking a reference to the destroyed manager.
            manager = _pylab_helpers.Gcf.get_fig_manager(arg)
            if manager is not None:
                _destroy(manager)
            else:
                _pylab_helpers.Gcf.destroy(arg)
        elif isinstance(arg, Figure):
            for manager in _pylab_helpers.Gcf.get_all_fig_managers():
                if manager.canvas.figure == arg:
                    _destroy(manager)
        else:
            raise TypeError('Unrecognized argument type %s to close' % type(arg))
    else:
        raise TypeError('close takes 0 or 1 arguments')
def clf():
    """
    Clear the current figure
    """
    fig = gcf()
    fig.clf()
    draw_if_interactive()
def draw():
    'redraw the current figure'
    canvas = get_current_fig_manager().canvas
    canvas.draw()
def savefig(*args, **kwargs):
    # delegate directly to the Figure method on the current figure
    return gcf().savefig(*args, **kwargs)
if Figure.savefig.__doc__ is not None:
    savefig.__doc__ = dedent(Figure.savefig.__doc__)
def ginput(*args, **kwargs):
    """
    Blocking call to interact with the figure.

    Waits for *n* clicks from the user and returns a list of the
    coordinates of each click.  A negative *timeout* disables the
    timeout.
    """
    return gcf().ginput(*args, **kwargs)
if Figure.ginput.__doc__ is not None:
    ginput.__doc__ = dedent(Figure.ginput.__doc__)
def waitforbuttonpress(*args, **kwargs):
    """
    Blocking call to interact with the figure.

    Waits for *n* key or mouse clicks from the user and returns a list
    containing True for each keyboard click and False for each mouse
    click.  A negative *timeout* disables the timeout.
    """
    return gcf().waitforbuttonpress(*args, **kwargs)
if Figure.waitforbuttonpress.__doc__ is not None:
    waitforbuttonpress.__doc__ = dedent(Figure.waitforbuttonpress.__doc__)
# Putting things in figures
def figtext(*args, **kwargs):
    # add text in figure (not axes) coordinates on the current figure
    text = gcf().text(*args, **kwargs)
    draw_if_interactive()
    return text
if Figure.text.__doc__ is not None:
    figtext.__doc__ = dedent(Figure.text.__doc__)
def suptitle(*args, **kwargs):
    # set a centered title on the current figure
    text = gcf().suptitle(*args, **kwargs)
    draw_if_interactive()
    return text
if Figure.suptitle.__doc__ is not None:
    suptitle.__doc__ = dedent(Figure.suptitle.__doc__)
def figimage(*args, **kwargs):
    # add a non-resampled image to the current figure and record it as
    # the "current image" so commands like clim() find it
    image = gcf().figimage(*args, **kwargs)
    draw_if_interactive()
    gci._current = image
    return image
if Figure.figimage.__doc__ is not None:
    figimage.__doc__ = dedent(Figure.figimage.__doc__) + """
Addition kwargs: hold = [True|False] overrides default hold state"""
def figlegend(handles, labels, loc, **kwargs):
    """
    Place a legend in the figure.

    *handles* is a sequence of :class:`~matplotlib.lines.Line2D` or
    :class:`~matplotlib.patches.Patch` instances, *labels* a matching
    sequence of strings, and *loc* a string or integer specifying the
    legend location.

    A :class:`matplotlib.legend.Legend` instance is returned.

    Example::

      figlegend( (line1, line2, line3),
                 ('label1', 'label2', 'label3'),
                 'upper right' )

    .. seealso::

       :func:`~matplotlib.pyplot.legend`:
         For information about the location codes
    """
    legend = gcf().legend(handles, labels, loc, **kwargs)
    draw_if_interactive()
    return legend
## Figure and Axes hybrid ##
def hold(b=None):
    """
    Set the hold state.  If *b* is None (default), toggle the
    hold state, else set the hold state to boolean value *b*::

      hold()      # toggle hold
      hold(True)  # hold is on
      hold(False) # hold is off

    When *hold* is *True*, subsequent plot commands will be added to
    the current axes.  When *hold* is *False*, the current axes and
    figure will be cleared on the next plot command.
    """
    # apply the new state to both the figure and the current axes
    fig = gcf()
    ax = fig.gca()
    fig.hold(b)
    ax.hold(b)
    # b=None toggles the hold state, so get the current hold
    # state; but should pyplot hold toggle the rc setting - me thinks
    # not
    b = ax.ishold()
    rc('axes', hold=b)
def ishold():
    """
    Return the hold status of the current axes
    """
    ax = gca()
    return ax.ishold()
def over(func, *args, **kwargs):
    """
    over calls::

      func(*args, **kwargs)

    with ``hold(True)`` and then restores the hold state.

    The hold state is now restored even if *func* raises, and the
    return value of *func* is passed through (it was previously
    discarded; callers that ignored the None result are unaffected).
    """
    h = ishold()
    hold(True)
    try:
        return func(*args, **kwargs)
    finally:
        # BUGFIX: previously an exception in func left hold stuck at True
        hold(h)
## Axes ##
def axes(*args, **kwargs):
    """
    Add an axes at position rect specified by:

    - ``axes()`` by itself creates a default full ``subplot(111)`` window axis.

    - ``axes(rect, axisbg='w')`` where *rect* = [left, bottom, width,
      height] in normalized (0, 1) units.  *axisbg* is the background
      color for the axis, default white.

    - ``axes(h)`` where *h* is an axes instance makes *h* the current
      axis.  An :class:`~matplotlib.axes.Axes` instance is returned.

    =======   ============   ================================================
    kwarg     Accepts        Description
    =======   ============   ================================================
    axisbg    color          the axes background color
    frameon   [True|False]   display the frame?
    sharex    otherax        current axes shares xaxis attribute with otherax
    sharey    otherax        current axes shares yaxis attribute with otherax
    polar     [True|False]   use a polar axes?
    =======   ============   ================================================

    Examples:

    * :file:`examples/pylab_examples/axes_demo.py` places custom axes.
    * :file:`examples/pylab_examples/shared_axis_demo.py` uses
      *sharex* and *sharey*.
    """
    nargs = len(args)
    if nargs == 0:
        return subplot(111, **kwargs)
    if nargs > 1:
        raise TypeError('Only one non keyword arg to axes allowed')
    arg = args[0]
    if isinstance(arg, Axes):
        # an existing axes: just make it current
        a = gcf().sca(arg)
    else:
        # otherwise treat the argument as a rect [left, bottom, width, height]
        a = gcf().add_axes(arg, **kwargs)
    draw_if_interactive()
    return a
def delaxes(*args):
    """
    ``delaxes(ax)``: remove *ax* from the current figure.  If *ax*
    doesn't exist, an error will be raised.

    ``delaxes()``: delete the current axes
    """
    # default to the current axes when no argument is supplied
    ax = gca() if not len(args) else args[0]
    ret = gcf().delaxes(ax)
    draw_if_interactive()
    return ret
def gca(**kwargs):
    """
    Return the current axis instance.  This can be used to control
    axis properties either using set or the
    :class:`~matplotlib.axes.Axes` methods, for example, setting the
    xaxis range::

      plot(t,s)
      set(gca(), 'xlim', [0,10])

    or::

      plot(t,s)
      a = gca()
      a.set_xlim([0,10])
    """
    return gcf().gca(**kwargs)
# More ways of creating axes:
def subplot(*args, **kwargs):
    """
    Create a subplot command, creating axes with::

      subplot(numRows, numCols, plotNum)

    where *plotNum* = 1 is the first plot number and increasing *plotNums*
    fill rows first.  max(*plotNum*) == *numRows* * *numCols*

    You can leave out the commas if *numRows* <= *numCols* <=
    *plotNum* < 10, as in::

      subplot(211)    # 2 rows, 1 column, first (upper) plot

    ``subplot(111)`` is the default axis.

    New subplots that overlap old will delete the old axes.  If you do
    not want this behavior, use
    :meth:`matplotlib.figure.Figure.add_subplot` or the
    :func:`~matplotlib.pyplot.axes` command.  Eg.::

      from pylab import *
      plot([1,2,3])    # implicitly creates subplot(111)
      subplot(211)     # overlaps, subplot(111) is killed
      plot(rand(12), rand(12))
      subplot(212, axisbg='y')  # creates 2nd subplot with yellow background

    Keyword arguments:

      *axisbg*:
        The background color of the subplot, which can be any valid
        color specifier.  See :mod:`matplotlib.colors` for more
        information.

      *polar*:
        A boolean flag indicating whether the subplot plot should be
        a polar projection.  Defaults to False.

      *projection*:
        A string giving the name of a custom projection to be used
        for the subplot.  This projection must have been previously
        registered.  See :func:`matplotlib.projections.register_projection`

    .. seealso::

        :func:`~matplotlib.pyplot.axes`:
            For additional information on :func:`axes` and
            :func:`subplot` keyword arguments.

        :file:`examples/pylab_examples/polar_scatter.py`

    **Example:**

    .. plot:: mpl_examples/pylab_examples/subplot_demo.py
    """
    fig = gcf()
    a = fig.add_subplot(*args, **kwargs)
    # a newly added subplot deletes any existing axes it fully overlaps
    doomed = [other for other in fig.axes
              if not (other == a) and a.bbox.fully_overlaps(other.bbox)]
    for ax in doomed:
        delaxes(ax)
    draw_if_interactive()
    return a
def twinx(ax=None):
    """
    Make a second axes overlay *ax* (or the current axes if *ax* is
    *None*) sharing the xaxis.  The ticks of the new axes will be
    placed on the right, and the new axes instance is returned.

    .. seealso::

       :file:`examples/api_examples/two_scales.py`
    """
    if ax is None:
        ax = gca()
    twin = ax.twinx()
    draw_if_interactive()
    return twin
def twiny(ax=None):
    """
    Make a second axes overlay *ax* (or the current axes if *ax* is
    *None*) sharing the yaxis.  The ticks of the new axes will be
    placed on the top, and the new axes instance is returned.
    """
    if ax is None:
        ax = gca()
    twin = ax.twiny()
    draw_if_interactive()
    return twin
def subplots_adjust(*args, **kwargs):
    """
    call signature::

      subplots_adjust(left=None, bottom=None, right=None, top=None,
                      wspace=None, hspace=None)

    Tune the subplot layout via the
    :class:`matplotlib.figure.SubplotParams` mechanism.  The parameter
    meanings (and suggested defaults) are::

      left   = 0.125  # the left side of the subplots of the figure
      right  = 0.9    # the right side of the subplots of the figure
      bottom = 0.1    # the bottom of the subplots of the figure
      top    = 0.9    # the top of the subplots of the figure
      wspace = 0.2    # the amount of width reserved for blank space between subplots
      hspace = 0.2    # the amount of height reserved for white space between subplots

    The actual defaults are controlled by the rc file
    """
    gcf().subplots_adjust(*args, **kwargs)
    draw_if_interactive()
def subplot_tool(targetfig=None):
    """
    Launch a subplot tool window for *targetfig* (default gcf).

    A :class:`matplotlib.widgets.SubplotTool` instance is returned.
    """
    tbar = rcParams['toolbar'] # turn off the navigation toolbar for the toolfig
    rcParams['toolbar'] = 'None'
    if targetfig is None:
        manager = get_current_fig_manager()
        targetfig = manager.canvas.figure
    else:
        # find the manager for this figure; for/else raises only if the
        # loop finishes without hitting the break
        for manager in _pylab_helpers.Gcf._activeQue:
            if manager.canvas.figure==targetfig: break
        else: raise RuntimeError('Could not find manager for targetfig')
    # the tool lives in its own small figure, created while the toolbar
    # rc setting is 'None' so it gets no navigation toolbar of its own
    toolfig = figure(figsize=(6,3))
    toolfig.subplots_adjust(top=0.9)
    ret =  SubplotTool(targetfig, toolfig)
    rcParams['toolbar'] = tbar  # restore the original toolbar setting
    _pylab_helpers.Gcf.set_active(manager)  # restore the current figure
    return ret
def box(on=None):
    """
    Turn the axes box on or off according to *on*.

    If *on* is *None*, toggle state.
    """
    ax = gca()
    # None means "flip the current frame state"
    on = (not ax.get_frame_on()) if on is None else on
    ax.set_frame_on(on)
    draw_if_interactive()
def title(s, *args, **kwargs):
    """
    Set the title of the current axis to *s*.

    Default font override is::

      override = {'fontsize': 'medium',
                  'verticalalignment': 'bottom',
                  'horizontalalignment': 'center'}

    .. seealso::

        :func:`~matplotlib.pyplot.text`:
            for information on how override and the optional args work.
    """
    text = gca().set_title(s, *args, **kwargs)
    draw_if_interactive()
    return text
## Axis ##
def axis(*v, **kwargs):
    """
    Set/Get the axis properties:

      >>> axis()

    returns the current axes limits ``[xmin, xmax, ymin, ymax]``.

      >>> axis(v)

    sets the min and max of the x and y axes, with
    ``v = [xmin, xmax, ymin, ymax]``.

      >>> axis('off')

    turns off the axis lines and labels.

      >>> axis('equal')

    changes limits of *x* or *y* axis so that equal increments of *x*
    and *y* have the same length; a circle is circular.

      >>> axis('scaled')

    achieves the same result by changing the dimensions of the plot box instead
    of the axis data limits.

      >>> axis('tight')

    changes *x* and *y* axis limits such that all data is shown.  If
    all data is already shown, it will move it to the center of the
    figure without modifying (*xmax* - *xmin*) or (*ymax* -
    *ymin*).  Note this is slightly different than in matlab.

      >>> axis('image')

    is 'scaled' with the axis limits equal to the data limits.

      >>> axis('auto')

    and

      >>> axis('normal')

    are deprecated.  They restore default behavior; axis limits are
    automatically scaled to make the data fit comfortably within the
    plot box.

    if ``len(*v)==0``, you can pass in *xmin*, *xmax*, *ymin*, *ymax*
    as kwargs selectively to alter just those limits without changing
    the others.

    The xmin, xmax, ymin, ymax tuple is returned

    .. seealso::

        :func:`xlim`, :func:`ylim`
    """
    ax = gca()
    limits = ax.axis(*v, **kwargs)
    draw_if_interactive()
    return limits
def xlabel(s, *args, **kwargs):
    """
    Set the *x* axis label of the current axis to *s*

    Default override is::

      override = {
          'fontsize'            : 'small',
          'verticalalignment'   : 'top',
          'horizontalalignment' : 'center'
          }

    .. seealso::

        :func:`~matplotlib.pyplot.text`:
            For information on how override and the optional args work
    """
    label = gca().set_xlabel(s, *args, **kwargs)
    draw_if_interactive()
    return label
def ylabel(s, *args, **kwargs):
    """
    Set the *y* axis label of the current axis to *s*.

    Defaults override is::

        override = {
           'fontsize'            : 'small',
           'verticalalignment'   : 'center',
           'horizontalalignment' : 'right',
           'rotation'='vertical' : }

    .. seealso::

        :func:`~matplotlib.pyplot.text`:
            For information on how override and the optional args
            work.
    """
    label = gca().set_ylabel(s, *args, **kwargs)
    draw_if_interactive()
    return label
def xlim(*args, **kwargs):
    """
    Set/Get the xlimits of the current axes::

      xmin, xmax = xlim()   # return the current xlim
      xlim( (xmin, xmax) )  # set the xlim to xmin, xmax
      xlim( xmin, xmax )    # set the xlim to xmin, xmax

    If you do not specify args, you can pass the xmin and xmax as
    kwargs, eg.::

      xlim(xmax=3) # adjust the max leaving min unchanged
      xlim(xmin=1) # adjust the min leaving max unchanged

    The new axis limits are returned as a length 2 tuple.
    """
    limits = gca().set_xlim(*args, **kwargs)
    draw_if_interactive()
    return limits
def ylim(*args, **kwargs):
    """
    Set/Get the ylimits of the current axes::

      ymin, ymax = ylim()   # return the current ylim
      ylim( (ymin, ymax) )  # set the ylim to ymin, ymax
      ylim( ymin, ymax )    # set the ylim to ymin, ymax

    If you do not specify args, you can pass the *ymin* and *ymax* as
    kwargs, eg.::

      ylim(ymax=3) # adjust the max leaving min unchanged
      ylim(ymin=1) # adjust the min leaving max unchanged

    The new axis limits are returned as a length 2 tuple.
    """
    limits = gca().set_ylim(*args, **kwargs)
    draw_if_interactive()
    return limits
def xscale(*args, **kwargs):
    """
    call signature::

      xscale(scale, **kwargs)

    Set the scaling for the x-axis: %(scale)s

    Different keywords may be accepted, depending on the scale:

    %(scale_docs)s
    """
    # note: the %(...)s placeholders above are filled in just below
    ret = gca().set_xscale(*args, **kwargs)
    draw_if_interactive()
    return ret
xscale.__doc__ = dedent(xscale.__doc__) % {
    'scale': ' | '.join([repr(_x) for _x in get_scale_names()]),
    'scale_docs': get_scale_docs()}
def yscale(*args, **kwargs):
    """
    call signature::

      yscale(scale, **kwargs)

    Set the scaling for the y-axis: %(scale)s

    Different keywords may be accepted, depending on the scale:

    %(scale_docs)s
    """
    # the docstring previously showed the wrong signature (xscale);
    # the %(...)s placeholders are filled in just below
    ax = gca()
    ret = ax.set_yscale(*args, **kwargs)
    draw_if_interactive()
    return ret
yscale.__doc__ = dedent(yscale.__doc__) % {
    'scale': ' | '.join([repr(_x) for _x in get_scale_names()]),
    'scale_docs': get_scale_docs()}
def xticks(*args, **kwargs):
    """
    Set/Get the xlimits of the current ticklocs and labels::

      # return locs, labels where locs is an array of tick locations and
      # labels is an array of tick labels.
      locs, labels = xticks()

      # set the locations of the xticks
      xticks( arange(6) )

      # set the locations and labels of the xticks
      xticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )

    The keyword args, if any, are :class:`~matplotlib.text.Text`
    properties.
    """
    ax = gca()
    nargs = len(args)
    if nargs == 0:
        locs = ax.get_xticks()
        labels = ax.get_xticklabels()
    elif nargs == 1:
        locs = ax.set_xticks(args[0])
        labels = ax.get_xticklabels()
    elif nargs == 2:
        locs = ax.set_xticks(args[0])
        labels = ax.set_xticklabels(args[1], **kwargs)
    else:
        raise TypeError('Illegal number of arguments to xticks')
    if kwargs:
        # apply any Text properties to every tick label
        for label in labels:
            label.update(kwargs)
    draw_if_interactive()
    return locs, silent_list('Text xticklabel', labels)
def yticks(*args, **kwargs):
    """
    Set/Get the ylimits of the current ticklocs and labels::

      # return locs, labels where locs is an array of tick locations and
      # labels is an array of tick labels.
      locs, labels = yticks()

      # set the locations of the yticks
      yticks( arange(6) )

      # set the locations and labels of the yticks
      yticks( arange(5), ('Tom', 'Dick', 'Harry', 'Sally', 'Sue') )

    The keyword args, if any, are :class:`~matplotlib.text.Text`
    properties.
    """
    ax = gca()
    nargs = len(args)
    if nargs == 0:
        locs = ax.get_yticks()
        labels = ax.get_yticklabels()
    elif nargs == 1:
        locs = ax.set_yticks(args[0])
        labels = ax.get_yticklabels()
    elif nargs == 2:
        locs = ax.set_yticks(args[0])
        labels = ax.set_yticklabels(args[1], **kwargs)
    else:
        raise TypeError('Illegal number of arguments to yticks')
    if kwargs:
        # apply any Text properties to every tick label
        for label in labels:
            label.update(kwargs)
    draw_if_interactive()
    return locs, silent_list('Text yticklabel', labels)
def rgrids(*args, **kwargs):
    """
    Set/Get the radial locations of the gridlines and ticklabels on a
    polar plot.

    call signatures::

      lines, labels = rgrids()
      lines, labels = rgrids(radii, labels=None, angle=22.5, **kwargs)

    When called with no arguments, :func:`rgrid` simply returns the
    tuple (*lines*, *labels*), where *lines* is an array of radial
    gridlines (:class:`~matplotlib.lines.Line2D` instances) and
    *labels* is an array of tick labels
    (:class:`~matplotlib.text.Text` instances).  When called with
    arguments, the labels will appear at the specified radial
    distances and angles.

    *labels*, if not *None*, is a len(*radii*) list of strings of the
    labels to use at each angle.

    If *labels* is None, the rformatter will be used

    Examples::

      # set the locations of the radial gridlines and labels
      lines, labels = rgrids( (0.25, 0.5, 1.0) )

      # set the locations and labels of the radial gridlines and labels
      lines, labels = rgrids( (0.25, 0.5, 1.0), ('Tom', 'Dick', 'Harry') )
    """
    ax = gca()
    if not isinstance(ax, PolarAxes):
        raise RuntimeError('rgrids only defined for polar axes')
    if len(args):
        lines, labels = ax.set_rgrids(*args, **kwargs)
    else:
        lines = ax.yaxis.get_ticklines()
        labels = ax.yaxis.get_ticklabels()
    draw_if_interactive()
    return (silent_list('Line2D rgridline', lines),
            silent_list('Text rgridlabel', labels))
def thetagrids(*args, **kwargs):
    """
    Set/Get the theta locations of the gridlines and ticklabels.

    If no arguments are passed, return a tuple (*lines*, *labels*)
    where *lines* is an array of radial gridlines
    (:class:`~matplotlib.lines.Line2D` instances) and *labels* is an
    array of tick labels (:class:`~matplotlib.text.Text` instances)::

      lines, labels = thetagrids()

    Otherwise the syntax is::

      lines, labels = thetagrids(angles, labels=None, fmt='%d', frac = 1.1)

    set the angles at which to place the theta grids (these gridlines
    are equal along the theta dimension).

    *angles* is in degrees.

    *labels*, if not *None*, is a len(angles) list of strings of the
    labels to use at each angle.

    If *labels* is *None*, the labels will be ``fmt%angle``.

    *frac* is the fraction of the polar axes radius at which to place
    the label (1 is the edge).  Eg. 1.05 is outside the axes and 0.95
    is inside the axes.

    Return value is a list of tuples (*lines*, *labels*):

      - *lines* are :class:`~matplotlib.lines.Line2D` instances

      - *labels* are :class:`~matplotlib.text.Text` instances.

    Note that on input, the *labels* argument is a list of strings,
    and on output it is a list of :class:`~matplotlib.text.Text`
    instances.

    Examples::

      # set the locations of the radial gridlines and labels
      lines, labels = thetagrids( range(45,360,90) )

      # set the locations and labels of the radial gridlines and labels
      lines, labels = thetagrids( range(45,360,90), ('NE', 'NW', 'SW','SE') )
    """
    ax = gca()
    if not isinstance(ax, PolarAxes):
        # BUGFIX: the message previously said 'rgrids' (copy/paste error)
        raise RuntimeError('thetagrids only defined for polar axes')
    if len(args)==0:
        lines = ax.xaxis.get_ticklines()
        labels = ax.xaxis.get_ticklabels()
    else:
        lines, labels = ax.set_thetagrids(*args, **kwargs)
    draw_if_interactive()
    return (silent_list('Line2D thetagridline', lines),
            silent_list('Text thetagridlabel', labels)
            )
## Plotting Info ##
def plotting():
    """
    This is a do-nothing function whose docstring provides a summary
    table of the pylab plotting commands.

    =============== =========================================================
    Command         Description
    =============== =========================================================
    axes            Create a new axes
    axis            Set or return the current axis limits
    bar             make a bar chart
    boxplot         make a box and whiskers chart
    cla             clear current axes
    clabel          label a contour plot
    clf             clear a figure window
    close           close a figure window
    colorbar        add a colorbar to the current figure
    cohere          make a plot of coherence
    contour         make a contour plot
    contourf        make a filled contour plot
    csd             make a plot of cross spectral density
    draw            force a redraw of the current figure
    errorbar        make an errorbar graph
    figlegend       add a legend to the figure
    figimage        add an image to the figure, w/o resampling
    figtext         add text in figure coords
    figure          create or change active figure
    fill            make filled polygons
    fill_between    make filled polygons
    gca             return the current axes
    gcf             return the current figure
    gci             get the current image, or None
    getp            get a handle graphics property
    hist            make a histogram
    hold            set the hold state on current axes
    legend          add a legend to the axes
    loglog          a log log plot
    imread          load image file into array
    imshow          plot image data
    matshow         display a matrix in a new figure preserving aspect
    pcolor          make a pseudocolor plot
    plot            make a line plot
    plotfile        plot data from a flat file
    psd             make a plot of power spectral density
    quiver          make a direction field (arrows) plot
    rc              control the default params
    savefig         save the current figure
    scatter         make a scatter plot
    setp            set a handle graphics property
    semilogx        log x axis
    semilogy        log y axis
    show            show the figures
    specgram        a spectrogram plot
    stem            make a stem plot
    subplot         make a subplot (numrows, numcols, axesnum)
    table           add a table to the axes
    text            add some text at location x,y to the current axes
    title           add a title to the current axes
    xlabel          add an xlabel to the current axes
    ylabel          add a ylabel to the current axes
    =============== =========================================================

    The following commands will set the default colormap accordingly:

    * autumn
    * bone
    * cool
    * copper
    * flag
    * gray
    * hot
    * hsv
    * jet
    * pink
    * prism
    * spring
    * summer
    * winter
    * spectral
    """
    pass
def get_plot_commands():
    """Return a tuple of the names of the pylab plotting commands."""
    return ('axes', 'axis', 'bar', 'boxplot', 'cla', 'clf',
            'close', 'colorbar', 'cohere', 'csd', 'draw', 'errorbar',
            'figlegend', 'figtext', 'figimage', 'figure', 'fill', 'gca',
            'gcf', 'gci', 'get', 'gray', 'barh', 'jet', 'hist', 'hold',
            'imread', 'imshow', 'legend', 'loglog', 'quiver', 'rc',
            'pcolor', 'pcolormesh', 'plot', 'psd', 'savefig', 'scatter',
            'set', 'semilogx', 'semilogy', 'show', 'specgram', 'stem',
            'subplot', 'table', 'text', 'title', 'xlabel', 'ylabel',
            'pie', 'polar')
def colors():
    """
    This is a do nothing function to provide you with help on how
    matplotlib handles colors.

    Commands which take color arguments can use several formats to
    specify the colors.  For the basic builtin colors, you can use a
    single letter

    =====   =======
    Alias   Color
    =====   =======
    'b'     blue
    'g'     green
    'r'     red
    'c'     cyan
    'm'     magenta
    'y'     yellow
    'k'     black
    'w'     white
    =====   =======

    For a greater range of colors, you have two options.  You can
    specify the color using an html hex string, as in::

      color = '#eeefff'

    or you can pass an R,G,B tuple, where each of R,G,B are in the
    range [0,1].

    You can also use any legal html name for a color, for example::

      color = 'red',
      color = 'burlywood'
      color = 'chartreuse'

    The example below creates a subplot with a dark
    slate gray background::

      subplot(111, axisbg=(0.1843, 0.3098, 0.3098))

    Here is an example that creates a pale turquoise title::

      title('Is this the best color?', color='#afeeee')
    """
    pass
def colormaps():
    """
    This is a do-nothing function listing the colormaps matplotlib
    provides:

    * autumn
    * bone
    * cool
    * copper
    * flag
    * gray
    * hot
    * hsv
    * jet
    * pink
    * prism
    * spring
    * summer
    * winter
    * spectral

    You can set the colormap for an image, pcolor, scatter, etc,
    either as a keyword argument::

      imshow(X, cmap=cm.hot)

    or post-hoc using the corresponding pylab interface function::

      imshow(X)
      hot()
      jet()

    In interactive mode, this will update the colormap allowing you to
    see which one works best for your data.
    """
    pass
## Plotting part 1: manually generated functions and wrappers ##
from matplotlib.colorbar import colorbar_doc
def colorbar(mappable=None, cax=None, ax=None, **kw):
    # default to the current image and the current axes
    if mappable is None:
        mappable = gci()
    if ax is None:
        ax = gca()
    bar = gcf().colorbar(mappable, cax=cax, ax=ax, **kw)
    draw_if_interactive()
    return bar
colorbar.__doc__ = colorbar_doc
def clim(vmin=None, vmax=None):
    """
    Set the color limits of the current image.

    To apply clim to all axes images do::

      clim(0, 0.5)

    If either *vmin* or *vmax* is None, the image min/max respectively
    will be used for color scaling.

    If you want to set the clim of multiple images,
    use, for example::

      for im in gca().get_images():
          im.set_clim(0, 0.05)
    """
    image = gci()
    if image is None:
        raise RuntimeError('You must first define an image, eg with imshow')
    image.set_clim(vmin, vmax)
    draw_if_interactive()
def imread(*args, **kwargs):
    # thin pass-through to the module-level _imread; its docstring is
    # copied onto this wrapper just below
    return _imread(*args, **kwargs)
if _imread.__doc__ is not None:
    imread.__doc__ = dedent(_imread.__doc__)
def matshow(A, fignum=None, **kw):
    """
    Display an array as a matrix in a new figure window.

    The origin is set at the upper left hand corner and rows (first
    dimension of the array) are displayed horizontally.  The aspect
    ratio of the figure window is that of the array, unless this would
    make an excessively short or narrow figure.

    Tick labels for the xaxis are placed on top.

    With the exception of fignum, keyword arguments are passed to
    :func:`~matplotlib.pyplot.imshow`.

    *fignum*: [ None | integer | False ]
      By default, :func:`matshow` creates a new figure window with
      automatic numbering.  If *fignum* is given as an integer, the
      created figure will use this figure number.  Because of how
      :func:`matshow` tries to set the figure aspect ratio to be the
      one of the array, if you provide the number of an already
      existing figure, strange things may happen.

      If *fignum* is *False* or 0, a new figure window will **NOT** be created.
    """
    # BUGFIX: 'fignum is 0' relied on CPython's small-int interning;
    # compare by value instead (False is checked separately by identity).
    if fignum is False or fignum == 0:
        ax = gca()
    else:
        # Extract actual aspect ratio of array and make appropriately
        # sized figure
        fig = figure(fignum, figsize=figaspect(A))
        ax = fig.add_axes([0.15, 0.09, 0.775, 0.775])
    im = ax.matshow(A, **kw)
    gci._current = im  # make this the current image for clim() etc.
    draw_if_interactive()
    return im
def polar(*args, **kwargs):
    """
    call signature::

      polar(theta, r, **kwargs)

    Make a polar plot.  Multiple *theta*, *r* arguments are supported,
    with format strings, as in :func:`~matplotlib.pyplot.plot`.
    """
    # gca(polar=True) returns (creating if necessary) a polar axes
    polar_ax = gca(polar=True)
    lines = polar_ax.plot(*args, **kwargs)
    draw_if_interactive()
    return lines
def plotfile(fname, cols=(0,), plotfuncs=None,
             comments='#', skiprows=0, checkrows=5, delimiter=',',
             **kwargs):
    """
    Plot the data in *fname*

    *cols* is a sequence of column identifiers to plot.  An identifier
    is either an int or a string.  If it is an int, it indicates the
    column number.  If it is a string, it indicates the column header.
    matplotlib will make column headers lower case, replace spaces with
    underscores, and remove all illegal characters; so ``'Adj Close*'``
    will have name ``'adj_close'``.

    - If len(*cols*) == 1, only that column will be plotted on the *y* axis.

    - If len(*cols*) > 1, the first element will be an identifier for
      data for the *x* axis and the remaining elements will be the
      column indexes for multiple subplots

    *plotfuncs*, if not *None*, is a dictionary mapping identifier to
    an :class:`~matplotlib.axes.Axes` plotting function as a string.
    Default is 'plot', other choices are 'semilogy', 'fill', 'bar',
    etc.  You must use the same type of identifier in the *cols*
    vector as you use in the *plotfuncs* dictionary, eg., integer
    column numbers in both or column names in both.

    *comments*, *skiprows*, *checkrows*, and *delimiter* are all passed on to
    :func:`matplotlib.pylab.csv2rec` to load the data into a record array.

    kwargs are passed on to plotting functions.

    Example usage::

      # plot the 2nd and 4th column against the 1st in two subplots
      plotfile(fname, (0,1,3))

      # plot using column names; specify an alternate plot type for volume
      plotfile(fname, ('date', 'volume', 'adj_close'), plotfuncs={'volume': 'semilogy'})
    """
    fig = figure()
    if len(cols)<1:
        raise ValueError('must have at least one column of data')
    if plotfuncs is None:
        plotfuncs = dict()
    # load the file into a record array; column names come from the header
    r = mlab.csv2rec(fname, comments=comments,
                skiprows=skiprows, checkrows=checkrows, delimiter=delimiter)
    def getname_val(identifier):
        'return the name and column data for identifier'
        if is_string_like(identifier):
            return identifier, r[identifier]
        elif is_numlike(identifier):
            name = r.dtype.names[int(identifier)]
            return name, r[name]
        else:
            raise TypeError('identifier must be a string or integer')
    xname, x = getname_val(cols[0])
    if len(cols)==1:
        # single identifier: plot that column alone against its index
        ax1 = fig.add_subplot(1,1,1)
        funcname = plotfuncs.get(cols[0], 'plot')
        func = getattr(ax1, funcname)
        func(x, **kwargs)
        ax1.set_xlabel(xname)
    else:
        # the first column supplies x; each remaining column gets its own
        # stacked subplot sharing the x axis of the first
        N = len(cols)
        for i in range(1,N):
            if i==1:
                ax = ax1 = fig.add_subplot(N-1,1,i)
                ax.grid(True)
            else:
                ax = fig.add_subplot(N-1,1,i, sharex=ax1)
                ax.grid(True)
            yname, y = getname_val(cols[i])
            funcname = plotfuncs.get(cols[i], 'plot')
            func = getattr(ax, funcname)
            func(x, y, **kwargs)
            ax.set_ylabel(yname)
            # only the bottom subplot carries the shared x label
            if ax.is_last_row():
                ax.set_xlabel(xname)
            else:
                ax.set_xlabel('')
    if xname=='date':
        # rotate/format date tick labels so they don't overlap
        fig.autofmt_xdate()
    draw_if_interactive()
## Plotting part 2: autogenerated wrappers for axes methods ##
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def acorr(*args, **kwargs):
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().acorr(*args, **kwargs)
        draw_if_interactive()
    except:
        # restore the previous hold state before propagating the error
        hold(b)
        raise
    hold(b)
    return ret
if Axes.acorr.__doc__ is not None:
    acorr.__doc__ = dedent(Axes.acorr.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def arrow(*args, **kwargs):
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().arrow(*args, **kwargs)
        draw_if_interactive()
    except:
        # restore the previous hold state before propagating the error
        hold(b)
        raise
    hold(b)
    return ret
if Axes.arrow.__doc__ is not None:
    arrow.__doc__ = dedent(Axes.arrow.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axhline(*args, **kwargs):
    # allow callers to override the hold state by passing hold=True|False
    b = ishold()
    h = kwargs.pop('hold', None)
    if h is not None:
        hold(h)
    try:
        ret = gca().axhline(*args, **kwargs)
        draw_if_interactive()
    except:
        # restore the previous hold state before propagating the error
        hold(b)
        raise
    hold(b)
    return ret
if Axes.axhline.__doc__ is not None:
    axhline.__doc__ = dedent(Axes.axhline.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axhspan(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axhspan(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axhspan.__doc__ is not None:
axhspan.__doc__ = dedent(Axes.axhspan.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axvline(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axvline(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axvline.__doc__ is not None:
axvline.__doc__ = dedent(Axes.axvline.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def axvspan(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().axvspan(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.axvspan.__doc__ is not None:
axvspan.__doc__ = dedent(Axes.axvspan.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bar(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().bar(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.bar.__doc__ is not None:
bar.__doc__ = dedent(Axes.bar.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def barh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().barh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.barh.__doc__ is not None:
barh.__doc__ = dedent(Axes.barh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def broken_barh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().broken_barh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.broken_barh.__doc__ is not None:
broken_barh.__doc__ = dedent(Axes.broken_barh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def boxplot(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().boxplot(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.boxplot.__doc__ is not None:
boxplot.__doc__ = dedent(Axes.boxplot.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cohere(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().cohere(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.cohere.__doc__ is not None:
cohere.__doc__ = dedent(Axes.cohere.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def clabel(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().clabel(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.clabel.__doc__ is not None:
clabel.__doc__ = dedent(Axes.clabel.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def contour(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().contour(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
if ret._A is not None: gci._current = ret
hold(b)
return ret
if Axes.contour.__doc__ is not None:
contour.__doc__ = dedent(Axes.contour.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def contourf(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().contourf(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
if ret._A is not None: gci._current = ret
hold(b)
return ret
if Axes.contourf.__doc__ is not None:
contourf.__doc__ = dedent(Axes.contourf.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def csd(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().csd(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.csd.__doc__ is not None:
csd.__doc__ = dedent(Axes.csd.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def errorbar(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().errorbar(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.errorbar.__doc__ is not None:
errorbar.__doc__ = dedent(Axes.errorbar.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def fill(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().fill(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.fill.__doc__ is not None:
fill.__doc__ = dedent(Axes.fill.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def fill_between(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().fill_between(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.fill_between.__doc__ is not None:
fill_between.__doc__ = dedent(Axes.fill_between.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hexbin(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hexbin(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.hexbin.__doc__ is not None:
hexbin.__doc__ = dedent(Axes.hexbin.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hist(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hist(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.hist.__doc__ is not None:
hist.__doc__ = dedent(Axes.hist.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hlines(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().hlines(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.hlines.__doc__ is not None:
hlines.__doc__ = dedent(Axes.hlines.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def imshow(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().imshow(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.imshow.__doc__ is not None:
imshow.__doc__ = dedent(Axes.imshow.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def loglog(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().loglog(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.loglog.__doc__ is not None:
loglog.__doc__ = dedent(Axes.loglog.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pcolor(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pcolor(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.pcolor.__doc__ is not None:
pcolor.__doc__ = dedent(Axes.pcolor.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pcolormesh(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pcolormesh(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.pcolormesh.__doc__ is not None:
pcolormesh.__doc__ = dedent(Axes.pcolormesh.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pie(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().pie(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.pie.__doc__ is not None:
pie.__doc__ = dedent(Axes.pie.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def plot(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().plot(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.plot.__doc__ is not None:
plot.__doc__ = dedent(Axes.plot.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def plot_date(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().plot_date(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.plot_date.__doc__ is not None:
plot_date.__doc__ = dedent(Axes.plot_date.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def psd(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().psd(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.psd.__doc__ is not None:
psd.__doc__ = dedent(Axes.psd.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def quiver(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().quiver(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.quiver.__doc__ is not None:
quiver.__doc__ = dedent(Axes.quiver.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def quiverkey(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().quiverkey(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.quiverkey.__doc__ is not None:
quiverkey.__doc__ = dedent(Axes.quiverkey.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def scatter(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().scatter(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.scatter.__doc__ is not None:
scatter.__doc__ = dedent(Axes.scatter.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def semilogx(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().semilogx(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.semilogx.__doc__ is not None:
semilogx.__doc__ = dedent(Axes.semilogx.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def semilogy(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().semilogy(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.semilogy.__doc__ is not None:
semilogy.__doc__ = dedent(Axes.semilogy.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def specgram(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().specgram(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret[-1]
hold(b)
return ret
if Axes.specgram.__doc__ is not None:
specgram.__doc__ = dedent(Axes.specgram.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spy(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().spy(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
gci._current = ret
hold(b)
return ret
if Axes.spy.__doc__ is not None:
spy.__doc__ = dedent(Axes.spy.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def stem(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().stem(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.stem.__doc__ is not None:
stem.__doc__ = dedent(Axes.stem.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def step(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().step(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.step.__doc__ is not None:
step.__doc__ = dedent(Axes.step.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def vlines(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().vlines(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.vlines.__doc__ is not None:
vlines.__doc__ = dedent(Axes.vlines.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def xcorr(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().xcorr(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.xcorr.__doc__ is not None:
xcorr.__doc__ = dedent(Axes.xcorr.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def barbs(*args, **kwargs):
# allow callers to override the hold state by passing hold=True|False
b = ishold()
h = kwargs.pop('hold', None)
if h is not None:
hold(h)
try:
ret = gca().barbs(*args, **kwargs)
draw_if_interactive()
except:
hold(b)
raise
hold(b)
return ret
if Axes.barbs.__doc__ is not None:
barbs.__doc__ = dedent(Axes.barbs.__doc__) + """
Additional kwargs: hold = [True|False] overrides default hold state"""
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cla(*args, **kwargs):
ret = gca().cla(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.cla.__doc__ is not None:
cla.__doc__ = dedent(Axes.cla.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def grid(*args, **kwargs):
ret = gca().grid(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.grid.__doc__ is not None:
grid.__doc__ = dedent(Axes.grid.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def legend(*args, **kwargs):
ret = gca().legend(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.legend.__doc__ is not None:
legend.__doc__ = dedent(Axes.legend.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def table(*args, **kwargs):
ret = gca().table(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.table.__doc__ is not None:
table.__doc__ = dedent(Axes.table.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def text(*args, **kwargs):
ret = gca().text(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.text.__doc__ is not None:
text.__doc__ = dedent(Axes.text.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def annotate(*args, **kwargs):
ret = gca().annotate(*args, **kwargs)
draw_if_interactive()
return ret
if Axes.annotate.__doc__ is not None:
annotate.__doc__ = dedent(Axes.annotate.__doc__)
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def autumn():
'''
set the default colormap to autumn and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='autumn')
im = gci()
if im is not None:
im.set_cmap(cm.autumn)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def bone():
'''
set the default colormap to bone and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='bone')
im = gci()
if im is not None:
im.set_cmap(cm.bone)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def cool():
'''
set the default colormap to cool and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='cool')
im = gci()
if im is not None:
im.set_cmap(cm.cool)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def copper():
'''
set the default colormap to copper and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='copper')
im = gci()
if im is not None:
im.set_cmap(cm.copper)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def flag():
'''
set the default colormap to flag and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='flag')
im = gci()
if im is not None:
im.set_cmap(cm.flag)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def gray():
'''
set the default colormap to gray and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='gray')
im = gci()
if im is not None:
im.set_cmap(cm.gray)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hot():
'''
set the default colormap to hot and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hot')
im = gci()
if im is not None:
im.set_cmap(cm.hot)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def hsv():
'''
set the default colormap to hsv and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='hsv')
im = gci()
if im is not None:
im.set_cmap(cm.hsv)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def jet():
'''
set the default colormap to jet and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='jet')
im = gci()
if im is not None:
im.set_cmap(cm.jet)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def pink():
'''
set the default colormap to pink and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='pink')
im = gci()
if im is not None:
im.set_cmap(cm.pink)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def prism():
'''
set the default colormap to prism and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='prism')
im = gci()
if im is not None:
im.set_cmap(cm.prism)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spring():
'''
set the default colormap to spring and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spring')
im = gci()
if im is not None:
im.set_cmap(cm.spring)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def summer():
'''
set the default colormap to summer and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='summer')
im = gci()
if im is not None:
im.set_cmap(cm.summer)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def winter():
'''
set the default colormap to winter and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='winter')
im = gci()
if im is not None:
im.set_cmap(cm.winter)
draw_if_interactive()
# This function was autogenerated by boilerplate.py. Do not edit as
# changes will be lost
def spectral():
'''
set the default colormap to spectral and apply to current image if any.
See help(colormaps) for more information
'''
rc('image', cmap='spectral')
im = gci()
if im is not None:
im.set_cmap(cm.spectral)
draw_if_interactive()
| gpl-3.0 |
pkruskal/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
    """Check shapes, sparsity, category ordering and shuffling of the RCV1
    loader.

    Skipped when the dataset has not been downloaded to data_home.
    """
    try:
        data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
    except IOError as e:
        if e.errno == errno.ENOENT:
            raise SkipTest("Download RCV1 dataset to run this test.")
        # Any other I/O failure is a real error: re-raise it instead of
        # silently falling through to a NameError on undefined `data1`
        # (the original swallowed every IOError here).
        raise
    X1, Y1 = data1.data, data1.target
    cat_list, s1 = data1.target_names.tolist(), data1.sample_id

    # test sparsity
    assert_true(sp.issparse(X1))
    assert_true(sp.issparse(Y1))
    assert_equal(60915113, X1.data.size)
    assert_equal(2606875, Y1.data.size)

    # test shapes
    assert_equal((804414, 47236), X1.shape)
    assert_equal((804414, 103), Y1.shape)
    assert_equal((804414,), s1.shape)
    assert_equal(103, len(cat_list))

    # test ordering of categories
    first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
    assert_array_equal(first_categories, cat_list[:6])

    # test number of samples for some categories
    some_categories = ('GMIL', 'E143', 'CCAT')
    number_non_zero_in_cat = (5, 1206, 381327)
    for num, cat in zip(number_non_zero_in_cat, some_categories):
        j = cat_list.index(cat)
        assert_equal(num, Y1[:, j].data.size)

    # test shuffling and subset
    data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
                       download_if_missing=False)
    X2, Y2 = data2.data, data2.target
    s2 = data2.sample_id

    # The first 23149 samples are the training samples
    assert_array_equal(np.sort(s1[:23149]), np.sort(s2))

    # test some precise values: the same sample id must carry the same
    # features and targets in the shuffled and unshuffled versions
    some_sample_ids = (2286, 3274, 14042)
    for sample_id in some_sample_ids:
        idx1 = s1.tolist().index(sample_id)
        idx2 = s2.tolist().index(sample_id)

        feature_values_1 = X1[idx1, :].toarray()
        feature_values_2 = X2[idx2, :].toarray()
        assert_almost_equal(feature_values_1, feature_values_2)

        target_values_1 = Y1[idx1, :].toarray()
        target_values_2 = Y2[idx2, :].toarray()
        assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
fredhusser/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 248 | 6359 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P.J.Rousseuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. Journal of Computational and
Graphical Statistics. December 1, 2005, 14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager

from sklearn.covariance import EmpiricalCovariance, MinCovDet

# example settings
n_samples = 80
n_features = 5
repeat = 10

# Cast the contamination levels to int: these values are used below as
# slice bounds (``[:n_outliers]``), which rejects floats on Python 3.
range_n_outliers = np.concatenate(
    (np.linspace(0, n_samples / 8, 5),
     np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1])).astype(int)

# definition of arrays to store results
# (one row per contamination level, one column per repetition)
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))

# computation: data are drawn from a standard Gaussian, so the true
# location is 0 and the true covariance is the identity matrix
for i, n_outliers in enumerate(range_n_outliers):
    for j in range(repeat):

        rng = np.random.RandomState(i * j)

        # generate data
        X = rng.randn(n_samples, n_features)
        # add some outliers; draw from the seeded generator (the original
        # code called the global np.random here, defeating the seed above)
        outliers_index = rng.permutation(n_samples)[:n_outliers]
        outliers_offset = 10. * \
            (rng.randint(2, size=(n_outliers, n_features)) - 0.5)
        X[outliers_index] += outliers_offset
        inliers_mask = np.ones(n_samples).astype(bool)
        inliers_mask[outliers_index] = False

        # fit a Minimum Covariance Determinant (MCD) robust estimator to data
        mcd = MinCovDet().fit(X)
        # compare raw robust estimates with the true location and covariance
        err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
        err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))

        # compare estimators learned from the full data set with true
        # parameters
        err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
        err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
            np.eye(n_features))

        # compare with an empirical covariance learned from a pure data set
        # (i.e. "perfect" mcd)
        pure_X = X[inliers_mask]
        pure_location = pure_X.mean(0)
        pure_emp_cov = EmpiricalCovariance().fit(pure_X)
        err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
        err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))

# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
             yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
             label="Robust location", color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
             yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
             label="Full data set mean", color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
             yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
             label="Pure data set mean", color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)

plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
             yerr=err_cov_mcd.std(1),
             label="Robust covariance (mcd)", color='m')
# Use floor division: slice bounds must be ints on Python 3 (the original
# used ``/``, which yields floats under true division).
plt.errorbar(range_n_outliers[:(x_size // 5 + 1)],
             err_cov_emp_full.mean(1)[:(x_size // 5 + 1)],
             yerr=err_cov_emp_full.std(1)[:(x_size // 5 + 1)],
             label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size // 5):(x_size // 2 - 1)],
         err_cov_emp_full.mean(1)[(x_size // 5):(x_size // 2 - 1)],
         color='green', ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
             yerr=err_cov_emp_pure.std(1),
             label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)

plt.show()
| bsd-3-clause |
allenlavoie/tensorflow | tensorflow/examples/learn/boston.py | 75 | 2549 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNRegressor for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import datasets
from sklearn import metrics
from sklearn import model_selection
from sklearn import preprocessing
import tensorflow as tf
def main(unused_argv):
  """Train and evaluate a DNNRegressor on the Boston housing dataset."""
  # Load dataset.
  housing = datasets.load_boston()
  features, labels = housing.data, housing.target

  # Hold out 20% of the examples for evaluation.
  (train_features, test_features,
   train_labels, test_labels) = model_selection.train_test_split(
       features, labels, test_size=0.2, random_state=42)

  # Scale the training data to zero mean and unit standard deviation.
  scaler = preprocessing.StandardScaler()
  train_features = scaler.fit_transform(train_features)

  # Build a 2-layer fully connected DNN with 10 units per layer.
  feature_columns = [
      tf.feature_column.numeric_column(
          'x', shape=np.array(train_features).shape[1:])]
  regressor = tf.estimator.DNNRegressor(
      feature_columns=feature_columns, hidden_units=[10, 10])

  # Train.
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={'x': train_features}, y=train_labels, batch_size=1, num_epochs=None,
      shuffle=True)
  regressor.train(input_fn=train_input_fn, steps=2000)

  # Predict on the held-out set, scaled with the training statistics.
  scaled_test_features = scaler.transform(test_features)
  test_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={'x': scaled_test_features}, y=test_labels, num_epochs=1,
      shuffle=False)
  predictions = regressor.predict(input_fn=test_input_fn)
  predicted_labels = np.array([p['predictions'] for p in predictions])
  predicted_labels = predicted_labels.reshape(np.array(test_labels).shape)

  # Score with sklearn.
  score_sklearn = metrics.mean_squared_error(predicted_labels, test_labels)
  print('MSE (sklearn): {0:f}'.format(score_sklearn))

  # Score with tensorflow.
  scores = regressor.evaluate(input_fn=test_input_fn)
  print('MSE (tensorflow): {0:f}'.format(scores['average_loss']))


if __name__ == '__main__':
  tf.app.run()
| apache-2.0 |
xwolf12/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
# Load the diabetes dataset and keep only features 0 and 1.
diabetes = datasets.load_diabetes()
indices = (0, 1)

# Use all but the last 20 samples for training; hold the rest out for testing.
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]

# Fit an ordinary least squares model on the two selected features.
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf, y_train=y_train):
    """Plot the training points and the fitted regression plane in 3D.

    Parameters
    ----------
    fig_num : int
        Matplotlib figure number.
    elev, azim : float
        Elevation and azimuth angles of the 3D view.
    X_train : ndarray of shape (n_samples, 2)
        Training data for the two selected features.
    clf : fitted estimator
        Regressor exposing a ``predict`` method.
    y_train : ndarray, optional
        Training targets.  Defaults to the module-level ``y_train``; the
        original implementation read this global implicitly, which made
        the function unusable with any other targets.
    """
    fig = plt.figure(fig_num, figsize=(4, 3))
    plt.clf()
    ax = Axes3D(fig, elev=elev, azim=azim)

    ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
    # Evaluate the fitted plane at the corners of a small grid and draw it.
    ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
                    np.array([[-.1, .15], [-.1, .15]]),
                    clf.predict(np.array([[-.1, -.1, .15, .15],
                                          [-.1, .15, -.1, .15]]).T
                                ).reshape((2, 2)),
                    alpha=.5)
    ax.set_xlabel('X_1')
    ax.set_ylabel('X_2')
    ax.set_zlabel('Y')
    ax.w_xaxis.set_ticklabels([])
    ax.w_yaxis.set_ticklabels([])
    ax.w_zaxis.set_ticklabels([])
# Generate the three different figures from different views.
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)

elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)

elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)

plt.show()
| bsd-3-clause |
mclaughlin6464/pylearn2 | pylearn2/training_algorithms/sgd.py | 1 | 48347 | """
Stochastic Gradient Descent and related functionality such as
learning rate adaptation, momentum, and Polyak averaging.
"""
from __future__ import division
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow, David Warde-Farley"]
__license__ = "3-clause BSD"
__maintainer__ = "David Warde-Farley"
__email__ = "pylearn-dev@googlegroups"
import logging
import warnings
import numpy as np
from theano.compat import six
from theano import config
from theano import function
from theano.gof.op import get_debug_values
from pylearn2.compat import OrderedDict, first_key
from pylearn2.monitor import Monitor
from pylearn2.space import CompositeSpace, NullSpace
from pylearn2.train_extensions import TrainExtension
from pylearn2.training_algorithms.training_algorithm import TrainingAlgorithm
from pylearn2.training_algorithms.learning_rule import Momentum
from pylearn2.training_algorithms.learning_rule import (
MomentumAdjustor as LRMomentumAdjustor)
from pylearn2.utils.iteration import is_stochastic, has_uniform_batch_size
from pylearn2.utils import py_integer_types, py_float_types
from pylearn2.utils import safe_zip
from pylearn2.utils import serial
from pylearn2.utils import sharedX
from pylearn2.utils import contains_nan
from pylearn2.utils import contains_inf
from pylearn2.utils import isfinite
from pylearn2.utils.data_specs import DataSpecsMapping
from pylearn2.utils.exc import reraise_as
from pylearn2.utils.timing import log_timing
from pylearn2.utils.rng import make_np_rng
log = logging.getLogger(__name__)
class SGD(TrainingAlgorithm):
"""
SGD = (Minibatch) Stochastic Gradient Descent.
A TrainingAlgorithm that does stochastic gradient descent on
minibatches of training examples.
For theoretical background on this algorithm, see Yoshua Bengio's
machine learning course notes on the subject:
http://www.iro.umontreal.ca/~pift6266/H10/notes/gradient.html
Parameters
----------
learning_rate : float
The learning rate to use. Train object callbacks can change the
learning rate after each epoch. SGD update_callbacks can change
it after each minibatch.
cost : pylearn2.costs.cost.Cost, optional
Cost object specifying the objective function to be minimized.
Optionally, may be None. In this case, SGD will call the model's
get_default_cost method to obtain the objective function.
batch_size : int, optional
The size of the batch to be used.
If not specified, the model will be asked for the batch size, so
you must have specified the batch size there.
(Some models are rigidly defined to only work with one batch size)
monitoring_batch_size : int, optional
The size of the monitoring batches.
monitoring_batches : int, optional
At the start of each epoch, we run "monitoring", to evaluate
quantities such as the validation set error.
monitoring_batches, if specified, determines the number of batches
to draw from the iterator for each monitoring dataset.
Unnecessary if not using monitoring or if `monitor_iteration_mode`
is 'sequential' and `batch_size` is specified (number of
batches will be calculated based on full dataset size).
TODO: make it possible to specify different monitoring_batches
for each monitoring dataset. The Monitor itself already supports
this.
monitoring_dataset : Dataset or dictionary, optional
If not specified, no monitoring is used.
If specified to be a Dataset, monitor on that Dataset.
If specified to be dictionary, the keys should be string names
of datasets, and the values should be Datasets. All monitoring
channels will be computed for all monitoring Datasets and will
have the dataset name and an underscore prepended to them.
monitor_iteration_mode : str, optional
The iteration mode used to iterate over the examples in all
monitoring datasets. If not specified, defaults to 'sequential'.
TODO: make it possible to specify different modes for different
datasets.
termination_criterion : instance of \
pylearn2.termination_criteria.TerminationCriterion, optional
Used to determine when the algorithm should stop running.
If not specified, runs forever--or more realistically, until
external factors halt the python process (Kansas 1977).
update_callbacks : list, optional
If specified, each member of the list should be a callable that
accepts an SGD instance as its only argument.
All callbacks will be called with this SGD instance after each
SGD step.
learning_rule : training_algorithms.learning_rule.LearningRule, optional
A learning rule computes the new parameter values given old
parameters and first-order gradients. If learning_rule is None,
sgd.SGD will update parameters according to the standard SGD
learning rule:
.. code-block:: none
param := param - learning_rate * d cost / d param
This argument allows more sophisticated learning rules, such
as SGD with momentum.
set_batch_size : bool, optional
Defaults to False.
If True, and batch_size conflicts with model.force_batch_size,
will call model.set_batch_size(batch_size) in an attempt to
change model.force_batch_size
train_iteration_mode : str, optional
Defaults to 'shuffled_sequential'.
The iteration mode to use for iterating through training examples.
batches_per_iter : int, optional
The number of batches to draw from the iterator over training
examples.
If iteration mode is 'sequential' or 'shuffled_sequential', this
is unnecessary; when unspecified we will iterate over all examples.
theano_function_mode : a valid argument to theano.function's \
'mode' parameter, optional
The theano mode to compile the updates function with. Note that
pylearn2 includes some wraplinker modes that are not bundled with
theano. See pylearn2.devtools. These extra modes let you do
things like check for NaNs at every step, or record md5 digests
of all computations performed by the update function to help
isolate problems with nondeterminism.
monitoring_costs : OrderedDict, optional
A dictionary of Cost instances. Keys should be string containing
the name of the cost. The Monitor will also include all
channels defined by these Costs, even though we don't train
using them.
    seed : valid argument to np.random.RandomState, optional
        The seed used for the random number generator to be passed to the
        training dataset iterator (if any)
"""
    def __init__(self, learning_rate, cost=None, batch_size=None,
                 monitoring_batch_size=None, monitoring_batches=None,
                 monitoring_dataset=None,
                 monitor_iteration_mode='sequential',
                 termination_criterion=None, update_callbacks=None,
                 learning_rule=None, set_batch_size=False,
                 train_iteration_mode=None, batches_per_iter=None,
                 theano_function_mode=None, monitoring_costs=None,
                 seed=[2012, 10, 5]):
        """Store configuration; see the class docstring for parameters."""
        # NOTE(review): `seed` has a mutable (list) default.  It is only
        # passed to make_np_rng and never mutated here, so this is safe in
        # practice, but a tuple default would be more idiomatic.
        if isinstance(cost, (list, tuple, set)):
            raise TypeError("SGD no longer supports using collections of " +
                            "Costs to represent a sum of Costs. Use " +
                            "pylearn2.costs.cost.SumOfCosts instead.")

        self.learning_rule = learning_rule
        # Shared variable so callbacks/extensions can adjust the learning
        # rate during training.
        self.learning_rate = sharedX(learning_rate, 'learning_rate')
        self.cost = cost
        self.batch_size = batch_size
        self.set_batch_size = set_batch_size
        self.batches_per_iter = batches_per_iter
        self._set_monitoring_dataset(monitoring_dataset)
        self.monitoring_batch_size = monitoring_batch_size
        self.monitoring_batches = monitoring_batches
        self.monitor_iteration_mode = monitor_iteration_mode
        if monitoring_dataset is None:
            # Monitoring sizes are meaningless without a monitoring dataset.
            if monitoring_batch_size is not None:
                raise ValueError("Specified a monitoring batch size " +
                                 "but not a monitoring dataset.")
            if monitoring_batches is not None:
                raise ValueError("Specified an amount of monitoring batches " +
                                 "but not a monitoring dataset.")
        self.termination_criterion = termination_criterion
        self._register_update_callbacks(update_callbacks)
        if train_iteration_mode is None:
            train_iteration_mode = 'shuffled_sequential'
        self.train_iteration_mode = train_iteration_mode
        # `first` is flipped to False on the first call to train().
        self.first = True
        self.rng = make_np_rng(seed, which_method=["randn", "randint"])
        self.theano_function_mode = theano_function_mode
        self.monitoring_costs = monitoring_costs
    def _setup_monitor(self):
        """
        Set up monitor to model the objective value, learning rate,
        momentum (if applicable), and extra channels defined by
        the cost.

        This method must be called after `learning_rule.get_updates`,
        since it may have an effect on `learning_rule.add_channels_to_monitor`
        (that is currently the case for `learning_rule.RMSProp`).
        """
        if bool(self.monitoring_dataset):
            # Default the monitoring batch geometry to the training one.
            if (self.monitoring_batch_size is None and
                    self.monitoring_batches is None):
                self.monitoring_batch_size = self.batch_size
                self.monitoring_batches = self.batches_per_iter
            self.monitor.setup(dataset=self.monitoring_dataset,
                               cost=self.cost,
                               batch_size=self.monitoring_batch_size,
                               num_batches=self.monitoring_batches,
                               extra_costs=self.monitoring_costs,
                               mode=self.monitor_iteration_mode)
            # Any monitoring dataset will do for the data-independent
            # channels below; pick the first one.
            dataset_name = first_key(self.monitoring_dataset)
            monitoring_dataset = self.monitoring_dataset[dataset_name]
            # TODO: have Monitor support non-data-dependent channels
            self.monitor.add_channel(name='learning_rate',
                                     ipt=None,
                                     val=self.learning_rate,
                                     data_specs=(NullSpace(), ''),
                                     dataset=monitoring_dataset)

            if self.learning_rule:
                self.learning_rule.add_channels_to_monitor(
                    self.monitor,
                    monitoring_dataset)
    def setup(self, model, dataset):
        """
        Compiles the theano functions needed for the train method.

        Parameters
        ----------
        model : a Model instance
            Supplies the default cost, the parameters, the learning-rate
            scalers, and the update-censoring hook.
        dataset : Dataset
            The training dataset; used here only to sanity-check batch-size
            divisibility.

        Raises
        ------
        ValueError
            If any parameter already contains NaN/Inf, if a dataset size is
            incompatible with the batch size, or if a learning-rate scaler
            refers to a non-parameter.
        TypeError
            If the cost's get_gradients does not return an OrderedDict.
        """
        if self.cost is None:
            self.cost = model.get_default_cost()

        # Refuse to start from parameters that are already non-finite.
        inf_params = [param for param in model.get_params()
                      if contains_inf(param.get_value())]
        if len(inf_params) > 0:
            raise ValueError("These params are Inf: " + str(inf_params))
        if any([contains_nan(param.get_value())
                for param in model.get_params()]):
            nan_params = [param for param in model.get_params()
                          if contains_nan(param.get_value())]
            raise ValueError("These params are NaN: " + str(nan_params))
        self.model = model

        self._synchronize_batch_size(model)
        model._test_batch_size = self.batch_size
        self.monitor = Monitor.get_monitor(model)
        self.monitor._sanity_check()

        # test if force batch size and batch size
        has_force_batch_size = getattr(model, "force_batch_size", False)
        train_dataset_is_uneven = \
            dataset.get_num_examples() % self.batch_size != 0

        has_monitoring_datasets = bool(self.monitoring_dataset)

        if has_monitoring_datasets:
            monitoring_datasets_are_uneven = \
                any(d.get_num_examples() % self.batch_size
                    != 0 for d in self.monitoring_dataset.values())
        else:
            monitoring_datasets_are_uneven = False  # or True it doesn't matter

        if has_force_batch_size and train_dataset_is_uneven and \
                not has_uniform_batch_size(self.train_iteration_mode):
            raise ValueError("Dataset size is not a multiple of batch size."
                             "You should set train_iteration_mode (and "
                             "maybe monitor_iteration_mode) to "
                             "even_sequential, even_shuffled_sequential or "
                             "even_batchwise_shuffled_sequential")

        if has_force_batch_size and has_monitoring_datasets and \
                monitoring_datasets_are_uneven and \
                not has_uniform_batch_size(self.monitor_iteration_mode):
            raise ValueError("Dataset size is not a multiple of batch size."
                             "You should set monitor_iteration_mode to "
                             "even_sequential, even_shuffled_sequential or "
                             "even_batchwise_shuffled_sequential")

        data_specs = self.cost.get_data_specs(self.model)
        mapping = DataSpecsMapping(data_specs)
        space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
        source_tuple = mapping.flatten(data_specs[1], return_tuple=True)

        # Build a flat tuple of Theano Variables, one for each space.
        # We want that so that if the same space/source is specified
        # more than once in data_specs, only one Theano Variable
        # is generated for it, and the corresponding value is passed
        # only once to the compiled Theano function.
        theano_args = []
        for space, source in safe_zip(space_tuple, source_tuple):
            name = '%s[%s]' % (self.__class__.__name__, source)
            arg = space.make_theano_batch(name=name,
                                          batch_size=self.batch_size)
            theano_args.append(arg)
        theano_args = tuple(theano_args)

        # Methods of `self.cost` need args to be passed in a format compatible
        # with data_specs
        nested_args = mapping.nest(theano_args)
        fixed_var_descr = self.cost.get_fixed_var_descr(model, nested_args)
        self.on_load_batch = fixed_var_descr.on_load_batch

        cost_value = self.cost.expr(model, nested_args,
                                    ** fixed_var_descr.fixed_vars)

        if cost_value is not None and cost_value.name is None:
            # Concatenate the name of all tensors in theano_args !?
            cost_value.name = 'objective'

        learning_rate = self.learning_rate
        params = list(model.get_params())
        assert len(params) > 0
        for i, param in enumerate(params):
            if param.name is None:
                param.name = 'sgd_params[%d]' % i

        grads, updates = self.cost.get_gradients(model, nested_args,
                                                 ** fixed_var_descr.fixed_vars)
        if not isinstance(grads, OrderedDict):
            raise TypeError(str(type(self.cost)) + ".get_gradients returned " +
                            "something with" + str(type(grads)) + "as its " +
                            "first member. Expected OrderedDict.")

        # The gradient dict and the parameter list must agree exactly.
        for param in grads:
            assert param in params
        for param in params:
            assert param in grads

        for param in grads:
            if grads[param].name is None and cost_value is not None:
                grads[param].name = ('grad(%(costname)s, %(paramname)s)' %
                                     {'costname': cost_value.name,
                                      'paramname': param.name})
            assert grads[param].dtype == param.dtype

        lr_scalers = model.get_lr_scalers()

        for key in lr_scalers:
            if key not in params:
                raise ValueError(
                    "Tried to scale the learning rate on " +
                    str(key) + " which is not an optimization parameter.")

        log.info('Parameter and initial learning rate summary:')
        for param in params:
            param_name = param.name
            if param_name is None:
                param_name = 'anon_param'
            lr = learning_rate.get_value() * lr_scalers.get(param, 1.)
            log.info('\t' + param_name + ': ' + str(lr))

        if self.learning_rule:
            updates.update(self.learning_rule.get_updates(
                learning_rate, grads, lr_scalers))
        else:
            # Use standard SGD updates with fixed learning rate.
            updates.update(dict(safe_zip(params, [param - learning_rate *
                                                  lr_scalers.get(param, 1.) *
                                                  grads[param]
                                                  for param in params])))

        for param in params:
            if updates[param].name is None:
                updates[param].name = 'sgd_update(' + param.name + ')'
        # Give the model a chance to veto/modify updates (e.g. norm
        # constraints); renamed to reflect the censoring.
        model.modify_updates(updates)
        for param in params:
            update = updates[param]
            if update.name is None:
                update.name = 'censor(sgd_update(' + param.name + '))'
            for update_val in get_debug_values(update):
                if contains_inf(update_val):
                    raise ValueError("debug value of %s contains infs" %
                                     update.name)
                if contains_nan(update_val):
                    raise ValueError("debug value of %s contains nans" %
                                     update.name)

        # Set up monitor to model the objective value, learning rate,
        # momentum (if applicable), and extra channels defined by
        # the cost.
        # We have to do that after learning_rule.get_updates has been
        # called, since it may have an effect on
        # learning_rule.add_channels_to_monitor (that is currently the case
        # for AdaDelta and RMSProp).
        self._setup_monitor()

        with log_timing(log, 'Compiling sgd_update'):
            self.sgd_update = function(theano_args,
                                       updates=updates,
                                       name='sgd_update',
                                       on_unused_input='ignore',
                                       mode=self.theano_function_mode)
        self.params = params
def train(self, dataset):
"""
Runs one epoch of SGD training on the specified dataset.
Parameters
----------
dataset : Dataset
"""
if not hasattr(self, 'sgd_update'):
raise Exception("train called without first calling setup")
# Make sure none of the parameters have bad values
for param in self.params:
value = param.get_value(borrow=True)
value
if not isfinite(value):
raise RuntimeError("NaN in " + param.name)
self.first = False
rng = self.rng
if not is_stochastic(self.train_iteration_mode):
rng = None
data_specs = self.cost.get_data_specs(self.model)
# The iterator should be built from flat data specs, so it returns
# flat, non-redundent tuples of data.
mapping = DataSpecsMapping(data_specs)
space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
source_tuple = mapping.flatten(data_specs[1], return_tuple=True)
if len(space_tuple) == 0:
# No data will be returned by the iterator, and it is impossible
# to know the size of the actual batch.
# It is not decided yet what the right thing to do should be.
raise NotImplementedError(
"Unable to train with SGD, because "
"the cost does not actually use data from the data set. "
"data_specs: %s" % str(data_specs))
flat_data_specs = (CompositeSpace(space_tuple), source_tuple)
iterator = dataset.iterator(mode=self.train_iteration_mode,
batch_size=self.batch_size,
data_specs=flat_data_specs,
return_tuple=True, rng=rng,
num_batches=self.batches_per_iter)
on_load_batch = self.on_load_batch
for batch in iterator:
for callback in on_load_batch:
callback(*batch)
self.sgd_update(*batch)
# iterator might return a smaller batch if dataset size
# isn't divisible by batch_size
# Note: if data_specs[0] is a NullSpace, there is no way to know
# how many examples would actually have been in the batch,
# since it was empty, so actual_batch_size would be reported as 0.
actual_batch_size = flat_data_specs[0].np_batch_size(batch)
self.monitor.report_batch(actual_batch_size)
for callback in self.update_callbacks:
callback(self)
# Make sure none of the parameters have bad values
for param in self.params:
value = param.get_value(borrow=True)
value
if not isfinite(value):
print value
from matplotlib import pyplot as plt
plt.imshow(value)
plt.show()
raise RuntimeError("NaN in " + param.name)
def continue_learning(self, model):
"""
Returns True if the algorithm should continue running, or False
if it has reached convergence / started overfitting and should
stop.
Parameters
----------
model : a Model instance
"""
if self.termination_criterion is None:
return True
else:
return self.termination_criterion.continue_learning(self.model)
class MonitorBasedLRAdjuster(TrainExtension):
"""
A TrainExtension that uses the on_monitor callback to adjust
the learning rate on each epoch. It pulls out a channel
from the model's monitor and adjusts the learning rate
based on what happened to the monitoring channel on the last
epoch. If the channel is greater than high_trigger times
its previous value, the learning rate will be scaled by
shrink_amt (which should be < 1 for this scheme to make
sense). The idea is that in this case the learning algorithm
is overshooting the bottom of the objective function.
If the objective is less than high_trigger but
greater than low_trigger times its previous value, the
learning rate will be scaled by grow_amt (which should be > 1
for this scheme to make sense). The idea is that the learning
algorithm is making progress but at too slow of a rate.
Parameters
----------
high_trigger : float, optional
See class-level docstring
low_trigger : float, optional
See class-level docstring
grow_amt : float, optional
See class-level docstring
min_lr : float, optional
All updates to the learning rate are clipped to be at least
this value.
max_lr : float, optional
All updates to the learning rate are clipped to be at most
this value.
dataset_name : str, optional
If specified, use dataset_name + "_objective" as the channel
to guide the learning rate adaptation.
channel_name : str, optional
If specified, use channel_name as the channel to guide the
learning rate adaptation. Conflicts with dataset_name.
If neither dataset_name nor channel_name is specified, uses
"objective"
"""
    def __init__(self, high_trigger=1., shrink_amt=.99,
                 low_trigger=.99, grow_amt=1.01,
                 min_lr=1e-7, max_lr=1.,
                 dataset_name=None, channel_name=None):
        """Store configuration; see the class docstring for parameters."""
        self.high_trigger = high_trigger
        self.shrink_amt = shrink_amt
        self.low_trigger = low_trigger
        self.grow_amt = grow_amt
        self.min_lr = min_lr
        self.max_lr = max_lr
        self.dataset_name = None
        # channel_name silently takes precedence when both are supplied,
        # even though the class docstring says they "conflict" -- no error
        # is raised here.
        if channel_name is not None:
            self.channel_name = channel_name
        else:
            if dataset_name is not None:
                self.channel_name = dataset_name + '_objective'
                self.dataset_name = dataset_name
            else:
                # Resolved lazily on the first on_monitor call.
                self.channel_name = None
def on_monitor(self, model, dataset, algorithm):
"""
Adjusts the learning rate based on the contents of model.monitor
Parameters
----------
model : a Model instance
dataset : Dataset
algorithm : WRITEME
"""
model = algorithm.model
lr = algorithm.learning_rate
current_learning_rate = lr.get_value()
assert hasattr(model, 'monitor'), ("no monitor associated with "
+ str(model))
monitor = model.monitor
monitor_channel_specified = True
if self.channel_name is None:
monitor_channel_specified = False
channels = [elem for elem in monitor.channels
if elem.endswith("objective")]
if len(channels) < 1:
raise ValueError(
"There are no monitoring channels that end "
"with \"objective\". Please specify either "
"channel_name or dataset_name.")
elif len(channels) > 1:
datasets = algorithm.monitoring_dataset.keys()
raise ValueError(
"There are multiple monitoring channels that"
"end with \"_objective\". The list of available "
"datasets are: " +
str(datasets) + " . Please specify either "
"channel_name or dataset_name in the "
"MonitorBasedLRAdjuster constructor to "
'disambiguate.')
else:
self.channel_name = channels[0]
warnings.warn('The channel that has been chosen for '
'monitoring is: ' + str(self.channel_name) + '.')
try:
v = monitor.channels[self.channel_name].val_record
except KeyError:
err_input = ''
if monitor_channel_specified:
if self.dataset_name:
err_input = 'The dataset_name \'' + str(
self.dataset_name) + '\' is not valid.'
else:
err_input = 'The channel_name \'' + str(
self.channel_name) + '\' is not valid.'
err_message = 'There is no monitoring channel named \'' + \
str(self.channel_name) + '\'. You probably need to ' + \
'specify a valid monitoring channel by using either ' + \
'dataset_name or channel_name in the ' + \
'MonitorBasedLRAdjuster constructor. ' + err_input
reraise_as(ValueError(err_message))
if len(v) < 1:
if monitor.dataset is None:
assert len(v) == 0
raise ValueError(
"You're trying to use a monitor-based "
"learning rate adjustor but the monitor has no "
"entries because you didn't specify a "
"monitoring dataset.")
raise ValueError(
"For some reason there are no monitor entries"
"yet the MonitorBasedLRAdjuster has been "
"called. This should never happen. The Train"
" object should call the monitor once on "
"initialization, then call the callbacks. "
"It seems you are either calling the "
"callback manually rather than as part of a "
"training algorithm, or there is a problem "
"with the Train object.")
if len(v) == 1:
# only the initial monitoring has happened
# no learning has happened, so we can't adjust learning rate yet
# just do nothing
return
rval = current_learning_rate
log.info("monitoring channel is {0}".format(self.channel_name))
if v[-1] > self.high_trigger * v[-2]:
rval *= self.shrink_amt
log.info("shrinking learning rate to %f" % rval)
elif v[-1] > self.low_trigger * v[-2]:
rval *= self.grow_amt
log.info("growing learning rate to %f" % rval)
rval = max(self.min_lr, rval)
rval = min(self.max_lr, rval)
lr.set_value(np.cast[lr.dtype](rval))
class PatienceBasedTermCrit(object):
    """
    A monitor-based termination criterion using a geometrically increasing
    amount of patience. If the selected channel has decreased by a certain
    proportion when comparing to the lowest value seen yet, the patience is
    set to a factor of the number of examples seen, which by default
    (patience_increase=2.) ensures the model has seen as many examples as the
    number of examples that lead to the lowest value before concluding a local
    optima has been reached.

    Note: Technically, the patience corresponds to a number of epochs to be
    independent of the size of the dataset, so be aware of that when choosing
    initial_patience.

    Parameters
    ----------
    prop_decrease : float
        The factor X in the (1 - X) * best_value threshold
    initial_patience : int
        Minimal number of epochs the model has to run before it can stop
    patience_increase : float, optional
        The factor X in the patience = X * n_iter update.
    channel_name : string, optional
        Name of the channel to examine. If None and the monitor
        has only one channel, this channel will be used; otherwise, an
        error will be raised.
    """

    def __init__(self, prop_decrease, initial_patience,
                 patience_increase=2., channel_name=None):
        self._channel_name = channel_name
        self.prop_decrease = prop_decrease
        self.patience = initial_patience
        # Best (lowest) channel value observed so far; any first value beats
        # +inf, so the first call always counts as an improvement.
        self.best_value = np.inf
        self.patience_increase = patience_increase

    def __call__(self, model):
        """
        Returns True or False depending on whether the optimization should
        stop or not. The optimization should stop if it has run for a number
        of epochs superior to the patience without any improvement.

        Parameters
        ----------
        model : Model
            The model used in the experiment and from which the monitor used
            in the termination criterion will be extracted.

        Returns
        -------
        bool
            True or False, indicating if the optimization should stop or not.
        """
        monitor = model.monitor
        # In the case the monitor has only one channel, the channel_name can
        # be omitted and the criterion will examine the only channel
        # available. However, if the monitor has multiple channels, leaving
        # the channel_name unspecified will raise an error.
        if self._channel_name is None:
            if len(monitor.channels) != 1:
                raise ValueError("Only single-channel monitors are supported "
                                 "for channel_name == None")
            # Fixed: dict views are not indexable on Python 3, so
            # materialize the values before taking the first one.
            v = list(monitor.channels.values())[0].val_record
        else:
            v = monitor.channels[self._channel_name].val_record
        # If the channel value decrease is higher than the threshold, we
        # update the best value to this value and we update the patience.
        if v[-1] < self.best_value * (1. - self.prop_decrease):
            # Using the max between actual patience and updated patience
            # ensures that the model will run for at least the initial
            # patience and that it would behave correctly if the user
            # chooses a dumb value (i.e. less than 1)
            self.patience = max(self.patience, len(v) * self.patience_increase)
            self.best_value = v[-1]

        return len(v) < self.patience
class AnnealedLearningRate(object):
    """
    This is a callback for the SGD algorithm rather than the Train object.
    This anneals the learning rate to decrease as 1/t where t is the number
    of gradient descent updates done so far. Use OneOverEpoch as Train object
    callback if you would prefer 1/t where t is epochs.

    Parameters
    ----------
    anneal_start : int
        The epoch on which to begin annealing
    """
    def __init__(self, anneal_start):
        self._initialized = False
        self._count = 0
        self._anneal_start = anneal_start

    def __call__(self, algorithm):
        """
        Updates the learning rate according to the annealing schedule.

        Parameters
        ----------
        algorithm : WRITEME
        """
        if not self._initialized:
            # Capture the base rate on the first update so annealing scales
            # whatever rate the algorithm started with.
            self._base = algorithm.learning_rate.get_value()
            self._initialized = True
        self._count += 1
        algorithm.learning_rate.set_value(np.cast[config.floatX](
            self.current_learning_rate()))

    def current_learning_rate(self):
        """
        Returns the current desired learning rate according to the
        annealing schedule.
        """
        # BUG FIX: under Python 2, anneal_start / count is integer division
        # when both are ints, which truncates to 0 as soon as
        # count > anneal_start and silences learning entirely. Force float
        # division so the rate decays smoothly as anneal_start / t.
        return self._base * min(1, float(self._anneal_start) / self._count)
class ExponentialDecay(object):
    """
    SGD update callback (not a Train extension) that divides the learning
    rate by ``decay_factor`` after every single gradient step, clipping the
    result so it never drops below ``min_lr``.

    Parameters
    ----------
    decay_factor : float
        The learning rate at step t is given by
        `init_learning_rate / (decay_factor ** t)`
    min_lr : float
        The learning rate will be clipped to be at least this value
    """

    def __init__(self, decay_factor, min_lr):
        # YAML configs may hand the numbers in as strings; coerce first.
        if isinstance(decay_factor, str):
            decay_factor = float(decay_factor)
        if isinstance(min_lr, str):
            min_lr = float(min_lr)
        assert isinstance(decay_factor, float)
        assert isinstance(min_lr, float)
        self.decay_factor = decay_factor
        self.min_lr = min_lr
        self._count = 0
        self._min_reached = False

    def __call__(self, algorithm):
        """
        Updates the learning rate according to the exponential decay schedule.

        Parameters
        ----------
        algorithm : SGD
            The SGD instance whose `learning_rate` field should be modified.
        """
        if self._count == 0:
            # Remember the starting rate before the first decay step.
            self._base_lr = algorithm.learning_rate.get_value()
        self._count += 1

        if self._min_reached:
            new_lr = self.min_lr
        else:
            # Once min_lr is hit we must stop exponentiating, otherwise the
            # ever-growing divisor eventually raises an OverflowError.
            new_lr = self._base_lr / (self.decay_factor ** self._count)
            if new_lr <= self.min_lr:
                self._min_reached = True
                new_lr = self.min_lr

        algorithm.learning_rate.set_value(np.cast[config.floatX](new_lr))
class LinearDecay(object):
    """
    SGD update callback (not a Train extension) that interpolates the
    learning rate linearly, per gradient step, from its initial value down
    to ``decay_factor`` times that value between steps ``start`` and
    ``saturate``.

    Parameters
    ----------
    start : int
        The step at which to start decreasing the learning rate
    saturate : int
        The step at which to stop decreasing the learning rate
    decay_factor : float
        `final learning rate = decay_factor * initial learning rate`
    """

    def __init__(self, start, saturate, decay_factor):
        # YAML configs may hand the numbers in as strings; coerce first.
        if isinstance(decay_factor, str):
            decay_factor = float(decay_factor)
        if isinstance(start, str):
            start = float(start)
        if isinstance(saturate, str):
            saturate = float(saturate)
        assert isinstance(decay_factor, float)
        assert isinstance(start, (py_integer_types, py_float_types))
        assert isinstance(saturate, (py_integer_types, py_float_types))
        assert saturate > start
        assert start > 0
        self.start = start
        self.saturate = saturate
        self.decay_factor = decay_factor
        self._count = 0

    def __call__(self, algorithm):
        """
        Adjusts the learning rate according to the linear decay schedule.

        Parameters
        ----------
        algorithm : WRITEME
        """
        if self._count == 0:
            # Capture the initial rate and precompute the per-step decrement.
            self._base_lr = algorithm.learning_rate.get_value()
            self._step = ((self._base_lr - self._base_lr * self.decay_factor) /
                          (self.saturate - self.start + 1))
        self._count += 1

        if self._count < self.start:
            new_lr = self._base_lr
        elif self._count < self.saturate:
            new_lr = self._base_lr - self._step * (self._count -
                                                   self.start + 1)
        else:
            new_lr = self._base_lr * self.decay_factor

        assert new_lr > 0
        algorithm.learning_rate.set_value(np.cast[config.floatX](new_lr))
class EpochMonitor(object):
    """
    SGD update callback (not a Train extension) that reports progress from
    inside an epoch: a one-line tick every ``tick_rate`` batches and/or a
    full monitor pass every ``monitor_rate`` batches. Handy for very large
    datasets where whole epochs are slow; note every extra monitor pass adds
    to the epoch's wall-clock time.

    Parameters
    ----------
    model : pylearn2 model instance
        The model being monitored
    tick_rate : int (optional)
        Log one-line updates every `tick_rate` batches
    monitor_rate : int (optional)
        Call full monitor updates within epochs every `monitor_rate` batches

    YAML usage
    ----------
    model: &model !obj:pylearn2.models.mlp.MLP {
        ...
    },
    algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {
        update_callbacks: [
            !obj:pylearn2.training_algorithms.sgd.EpochMonitor {
                model: *model,
                tick_rate: 20,
                monitor_rate: 110 }],
        ...
    }
    """

    def __init__(self, model, tick_rate=None, monitor_rate=None):
        self.model = model
        self.tick_rate = tick_rate
        self.monitor_rate = monitor_rate
        # Batch counter resets at every epoch boundary.
        self.batches = 0
        self.epoch = 1

    def __call__(self, algorithm):
        monitor = self.model.monitor
        if monitor.get_epochs_seen() == self.epoch:
            # An epoch boundary was crossed: advance and reset the counter.
            self.epoch += 1
            self.batches = 0
            return

        self.batches += 1
        monitor_due = (self.monitor_rate and self.batches and
                       self.batches % self.monitor_rate == 0)
        if monitor_due:
            monitor()
        elif self.tick_rate and self.batches % self.tick_rate == 0:
            log.info('Epoch {}: {} batches seen'.format(
                self.epoch, self.batches))
class OneOverEpoch(TrainExtension):
    """
    Train extension that shrinks the learning rate like 1 / (# epochs),
    beginning at epoch ``start`` and clipped from below at ``min_lr``.

    Parameters
    ----------
    start : int
        The epoch on which to start shrinking the learning rate
    half_life : int, optional
        How many epochs after start it will take for the learning rate to
        lose half its value for the first time (losing the next half takes
        twice as long). Defaults to ``start + 1``.
    min_lr : float, optional
        The minimum value the learning rate can take on
    """

    def __init__(self, start, half_life=None, min_lr=1e-6):
        self.start = start
        self.half_life = half_life
        self.min_lr = min_lr
        self._initialized = False
        self._count = 0
        assert start >= 0
        if half_life is None:
            self.half_life = start + 1
        else:
            assert half_life > 0

    def on_monitor(self, model, dataset, algorithm):
        """
        Adjusts the learning rate according to the decay schedule.

        Parameters
        ----------
        model : a Model instance
        dataset : Dataset
        algorithm : WRITEME
        """
        if not self._initialized:
            # Record the starting rate and sanity-check it against the floor.
            self._init_lr = algorithm.learning_rate.get_value()
            if self._init_lr < self.min_lr:
                raise ValueError("The initial learning rate is smaller than "
                                 "the minimum allowed learning rate.")
            self._initialized = True
        self._count += 1
        algorithm.learning_rate.set_value(np.cast[config.floatX](
            self.current_lr()))

    def current_lr(self):
        """
        Returns the learning rate currently desired by the decay schedule.
        """
        if self._count < self.start:
            scale = 1
        else:
            scale = float(self.half_life) / float(self._count -
                                                  self.start + self.half_life)
        return max(self.min_lr, self._init_lr * scale)
class LinearDecayOverEpoch(TrainExtension):
    """
    Train extension that scales the learning rate linearly per epoch, from
    its initial value at epoch ``start`` down to ``decay_factor`` times that
    value at epoch ``saturate``.

    Parameters
    ----------
    start : int
        The epoch on which to start shrinking the learning rate
    saturate : int
        The epoch to saturate the shrinkage
    decay_factor : float
        The final value would be initial learning rate times decay_factor
    """

    def __init__(self, start, saturate, decay_factor):
        self.start = start
        self.saturate = saturate
        self.decay_factor = decay_factor
        self._initialized = False
        self._count = 0
        assert isinstance(decay_factor, float)
        assert isinstance(start, (py_integer_types, py_float_types))
        assert isinstance(saturate, (py_integer_types, py_float_types))
        assert saturate > start
        assert start >= 0
        assert saturate >= start

    def setup(self, model, dataset, algorithm):
        """
        Initializes the decay schedule based on epochs_seen.

        Parameters
        ----------
        model : pylearn2.models.Model
            The model to which the training algorithm is applied.
        dataset : pylearn2.datasets.Dataset
            The dataset to which the model is applied.
        algorithm : pylearn2.training_algorithms.TrainingAlgorithm
            Describes how gradients should be updated.
        """
        # Resume-friendly: start counting from however many epochs the
        # monitor has already seen.
        self._count = Monitor.get_monitor(model).get_epochs_seen()
        self._apply_learning_rate(algorithm)

    def on_monitor(self, model, dataset, algorithm):
        """
        Updates the learning rate based on the linear decay schedule.

        Parameters
        ----------
        model : a Model instance
        dataset : Dataset
        algorithm : WRITEME
        """
        self._count += 1
        self._apply_learning_rate(algorithm)

    def _apply_learning_rate(self, algorithm):
        """
        Updates the learning rate on algorithm based on the epochs elapsed.
        """
        if not self._initialized:
            # Capture the initial rate and precompute the per-epoch step.
            self._init_lr = algorithm.learning_rate.get_value()
            self._step = ((self._init_lr - self._init_lr * self.decay_factor) /
                          (self.saturate - self.start + 1))
            self._initialized = True
        algorithm.learning_rate.set_value(np.cast[config.floatX](
            self.current_lr()))

    def current_lr(self):
        """
        Returns the learning rate currently desired by the decay schedule.
        """
        if self._count < self.start:
            new_lr = self._init_lr
        elif self._count < self.saturate:
            new_lr = self._init_lr - self._step * (self._count -
                                                   self.start + 1)
        else:
            new_lr = self._init_lr * self.decay_factor
        assert new_lr > 0
        return new_lr
class _PolyakWorker(object):
    """
    Only to be used by the PolyakAveraging TrainingCallback below.
    Do not use directly.
    A callback for the SGD class.

    Parameters
    ----------
    model : a Model
        The model whose parameters we want to train with Polyak averaging
    """
    def __init__(self, model):
        avg_updates = OrderedDict()
        # t counts how many parameter snapshots have been folded into the
        # running mean; starts at 1 so the first update copies the params.
        t = sharedX(1.)
        # Maps each model parameter to the shared variable holding its
        # running average (read by PolyakAveraging when saving/monitoring).
        self.param_to_mean = OrderedDict()
        for param in model.get_params():
            mean = sharedX(param.get_value())
            assert type(mean) == type(param)
            self.param_to_mean[param] = mean
            # Incremental mean: mean <- mean - (mean - param) / t, the
            # streaming form of averaging the first t parameter values.
            avg_updates[mean] = mean - (mean - param) / t
        avg_updates[t] = t + 1.
        # Compiled function that applies all averaging updates in one call.
        self.avg = function([], updates=avg_updates)
    def __call__(self, algorithm):
        """
        To be called after each SGD step.
        Updates the Polyak averaged-parameters for this model

        Parameters
        ----------
        algorithm : WRITEME
        """
        self.avg()
class PolyakAveraging(TrainExtension):
    """
    Train extension implementing Polyak parameter averaging.

    See "A Tutorial on Stochastic Approximation Algorithms for Training
    Restricted Boltzmann Machines and Deep Belief Nets" by Kevin Swersky
    et al.

    This functionality is still a work in progress. Currently, your model
    needs to implement "add_polyak_channels" to use it.

    The problem is that Polyak averaging shouldn't modify the model
    parameters. It should keep a second copy that it averages in the
    background. This second copy doesn't get to come back in and affect
    the learning process though.

    (IG tried having the second copy get pushed back into the model once
    per epoch, but this turned out to be harmful, at least in limited
    tests)

    So we need a cleaner interface for monitoring the averaged copy of
    the parameters, and we need to make sure the saved model at the end
    uses the averaged parameters, not the parameters used for computing
    the gradients during training.

    TODO: make use of the new on_save callback instead of duplicating
    Train's save_freq flag

    Parameters
    ----------
    start : int
        The epoch after which to start averaging (0 = start averaging
        immediately)
    save_path : str, optional
        WRITEME
    save_freq : int, optional
        WRITEME

    Notes
    -----
    This is usually used with a fixed, rather than annealed learning
    rate. It may be used in conjunction with momentum.
    """

    def __init__(self, start, save_path=None, save_freq=1):
        self.start = start
        self.save_path = save_path
        self.save_freq = save_freq
        self._count = 0
        assert isinstance(start, py_integer_types)
        assert start >= 0

    def on_monitor(self, model, dataset, algorithm):
        """
        Make sure Polyak-averaged model gets monitored.
        Save the model if necessary.

        Parameters
        ----------
        model : a Model instance
        dataset : Dataset
        algorithm : WRITEME
        """
        if self._count == self.start:
            # Begin averaging: hook the worker into the SGD update loop.
            self._worker = _PolyakWorker(model)
            algorithm.update_callbacks.append(self._worker)
            # HACK: not every model implements add_polyak_channels.
            try:
                model.add_polyak_channels(self._worker.param_to_mean,
                                          algorithm.monitoring_dataset)
            except AttributeError:
                pass
        else:
            save_due = (self.save_path is not None and
                        self._count > self.start and
                        self._count % self.save_freq == 0)
            if save_due:
                # Temporarily swap the averaged parameters into the model,
                # serialize it, then restore the live training parameters.
                stashed = OrderedDict()
                for param in model.get_params():
                    stashed[param] = param.get_value()
                    param.set_value(
                        self._worker.param_to_mean[param].get_value())
                serial.save(self.save_path, model)
                for param in model.get_params():
                    param.set_value(stashed[param])
        self._count += 1
| bsd-3-clause |
jmmease/pandas | pandas/tests/frame/test_block_internals.py | 3 | 20693 | # -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
import itertools
from numpy import nan
import numpy as np
from pandas import (DataFrame, Series, Timestamp, date_range, compat,
option_context)
from pandas.compat import StringIO
import pandas as pd
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
# Segregated collection of methods that require the BlockManager internal data
# structure
class TestDataFrameBlockInternals(TestData):
    """Tests that poke at DataFrame's BlockManager-backed internals.

    These exercise private attributes (``_data``, ``_series``),
    consolidation behavior and dtype inference, so they are segregated
    from the public-API frame tests.
    """

    def test_cast_internals(self):
        casted = DataFrame(self.frame._data, dtype=int)
        expected = DataFrame(self.frame._series, dtype=int)
        assert_frame_equal(casted, expected)

        casted = DataFrame(self.frame._data, dtype=np.int32)
        expected = DataFrame(self.frame._series, dtype=np.int32)
        assert_frame_equal(casted, expected)

    def test_consolidate(self):
        self.frame['E'] = 7.
        consolidated = self.frame._consolidate()
        assert len(consolidated._data.blocks) == 1

        # Ensure copy, do I want this?
        recons = consolidated._consolidate()
        assert recons is not consolidated
        tm.assert_frame_equal(recons, consolidated)

        self.frame['F'] = 8.
        assert len(self.frame._data.blocks) == 3

        self.frame._consolidate(inplace=True)
        assert len(self.frame._data.blocks) == 1

    def test_consolidate_deprecation(self):
        self.frame['E'] = 7
        with tm.assert_produces_warning(FutureWarning):
            self.frame.consolidate()

    def test_consolidate_inplace(self):
        frame = self.frame.copy()  # noqa

        # triggers in-place consolidation
        for letter in range(ord('A'), ord('Z')):
            self.frame[chr(letter)] = chr(letter)

    def test_as_matrix_consolidate(self):
        self.frame['E'] = 7.
        assert not self.frame._data.is_consolidated()
        _ = self.frame.as_matrix()  # noqa
        assert self.frame._data.is_consolidated()

    def test_modify_values(self):
        self.frame.values[5] = 5
        assert (self.frame.values[5] == 5).all()

        # unconsolidated
        self.frame['E'] = 7.
        self.frame.values[6] = 6
        assert (self.frame.values[6] == 6).all()

    def test_boolean_set_uncons(self):
        self.frame['E'] = 7.

        expected = self.frame.values.copy()
        expected[expected > 1] = 2

        self.frame[self.frame > 1] = 2
        assert_almost_equal(expected, self.frame.values)

    def test_as_matrix_numeric_cols(self):
        self.frame['foo'] = 'bar'

        values = self.frame.as_matrix(['A', 'B', 'C', 'D'])
        assert values.dtype == np.float64

    def test_as_matrix_lcd(self):
        # mixed lcd
        values = self.mixed_float.as_matrix(['A', 'B', 'C', 'D'])
        assert values.dtype == np.float64

        values = self.mixed_float.as_matrix(['A', 'B', 'C'])
        assert values.dtype == np.float32

        values = self.mixed_float.as_matrix(['C'])
        assert values.dtype == np.float16

        # GH 10364
        # B uint64 forces float because there are other signed int types
        values = self.mixed_int.as_matrix(['A', 'B', 'C', 'D'])
        assert values.dtype == np.float64

        values = self.mixed_int.as_matrix(['A', 'D'])
        assert values.dtype == np.int64

        # B uint64 forces float because there are other signed int types
        values = self.mixed_int.as_matrix(['A', 'B', 'C'])
        assert values.dtype == np.float64

        # as B and C are both unsigned, no forcing to float is needed
        values = self.mixed_int.as_matrix(['B', 'C'])
        assert values.dtype == np.uint64

        values = self.mixed_int.as_matrix(['A', 'C'])
        assert values.dtype == np.int32

        values = self.mixed_int.as_matrix(['C', 'D'])
        assert values.dtype == np.int64

        values = self.mixed_int.as_matrix(['A'])
        assert values.dtype == np.int32

        values = self.mixed_int.as_matrix(['C'])
        assert values.dtype == np.uint8

    def test_constructor_with_convert(self):
        # this is actually mostly a test of lib.maybe_convert_objects
        # #2845
        df = DataFrame({'A': [2 ** 63 - 1]})
        result = df['A']
        expected = Series(np.asarray([2 ** 63 - 1], np.int64), name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [2 ** 63]})
        result = df['A']
        expected = Series(np.asarray([2 ** 63], np.uint64), name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [datetime(2005, 1, 1), True]})
        result = df['A']
        expected = Series(np.asarray([datetime(2005, 1, 1), True], np.object_),
                          name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [None, 1]})
        result = df['A']
        expected = Series(np.asarray([np.nan, 1], np.float_), name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [1.0, 2]})
        result = df['A']
        expected = Series(np.asarray([1.0, 2], np.float_), name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [1.0 + 2.0j, 3]})
        result = df['A']
        expected = Series(np.asarray([1.0 + 2.0j, 3], np.complex_), name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [1.0 + 2.0j, 3.0]})
        result = df['A']
        expected = Series(np.asarray([1.0 + 2.0j, 3.0], np.complex_), name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [1.0 + 2.0j, True]})
        result = df['A']
        expected = Series(np.asarray([1.0 + 2.0j, True], np.object_), name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [1.0, None]})
        result = df['A']
        expected = Series(np.asarray([1.0, np.nan], np.float_), name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [1.0 + 2.0j, None]})
        result = df['A']
        expected = Series(np.asarray(
            [1.0 + 2.0j, np.nan], np.complex_), name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [2.0, 1, True, None]})
        result = df['A']
        expected = Series(np.asarray(
            [2.0, 1, True, None], np.object_), name='A')
        assert_series_equal(result, expected)

        df = DataFrame({'A': [2.0, 1, datetime(2006, 1, 1), None]})
        result = df['A']
        expected = Series(np.asarray([2.0, 1, datetime(2006, 1, 1),
                                      None], np.object_), name='A')
        assert_series_equal(result, expected)

    def test_construction_with_mixed(self):
        # test construction edge cases with mixed types

        # f7u12, this does not work without extensive workaround
        data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
                [datetime(2000, 1, 2), datetime(2000, 1, 3),
                 datetime(2000, 1, 1)]]
        df = DataFrame(data)

        # check dtypes
        result = df.get_dtype_counts().sort_values()
        expected = Series({'datetime64[ns]': 3})
        # BUG FIX: result/expected were computed but never compared,
        # making the dtype check above a no-op.
        assert_series_equal(result, expected)

        # mixed-type frames
        self.mixed_frame['datetime'] = datetime.now()
        self.mixed_frame['timedelta'] = timedelta(days=1, seconds=1)
        assert self.mixed_frame['datetime'].dtype == 'M8[ns]'
        assert self.mixed_frame['timedelta'].dtype == 'm8[ns]'
        result = self.mixed_frame.get_dtype_counts().sort_values()
        expected = Series({'float64': 4,
                           'object': 1,
                           'datetime64[ns]': 1,
                           'timedelta64[ns]': 1}).sort_values()
        assert_series_equal(result, expected)

    def test_construction_with_conversions(self):

        # convert from a numpy array of non-ns timedelta64
        arr = np.array([1, 2, 3], dtype='timedelta64[s]')
        s = Series(arr)
        expected = Series(pd.timedelta_range('00:00:01', periods=3, freq='s'))
        assert_series_equal(s, expected)

        df = DataFrame(index=range(3))
        df['A'] = arr
        expected = DataFrame({'A': pd.timedelta_range('00:00:01', periods=3,
                                                      freq='s')},
                             index=range(3))
        assert_frame_equal(df, expected)

        # convert from a numpy array of non-ns datetime64
        # note that creating a numpy datetime64 is in LOCAL time!!!!
        # seems to work for M8[D], but not for M8[s]
        s = Series(np.array(['2013-01-01', '2013-01-02',
                             '2013-01-03'], dtype='datetime64[D]'))
        assert_series_equal(s, Series(date_range('20130101', periods=3,
                                                 freq='D')))

        # s = Series(np.array(['2013-01-01 00:00:01','2013-01-01
        # 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]'))
        # assert_series_equal(s,date_range('20130101
        # 00:00:01',period=3,freq='s'))

        expected = DataFrame({
            'dt1': Timestamp('20130101'),
            'dt2': date_range('20130101', periods=3),
            # 'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'),
        }, index=range(3))

        df = DataFrame(index=range(3))
        df['dt1'] = np.datetime64('2013-01-01')
        df['dt2'] = np.array(['2013-01-01', '2013-01-02', '2013-01-03'],
                             dtype='datetime64[D]')

        # df['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01
        # 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')

        assert_frame_equal(df, expected)

    def test_constructor_compound_dtypes(self):
        # GH 5191
        # compound dtypes should raise not-implementederror

        def f(dtype):
            data = list(itertools.repeat((datetime(2001, 1, 1),
                                          "aa", 20), 9))
            return DataFrame(data=data,
                             columns=["A", "B", "C"],
                             dtype=dtype)

        pytest.raises(NotImplementedError, f,
                      [("A", "datetime64[h]"),
                       ("B", "str"),
                       ("C", "int32")])

        # these work (though results may be unexpected)
        f('int64')
        f('float64')

        # 10822
        # invalid error message on dt inference
        if not compat.is_platform_windows():
            f('M8[ns]')

    def test_equals_different_blocks(self):
        # GH 9330
        df0 = pd.DataFrame({"A": ["x", "y"], "B": [1, 2],
                            "C": ["w", "z"]})
        df1 = df0.reset_index()[["A", "B", "C"]]
        # this assert verifies that the above operations have
        # induced a block rearrangement
        assert (df0._data.blocks[0].dtype != df1._data.blocks[0].dtype)

        # do the real tests
        assert_frame_equal(df0, df1)
        assert df0.equals(df1)
        assert df1.equals(df0)

    def test_copy_blocks(self):
        # API/ENH 9607
        df = DataFrame(self.frame, copy=True)
        column = df.columns[0]

        # use the default copy=True, change a column
        # deprecated 0.21.0
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            blocks = df.as_blocks()
        for dtype, _df in blocks.items():
            if column in _df:
                _df.loc[:, column] = _df[column] + 1

        # make sure we did not change the original DataFrame
        assert not _df[column].equals(df[column])

    def test_no_copy_blocks(self):
        # API/ENH 9607
        df = DataFrame(self.frame, copy=True)
        column = df.columns[0]

        # use the copy=False, change a column
        # deprecated 0.21.0
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            blocks = df.as_blocks(copy=False)
        for dtype, _df in blocks.items():
            if column in _df:
                _df.loc[:, column] = _df[column] + 1

        # make sure we did change the original DataFrame
        assert _df[column].equals(df[column])

    def test_copy(self):
        cop = self.frame.copy()
        cop['E'] = cop['A']
        assert 'E' not in self.frame

        # copy objects
        copy = self.mixed_frame.copy()
        assert copy._data is not self.mixed_frame._data

    def test_pickle(self):
        unpickled = tm.round_trip_pickle(self.mixed_frame)
        assert_frame_equal(self.mixed_frame, unpickled)

        # buglet
        self.mixed_frame._data.ndim

        # empty
        unpickled = tm.round_trip_pickle(self.empty)
        repr(unpickled)

        # tz frame
        unpickled = tm.round_trip_pickle(self.tzframe)
        assert_frame_equal(self.tzframe, unpickled)

    def test_consolidate_datetime64(self):
        # numpy vstack bug
        data = """\
starting,ending,measure
2012-06-21 00:00,2012-06-23 07:00,77
2012-06-23 07:00,2012-06-23 16:30,65
2012-06-23 16:30,2012-06-25 08:00,77
2012-06-25 08:00,2012-06-26 12:00,0
2012-06-26 12:00,2012-06-27 08:00,77
"""
        df = pd.read_csv(StringIO(data), parse_dates=[0, 1])

        ser_starting = df.starting
        ser_starting.index = ser_starting.values
        ser_starting = ser_starting.tz_localize('US/Eastern')
        ser_starting = ser_starting.tz_convert('UTC')
        ser_starting.index.name = 'starting'

        ser_ending = df.ending
        ser_ending.index = ser_ending.values
        ser_ending = ser_ending.tz_localize('US/Eastern')
        ser_ending = ser_ending.tz_convert('UTC')
        ser_ending.index.name = 'ending'

        df.starting = ser_starting.index
        df.ending = ser_ending.index

        tm.assert_index_equal(pd.DatetimeIndex(
            df.starting), ser_starting.index)
        tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index)

    def test_is_mixed_type(self):
        assert not self.frame._is_mixed_type
        assert self.mixed_frame._is_mixed_type

    def test_get_numeric_data(self):
        # TODO(wesm): unused?
        intname = np.dtype(np.int_).name  # noqa
        floatname = np.dtype(np.float_).name  # noqa
        datetime64name = np.dtype('M8[ns]').name
        objectname = np.dtype(np.object_).name

        df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
                        'f': Timestamp('20010102')},
                       index=np.arange(10))
        result = df.get_dtype_counts()
        expected = Series({'int64': 1, 'float64': 1,
                           datetime64name: 1, objectname: 1})
        result.sort_index()
        expected.sort_index()
        assert_series_equal(result, expected)

        df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
                        'd': np.array([1.] * 10, dtype='float32'),
                        'e': np.array([1] * 10, dtype='int32'),
                        'f': np.array([1] * 10, dtype='int16'),
                        'g': Timestamp('20010102')},
                       index=np.arange(10))

        result = df._get_numeric_data()
        expected = df.loc[:, ['a', 'b', 'd', 'e', 'f']]
        assert_frame_equal(result, expected)

        only_obj = df.loc[:, ['c', 'g']]
        result = only_obj._get_numeric_data()
        expected = df.loc[:, []]
        assert_frame_equal(result, expected)

        df = DataFrame.from_dict(
            {'a': [1, 2], 'b': ['foo', 'bar'], 'c': [np.pi, np.e]})
        result = df._get_numeric_data()
        expected = DataFrame.from_dict({'a': [1, 2], 'c': [np.pi, np.e]})
        assert_frame_equal(result, expected)

        df = result.copy()
        result = df._get_numeric_data()
        expected = df
        assert_frame_equal(result, expected)

    def test_convert_objects(self):

        oops = self.mixed_frame.T.T
        converted = oops._convert(datetime=True)
        assert_frame_equal(converted, self.mixed_frame)
        assert converted['A'].dtype == np.float64

        # force numeric conversion
        self.mixed_frame['H'] = '1.'
        self.mixed_frame['I'] = '1'

        # add in some items that will be nan
        # (renamed from `l` to `n`: `l` is visually ambiguous, E741)
        n = len(self.mixed_frame)
        self.mixed_frame['J'] = '1.'
        self.mixed_frame['K'] = '1'
        self.mixed_frame.loc[0:5, ['J', 'K']] = 'garbled'
        converted = self.mixed_frame._convert(datetime=True, numeric=True)
        assert converted['H'].dtype == 'float64'
        assert converted['I'].dtype == 'int64'
        assert converted['J'].dtype == 'float64'
        assert converted['K'].dtype == 'float64'
        assert len(converted['J'].dropna()) == n - 5
        assert len(converted['K'].dropna()) == n - 5

        # via astype
        converted = self.mixed_frame.copy()
        converted['H'] = converted['H'].astype('float64')
        converted['I'] = converted['I'].astype('int64')
        assert converted['H'].dtype == 'float64'
        assert converted['I'].dtype == 'int64'

        # via astype, but errors
        converted = self.mixed_frame.copy()
        with tm.assert_raises_regex(ValueError, 'invalid literal'):
            converted['H'].astype('int32')

        # mixed in a single column
        df = DataFrame(dict(s=Series([1, 'na', 3, 4])))
        result = df._convert(datetime=True, numeric=True)
        expected = DataFrame(dict(s=Series([1, np.nan, 3, 4])))
        assert_frame_equal(result, expected)

    def test_convert_objects_no_conversion(self):
        mixed1 = DataFrame(
            {'a': [1, 2, 3], 'b': [4.0, 5, 6], 'c': ['x', 'y', 'z']})
        mixed2 = mixed1._convert(datetime=True)
        assert_frame_equal(mixed1, mixed2)

    def test_infer_objects(self):
        # GH 11221
        df = DataFrame({'a': ['a', 1, 2, 3],
                        'b': ['b', 2.0, 3.0, 4.1],
                        'c': ['c', datetime(2016, 1, 1),
                              datetime(2016, 1, 2),
                              datetime(2016, 1, 3)],
                        'd': [1, 2, 3, 'd']},
                       columns=['a', 'b', 'c', 'd'])
        df = df.iloc[1:].infer_objects()

        assert df['a'].dtype == 'int64'
        assert df['b'].dtype == 'float64'
        assert df['c'].dtype == 'M8[ns]'
        assert df['d'].dtype == 'object'

        expected = DataFrame({'a': [1, 2, 3],
                              'b': [2.0, 3.0, 4.1],
                              'c': [datetime(2016, 1, 1),
                                    datetime(2016, 1, 2),
                                    datetime(2016, 1, 3)],
                              'd': [2, 3, 'd']},
                             columns=['a', 'b', 'c', 'd'])
        # reconstruct frame to verify inference is same
        tm.assert_frame_equal(df.reset_index(drop=True), expected)

    def test_stale_cached_series_bug_473(self):

        # this is chained, but ok
        with option_context('chained_assignment', None):
            Y = DataFrame(np.random.random((4, 4)), index=('a', 'b', 'c', 'd'),
                          columns=('e', 'f', 'g', 'h'))
            repr(Y)
            Y['e'] = Y['e'].astype('object')
            Y['g']['c'] = np.NaN
            repr(Y)
            result = Y.sum()  # noqa
            exp = Y['g'].sum()  # noqa
            assert pd.isna(Y['g']['c'])

    def test_get_X_columns(self):
        # numeric and object columns

        df = DataFrame({'a': [1, 2, 3],
                        'b': [True, False, True],
                        'c': ['foo', 'bar', 'baz'],
                        'd': [None, None, None],
                        'e': [3.14, 0.577, 2.773]})

        tm.assert_index_equal(df._get_numeric_data().columns,
                              pd.Index(['a', 'b', 'e']))

    def test_strange_column_corruption_issue(self):
        # (wesm) Unclear how exactly this is related to internal matters
        df = DataFrame(index=[0, 1])
        df[0] = nan
        wasCol = {}
        # uncommenting these makes the results match
        # for col in xrange(100, 200):
        #     wasCol[col] = 1
        #     df[col] = nan

        for i, dt in enumerate(df.index):
            for col in range(100, 200):
                if col not in wasCol:
                    wasCol[col] = 1
                    df[col] = nan
                df[col][dt] = i

        myid = 100

        first = len(df.loc[pd.isna(df[myid]), [myid]])
        second = len(df.loc[pd.isna(df[myid]), [myid]])
        assert first == second == 0
| bsd-3-clause |
vibhorag/scikit-learn | sklearn/metrics/regression.py | 175 | 16953 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Michael Eickenberg <michael.eickenberg@gmail.com>
# Konstantin Shmelkov <konstantin.shmelkov@polytechnique.edu>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
import warnings
# NOTE(review): Python's export list is spelled __all__ (lowercase);
# __ALL__ has no special meaning, so this list currently has no effect on
# `from sklearn.metrics.regression import *`. Renaming it would change
# star-import behavior, so it is flagged here rather than silently changed.
__ALL__ = [
    "mean_absolute_error",
    "mean_squared_error",
    "median_absolute_error",
    "r2_score",
    "explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
    """Validate that ``y_true`` and ``y_pred`` describe the same task.

    Parameters
    ----------
    y_true : array-like,

    y_pred : array-like,

    multioutput : array-like or string in ['raw_values', uniform_average',
        'variance_weighted'] or None
        None is accepted due to backward compatibility of r2_score().

    Returns
    -------
    type_true : one of {'continuous', continuous-multioutput'}
        The type of the true target data, as output by
        'utils.multiclass.type_of_target'

    y_true : array-like of shape = (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape = (n_samples, n_outputs)
        Estimated target values.

    multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
        uniform_average', 'variance_weighted'] or None
        Custom output weights if ``multioutput`` is array-like or
        just the corresponding argument if ``multioutput`` is a
        correct keyword.
    """
    check_consistent_length(y_true, y_pred)
    y_true = check_array(y_true, ensure_2d=False)
    y_pred = check_array(y_pred, ensure_2d=False)
    # Promote 1d targets to column vectors so both arrays share a 2d layout.
    if y_true.ndim == 1:
        y_true = y_true.reshape((-1, 1))
    if y_pred.ndim == 1:
        y_pred = y_pred.reshape((-1, 1))
    n_outputs = y_true.shape[1]
    if n_outputs != y_pred.shape[1]:
        raise ValueError("y_true and y_pred have different number of output "
                         "({0}!={1})".format(n_outputs, y_pred.shape[1]))
    allowed_keywords = (None, 'raw_values', 'uniform_average',
                        'variance_weighted')
    if multioutput not in allowed_keywords:
        # Not a recognized keyword: treat as an array of per-output weights.
        multioutput = check_array(multioutput, ensure_2d=False)
        if n_outputs == 1:
            raise ValueError("Custom weights are useful only in "
                             "multi-output cases.")
        elif n_outputs != len(multioutput):
            raise ValueError(("There must be equally many custom weights "
                              "(%d) as outputs (%d).") %
                             (len(multioutput), n_outputs))
    y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
    return y_type, y_true, y_pred, multioutput
def mean_absolute_error(y_true, y_pred,
                        sample_weight=None,
                        multioutput='uniform_average'):
    """Mean absolute error regression loss.

    Read more in the :ref:`User Guide <mean_absolute_error>`.

    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape = (n_samples), optional
        Sample weights.

    multioutput : string in ['raw_values', 'uniform_average']
        or array-like of shape (n_outputs)
        Defines how multiple output errors are aggregated; an array-like
        value gives per-output weights used to average the errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        If multioutput is 'raw_values', then mean absolute error is returned
        for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.

        MAE output is non-negative floating point. The best value is 0.0.

    Examples
    --------
    >>> from sklearn.metrics import mean_absolute_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_absolute_error(y_true, y_pred)
    0.5
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> mean_absolute_error(y_true, y_pred)
    0.75
    >>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    array([ 0.5,  1. ])
    >>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
    ... # doctest: +ELLIPSIS
    0.849...
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)
    # Per-output weighted mean of the absolute residuals.
    absolute_errors = np.abs(y_pred - y_true)
    output_errors = np.average(absolute_errors,
                               weights=sample_weight, axis=0)
    if multioutput == 'raw_values':
        return output_errors
    if multioutput == 'uniform_average':
        # np.average treats weights=None as a uniform mean
        multioutput = None
    return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
                       sample_weight=None,
                       multioutput='uniform_average'):
    """Mean squared error regression loss.

    Read more in the :ref:`User Guide <mean_squared_error>`.

    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape = (n_samples), optional
        Sample weights.

    multioutput : string in ['raw_values', 'uniform_average']
        or array-like of shape (n_outputs)
        Defines how multiple output errors are aggregated; an array-like
        value gives per-output weights used to average the errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.

    Examples
    --------
    >>> from sklearn.metrics import mean_squared_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_squared_error(y_true, y_pred)
    0.375
    >>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
    >>> y_pred = [[0, 2],[-1, 2],[8, -5]]
    >>> mean_squared_error(y_true, y_pred)  # doctest: +ELLIPSIS
    0.708...
    >>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
    ... # doctest: +ELLIPSIS
    array([ 0.416...,  1.        ])
    >>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
    ... # doctest: +ELLIPSIS
    0.824...
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)
    # Per-output weighted mean of the squared residuals.
    squared_errors = (y_true - y_pred) ** 2
    output_errors = np.average(squared_errors, axis=0,
                               weights=sample_weight)
    if multioutput == 'raw_values':
        return output_errors
    if multioutput == 'uniform_average':
        # np.average treats weights=None as a uniform mean
        multioutput = None
    return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
    """Median absolute error regression loss.

    Read more in the :ref:`User Guide <median_absolute_error>`.

    Parameters
    ----------
    y_true : array-like of shape = (n_samples)
        Ground truth (correct) target values.

    y_pred : array-like of shape = (n_samples)
        Estimated target values.

    Returns
    -------
    loss : float
        A positive floating point value (the best value is 0.0).

    Examples
    --------
    >>> from sklearn.metrics import median_absolute_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> median_absolute_error(y_true, y_pred)
    0.5
    """
    y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
                                                   'uniform_average')
    # Only single-output targets are supported for this metric.
    if y_type == 'continuous-multioutput':
        raise ValueError("Multioutput not supported in median_absolute_error")
    absolute_errors = np.abs(y_pred - y_true)
    return np.median(absolute_errors)
def explained_variance_score(y_true, y_pred,
                             sample_weight=None,
                             multioutput='uniform_average'):
    """Explained variance regression score function.

    Best possible score is 1.0, lower values are worse.

    Read more in the :ref:`User Guide <explained_variance_score>`.

    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape = (n_samples), optional
        Sample weights.

    multioutput : string in ['raw_values', 'uniform_average', \
                'variance_weighted'] or array-like of shape (n_outputs)
        Defines how multiple output scores are aggregated; an array-like
        value gives per-output weights used to average the scores.

        'raw_values' :
            Returns a full set of scores in case of multioutput input.

        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.

        'variance_weighted' :
            Scores of all outputs are averaged, weighted by the variances
            of each individual output.

    Returns
    -------
    score : float or ndarray of floats
        The explained variance or ndarray if 'multioutput' is 'raw_values'.

    Notes
    -----
    This is not a symmetric function.

    Examples
    --------
    >>> from sklearn.metrics import explained_variance_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> explained_variance_score(y_true, y_pred)  # doctest: +ELLIPSIS
    0.957...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
    ... # doctest: +ELLIPSIS
    0.983...
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)
    # Weighted variance of the residuals (numerator) and of y_true
    # (denominator), computed per output with the same sample weighting.
    residual_mean = np.average(y_true - y_pred, weights=sample_weight, axis=0)
    residual_var = np.average((y_true - y_pred - residual_mean) ** 2,
                              weights=sample_weight, axis=0)
    true_mean = np.average(y_true, weights=sample_weight, axis=0)
    true_var = np.average((y_true - true_mean) ** 2,
                          weights=sample_weight, axis=0)
    nonzero_num = residual_var != 0
    nonzero_den = true_var != 0
    valid_score = nonzero_num & nonzero_den
    # Default to a perfect score (covers the 0/0 "constant and exact" case),
    # then fill in the well-defined entries.
    output_scores = np.ones(y_true.shape[1])
    output_scores[valid_score] = 1 - (residual_var[valid_score] /
                                      true_var[valid_score])
    # Constant y_true but imperfect predictions: define the score as 0.
    output_scores[nonzero_num & ~nonzero_den] = 0.
    if multioutput == 'raw_values':
        # return scores individually
        return output_scores
    if multioutput == 'uniform_average':
        # passing None as weights to np.average gives a uniform mean
        avg_weights = None
    elif multioutput == 'variance_weighted':
        avg_weights = true_var
    else:
        avg_weights = multioutput
    return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
             sample_weight=None,
             multioutput=None):
    """R^2 (coefficient of determination) regression score function.
    Best possible score is 1.0 and it can be negative (because the
    model can be arbitrarily worse). A constant model that always
    predicts the expected value of y, disregarding the input features,
    would get a R^2 score of 0.0.
    Read more in the :ref:`User Guide <r2_score>`.
    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.
    sample_weight : array-like of shape = (n_samples), optional
        Sample weights.
    multioutput : string in ['raw_values', 'uniform_average',
    'variance_weighted'] or None or array-like of shape (n_outputs)
        Defines aggregating of multiple output scores.
        Array-like value defines weights used to average scores.
        Default value correponds to 'variance_weighted', but
        will be changed to 'uniform_average' in next versions.
        'raw_values' :
            Returns a full set of scores in case of multioutput input.
        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.
        'variance_weighted' :
            Scores of all outputs are averaged, weighted by the variances
            of each individual output.
    Returns
    -------
    z : float or ndarray of floats
        The R^2 score or ndarray of scores if 'multioutput' is
        'raw_values'.
    Notes
    -----
    This is not a symmetric function.
    Unlike most other scores, R^2 score may be negative (it need not actually
    be the square of a quantity R).
    References
    ----------
    .. [1] `Wikipedia entry on the Coefficient of determination
            <http://en.wikipedia.org/wiki/Coefficient_of_determination>`_
    Examples
    --------
    >>> from sklearn.metrics import r2_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> r2_score(y_true, y_pred)  # doctest: +ELLIPSIS
    0.948...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> r2_score(y_true, y_pred, multioutput='variance_weighted')  # doctest: +ELLIPSIS
    0.938...
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
        # column vector so the weights broadcast across outputs
        weight = sample_weight[:, np.newaxis]
    else:
        weight = 1.
    # residual sum of squares per output
    numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
                                                      dtype=np.float64)
    # total (weighted) sum of squares per output
    denominator = (weight * (y_true - np.average(
        y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
                                                          dtype=np.float64)
    nonzero_denominator = denominator != 0
    nonzero_numerator = numerator != 0
    valid_score = nonzero_denominator & nonzero_numerator
    # default 1.0 covers the 0/0 case (constant y predicted exactly)
    output_scores = np.ones([y_true.shape[1]])
    output_scores[valid_score] = 1 - (numerator[valid_score] /
                                      denominator[valid_score])
    # arbitrary set to zero to avoid -inf scores, having a constant
    # y_true is not interesting for scoring a regression anyway
    output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
    # Deprecation shim: a None default on multi-output input historically
    # meant 'variance_weighted'; warn and substitute it explicitly.
    if multioutput is None and y_true.shape[1] != 1:
        # @FIXME change in 0.18
        warnings.warn("Default 'multioutput' behavior now corresponds to "
                      "'variance_weighted' value, it will be changed "
                      "to 'uniform_average' in 0.18.",
                      DeprecationWarning)
        multioutput = 'variance_weighted'
    if multioutput == 'raw_values':
        # return scores individually
        return output_scores
    elif multioutput == 'uniform_average':
        # passing None as weights results is uniform mean
        avg_weights = None
    elif multioutput == 'variance_weighted':
        avg_weights = denominator
        # avoid fail on constant y or one-element arrays
        if not np.any(nonzero_denominator):
            if not np.any(nonzero_numerator):
                return 1.0
            else:
                return 0.0
    else:
        avg_weights = multioutput
    return np.average(output_scores, weights=avg_weights)
| bsd-3-clause |
DGrady/pandas | pandas/tests/dtypes/test_dtypes.py | 10 | 18488 | # -*- coding: utf-8 -*-
import pytest
from itertools import product
import numpy as np
import pandas as pd
from pandas import Series, Categorical, IntervalIndex, date_range
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype, PeriodDtype,
IntervalDtype, CategoricalDtype)
from pandas.core.dtypes.common import (
is_categorical_dtype, is_categorical,
is_datetime64tz_dtype, is_datetimetz,
is_period_dtype, is_period,
is_dtype_equal, is_datetime64_ns_dtype,
is_datetime64_dtype, is_interval_dtype,
is_datetime64_any_dtype, is_string_dtype,
_coerce_to_dtype)
import pandas.util.testing as tm
class Base(object):
    """Shared checks for the concrete dtype test classes below.

    Subclasses implement ``create()`` returning the dtype under test;
    ``setup_method`` stores it on ``self.dtype``.
    """
    def setup_method(self, method):
        self.dtype = self.create()
    def test_hash(self):
        # dtypes must be hashable
        hash(self.dtype)
    def test_equality_invalid(self):
        # comparison with unrelated objects is False, never an error
        assert not self.dtype == 'foo'
        assert not is_dtype_equal(self.dtype, np.int64)
    def test_numpy_informed(self):
        # np.dtype() must reject pandas extension dtypes
        pytest.raises(TypeError, np.dtype, self.dtype)
        assert not self.dtype == np.str_
        assert not np.str_ == self.dtype
    def test_pickle(self):
        # make sure our cache is NOT pickled
        # clear the cache
        type(self.dtype).reset_cache()
        assert not len(self.dtype._cache)
        # force back to the cache
        result = tm.round_trip_pickle(self.dtype)
        assert not len(self.dtype._cache)
        assert result == self.dtype
class TestCategoricalDtype(Base):
    """Tests for CategoricalDtype: singleton identity, equality, caching."""
    def create(self):
        return CategoricalDtype()
    def test_pickle(self):
        # make sure our cache is NOT pickled
        # clear the cache
        type(self.dtype).reset_cache()
        assert not len(self.dtype._cache)
        # force back to the cache
        result = tm.round_trip_pickle(self.dtype)
        # we are a singular object so we are added
        # back to the cache upon unpickling
        # this is to ensure object identity
        assert len(self.dtype._cache) == 1
        assert result == self.dtype
    def test_hash_vs_equality(self):
        # make sure that we satisfy is semantics
        dtype = self.dtype
        dtype2 = CategoricalDtype()
        assert dtype == dtype2
        assert dtype2 == dtype
        # equal instances are the same cached object
        assert dtype is dtype2
        assert dtype2 is dtype
        assert hash(dtype) == hash(dtype2)
    def test_equality(self):
        assert is_dtype_equal(self.dtype, 'category')
        assert is_dtype_equal(self.dtype, CategoricalDtype())
        assert not is_dtype_equal(self.dtype, 'foo')
    def test_construction_from_string(self):
        result = CategoricalDtype.construct_from_string('category')
        assert is_dtype_equal(self.dtype, result)
        # unknown strings must raise TypeError
        pytest.raises(
            TypeError, lambda: CategoricalDtype.construct_from_string('foo'))
    def test_is_dtype(self):
        assert CategoricalDtype.is_dtype(self.dtype)
        assert CategoricalDtype.is_dtype('category')
        assert CategoricalDtype.is_dtype(CategoricalDtype())
        assert not CategoricalDtype.is_dtype('foo')
        assert not CategoricalDtype.is_dtype(np.float64)
    def test_basic(self):
        assert is_categorical_dtype(self.dtype)
        factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
        s = Series(factor, name='A')
        # dtypes
        assert is_categorical_dtype(s.dtype)
        assert is_categorical_dtype(s)
        assert not is_categorical_dtype(np.dtype('float64'))
        assert is_categorical(s.dtype)
        assert is_categorical(s)
        assert not is_categorical(np.dtype('float64'))
        assert not is_categorical(1.0)
class TestDatetimeTZDtype(Base):
    """Tests for DatetimeTZDtype: construction, identity caching, equality,
    string parsing and dtype-introspection helpers."""
    def create(self):
        return DatetimeTZDtype('ns', 'US/Eastern')
    def test_hash_vs_equality(self):
        # make sure that we satisfy is semantics
        dtype = self.dtype
        dtype2 = DatetimeTZDtype('ns', 'US/Eastern')
        dtype3 = DatetimeTZDtype(dtype2)
        assert dtype == dtype2
        assert dtype2 == dtype
        assert dtype3 == dtype
        # equal parametrizations share one cached instance
        assert dtype is dtype2
        assert dtype2 is dtype
        assert dtype3 is dtype
        assert hash(dtype) == hash(dtype2)
        assert hash(dtype) == hash(dtype3)
    def test_construction(self):
        # only nanosecond resolution is supported
        pytest.raises(ValueError,
                      lambda: DatetimeTZDtype('ms', 'US/Eastern'))
    def test_subclass(self):
        a = DatetimeTZDtype('datetime64[ns, US/Eastern]')
        b = DatetimeTZDtype('datetime64[ns, CET]')
        assert issubclass(type(a), type(a))
        assert issubclass(type(a), type(b))
    def test_coerce_to_dtype(self):
        assert (_coerce_to_dtype('datetime64[ns, US/Eastern]') ==
                DatetimeTZDtype('ns', 'US/Eastern'))
        assert (_coerce_to_dtype('datetime64[ns, Asia/Tokyo]') ==
                DatetimeTZDtype('ns', 'Asia/Tokyo'))
    def test_compat(self):
        # tz-aware dtype counts as "any datetime64" and "ns" but is NOT
        # a plain datetime64 dtype
        assert is_datetime64tz_dtype(self.dtype)
        assert is_datetime64tz_dtype('datetime64[ns, US/Eastern]')
        assert is_datetime64_any_dtype(self.dtype)
        assert is_datetime64_any_dtype('datetime64[ns, US/Eastern]')
        assert is_datetime64_ns_dtype(self.dtype)
        assert is_datetime64_ns_dtype('datetime64[ns, US/Eastern]')
        assert not is_datetime64_dtype(self.dtype)
        assert not is_datetime64_dtype('datetime64[ns, US/Eastern]')
    def test_construction_from_string(self):
        result = DatetimeTZDtype('datetime64[ns, US/Eastern]')
        assert is_dtype_equal(self.dtype, result)
        result = DatetimeTZDtype.construct_from_string(
            'datetime64[ns, US/Eastern]')
        assert is_dtype_equal(self.dtype, result)
        pytest.raises(TypeError,
                      lambda: DatetimeTZDtype.construct_from_string('foo'))
    def test_is_dtype(self):
        assert not DatetimeTZDtype.is_dtype(None)
        assert DatetimeTZDtype.is_dtype(self.dtype)
        assert DatetimeTZDtype.is_dtype('datetime64[ns, US/Eastern]')
        assert not DatetimeTZDtype.is_dtype('foo')
        assert DatetimeTZDtype.is_dtype(DatetimeTZDtype('ns', 'US/Pacific'))
        assert not DatetimeTZDtype.is_dtype(np.float64)
    def test_equality(self):
        assert is_dtype_equal(self.dtype, 'datetime64[ns, US/Eastern]')
        assert is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'US/Eastern'))
        assert not is_dtype_equal(self.dtype, 'foo')
        assert not is_dtype_equal(self.dtype, DatetimeTZDtype('ns', 'CET'))
        assert not is_dtype_equal(DatetimeTZDtype('ns', 'US/Eastern'),
                                  DatetimeTZDtype('ns', 'US/Pacific'))
        # numpy compat
        assert is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]")
    def test_basic(self):
        assert is_datetime64tz_dtype(self.dtype)
        dr = date_range('20130101', periods=3, tz='US/Eastern')
        s = Series(dr, name='A')
        # dtypes
        assert is_datetime64tz_dtype(s.dtype)
        assert is_datetime64tz_dtype(s)
        assert not is_datetime64tz_dtype(np.dtype('float64'))
        assert not is_datetime64tz_dtype(1.0)
        assert is_datetimetz(s)
        assert is_datetimetz(s.dtype)
        assert not is_datetimetz(np.dtype('float64'))
        assert not is_datetimetz(1.0)
    def test_dst(self):
        # winter and summer ranges in the same zone share one dtype
        dr1 = date_range('2013-01-01', periods=3, tz='US/Eastern')
        s1 = Series(dr1, name='A')
        assert is_datetimetz(s1)
        dr2 = date_range('2013-08-01', periods=3, tz='US/Eastern')
        s2 = Series(dr2, name='A')
        assert is_datetimetz(s2)
        assert s1.dtype == s2.dtype
    def test_parser(self):
        # pr #11245
        for tz, constructor in product(('UTC', 'US/Eastern'),
                                       ('M8', 'datetime64')):
            assert (DatetimeTZDtype('%s[ns, %s]' % (constructor, tz)) ==
                    DatetimeTZDtype('ns', tz))
    def test_empty(self):
        # an unparametrized dtype cannot be rendered as a string
        dt = DatetimeTZDtype()
        with pytest.raises(AttributeError):
            str(dt)
class TestPeriodDtype(Base):
    """Tests for PeriodDtype: freq parsing, identity caching, equality,
    and interaction with the is_period* introspection helpers."""
    def create(self):
        return PeriodDtype('D')
    def test_hash_vs_equality(self):
        # make sure that we satisfy is semantics
        dtype = self.dtype
        dtype2 = PeriodDtype('D')
        dtype3 = PeriodDtype(dtype2)
        assert dtype == dtype2
        assert dtype2 == dtype
        assert dtype3 == dtype
        # equal parametrizations share one cached instance
        assert dtype is dtype2
        assert dtype2 is dtype
        assert dtype3 is dtype
        assert hash(dtype) == hash(dtype2)
        assert hash(dtype) == hash(dtype3)
    def test_construction(self):
        with pytest.raises(ValueError):
            PeriodDtype('xx')
        # all spellings of a daily period resolve to the Day offset
        for s in ['period[D]', 'Period[D]', 'D']:
            dt = PeriodDtype(s)
            assert dt.freq == pd.tseries.offsets.Day()
            assert is_period_dtype(dt)
        for s in ['period[3D]', 'Period[3D]', '3D']:
            dt = PeriodDtype(s)
            assert dt.freq == pd.tseries.offsets.Day(3)
            assert is_period_dtype(dt)
        # compound freqs normalize (1D2H == 26H)
        for s in ['period[26H]', 'Period[26H]', '26H',
                  'period[1D2H]', 'Period[1D2H]', '1D2H']:
            dt = PeriodDtype(s)
            assert dt.freq == pd.tseries.offsets.Hour(26)
            assert is_period_dtype(dt)
    def test_subclass(self):
        a = PeriodDtype('period[D]')
        b = PeriodDtype('period[3D]')
        assert issubclass(type(a), type(a))
        assert issubclass(type(a), type(b))
    def test_identity(self):
        assert PeriodDtype('period[D]') == PeriodDtype('period[D]')
        assert PeriodDtype('period[D]') is PeriodDtype('period[D]')
        assert PeriodDtype('period[3D]') == PeriodDtype('period[3D]')
        assert PeriodDtype('period[3D]') is PeriodDtype('period[3D]')
        # equivalent freqs map to the same cached object
        assert PeriodDtype('period[1S1U]') == PeriodDtype('period[1000001U]')
        assert PeriodDtype('period[1S1U]') is PeriodDtype('period[1000001U]')
    def test_coerce_to_dtype(self):
        assert _coerce_to_dtype('period[D]') == PeriodDtype('period[D]')
        assert _coerce_to_dtype('period[3M]') == PeriodDtype('period[3M]')
    def test_compat(self):
        assert not is_datetime64_ns_dtype(self.dtype)
        assert not is_datetime64_ns_dtype('period[D]')
        assert not is_datetime64_dtype(self.dtype)
        assert not is_datetime64_dtype('period[D]')
    def test_construction_from_string(self):
        result = PeriodDtype('period[D]')
        assert is_dtype_equal(self.dtype, result)
        result = PeriodDtype.construct_from_string('period[D]')
        assert is_dtype_equal(self.dtype, result)
        with pytest.raises(TypeError):
            PeriodDtype.construct_from_string('foo')
        with pytest.raises(TypeError):
            PeriodDtype.construct_from_string('period[foo]')
        with pytest.raises(TypeError):
            PeriodDtype.construct_from_string('foo[D]')
        # other extension-dtype strings must not be accepted here
        with pytest.raises(TypeError):
            PeriodDtype.construct_from_string('datetime64[ns]')
        with pytest.raises(TypeError):
            PeriodDtype.construct_from_string('datetime64[ns, US/Eastern]')
    def test_is_dtype(self):
        assert PeriodDtype.is_dtype(self.dtype)
        assert PeriodDtype.is_dtype('period[D]')
        assert PeriodDtype.is_dtype('period[3D]')
        assert PeriodDtype.is_dtype(PeriodDtype('3D'))
        assert PeriodDtype.is_dtype('period[U]')
        assert PeriodDtype.is_dtype('period[S]')
        assert PeriodDtype.is_dtype(PeriodDtype('U'))
        assert PeriodDtype.is_dtype(PeriodDtype('S'))
        # bare freq strings are NOT period dtypes
        assert not PeriodDtype.is_dtype('D')
        assert not PeriodDtype.is_dtype('3D')
        assert not PeriodDtype.is_dtype('U')
        assert not PeriodDtype.is_dtype('S')
        assert not PeriodDtype.is_dtype('foo')
        assert not PeriodDtype.is_dtype(np.object_)
        assert not PeriodDtype.is_dtype(np.int64)
        assert not PeriodDtype.is_dtype(np.float64)
    def test_equality(self):
        assert is_dtype_equal(self.dtype, 'period[D]')
        assert is_dtype_equal(self.dtype, PeriodDtype('D'))
        assert is_dtype_equal(self.dtype, PeriodDtype('D'))
        assert is_dtype_equal(PeriodDtype('D'), PeriodDtype('D'))
        assert not is_dtype_equal(self.dtype, 'D')
        assert not is_dtype_equal(PeriodDtype('D'), PeriodDtype('2D'))
    def test_basic(self):
        assert is_period_dtype(self.dtype)
        pidx = pd.period_range('2013-01-01 09:00', periods=5, freq='H')
        assert is_period_dtype(pidx.dtype)
        assert is_period_dtype(pidx)
        assert is_period(pidx)
        s = Series(pidx, name='A')
        # dtypes
        # series results in object dtype currently,
        # is_period checks period_arraylike
        assert not is_period_dtype(s.dtype)
        assert not is_period_dtype(s)
        assert is_period(s)
        assert not is_period_dtype(np.dtype('float64'))
        assert not is_period_dtype(1.0)
        assert not is_period(np.dtype('float64'))
        assert not is_period(1.0)
    def test_empty(self):
        # an unparametrized dtype cannot be rendered as a string
        dt = PeriodDtype()
        with pytest.raises(AttributeError):
            str(dt)
    def test_not_string(self):
        # though PeriodDtype has object kind, it cannot be string
        assert not is_string_dtype(PeriodDtype('D'))
class TestIntervalDtype(Base):
    """Tests for IntervalDtype: subtype parsing, identity caching,
    equality, and the explicit cache-size checks in test_caching."""
    def create(self):
        return IntervalDtype('int64')
    def test_hash_vs_equality(self):
        # make sure that we satisfy is semantics
        dtype = self.dtype
        dtype2 = IntervalDtype('int64')
        dtype3 = IntervalDtype(dtype2)
        assert dtype == dtype2
        assert dtype2 == dtype
        assert dtype3 == dtype
        # equal parametrizations share one cached instance
        assert dtype is dtype2
        assert dtype2 is dtype
        assert dtype3 is dtype
        assert hash(dtype) == hash(dtype2)
        assert hash(dtype) == hash(dtype3)
        # the generic (no-subtype) dtype is also cached as a singleton
        dtype1 = IntervalDtype('interval')
        dtype2 = IntervalDtype(dtype1)
        dtype3 = IntervalDtype('interval')
        assert dtype2 == dtype1
        assert dtype2 == dtype2
        assert dtype2 == dtype3
        assert dtype2 is dtype1
        assert dtype2 is dtype2
        assert dtype2 is dtype3
        assert hash(dtype2) == hash(dtype1)
        assert hash(dtype2) == hash(dtype2)
        assert hash(dtype2) == hash(dtype3)
    def test_construction(self):
        with pytest.raises(ValueError):
            IntervalDtype('xx')
        for s in ['interval[int64]', 'Interval[int64]', 'int64']:
            i = IntervalDtype(s)
            assert i.subtype == np.dtype('int64')
            assert is_interval_dtype(i)
    def test_construction_generic(self):
        # generic
        i = IntervalDtype('interval')
        assert i.subtype == ''
        assert is_interval_dtype(i)
        assert str(i) == 'interval[]'
        i = IntervalDtype()
        assert i.subtype is None
        assert is_interval_dtype(i)
        assert str(i) == 'interval'
    def test_subclass(self):
        a = IntervalDtype('interval[int64]')
        b = IntervalDtype('interval[int64]')
        assert issubclass(type(a), type(a))
        assert issubclass(type(a), type(b))
    def test_is_dtype(self):
        assert IntervalDtype.is_dtype(self.dtype)
        assert IntervalDtype.is_dtype('interval')
        assert IntervalDtype.is_dtype(IntervalDtype('float64'))
        assert IntervalDtype.is_dtype(IntervalDtype('int64'))
        assert IntervalDtype.is_dtype(IntervalDtype(np.int64))
        assert not IntervalDtype.is_dtype('D')
        assert not IntervalDtype.is_dtype('3D')
        assert not IntervalDtype.is_dtype('U')
        assert not IntervalDtype.is_dtype('S')
        assert not IntervalDtype.is_dtype('foo')
        assert not IntervalDtype.is_dtype(np.object_)
        assert not IntervalDtype.is_dtype(np.int64)
        assert not IntervalDtype.is_dtype(np.float64)
    def test_identity(self):
        assert (IntervalDtype('interval[int64]') ==
                IntervalDtype('interval[int64]'))
    def test_coerce_to_dtype(self):
        assert (_coerce_to_dtype('interval[int64]') ==
                IntervalDtype('interval[int64]'))
    def test_construction_from_string(self):
        result = IntervalDtype('interval[int64]')
        assert is_dtype_equal(self.dtype, result)
        result = IntervalDtype.construct_from_string('interval[int64]')
        assert is_dtype_equal(self.dtype, result)
        with pytest.raises(TypeError):
            IntervalDtype.construct_from_string('foo')
        with pytest.raises(TypeError):
            IntervalDtype.construct_from_string('interval[foo]')
        with pytest.raises(TypeError):
            IntervalDtype.construct_from_string('foo[int64]')
    def test_equality(self):
        assert is_dtype_equal(self.dtype, 'interval[int64]')
        assert is_dtype_equal(self.dtype, IntervalDtype('int64'))
        assert is_dtype_equal(self.dtype, IntervalDtype('int64'))
        assert is_dtype_equal(IntervalDtype('int64'), IntervalDtype('int64'))
        assert not is_dtype_equal(self.dtype, 'int64')
        assert not is_dtype_equal(IntervalDtype('int64'),
                                  IntervalDtype('float64'))
    def test_basic(self):
        assert is_interval_dtype(self.dtype)
        ii = IntervalIndex.from_breaks(range(3))
        assert is_interval_dtype(ii.dtype)
        assert is_interval_dtype(ii)
        s = Series(ii, name='A')
        # dtypes
        # series results in object dtype currently,
        assert not is_interval_dtype(s.dtype)
        assert not is_interval_dtype(s)
    def test_basic_dtype(self):
        assert is_interval_dtype('interval[int64]')
        assert is_interval_dtype(IntervalIndex.from_tuples([(0, 1)]))
        assert is_interval_dtype(IntervalIndex.from_breaks(np.arange(4)))
        assert is_interval_dtype(IntervalIndex.from_breaks(
            date_range('20130101', periods=3)))
        assert not is_interval_dtype('U')
        assert not is_interval_dtype('S')
        assert not is_interval_dtype('foo')
        assert not is_interval_dtype(np.object_)
        assert not is_interval_dtype(np.int64)
        assert not is_interval_dtype(np.float64)
    def test_caching(self):
        # each distinct parametrization occupies one cache slot
        IntervalDtype.reset_cache()
        dtype = IntervalDtype("int64")
        assert len(IntervalDtype._cache) == 1
        IntervalDtype("interval")
        assert len(IntervalDtype._cache) == 2
        # pickling must not repopulate the cache
        IntervalDtype.reset_cache()
        tm.round_trip_pickle(dtype)
        assert len(IntervalDtype._cache) == 0
| bsd-3-clause |
stczhc/neupy | examples/gd/rectangles_mlp.py | 1 | 1025 | from sklearn import cross_validation, metrics
from skdata.larochelle_etal_2007 import dataset
from neupy import algorithms, layers, environment
# Train a small MLP on the Larochelle et al. (2007) rectangles benchmark
# and report classification metrics.
environment.reproducible()  # fix RNG seeds so runs are repeatable
rectangle_dataset = dataset.Rectangles()
rectangle_dataset.fetch(download_if_missing=True)
data, target = rectangle_dataset.classification_task()
# NOTE(review): sklearn.cross_validation is the pre-0.18 module; newer
# scikit-learn moved train_test_split to sklearn.model_selection.
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
    data, target, train_size=0.5
)
network = algorithms.MinibatchGradientDescent(
    [
        # 784 inputs -- presumably 28x28 flattened images; TODO confirm
        layers.Sigmoid(784),
        layers.Sigmoid(20),
        # rounds the sigmoid output to a hard 0/1 class label
        layers.RoundedOutput(1),
    ],
    error='binary_crossentropy',
    verbose=True,
    show_epoch=1,
    batch_size=1,  # pure stochastic gradient descent
)
network.train(x_train, y_train, x_test, y_test, epochs=10)
y_predicted = network.predict(x_test)
print(metrics.classification_report(y_test, y_predicted))
roc_score = metrics.roc_auc_score(y_test, y_predicted)
print("ROC score: {}".format(roc_score))
accuracy = metrics.accuracy_score(y_test, y_predicted)
print("Accuracy: {:.2f}%".format(accuracy * 100))
| mit |
rs2/pandas | pandas/tests/generic/test_frame.py | 2 | 7703 | from copy import deepcopy
from operator import methodcaller
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, MultiIndex, Series, date_range
import pandas._testing as tm
from .test_generic import Generic
class TestDataFrame(Generic):
_typ = DataFrame
_comparator = lambda self, x, y: tm.assert_frame_equal(x, y)
def test_rename_mi(self):
df = DataFrame(
[11, 21, 31],
index=MultiIndex.from_tuples([("A", x) for x in ["a", "B", "c"]]),
)
df.rename(str.lower)
@pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"])
def test_set_axis_name(self, func):
df = pd.DataFrame([[1, 2], [3, 4]])
result = methodcaller(func, "foo")(df)
assert df.index.name is None
assert result.index.name == "foo"
result = methodcaller(func, "cols", axis=1)(df)
assert df.columns.name is None
assert result.columns.name == "cols"
@pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"])
def test_set_axis_name_mi(self, func):
df = DataFrame(
np.empty((3, 3)),
index=MultiIndex.from_tuples([("A", x) for x in list("aBc")]),
columns=MultiIndex.from_tuples([("C", x) for x in list("xyz")]),
)
level_names = ["L1", "L2"]
result = methodcaller(func, level_names)(df)
assert result.index.names == level_names
assert result.columns.names == [None, None]
result = methodcaller(func, level_names, axis=1)(df)
assert result.columns.names == ["L1", "L2"]
assert result.index.names == [None, None]
def test_nonzero_single_element(self):
# allow single item via bool method
df = DataFrame([[True]])
assert df.bool()
df = DataFrame([[False]])
assert not df.bool()
df = DataFrame([[False, False]])
msg = "The truth value of a DataFrame is ambiguous"
with pytest.raises(ValueError, match=msg):
df.bool()
with pytest.raises(ValueError, match=msg):
bool(df)
def test_get_numeric_data_preserve_dtype(self):
# get the numeric data
o = DataFrame({"A": [1, "2", 3.0]})
result = o._get_numeric_data()
expected = DataFrame(index=[0, 1, 2], dtype=object)
self._compare(result, expected)
def test_metadata_propagation_indiv(self):
    # Check that _metadata attributes survive groupby/resample, and that
    # a custom __finalize__ can combine metadata across merge/concat.

    # groupby
    df = DataFrame(
        {
            "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
            "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
            "C": np.random.randn(8),
            "D": np.random.randn(8),
        }
    )
    result = df.groupby("A").sum()
    self.check_metadata(df, result)

    # resample
    df = DataFrame(
        np.random.randn(1000, 2),
        index=date_range("20130101", periods=1000, freq="s"),
    )
    result = df.resample("1T")
    self.check_metadata(df, result)

    # merging with override
    # GH 6923
    # Save the class-level hooks so they can be restored at the end.
    _metadata = DataFrame._metadata
    _finalize = DataFrame.__finalize__

    np.random.seed(10)
    df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=["a", "b"])
    df2 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=["c", "d"])
    DataFrame._metadata = ["filename"]
    df1.filename = "fname1.csv"
    df2.filename = "fname2.csv"

    def finalize(self, other, method=None, **kwargs):
        # On merge, combine both inputs' metadata with a "|" separator.
        for name in self._metadata:
            if method == "merge":
                left, right = other.left, other.right
                value = getattr(left, name, "") + "|" + getattr(right, name, "")
                object.__setattr__(self, name, value)
            else:
                object.__setattr__(self, name, getattr(other, name, ""))

        return self

    DataFrame.__finalize__ = finalize
    result = df1.merge(df2, left_on=["a"], right_on=["c"], how="inner")
    assert result.filename == "fname1.csv|fname2.csv"

    # concat
    # GH 6927
    DataFrame._metadata = ["filename"]
    df1 = DataFrame(np.random.randint(0, 4, (3, 2)), columns=list("ab"))
    df1.filename = "foo"

    def finalize(self, other, method=None, **kwargs):
        # On concat, join the metadata of every concatenated object
        # that has a truthy value, using "+".
        for name in self._metadata:
            if method == "concat":
                value = "+".join(
                    [getattr(o, name) for o in other.objs if getattr(o, name, None)]
                )
                object.__setattr__(self, name, value)
            else:
                object.__setattr__(self, name, getattr(other, name, None))

        return self

    DataFrame.__finalize__ = finalize
    result = pd.concat([df1, df1])
    assert result.filename == "foo+foo"

    # reset
    DataFrame._metadata = _metadata
    DataFrame.__finalize__ = _finalize  # FIXME: use monkeypatch
def test_set_attribute(self):
    # GH 8994: plain setattr must target the instance attribute, while
    # item assignment targets the same-named column; neither may
    # clobber the other.
    frame = DataFrame({"x": [1, 2, 3]})
    frame.y = 2
    frame["y"] = [2, 4, 6]
    frame.y = 5

    assert frame.y == 5
    tm.assert_series_equal(frame["y"], Series([2, 4, 6], name="y"))
def test_deepcopy_empty(self):
    # GH 15370: deep-copying a zero-row frame that still declares
    # columns must round-trip cleanly.
    original = DataFrame(data=[], index=[], columns=["A"])
    self._compare(deepcopy(original), original)
# formerly in Generic but only test DataFrame
class TestDataFrame2:
    # Generic-method tests that only apply to DataFrame.

    @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
    def test_validate_bool_args(self, value):
        # Passing a non-bool for ``inplace`` must raise ValueError for
        # every NDFrame method accepting the flag.  super() is used to
        # hit the NDFrame implementations directly.
        df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})

        msg = 'For argument "inplace" expected type bool, received type'
        with pytest.raises(ValueError, match=msg):
            super(DataFrame, df).rename_axis(
                mapper={"a": "x", "b": "y"}, axis=1, inplace=value
            )

        with pytest.raises(ValueError, match=msg):
            super(DataFrame, df).drop("a", axis=1, inplace=value)

        with pytest.raises(ValueError, match=msg):
            super(DataFrame, df)._consolidate(inplace=value)

        with pytest.raises(ValueError, match=msg):
            super(DataFrame, df).fillna(value=0, inplace=value)

        with pytest.raises(ValueError, match=msg):
            super(DataFrame, df).replace(to_replace=1, value=7, inplace=value)

        with pytest.raises(ValueError, match=msg):
            super(DataFrame, df).interpolate(inplace=value)

        with pytest.raises(ValueError, match=msg):
            super(DataFrame, df)._where(cond=df.a > 2, inplace=value)

        with pytest.raises(ValueError, match=msg):
            super(DataFrame, df).mask(cond=df.a > 2, inplace=value)

    def test_unexpected_keyword(self):
        # GH8597: misspelled keyword arguments (``in_place``) or
        # ``inplace`` passed to methods that do not accept it must
        # raise TypeError rather than being silently ignored.
        df = DataFrame(np.random.randn(5, 2), columns=["jim", "joe"])
        ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
        ts = df["joe"].copy()
        ts[2] = np.nan

        msg = "unexpected keyword"
        with pytest.raises(TypeError, match=msg):
            df.drop("joe", axis=1, in_place=True)  # deliberate typo kwarg

        with pytest.raises(TypeError, match=msg):
            df.reindex([1, 0], inplace=True)  # reindex has no inplace

        with pytest.raises(TypeError, match=msg):
            ca.fillna(0, inplace=True)  # Categorical.fillna has no inplace

        with pytest.raises(TypeError, match=msg):
            ts.fillna(0, in_place=True)  # deliberate typo kwarg
| bsd-3-clause |
reminisce/mxnet | example/speech_recognition/stt_utils.py | 8 | 5838 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import os.path
import numpy as np
from numpy.lib.stride_tricks import as_strided
import soundfile
def calc_feat_dim(window, max_freq):
    """Return the number of spectrogram feature bins for an FFT window
    of `window` milliseconds at a maximum frequency of `max_freq` Hz."""
    return 1 + int(0.001 * window * max_freq)
def conv_output_length(input_length, filter_size, border_mode, stride,
                       dilation=1):
    """ Compute the length of the output sequence after 1D convolution along
    time. Note that this function is in line with the function used in
    Convolution1D class from Keras.

    Params:
        input_length (int): Length of the input sequence.
        filter_size (int): Width of the convolution kernel.
        border_mode (str): Only support `same` or `valid`.
        stride (int): Stride size used in 1D convolution.
        dilation (int): Kernel dilation factor.

    Returns:
        int or None: Output length, or None if input_length is None.
    """
    if input_length is None:
        return None
    assert border_mode in {'same', 'valid'}
    # Effective kernel width once dilation gaps are inserted.
    effective_size = (filter_size - 1) * dilation + 1
    if border_mode == 'same':
        length = input_length
    else:  # 'valid'
        length = input_length - effective_size + 1
    # Ceiling division by the stride.
    return (length + stride - 1) // stride
def spectrogram(samples, fft_length=256, sample_rate=2, hop_length=128):
    """
    Compute the spectrogram for a real signal.
    The parameters follow the naming convention of
    matplotlib.mlab.specgram

    Args:
        samples (1D array): input audio signal
        fft_length (int): number of elements in fft window
        sample_rate (scalar): sample rate
        hop_length (int): hop length (relative offset between neighboring
            fft windows).

    Returns:
        x (2D array): spectrogram [frequency x time]
        freq (1D array): frequency of each row in x

    Note:
        This is a truncating computation e.g. if fft_length=10,
        hop_length=5 and the signal has 23 elements, then the
        last 3 elements will be truncated.
    """
    assert not np.iscomplexobj(samples), "Must not pass in complex numbers"

    # Hann window shaped (fft_length, 1) so it broadcasts over frames.
    window = np.hanning(fft_length)[:, None]
    window_norm = np.sum(window ** 2)

    # The scaling below follows the convention of
    # matplotlib.mlab.specgram which is the same as
    # matlabs specgram.
    scale = window_norm * sample_rate

    # Drop trailing samples that do not fill a whole hop.
    trunc = (len(samples) - fft_length) % hop_length
    x = samples[:len(samples) - trunc]

    # "stride trick" reshape to include overlap: each column is one frame
    # of fft_length samples, columns advance by hop_length, no data copied.
    nshape = (fft_length, (len(x) - fft_length) // hop_length + 1)
    nstrides = (x.strides[0], x.strides[0] * hop_length)
    x = as_strided(x, shape=nshape, strides=nstrides)

    # window stride sanity check: the second frame must equal the raw
    # samples starting exactly one hop into the signal.
    assert np.all(x[:, 1] == samples[hop_length:(hop_length + fft_length)])

    # broadcast window, take the one-sided real FFT over columns and
    # square the modulus to get power per frequency bin.
    x = np.fft.rfft(x * window, axis=0)
    x = np.absolute(x) ** 2

    # scale, 2.0 for everything except dc and fft_length/2 (those bins
    # have no mirrored counterpart in the one-sided spectrum).
    x[1:-1, :] *= (2.0 / scale)
    x[(0, -1), :] /= scale

    freqs = float(sample_rate) / fft_length * np.arange(x.shape[0])

    return x, freqs
def spectrogram_from_file(filename, step=10, window=20, max_freq=None,
                          eps=1e-14, overwrite=False, save_feature_as_csvfile=False):
    """ Calculate the log of linear spectrogram from FFT energy
    Params:
        filename (str): Path to the audio file
        step (int): Step size in milliseconds between windows
        window (int): FFT window size in milliseconds
        max_freq (int): Only FFT bins corresponding to frequencies between
            [0, max_freq] are returned
        eps (float): Small value to ensure numerical stability (for ln(x))
        overwrite (bool): Recompute features even if a cached CSV exists
        save_feature_as_csvfile (bool): Cache computed features next to the
            audio file as <name>.csv
    Returns:
        2D array of log-spectrogram features, [time x frequency]
    """
    # Features are cached beside the audio file as a same-named .csv.
    csvfilename = filename.replace(".wav", ".csv")
    if (os.path.isfile(csvfilename) is False) or overwrite:
        with soundfile.SoundFile(filename) as sound_file:
            audio = sound_file.read(dtype='float32')
            sample_rate = sound_file.samplerate
            # Mix multi-channel audio down to mono.
            if audio.ndim >= 2:
                audio = np.mean(audio, 1)
            if max_freq is None:
                max_freq = sample_rate / 2
            if max_freq > sample_rate / 2:
                raise ValueError("max_freq must not be greater than half of "
                                 " sample rate")
            if step > window:
                raise ValueError("step size must not be greater than window size")
            # Convert millisecond-based step/window to sample counts.
            hop_length = int(0.001 * step * sample_rate)
            fft_length = int(0.001 * window * sample_rate)
            pxx, freqs = spectrogram(
                audio, fft_length=fft_length, sample_rate=sample_rate,
                hop_length=hop_length)
            # Keep only bins at or below max_freq, transpose to
            # [time x frequency] and take the numerically-stabilized log.
            ind = np.where(freqs <= max_freq)[0][-1] + 1
            res = np.transpose(np.log(pxx[:ind, :] + eps))
            if save_feature_as_csvfile:
                np.savetxt(csvfilename, res)
            return res
    else:
        return np.loadtxt(csvfilename)
| apache-2.0 |
yanikou19/pymatgen | pymatgen/analysis/pourbaix/plotter.py | 4 | 24049 | # coding: utf-8
from __future__ import division, unicode_literals
"""
This module provides classes for plotting Pourbaix objects.
"""
import six
from six.moves import map
from six.moves import zip
__author__ = "Sai Jayaraman"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.1"
__maintainer__ = "Sai Jayaraman"
__email__ = "sjayaram@mit.edu"
__status__ = "Production"
__date__ = "Jan 26, 2012"
import numpy as np
import re
import collections
from pymatgen.analysis.pourbaix.analyzer import PourbaixAnalyzer
from pymatgen.analysis.pourbaix.maker import PREFAC
from pymatgen.analysis.pourbaix.entry import MultiEntry
from pymatgen.phasediagram.plotter import uniquelines
from pymatgen.util.string_utils import latexify
from pymatgen.util.plotting_utils import get_publication_quality_plot
from pymatgen.util.coord_utils import in_coord_list
class PourbaixPlotter(object):
"""
A plotter class for phase diagrams.
Args:
phasediagram: A PhaseDiagram object.
show_unstable: Whether unstable phases will be plotted as well as
red crosses. Defaults to False.
"""
def __init__(self, pourbaixdiagram, show_unstable=False):
    # Diagram whose entries/facets will be plotted.
    self._pd = pourbaixdiagram
    # Unique facet edges, precomputed once for hull plotting.
    self.lines = uniquelines(self._pd.facets)
    self.show_unstable = show_unstable
@property
def pourbaix_hull_plot_data(self):
    """
    Pourbaix diagram convex hull data.

    Returns:
        (lines, stable_entries, unstable_entries)
        - lines is a list of list of coordinates for lines in the PD.
        - stable_entries is a {coordinate : entry} for each stable node
            in the phase diagram. (Each coordinate can only have one
            stable phase)
        - unstable_entries is a {entry: coordinates} for all unstable
            nodes in the phase diagram.
    """
    pd = self._pd
    entries = pd.qhull_entries
    data = np.array(pd.qhull_data)
    facetlines = self.lines
    lines = list()
    stable_entries = dict()
    # Each facet edge becomes one 3D segment; both endpoints are
    # registered as stable nodes keyed by their (x, y, z) coordinate.
    for line in facetlines:
        entry1 = entries[line[0]]
        entry2 = entries[line[1]]
        x = [data[line[0]][0], data[line[1]][0]]
        y = [data[line[0]][1], data[line[1]][1]]
        z = [data[line[0]][2], data[line[1]][2]]
        coord = [x, y, z]
        lines.append(coord)
        labelcoord = list(zip(*coord))
        stable_entries[labelcoord[0]] = entry1
        stable_entries[labelcoord[1]] = entry2

    # Entries not on the hull are recorded as degenerate point segments.
    allentries = pd.all_entries
    alldata = np.array(pd.qhull_data)
    unstable_entries = dict()
    stable = pd.stable_entries
    for i in range(len(allentries)):
        entry = allentries[i]
        if entry not in stable:
            x = [alldata[i][0], alldata[i][0]]
            y = [alldata[i][1], alldata[i][1]]
            z = [alldata[i][2], alldata[i][2]]
            coord = [x, y, z]
            labelcoord = list(zip(*coord))
            unstable_entries[entry] = labelcoord[0]
    return lines, stable_entries, unstable_entries
def show(self, label_stable=True, label_unstable=False, filename=""):
    """
    Draw the convex hull diagram with matplotlib; display it
    interactively, or save it when a filename is supplied.
    """
    figure = self._get_plot(label_stable=label_stable,
                            label_unstable=label_unstable)
    if filename == "":
        figure.show()
    else:
        figure.savefig(filename, bbox_inches=0)
def _get_plot(self, label_stable=True, label_unstable=False):
    """
    Plot convex hull of Pourbaix Diagram entries.

    Args:
        label_stable: Number stable vertices and list them in a legend.
        label_unstable: Also number and list unstable entries.

    Returns:
        matplotlib.pyplot module with the 3D hull drawn.
    """
    import matplotlib.pyplot as plt
    import mpl_toolkits.mplot3d.axes3d as p3
    from matplotlib.font_manager import FontProperties
    fig = plt.figure()
    ax = p3.Axes3D(fig)
    font = FontProperties()
    font.set_weight("bold")
    font.set_size(14)
    (lines, labels, unstable) = self.pourbaix_hull_plot_data
    count = 1
    newlabels = list()
    # Draw the hull edges.
    for x, y, z in lines:
        ax.plot(x, y, z, "bo-", linewidth=3, markeredgecolor="b",
                markerfacecolor="r", markersize=10)
    # Number each stable vertex and build "<n> : <formula>" legend lines.
    for coords in sorted(labels.keys()):
        entry = labels[coords]
        label = self.print_name(entry)
        if label_stable:
            ax.text(coords[0], coords[1], coords[2], str(count))
            newlabels.append("{} : {}".format(
                count, latexify_ion(latexify(label))))
            count += 1
    if label_unstable:
        # Unstable entries are drawn as green points, numbered after the
        # stable ones.
        for entry in unstable.keys():
            label = self.print_name(entry)
            coords = unstable[entry]
            ax.plot([coords[0], coords[0]], [coords[1], coords[1]],
                    [coords[2], coords[2]], "bo", markerfacecolor="g",
                    markersize=10)
            ax.text(coords[0], coords[1], coords[2], str(count))
            newlabels.append("{} : {}".format(
                count, latexify_ion(latexify(label))))
            count += 1
    plt.figtext(0.01, 0.01, "\n".join(newlabels))
    plt.xlabel("pH")
    plt.ylabel("V")
    return plt
def plot_planes(self):
    """
    Plot the free energy planes as a function of pH and V.

    Each entry contributes one plane g(pH, V) defined by its npH and
    nPhi coefficients and g0 offset; stable entries (or all entries if
    show_unstable is set) are drawn as colored surfaces.
    """
    if self.show_unstable:
        entries = self._pd._all_entries
    else:
        entries = self._pd.stable_entries
    num_plots = len(entries)
    import matplotlib.pyplot as plt
    colormap = plt.cm.gist_ncar
    fig = plt.figure().gca(projection='3d')
    # One distinct color per entry.
    color_array = [colormap(i) for i in np.linspace(0, 0.9, num_plots)]
    labels = []
    color_index = -1
    for entry in entries:
        # Plane equation: g = -PREFAC*npH*pH - nPhi*V + g0.
        normal = np.array([-PREFAC * entry.npH, -entry.nPhi, +1])
        d = entry.g0
        color_index += 1
        pH, V = np.meshgrid(np.linspace(-10, 28, 100),
                            np.linspace(-3, 3, 100))
        g = (-normal[0] * pH - normal[1] * V + d) / normal[2]
        lbl = latexify_ion(
            latexify(entry._entry.composition.reduced_formula))
        labels.append(lbl)
        fig.plot_surface(pH, V, g, color=color_array[color_index],
                         label=lbl)
    plt.legend(labels)
    plt.xlabel("pH")
    plt.ylabel("E (V)")
    plt.show()
def plot_chempot_range_map(self, limits=None, title="", filename=""):
    # Backward-compatible alias for plot_pourbaix.
    self.plot_pourbaix(limits, title, filename)
def plot_pourbaix(self, limits=None, title="", filename="", label_domains=True):
    """
    Plot the Pourbaix diagram and either display it interactively or
    save it to disk.

    Args:
        limits: 2D list [[xlo, xhi], [ylo, yhi]] bounding the diagram.
        title: Plot title.
        filename: If non-empty, save the figure to this path instead of
            showing it.
        label_domains: Whether to annotate each stability domain.
    """
    plt = self.get_pourbaix_plot(limits=limits, title=title,
                                 label_domains=label_domains)
    if filename == "":
        plt.show()
    else:
        f = plt.gcf()
        f.set_size_inches((11.5, 9))
        plt.tight_layout(pad=1.09)
        # Bug fix: the figure was resized and laid out but never written
        # to disk, so passing a filename silently produced no output.
        plt.savefig(filename)
def pourbaix_plot_data(self, limits=None):
    """
    Get data required to plot Pourbaix diagram.

    Args:
        limits: 2D list containing limits of the Pourbaix diagram
            of the form [[xlo, xhi], [ylo, yhi]]

    Returns:
        stable_entries, unstable_entries
        stable_entries: dict of lines. The keys are Pourbaix Entries, and
            lines are in the form of a list
        unstable_entries: list of unstable entries
    """
    analyzer = PourbaixAnalyzer(self._pd)
    # Cached for later helpers (get_pourbaix_plot, domain_vertices, ...).
    self._analyzer = analyzer
    if limits:
        analyzer.chempot_limits = limits
    chempot_ranges = analyzer.get_chempot_range_map(limits)
    self.chempot_ranges = chempot_ranges
    # Flatten each entry's chempot range into [xs, ys] segments.
    stable_entries_list = collections.defaultdict(list)
    for entry in chempot_ranges:
        for line in chempot_ranges[entry]:
            x = [line.coords[0][0], line.coords[1][0]]
            y = [line.coords[0][1], line.coords[1][1]]
            coords = [x, y]
            stable_entries_list[entry].append(coords)
    unstable_entries_list = [entry for entry in self._pd.all_entries
                             if entry not in self._pd.stable_entries]
    return stable_entries_list, unstable_entries_list
def get_center(self, lines):
    """
    Return the centroid of a domain's unique vertices, used to place
    the domain's label on a Pourbaix plot.

    Args:
        lines: Line segments ([xs, ys]) bounding the domain.

    Returns:
        (center_x, center_y) tuple.
    """
    seen_vertices = []
    x_total = 0.0
    y_total = 0.0
    n_vertices = 0.0
    for segment in lines:
        for vertex in np.array(segment).T:
            if in_coord_list(seen_vertices, vertex):
                continue  # shared endpoint already counted
            seen_vertices.append(vertex.tolist())
            x_total += vertex[0]
            y_total += vertex[1]
            n_vertices += 1.0
    # Guard so an empty domain never divides by zero.
    if n_vertices == 0.0:
        n_vertices = 1.0
    return x_total / n_vertices, y_total / n_vertices
def get_pourbaix_plot(self, limits=None, title="", label_domains=True):
    """
    Plot Pourbaix diagram.

    Args:
        limits: 2D list containing limits of the Pourbaix diagram
            of the form [[xlo, xhi], [ylo, yhi]]
        title: Title of the plot.
        label_domains: Whether to annotate each stability domain.

    Returns:
        plt: matplotlib plot object
    """
    # plt = get_publication_quality_plot(24, 14.4)
    plt = get_publication_quality_plot(16)
    (stable, unstable) = self.pourbaix_plot_data(limits)
    if limits:
        xlim = limits[0]
        ylim = limits[1]
    else:
        xlim = self._analyzer.chempot_limits[0]
        ylim = self._analyzer.chempot_limits[1]
    # Reference lines: H2/H2O and O2/H2O redox couples (slope -PREFAC),
    # neutral pH = 7, and V = 0.
    h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
                           [xlim[1], -xlim[1] * PREFAC]])
    o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
                           [xlim[1], -xlim[1] * PREFAC + 1.23]])
    neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
    V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])

    ax = plt.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    lw = 3
    plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
    plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
    plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
    plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)

    for entry, lines in stable.items():
        for line in lines:
            (x, y) = line
            plt.plot(x, y, "k-", linewidth=lw)
        # Consistency fix: reuse get_center() instead of the verbatim
        # inline copy of the same centroid computation.
        center_x, center_y = self.get_center(lines)
        # Skip labels whose centroid falls outside the plot window.
        if ((center_x <= xlim[0]) | (center_x >= xlim[1]) |
                (center_y <= ylim[0]) | (center_y >= ylim[1])):
            continue
        if label_domains:
            plt.annotate(self.print_name(entry), (center_x, center_y),
                         fontsize=20, color="b")

    plt.xlabel("pH")
    plt.ylabel("E (V)")
    plt.title(title, fontsize=20, fontweight='bold')
    return plt
def print_name(self, entry):
    """
    Return a LaTeX-ified display name for an entry.

    Single entries return their own formula.  MultiEntries return a
    " + "-joined list of member names, falling back to the entry's
    index in qhull_entries when there are more than two members.
    """
    str_name = ""
    if isinstance(entry, MultiEntry):
        if len(entry.entrylist) > 2:
            return str(self._pd.qhull_entries.index(entry))
        for e in entry.entrylist:
            str_name += latexify_ion(latexify(e.name)) + " + "
        str_name = str_name[:-3]  # drop trailing " + "
        return str_name
    else:
        return latexify_ion(latexify(entry.name))
def legend(self, label_unstable=False, legend_file=""):
    """
    Build a legend string mapping hull indices to species names,
    optionally writing it to ``legend_file``.

    NOTE(review): ``list_of_entries`` is only built in the multielement
    branch and ``f`` is only opened when ``legend_file`` is non-empty —
    confirm the intended behavior for the other combinations.
    """
    if self._pd._multielement:
        unprocessed_entries = self._pd.unprocessed_entries
        set_of_entries = set()
        list_of_entries = {}
        for entry in self._pd.stable_entries:
            index_ent = self._pd.qhull_entries.index(entry)
            str_ename = ""
            for e in entry.entrylist:
                str_ename += e.name + " + "
                # Record the index of each matching unprocessed entry.
                # NOTE(review): this `continue` affects only the inner
                # loop — presumably a `break` was intended; confirm.
                for ent in unprocessed_entries:
                    if ent.name == e.name:
                        indx = unprocessed_entries.index(ent)
                        set_of_entries.add(indx)
                        continue
            str_ename = str_ename[:-3]  # drop trailing " + "
            list_of_entries[index_ent] = str_ename
        if label_unstable:
            # Also collect indices for entries off the hull.
            for entry in [entry for entry in self._pd.all_entries
                          if entry not in self._pd.stable_entries]:
                for e in entry.entrylist:
                    indx = unprocessed_entries.index(e)
                    set_of_entries.add(indx)
    str_labels = " Species: \n"
    if legend_file:
        f = open(legend_file, 'w')
    for i in list_of_entries.keys():
        str_labels += str(i) + " : " + list_of_entries[i] + "\n"
    f.write(str_labels)
    f.close()
    return str_labels
def write_image(self, plt, stream, image_format="svg"):
    """
    Writes the phase diagram to an image in a stream.

    Args:
        plt: matplotlib plot object to serialize.
        stream: stream to write to. Can be a file stream or a StringIO
            stream.
        image_format: format for image. Can be any of matplotlib
            supported formats. Defaults to svg for best results for
            vector graphics.
    """
    f = plt.gcf()
    f.set_size_inches((12, 10))
    plt.tight_layout(pad=1.09)
    plt.savefig(stream, format=image_format)
def domain_vertices(self, entry):
"""
Returns the vertices of the Pourbaix domain.
Args:
entry: Entry for which domain vertices are desired
Returns:
list of vertices
"""
if entry not in self._analyzer.pourbaix_domain_vertices.keys():
return []
return self._analyzer.pourbaix_domain_vertices[entry]
def get_pourbaix_plot_colorfill_by_element(self, limits=None, title="",
                                           label_domains=True, element=None):
    """
    Plot the Pourbaix diagram with domains grouped by the species of
    `element` they contain: single-element species get solid color
    fills, multi-element species get hatched polygons.

    Args:
        limits: 2D list [[xlo, xhi], [ylo, yhi]] bounding the diagram.
        title: Plot title.
        label_domains: Whether to annotate each domain.
        element: Element used to group/color the entries.

    Returns:
        matplotlib plot object.
    """
    from matplotlib.patches import Polygon

    entry_dict_of_multientries = collections.defaultdict(list)
    plt = get_publication_quality_plot(16)
    optim_colors = ['#0000FF', '#FF0000', '#00FF00', '#FFFF00', '#FF00FF',
                    '#FF8080', '#DCDCDC', '#800000', '#FF8000']
    optim_font_color = ['#FFFFA0', '#00FFFF', '#FF00FF', '#0000FF', '#00FF00',
                        '#007F7F', '#232323', '#7FFFFF', '#007FFF']
    hatch = ['/', '\\', '|', '-', '+', 'o', '*']
    (stable, unstable) = self.pourbaix_plot_data(limits)
    num_of_overlaps = {key: 0 for key in stable.keys()}
    # Group each stable (multi)entry under the name of its member
    # species that contains `element`.
    for entry in stable:
        if isinstance(entry, MultiEntry):
            for e in entry.entrylist:
                if element in e.composition.elements:
                    entry_dict_of_multientries[e.name].append(entry)
                    num_of_overlaps[entry] += 1
        else:
            entry_dict_of_multientries[entry.name].append(entry)
    if limits:
        xlim = limits[0]
        ylim = limits[1]
    else:
        xlim = self._analyzer.chempot_limits[0]
        ylim = self._analyzer.chempot_limits[1]
    h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
                           [xlim[1], -xlim[1] * PREFAC]])
    o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
                           [xlim[1], -xlim[1] * PREFAC + 1.23]])
    neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
    V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
    ax = plt.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    from pymatgen import Composition, Element
    from pymatgen.core.ion import Ion

    def len_elts(entry):
        # Number of non-H/O elements in a formula such as "Fe2O3(s)".
        if "(s)" in entry:
            comp = Composition(entry[:-3])
        else:
            comp = Ion.from_formula(entry)
        return len([el for el in comp.elements if el not in
                    [Element("H"), Element("O")]])

    # Bug fix: `keys().sort(...)` raises AttributeError on Python 3
    # (dict views have no sort); sorted() keeps the same ordering and
    # works on both Python 2 and 3.
    sorted_entry = sorted(entry_dict_of_multientries.keys(), key=len_elts)
    i = -1
    label_chr = map(chr, list(range(65, 91)))
    for entry in sorted_entry:
        color_indx = 0
        x_coord = 0.0
        y_coord = 0.0
        npts = 0
        i += 1
        for e in entry_dict_of_multientries[entry]:
            hc = 0
            fc = 0
            bc = 0
            xy = self.domain_vertices(e)
            c = self.get_center(stable[e])
            x_coord += c[0]
            y_coord += c[1]
            npts += 1
            color_indx = i
            if "(s)" in entry:
                comp = Composition(entry[:-3])
            else:
                comp = Ion.from_formula(entry)
            if len([el for el in comp.elements if el not in
                    [Element("H"), Element("O")]]) == 1:
                # Single-element species: solid color fill (wrap the
                # palette index when it overflows).
                if color_indx >= len(optim_colors):
                    color_indx = color_indx -\
                        int(color_indx / len(optim_colors)) * len(optim_colors)
                patch = Polygon(xy, facecolor=optim_colors[color_indx],
                                closed=True, lw=3.0, fill=True)
                bc = optim_colors[color_indx]
            else:
                # Multi-element species: hatched, unfilled polygon.
                if color_indx >= len(hatch):
                    color_indx = color_indx - int(color_indx / len(hatch)) * len(hatch)
                patch = Polygon(xy, hatch=hatch[color_indx], closed=True,
                                lw=3.0, fill=False)
                hc = hatch[color_indx]
            ax.add_patch(patch)
        # Label at the mean center of all domains grouped under this name.
        xy_center = (x_coord / npts, y_coord / npts)
        if label_domains:
            if color_indx >= len(optim_colors):
                color_indx = color_indx -\
                    int(color_indx / len(optim_colors)) * len(optim_colors)
            fc = optim_font_color[color_indx]
            if bc and not hc:
                bbox = dict(boxstyle="round", fc=fc)
            if hc and not bc:
                bc = 'k'
                fc = 'w'
                bbox = dict(boxstyle="round", hatch=hc, fill=False)
            if bc and hc:
                bbox = dict(boxstyle="round", hatch=hc, fc=fc)
            # bbox.set_path_effects([PathEffects.withSimplePatchShadow()])
            plt.annotate(latexify_ion(latexify(entry)), xy_center,
                         color=bc, fontsize=30, bbox=bbox)
            # plt.annotate(label_chr[i], xy_center,
            #              color=bc, fontsize=30, bbox=bbox)
    lw = 3
    plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
    plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
    plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
    plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)
    plt.xlabel("pH")
    plt.ylabel("E (V)")
    plt.title(title, fontsize=20, fontweight='bold')
    return plt
def get_pourbaix_mark_passive(self, limits=None, title="", label_domains=True, passive_entry=None):
    """
    Plot the Pourbaix diagram, color-marking passivating solid domains.

    Domains of ``passive_entry``'s solid are marked with one color,
    other solids containing H or O with another; everything else is
    left unfilled.

    Args:
        limits: 2D list [[xlo, xhi], [ylo, yhi]] bounding the diagram.
        title: Plot title.
        label_domains: Whether to annotate each domain.
        passive_entry: Name of the passivating species; if the diagram
            has an element composition, the most abundant element is
            used instead.

    Returns:
        matplotlib plot object.
    """
    from matplotlib.patches import Polygon
    from pymatgen import Element
    from itertools import chain
    import operator
    plt = get_publication_quality_plot(16)
    optim_colors = ['#0000FF', '#FF0000', '#00FF00', '#FFFF00', '#FF00FF',
                    '#FF8080', '#DCDCDC', '#800000', '#FF8000']
    optim_font_colors = ['#FFC000', '#00FFFF', '#FF00FF', '#0000FF', '#00FF00',
                         '#007F7F', '#232323', '#7FFFFF', '#007FFF']
    (stable, unstable) = self.pourbaix_plot_data(limits)
    # Marking scheme: 0 = plain, 1 = solid containing H/O, 2 = the
    # passive entry's own solid.
    mark_passive = {key: 0 for key in stable.keys()}
    if self._pd._elt_comp:
        # Override passive_entry with the most abundant element of the
        # diagram's composition.
        maxval = max(six.iteritems(self._pd._elt_comp), key=operator.itemgetter(1))[1]
        key = [k for k, v in self._pd._elt_comp.items() if v == maxval]
        passive_entry = key[0]

    def list_elts(entry):
        # Union of all elements across an entry's members (MultiEntry)
        # or the entry's own composition.
        elts_list = set()
        if isinstance(entry, MultiEntry):
            for el in chain.from_iterable([[el for el in e.composition.elements]
                                           for e in entry.entrylist]):
                elts_list.add(el)
        else:
            elts_list = entry.composition.elements
        return elts_list

    for entry in stable:
        if passive_entry + str("(s)") in entry.name:
            mark_passive[entry] = 2
            continue
        if "(s)" not in entry.name:
            continue
        elif len(set([Element("O"), Element("H")]).intersection(set(list_elts(entry)))) > 0:
            mark_passive[entry] = 1
    if limits:
        xlim = limits[0]
        ylim = limits[1]
    else:
        xlim = self._analyzer.chempot_limits[0]
        ylim = self._analyzer.chempot_limits[1]
    # Reference lines: H2/H2O and O2/H2O couples, neutral pH, 0 V.
    h_line = np.transpose([[xlim[0], -xlim[0] * PREFAC],
                           [xlim[1], -xlim[1] * PREFAC]])
    o_line = np.transpose([[xlim[0], -xlim[0] * PREFAC + 1.23],
                           [xlim[1], -xlim[1] * PREFAC + 1.23]])
    neutral_line = np.transpose([[7, ylim[0]], [7, ylim[1]]])
    V0_line = np.transpose([[xlim[0], 0], [xlim[1], 0]])
    ax = plt.gca()
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    for e in stable.keys():
        xy = self.domain_vertices(e)
        c = self.get_center(stable[e])
        if mark_passive[e] == 1:
            color = optim_colors[0]
            fontcolor = optim_font_colors[0]
            colorfill = True
        elif mark_passive[e] == 2:
            color = optim_colors[1]
            fontcolor = optim_font_colors[1]
            colorfill = True
        else:
            color = "w"
            colorfill = False
            fontcolor = "k"
        patch = Polygon(xy, facecolor=color, closed=True, lw=3.0, fill=colorfill)
        ax.add_patch(patch)
        if label_domains:
            plt.annotate(self.print_name(e), c, color=fontcolor, fontsize=20)
    lw = 3
    plt.plot(h_line[0], h_line[1], "r--", linewidth=lw)
    plt.plot(o_line[0], o_line[1], "r--", linewidth=lw)
    plt.plot(neutral_line[0], neutral_line[1], "k-.", linewidth=lw)
    plt.plot(V0_line[0], V0_line[1], "k-.", linewidth=lw)
    plt.xlabel("pH")
    plt.ylabel("E (V)")
    plt.title(title, fontsize=20, fontweight='bold')
    return plt
def latexify_ion(formula):
    """
    Convert bracketed charges such as "Fe[2+]" into LaTeX superscripts
    ("Fe$^{2+}$").

    Bug fix: the previous pattern ``[^)]*`` was greedy across ``]``, so
    a formula with two bracketed charges collapsed into one superscript;
    ``[^\\]]*`` stops at the first closing bracket.  The pointless empty
    capture group is also dropped.
    """
    return re.sub(r"\[([^\]]*)\]", r"$^{\1}$", formula)
| mit |
valexandersaulys/prudential_insurance_kaggle | venv/lib/python2.7/site-packages/pandas/tests/test_series.py | 9 | 288883 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import re
import sys
from datetime import datetime, timedelta
import operator
import string
from inspect import getargspec
from itertools import product, starmap
from distutils.version import LooseVersion
import warnings
import random
import nose
from numpy import nan, inf
import numpy as np
import numpy.ma as ma
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, notnull, bdate_range,
date_range, period_range, timedelta_range, _np_version_under1p8)
from pandas.core.index import MultiIndex
from pandas.core.indexing import IndexingError
from pandas.tseries.period import PeriodIndex
from pandas.tseries.index import Timestamp, DatetimeIndex
from pandas.tseries.tdi import Timedelta, TimedeltaIndex
import pandas.core.common as com
import pandas.core.config as cf
import pandas.lib as lib
import pandas.core.datetools as datetools
import pandas.core.nanops as nanops
from pandas.compat import StringIO, lrange, range, zip, u, OrderedDict, long
from pandas import compat
from pandas.util.testing import (assert_series_equal,
assert_almost_equal,
assert_frame_equal,
assert_index_equal,
ensure_clean)
import pandas.util.testing as tm
#------------------------------------------------------------------------------
# Series test cases
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class CheckNameIntegration(object):
_multiprocess_can_split_ = True
def test_scalarop_preserve_name(self):
    # Multiplying by a scalar must not drop the series name.
    doubled = self.ts * 2
    self.assertEqual(doubled.name, self.ts.name)
def test_copy_name(self):
    # .copy() must carry the name over to the new object.
    duplicate = self.ts.copy()
    self.assertEqual(duplicate.name, self.ts.name)
def test_copy_index_name_checking(self):
    # Renaming the index of a copy must not leak back into the
    # original series' index (the index is stored elsewhere too).
    self.ts.index.name = None
    self.assertIsNone(self.ts.index.name)
    self.assertIs(self.ts, self.ts)

    clone = self.ts.copy()
    clone.index.name = 'foo'
    com.pprint_thing(self.ts.index.name)
    self.assertIsNone(self.ts.index.name)
def test_append_preserve_name(self):
    # Re-joining two halves of the same named series keeps the name.
    recombined = self.ts[:5].append(self.ts[5:])
    self.assertEqual(recombined.name, self.ts.name)
def test_dt_namespace_accessor(self):
    # GH 7207
    # test .dt namespace accessor: every property/method exposed on the
    # .dt accessor must match the equivalent Index accessor, for
    # datetime, timezone-aware datetime, timedelta and period data.

    ok_for_base = ['year','month','day','hour','minute','second','weekofyear','week','dayofweek','weekday','dayofyear','quarter','freq','days_in_month','daysinmonth']
    ok_for_period = ok_for_base + ['qyear']
    ok_for_period_methods = ['strftime']
    ok_for_dt = ok_for_base + ['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start',
                               'is_quarter_end', 'is_year_start', 'is_year_end', 'tz']
    ok_for_dt_methods = ['to_period','to_pydatetime','tz_localize','tz_convert', 'normalize', 'strftime']
    ok_for_td = ['days','seconds','microseconds','nanoseconds']
    ok_for_td_methods = ['components','to_pytimedelta','total_seconds']

    def get_expected(s, name):
        # Expected values come from the equivalent Index accessor.
        # NOTE(review): this reads the enclosing loop variable `prop`,
        # not its `name` parameter — presumably intentional; confirm.
        result = getattr(Index(s._values),prop)
        if isinstance(result, np.ndarray):
            if com.is_integer_dtype(result):
                result = result.astype('int64')
        elif not com.is_list_like(result):
            return result
        return Series(result,index=s.index)

    def compare(s, name):
        # Compare the .dt accessor against the Index-derived expected.
        a = getattr(s.dt,prop)
        b = get_expected(s,prop)
        if not (com.is_list_like(a) and com.is_list_like(b)):
            self.assertEqual(a,b)
        else:
            tm.assert_series_equal(a,b)

    # datetimeindex
    for s in [Series(date_range('20130101',periods=5)),
              Series(date_range('20130101',periods=5,freq='s')),
              Series(date_range('20130101 00:00:00',periods=5,freq='ms'))]:
        for prop in ok_for_dt:
            # we test freq below
            if prop != 'freq':
                compare(s, prop)

        for prop in ok_for_dt_methods:
            getattr(s.dt, prop)

        result = s.dt.to_pydatetime()
        self.assertIsInstance(result,np.ndarray)
        self.assertTrue(result.dtype == object)

        result = s.dt.tz_localize('US/Eastern')
        expected = Series(DatetimeIndex(s.values).tz_localize('US/Eastern'),index=s.index)
        tm.assert_series_equal(result, expected)

        tz_result = result.dt.tz
        self.assertEqual(str(tz_result), 'US/Eastern')
        freq_result = s.dt.freq
        self.assertEqual(freq_result, DatetimeIndex(s.values, freq='infer').freq)

        # let's localize, then convert
        result = s.dt.tz_localize('UTC').dt.tz_convert('US/Eastern')
        expected = Series(DatetimeIndex(s.values).tz_localize('UTC').tz_convert('US/Eastern'),index=s.index)
        tm.assert_series_equal(result, expected)

    # datetimeindex with tz
    s = Series(date_range('20130101',periods=5,tz='US/Eastern'))
    for prop in ok_for_dt:
        # we test freq below
        if prop != 'freq':
            compare(s, prop)

    for prop in ok_for_dt_methods:
        getattr(s.dt,prop)

    result = s.dt.to_pydatetime()
    self.assertIsInstance(result,np.ndarray)
    self.assertTrue(result.dtype == object)

    result = s.dt.tz_convert('CET')
    expected = Series(s._values.tz_convert('CET'),index=s.index)
    tm.assert_series_equal(result, expected)

    tz_result = result.dt.tz
    self.assertEqual(str(tz_result), 'CET')
    freq_result = s.dt.freq
    self.assertEqual(freq_result, DatetimeIndex(s.values, freq='infer').freq)

    # timedeltaindex
    for s in [Series(timedelta_range('1 day',periods=5),index=list('abcde')),
              Series(timedelta_range('1 day 01:23:45',periods=5,freq='s')),
              Series(timedelta_range('2 days 01:23:45.012345',periods=5,freq='ms'))]:
        for prop in ok_for_td:
            # we test freq below
            if prop != 'freq':
                compare(s, prop)

        for prop in ok_for_td_methods:
            getattr(s.dt, prop)

        result = s.dt.components
        self.assertIsInstance(result,DataFrame)
        tm.assert_index_equal(result.index,s.index)

        result = s.dt.to_pytimedelta()
        self.assertIsInstance(result,np.ndarray)
        self.assertTrue(result.dtype == object)

        result = s.dt.total_seconds()
        self.assertIsInstance(result,pd.Series)
        self.assertTrue(result.dtype == 'float64')

        freq_result = s.dt.freq
        self.assertEqual(freq_result, TimedeltaIndex(s.values, freq='infer').freq)

    # both: datetime values with a datetime index
    index = date_range('20130101',periods=3,freq='D')
    s = Series(date_range('20140204',periods=3,freq='s'),index=index)
    tm.assert_series_equal(s.dt.year,Series(np.array([2014,2014,2014],dtype='int64'),index=index))
    tm.assert_series_equal(s.dt.month,Series(np.array([2,2,2],dtype='int64'),index=index))
    tm.assert_series_equal(s.dt.second,Series(np.array([0,1,2],dtype='int64'),index=index))
    tm.assert_series_equal(s.dt.normalize(), pd.Series([s[0]] * 3, index=index))

    # periodindex
    for s in [Series(period_range('20130101',periods=5,freq='D'))]:
        for prop in ok_for_period:
            # we test freq below
            if prop != 'freq':
                compare(s, prop)

        for prop in ok_for_period_methods:
            getattr(s.dt, prop)

        freq_result = s.dt.freq
        self.assertEqual(freq_result, PeriodIndex(s.values).freq)

    # test limited display api: dir(s.dt) must expose exactly the
    # whitelisted attributes.
    def get_dir(s):
        results = [ r for r in s.dt.__dir__() if not r.startswith('_') ]
        return list(sorted(set(results)))

    s = Series(date_range('20130101',periods=5,freq='D'))
    results = get_dir(s)
    tm.assert_almost_equal(results,list(sorted(set(ok_for_dt + ok_for_dt_methods))))

    s = Series(period_range('20130101',periods=5,freq='D').asobject)
    results = get_dir(s)
    tm.assert_almost_equal(results, list(sorted(set(ok_for_period + ok_for_period_methods))))

    # 11295
    # ambiguous time error on the conversions
    s = Series(pd.date_range('2015-01-01', '2016-01-01', freq='T'))
    s = s.dt.tz_localize('UTC').dt.tz_convert('America/Chicago')
    results = get_dir(s)
    tm.assert_almost_equal(results, list(sorted(set(ok_for_dt + ok_for_dt_methods))))
    expected = Series(pd.date_range('2015-01-01',
                                    '2016-01-01',
                                    freq='T',
                                    tz='UTC').tz_convert('America/Chicago'))
    tm.assert_series_equal(s, expected)

    # no setting allowed
    s = Series(date_range('20130101',periods=5,freq='D'))
    with tm.assertRaisesRegexp(ValueError, "modifications"):
        s.dt.hour = 5

    # trying to set a copy
    with pd.option_context('chained_assignment','raise'):
        def f():
            s.dt.hour[0] = 5
        self.assertRaises(com.SettingWithCopyError, f)
def test_dt_accessor_no_new_attributes(self):
    """Attaching a brand-new attribute to the .dt accessor must fail."""
    # https://github.com/pydata/pandas/issues/10673
    ser = Series(date_range('20130101', periods=5, freq='D'))
    with tm.assertRaisesRegexp(AttributeError,
                               "You cannot add any new attribute"):
        ser.dt.xlabel = "a"
def test_strftime(self):
    """Check dt.strftime / Index.strftime formatting for datetimes and periods.

    Covers DatetimeIndex- and PeriodIndex-backed Series, NaT entries,
    sub-second frequencies, and strftime called directly on the bare
    DatetimeIndex / PeriodIndex.  GH 10086.
    """
    s = Series(date_range('20130101', periods=5))
    result = s.dt.strftime('%Y/%m/%d')
    expected = Series(['2013/01/01', '2013/01/02', '2013/01/03', '2013/01/04', '2013/01/05'])
    tm.assert_series_equal(result, expected)
    s = Series(date_range('2015-02-03 11:22:33.4567', periods=5))
    result = s.dt.strftime('%Y/%m/%d %H-%M-%S')
    expected = Series(['2015/02/03 11-22-33', '2015/02/04 11-22-33', '2015/02/05 11-22-33',
                       '2015/02/06 11-22-33', '2015/02/07 11-22-33'])
    tm.assert_series_equal(result, expected)
    s = Series(period_range('20130101', periods=5))
    result = s.dt.strftime('%Y/%m/%d')
    expected = Series(['2013/01/01', '2013/01/02', '2013/01/03', '2013/01/04', '2013/01/05'])
    tm.assert_series_equal(result, expected)
    s = Series(period_range('2015-02-03 11:22:33.4567', periods=5, freq='s'))
    result = s.dt.strftime('%Y/%m/%d %H-%M-%S')
    expected = Series(['2015/02/03 11-22-33', '2015/02/03 11-22-34', '2015/02/03 11-22-35',
                       '2015/02/03 11-22-36', '2015/02/03 11-22-37'])
    tm.assert_series_equal(result, expected)
    # NaT entries format as the string 'NaT'
    s = Series(date_range('20130101', periods=5))
    s.iloc[0] = pd.NaT
    result = s.dt.strftime('%Y/%m/%d')
    expected = Series(['NaT', '2013/01/02', '2013/01/03', '2013/01/04', '2013/01/05'])
    tm.assert_series_equal(result, expected)
    # strftime on the bare index classes returns an object ndarray
    datetime_index = date_range('20150301', periods=5)
    result = datetime_index.strftime("%Y/%m/%d")
    expected = np.array(['2015/03/01', '2015/03/02', '2015/03/03', '2015/03/04', '2015/03/05'], dtype=object)
    self.assert_numpy_array_equal(result, expected)
    period_index = period_range('20150301', periods=5)
    result = period_index.strftime("%Y/%m/%d")
    expected = np.array(['2015/03/01', '2015/03/02', '2015/03/03', '2015/03/04', '2015/03/05'], dtype=object)
    self.assert_numpy_array_equal(result, expected)
    s = Series([datetime(2013, 1, 1, 2, 32, 59), datetime(2013, 1, 2, 14, 32, 1)])
    result = s.dt.strftime('%Y-%m-%d %H:%M:%S')
    expected = Series(["2013-01-01 02:32:59", "2013-01-02 14:32:01"])
    tm.assert_series_equal(result, expected)
    s = Series(period_range('20130101', periods=4, freq='H'))
    result = s.dt.strftime('%Y/%m/%d %H:%M:%S')
    expected = Series(["2013/01/01 00:00:00", "2013/01/01 01:00:00",
                       "2013/01/01 02:00:00", "2013/01/01 03:00:00"])
    # BUG FIX: this assertion was missing, so the hourly-frequency result
    # was computed but never actually compared against `expected`.
    tm.assert_series_equal(result, expected)
    s = Series(period_range('20130101', periods=4, freq='L'))
    result = s.dt.strftime('%Y/%m/%d %H:%M:%S.%l')
    expected = Series(["2013/01/01 00:00:00.000", "2013/01/01 00:00:00.001",
                       "2013/01/01 00:00:00.002", "2013/01/01 00:00:00.003"])
    tm.assert_series_equal(result, expected)
def test_valid_dt_with_missing_values(self):
    """NaT entries propagate as NaN through every .dt field accessor.

    GH 8689: numeric fields come back NaN at the NaT position; .date and
    .time come back as object dtype with NaN in that slot.
    """
    from datetime import date, time
    # GH 8689
    s = Series(date_range('20130101',periods=5,freq='D'))
    s.iloc[2] = pd.NaT
    for attr in ['microsecond','nanosecond','second','minute','hour','day']:
        # expected is the accessor output with the NaT slot forced to NaN;
        # re-reading the accessor must already agree with that
        expected = getattr(s.dt,attr).copy()
        expected.iloc[2] = np.nan
        result = getattr(s.dt,attr)
        tm.assert_series_equal(result, expected)
    result = s.dt.date
    expected = Series([date(2013,1,1),date(2013,1,2),np.nan,date(2013,1,4),date(2013,1,5)],dtype='object')
    tm.assert_series_equal(result, expected)
    result = s.dt.time
    expected = Series([time(0),time(0),np.nan,time(0),time(0)],dtype='object')
    tm.assert_series_equal(result, expected)
def test_dt_accessor_api(self):
    """The .dt accessor exists on datetimelike Series and raises elsewhere.

    GH 9322: Series.dt is the CombinedDatetimelikeProperties descriptor at
    the class level and a DatetimeProperties instance on a datetime Series;
    non-datetimelike Series raise AttributeError on access.
    """
    # GH 9322
    from pandas.tseries.common import (CombinedDatetimelikeProperties,
                                       DatetimeProperties)
    self.assertIs(Series.dt, CombinedDatetimelikeProperties)
    s = Series(date_range('2000-01-01', periods=3))
    self.assertIsInstance(s.dt, DatetimeProperties)
    for s in [Series(np.arange(5)),
              Series(list('abcde')),
              Series(np.random.randn(5))]:
        with tm.assertRaisesRegexp(AttributeError,
                                   "only use .dt accessor"):
            s.dt
        self.assertFalse(hasattr(s, 'dt'))
def test_tab_completion(self):
    """dir(Series) exposes .str/.dt/.cat only when the dtype supports them.

    GH 9910: the accessors should appear in tab completion exactly when the
    underlying values make them usable.
    """
    # GH 9910
    s = Series(list('abcd'))
    # Series of str values should have .str but not .dt/.cat in __dir__
    self.assertTrue('str' in dir(s))
    self.assertTrue('dt' not in dir(s))
    self.assertTrue('cat' not in dir(s))
    # similarly for .dt
    s = Series(date_range('1/1/2015', periods=5))
    self.assertTrue('dt' in dir(s))
    self.assertTrue('str' not in dir(s))
    self.assertTrue('cat' not in dir(s))
    # similarly for .cat, but with the twist that str and dt should be there
    # if the categories are of that type
    # first cat and str
    s = Series(list('abbcd'), dtype="category")
    self.assertTrue('cat' in dir(s))
    self.assertTrue('str' in dir(s)) # as it is a string categorical
    self.assertTrue('dt' not in dir(s))
    # similar to cat and str
    s = Series(date_range('1/1/2015', periods=5)).astype("category")
    self.assertTrue('cat' in dir(s))
    self.assertTrue('str' not in dir(s))
    self.assertTrue('dt' in dir(s)) # as it is a datetime categorical
def test_binop_maybe_preserve_name(self):
    """Binary ops keep the name only when both operands share it.

    Checked via operators, named methods, partial alignment, and the full
    set of arithmetic methods plus their reflected variants.
    """
    # names match, preserve
    result = self.ts * self.ts
    self.assertEqual(result.name, self.ts.name)
    result = self.ts.mul(self.ts)
    self.assertEqual(result.name, self.ts.name)
    # alignment against a shorter slice still preserves the shared name
    result = self.ts * self.ts[:-2]
    self.assertEqual(result.name, self.ts.name)
    # names don't match, don't preserve
    cp = self.ts.copy()
    cp.name = 'something else'
    result = self.ts + cp
    self.assertIsNone(result.name)
    result = self.ts.add(cp)
    self.assertIsNone(result.name)
    # exercise every arithmetic method and its reflected ('r'-prefixed) twin
    ops = ['add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow']
    ops = ops + ['r' + op for op in ops]
    for op in ops:
        # names match, preserve
        s = self.ts.copy()
        result = getattr(s, op)(s)
        self.assertEqual(result.name, self.ts.name)
        # names don't match, don't preserve
        cp = self.ts.copy()
        cp.name = 'changed'
        result = getattr(s, op)(cp)
        self.assertIsNone(result.name)
def test_combine_first_name(self):
    """combine_first keeps the calling Series' name."""
    combined = self.ts.combine_first(self.ts[:5])
    self.assertEqual(combined.name, self.ts.name)
def test_combine_first_dt64(self):
    """combine_first on datetime64 Series fills holes from the other operand.

    Two cases: both operands datetime64 (result stays datetime64), and a
    mixed datetime64/object pair (result is object dtype).
    """
    from pandas.tseries.tools import to_datetime
    s0 = to_datetime(Series(["2010", np.NaN]))
    s1 = to_datetime(Series([np.NaN, "2011"]))
    rs = s0.combine_first(s1)
    xp = to_datetime(Series(['2010', '2011']))
    assert_series_equal(rs, xp)
    # mixed: the fill value is a raw string, so the result is object dtype
    s0 = to_datetime(Series(["2010", np.NaN]))
    s1 = Series([np.NaN, "2011"])
    rs = s0.combine_first(s1)
    xp = Series([datetime(2010, 1, 1), '2011'])
    assert_series_equal(rs, xp)
def test_get(self):
    """Series.get returns the default for missing labels (GH 6383, GH 7407).

    Includes an object-dtype Float64Index (25 IS a label there, so get(25)
    finds it) and boolean/int labels produced by value_counts.
    """
    # GH 6383
    s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
                         45, 51, 39, 55, 43, 54, 52, 51, 54]))
    # default integer index has no label 25 -> default returned
    result = s.get(25, 0)
    expected = 0
    self.assertEqual(result,expected)
    s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
                         45, 51, 39, 55, 43, 54, 52, 51, 54]),
               index=pd.Float64Index([25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
                                      121.0, 144.0, 169.0, 196.0, 1225.0,
                                      1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
                                      1681.0, 1764.0, 1849.0, 1936.0],
                                     dtype='object'))
    # 25 is now a real label -> its value (43) comes back, not the default
    result = s.get(25, 0)
    expected = 43
    self.assertEqual(result,expected)
    # GH 7407
    # with a boolean accessor
    df = pd.DataFrame({'i':[0]*3, 'b':[False]*3})
    vc = df.i.value_counts()
    result = vc.get(99,default='Missing')
    self.assertEqual(result,'Missing')
    vc = df.b.value_counts()
    result = vc.get(False,default='Missing')
    self.assertEqual(result,3)
    result = vc.get(True,default='Missing')
    self.assertEqual(result,'Missing')
def test_delitem(self):
    """`del s[label]` removes the entry in place (GH 5542).

    Exercises repeated deletes, deleting from an empty Series (KeyError),
    delete/re-add cycles down to empty, and an object-dtype index.
    """
    # GH 5542
    # should delete the item inplace
    s = Series(lrange(5))
    del s[0]
    expected = Series(lrange(1,5),index=lrange(1,5))
    assert_series_equal(s, expected)
    del s[1]
    expected = Series(lrange(2,5),index=lrange(2,5))
    assert_series_equal(s, expected)
    # empty
    s = Series()
    def f():
        del s[0]
    self.assertRaises(KeyError, f)
    # only 1 left, del, add, del
    s = Series(1)
    del s[0]
    assert_series_equal(s, Series(dtype='int64', index=Index([], dtype='int64')))
    s[0] = 1
    assert_series_equal(s, Series(1))
    del s[0]
    assert_series_equal(s, Series(dtype='int64', index=Index([], dtype='int64')))
    # Index(dtype=object)
    s = Series(1, index=['a'])
    del s['a']
    assert_series_equal(s, Series(dtype='int64', index=Index([], dtype='object')))
    s['a'] = 1
    assert_series_equal(s, Series(1, index=['a']))
    del s['a']
    assert_series_equal(s, Series(dtype='int64', index=Index([], dtype='object')))
def test_getitem_preserve_name(self):
    """Boolean, fancy and slice indexing all keep the Series name."""
    for selected in (self.ts[self.ts > 0],   # boolean mask
                     self.ts[[0, 2, 4]],     # positional fancy indexing
                     self.ts[5:10]):         # slice
        self.assertEqual(selected.name, self.ts.name)
def test_getitem_setitem_ellipsis(self):
    """s[...] reads the whole Series and s[...] = v assigns to all of it."""
    s = Series(np.random.randn(10))
    # NOTE(review): np.fix return value is discarded; presumably only here
    # to exercise the ufunc path on a Series -- confirm intent
    np.fix(s)
    result = s[...]
    assert_series_equal(result, s)
    s[...] = 5
    # result reflects the assignment, i.e. s[...] returned a view not a copy
    self.assertTrue((result == 5).all())
def test_getitem_negative_out_of_bounds(self):
    """Position -11 in a length-10 Series raises IndexError for get and set."""
    ser = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
    with self.assertRaises(IndexError):
        ser[-11]
    with self.assertRaises(IndexError):
        ser[-11] = 'foo'
def test_multilevel_name_print(self):
    """repr of a named Series with a MultiIndex matches the expected layout."""
    index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
                               ['one', 'two', 'three']],
                       labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                               [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                       names=['first', 'second'])
    s = Series(lrange(0, len(index)), index=index, name='sth')
    # exact repr, line by line: header, grouped first level, Name footer
    expected = ["first  second",
                "foo    one       0",
                "       two       1",
                "       three     2",
                "bar    one       3",
                "       two       4",
                "baz    two       5",
                "       three     6",
                "qux    one       7",
                "       two       8",
                "       three     9",
                "Name: sth, dtype: int64"]
    expected = "\n".join(expected)
    self.assertEqual(repr(s), expected)
def test_multilevel_preserve_name(self):
    """Partial indexing on a MultiIndex keeps the Series name ([] and .ix)."""
    index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
                               ['one', 'two', 'three']],
                       labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
                               [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
                       names=['first', 'second'])
    s = Series(np.random.randn(len(index)), index=index, name='sth')
    result = s['foo']
    result2 = s.ix['foo']
    self.assertEqual(result.name, s.name)
    self.assertEqual(result2.name, s.name)
def test_name_printing(self):
    """repr shows 'Name: ...' iff the Series has a name.

    Checked on a small Series, a large one (different repr code path),
    and an empty datetime-indexed Series.
    """
    # test small series
    s = Series([0, 1, 2])
    s.name = "test"
    self.assertIn("Name: test", repr(s))
    s.name = None
    self.assertNotIn("Name:", repr(s))
    # test big series (diff code path)
    s = Series(lrange(0, 1000))
    s.name = "test"
    self.assertIn("Name: test", repr(s))
    s.name = None
    self.assertNotIn("Name:", repr(s))
    s = Series(index=date_range('20010101', '20020101'), name='test')
    self.assertIn("Name: test", repr(s))
def test_pickle_preserve_name(self):
    """A pickle round-trip keeps the Series name."""
    restored = self._pickle_roundtrip_name(self.ts)
    self.assertEqual(restored.name, self.ts.name)
def _pickle_roundtrip_name(self, obj):
    """Pickle *obj* to a temporary file and return the unpickled copy."""
    with ensure_clean() as path:
        obj.to_pickle(path)
        return pd.read_pickle(path)
def test_argsort_preserve_name(self):
    """argsort propagates the Series name to its result."""
    self.assertEqual(self.ts.argsort().name, self.ts.name)
def test_sort_index_name(self):
    """sort_index keeps the name, even when sorting descending."""
    reordered = self.ts.sort_index(ascending=False)
    self.assertEqual(reordered.name, self.ts.name)
def test_to_sparse_pass_name(self):
    """to_sparse carries the name over to the sparse result."""
    sparse = self.ts.to_sparse()
    self.assertEqual(sparse.name, self.ts.name)
class TestNanops(tm.TestCase):
    """NaN-aware reductions exercised through nanops, Series and DataFrame."""
    _multiprocess_can_split_ = True

    def test_comparisons(self):
        """nangt keeps NaN positions as NaN in an object-dtype result."""
        left = np.random.randn(10)
        right = np.random.randn(10)
        left[:3] = np.nan
        result = nanops.nangt(left, right)
        expected = (left > right).astype('O')
        expected[:3] = np.nan
        assert_almost_equal(result, expected)
        s = Series(['a', 'b', 'c'])
        s2 = Series([False, True, False])
        # it works!  (string vs bool comparison should not raise)
        s == s2
        s2 == s

    def test_sum_zero(self):
        """Sums over empty arrays/Series/frames are 0, not NaN (GH #844)."""
        arr = np.array([])
        self.assertEqual(nanops.nansum(arr), 0)
        arr = np.empty((10, 0))
        self.assertTrue((nanops.nansum(arr, axis=1) == 0).all())
        # GH #844
        s = Series([], index=[])
        self.assertEqual(s.sum(), 0)
        df = DataFrame(np.empty((10, 0)))
        self.assertTrue((df.sum(1) == 0).all())

    def test_nansum_buglet(self):
        """np.nansum over a Series with a NaN skips the NaN."""
        s = Series([1.0, np.nan], index=[0, 1])
        result = np.nansum(s)
        assert_almost_equal(result, 1)

    def test_overflow(self):
        """sum/min/max on 5M-element Series must not overflow (GH 6915)."""
        # GH 6915
        # overflowing on the smaller int dtypes
        for dtype in ['int32','int64']:
            v = np.arange(5000000,dtype=dtype)
            s = Series(v)
            # no bottleneck: skipna=False forces the non-bottleneck path
            result = s.sum(skipna=False)
            self.assertEqual(int(result),v.sum(dtype='int64'))
            result = s.min(skipna=False)
            self.assertEqual(int(result),0)
            result = s.max(skipna=False)
            self.assertEqual(int(result),v[-1])
            # use bottleneck if available
            result = s.sum()
            self.assertEqual(int(result),v.sum(dtype='int64'))
            result = s.min()
            self.assertEqual(int(result),0)
            result = s.max()
            self.assertEqual(int(result),v[-1])
        for dtype in ['float32', 'float64']:
            v = np.arange(5000000, dtype=dtype)
            s = Series(v)
            # no bottleneck: skipna=False forces the non-bottleneck path
            result = s.sum(skipna=False)
            self.assertEqual(result, v.sum(dtype=dtype))
            result = s.min(skipna=False)
            self.assertTrue(np.allclose(float(result), 0.0))
            result = s.max(skipna=False)
            self.assertTrue(np.allclose(float(result), v[-1]))
            # use bottleneck if available
            result = s.sum()
            self.assertEqual(result, v.sum(dtype=dtype))
            result = s.min()
            self.assertTrue(np.allclose(float(result), 0.0))
            result = s.max()
            self.assertTrue(np.allclose(float(result), v[-1]))
class SafeForSparse(object):
    # NOTE(review): empty marker class -- presumably a mix-in slot for tests
    # shared with the sparse variants; confirm against the sparse test module.
    pass
# Module-level time-series template; TestSeries.setUp copies it per test.
_ts = tm.makeTimeSeries()
class TestSeries(tm.TestCase, CheckNameIntegration):
_multiprocess_can_split_ = True
def setUp(self):
    """Build per-test fixtures.

    Creates a named copy of the module-level time series plus string,
    object and empty Series fixtures used throughout TestSeries.
    """
    # FIX: removed unused local `import warnings` (never referenced here)
    # copy the shared template so individual tests may mutate freely
    self.ts = _ts.copy()
    self.ts.name = 'ts'
    self.series = tm.makeStringSeries()
    self.series.name = 'series'
    self.objSeries = tm.makeObjectSeries()
    self.objSeries.name = 'objects'
    self.empty = Series([], index=[])
def test_scalar_conversion(self):
    """Scalar input builds a Series; 1-element Series coerce to scalars."""
    # Pass in scalar is disabled
    scalar = Series(0.5)
    self.assertNotIsInstance(scalar, float)
    # coercion via float()/int()/long() (py2 compat `long`)
    self.assertEqual(float(Series([1.])), 1.0)
    self.assertEqual(int(Series([1.])), 1)
    self.assertEqual(long(Series([1.])), 1)
def test_astype(self):
    """astype yields the requested dtype and preserves the name."""
    ser = Series(np.random.randn(5), name='foo')
    for target in ('float32', 'float64', 'int64', 'int32'):
        converted = ser.astype(target)
        self.assertEqual(converted.dtype, target)
        self.assertEqual(converted.name, ser.name)
def test_TimeSeries_deprecation(self):
    """Instantiating the legacy TimeSeries alias warns FutureWarning (#10890)."""
    # deprecation TimeSeries, #10890
    with tm.assert_produces_warning(FutureWarning):
        pd.TimeSeries(1,index=date_range('20130101',periods=3))
def test_constructor(self):
    """Basic Series construction: time-series flags, wrapping, mixed dtype.

    Also checks that wrapping a Series reuses the index object, that 2-D
    input and MultiIndex input raise, and that the name survives wrapping.
    """
    # Recognize TimeSeries
    with tm.assert_produces_warning(FutureWarning):
        self.assertTrue(self.ts.is_time_series)
    self.assertTrue(self.ts.index.is_all_dates)
    # Pass in Series
    derived = Series(self.ts)
    with tm.assert_produces_warning(FutureWarning):
        self.assertTrue(derived.is_time_series)
    self.assertTrue(derived.index.is_all_dates)
    self.assertTrue(tm.equalContents(derived.index, self.ts.index))
    # Ensure new index is not created
    self.assertEqual(id(self.ts.index), id(derived.index))
    # Mixed type Series
    mixed = Series(['hello', np.NaN], index=[0, 1])
    self.assertEqual(mixed.dtype, np.object_)
    self.assertIs(mixed[1], np.NaN)
    with tm.assert_produces_warning(FutureWarning):
        self.assertFalse(self.empty.is_time_series)
    self.assertFalse(self.empty.index.is_all_dates)
    with tm.assert_produces_warning(FutureWarning):
        self.assertFalse(Series({}).is_time_series)
    self.assertFalse(Series({}).index.is_all_dates)
    # 2-D input is rejected
    self.assertRaises(Exception, Series, np.random.randn(3, 3),
                      index=np.arange(3))
    mixed.name = 'Series'
    rs = Series(mixed).name
    xp = 'Series'
    self.assertEqual(rs, xp)
    # raise on MultiIndex GH4187
    m = MultiIndex.from_arrays([[1, 2], [3, 4]])
    self.assertRaises(NotImplementedError, Series, m)
def test_constructor_empty(self):
    """Series() and Series([]) agree; index-only input is all-NaN."""
    no_data = Series()
    empty_list = Series([])
    assert_series_equal(no_data, empty_list, check_index_type=False)
    index_only = Series(index=lrange(10))
    all_nan = Series(np.nan, index=lrange(10))
    assert_series_equal(index_only, all_nan)
def test_constructor_series(self):
    """Re-wrapping a Series with a reordered index realigns the data."""
    unsorted = ['d', 'b', 'a', 'c']
    original = Series([4, 7, -5, 3], index=unsorted)
    rewrapped = Series(original, index=sorted(unsorted))
    assert_series_equal(rewrapped, original.sort_index())
def test_constructor_iterator(self):
    """A range() argument is materialized just like an explicit list."""
    from_range = Series(range(10), dtype='int64')
    from_list = Series(list(range(10)), dtype='int64')
    assert_series_equal(from_range, from_list)
def test_constructor_generator(self):
    """Generators are consumed into a Series, with or without an index."""
    gen = (i for i in range(10))
    result = Series(gen)
    exp = Series(lrange(10))
    assert_series_equal(result, exp)
    # a fresh generator is needed: the first one is exhausted
    gen = (i for i in range(10))
    result = Series(gen, index=lrange(10, 20))
    exp.index = lrange(10, 20)
    assert_series_equal(result, exp)
def test_constructor_map(self):
    """map objects are consumed into a Series (GH8909), like generators."""
    # GH8909
    m = map(lambda x: x, range(10))
    result = Series(m)
    exp = Series(lrange(10))
    assert_series_equal(result, exp)
    # a fresh map object is needed: the first one is exhausted
    m = map(lambda x: x, range(10))
    result = Series(m, index=lrange(10, 20))
    exp.index = lrange(10, 20)
    assert_series_equal(result, exp)
def test_constructor_categorical(self):
    """A Categorical passed to Series is kept intact as the values."""
    cat = pd.Categorical([0, 1, 2, 0, 1, 2], ['a', 'b', 'c'], fastpath=True)
    res = Series(cat)
    self.assertTrue(res.values.equals(cat))
def test_constructor_maskedarray(self):
    """Masked arrays construct Series with masked slots becoming NaN/NaT.

    Covers float, int (upcast to float while masked), bool (upcast to
    object while masked) and datetime64 (masked -> iNaT) inputs; each
    section progressively unmasks entries and re-checks.
    """
    # float: masked slots -> NaN
    data = ma.masked_all((3,), dtype=float)
    result = Series(data)
    expected = Series([nan, nan, nan])
    assert_series_equal(result, expected)
    data[0] = 0.0
    data[2] = 2.0
    index = ['a', 'b', 'c']
    result = Series(data, index=index)
    expected = Series([0.0, nan, 2.0], index=index)
    assert_series_equal(result, expected)
    data[1] = 1.0
    result = Series(data, index=index)
    expected = Series([0.0, 1.0, 2.0], index=index)
    assert_series_equal(result, expected)
    # int: upcast to float while any slot is masked, back to int when full
    data = ma.masked_all((3,), dtype=int)
    result = Series(data)
    expected = Series([nan, nan, nan], dtype=float)
    assert_series_equal(result, expected)
    data[0] = 0
    data[2] = 2
    index = ['a', 'b', 'c']
    result = Series(data, index=index)
    expected = Series([0, nan, 2], index=index, dtype=float)
    assert_series_equal(result, expected)
    data[1] = 1
    result = Series(data, index=index)
    expected = Series([0, 1, 2], index=index, dtype=int)
    assert_series_equal(result, expected)
    # bool: upcast to object while any slot is masked, back to bool when full
    data = ma.masked_all((3,), dtype=bool)
    result = Series(data)
    expected = Series([nan, nan, nan], dtype=object)
    assert_series_equal(result, expected)
    data[0] = True
    data[2] = False
    index = ['a', 'b', 'c']
    result = Series(data, index=index)
    expected = Series([True, nan, False], index=index, dtype=object)
    assert_series_equal(result, expected)
    data[1] = True
    result = Series(data, index=index)
    expected = Series([True, True, False], index=index, dtype=bool)
    assert_series_equal(result, expected)
    # datetime64: masked slots -> iNaT, dtype stays M8[ns]
    from pandas import tslib
    data = ma.masked_all((3,), dtype='M8[ns]')
    result = Series(data)
    expected = Series([tslib.iNaT, tslib.iNaT, tslib.iNaT], dtype='M8[ns]')
    assert_series_equal(result, expected)
    data[0] = datetime(2001, 1, 1)
    data[2] = datetime(2001, 1, 3)
    index = ['a', 'b', 'c']
    result = Series(data, index=index)
    expected = Series([datetime(2001, 1, 1), tslib.iNaT,
                       datetime(2001, 1, 3)], index=index, dtype='M8[ns]')
    assert_series_equal(result, expected)
    data[1] = datetime(2001, 1, 2)
    result = Series(data, index=index)
    expected = Series([datetime(2001, 1, 1), datetime(2001, 1, 2),
                       datetime(2001, 1, 3)], index=index, dtype='M8[ns]')
    assert_series_equal(result, expected)
def test_constructor_default_index(self):
    """Omitting the index yields the default 0..n-1 integer index."""
    ser = Series([0, 1, 2])
    assert_almost_equal(ser.index, np.arange(3))
def test_constructor_corner(self):
    """A list of DataFrames becomes an object Series, not an error."""
    df = tm.makeTimeDataFrame()
    objs = [df, df]
    s = Series(objs, index=[0, 1])
    tm.assertIsInstance(s, Series)
def test_constructor_sanitize(self):
    """dtype='i8' is honored for clean floats but NaN forces float64."""
    s = Series(np.array([1., 1., 8.]), dtype='i8')
    self.assertEqual(s.dtype, np.dtype('i8'))
    # NaN cannot be represented in i8, so the request is overridden
    s = Series(np.array([1., 1., np.nan]), copy=True, dtype='i8')
    self.assertEqual(s.dtype, np.dtype('f8'))
def test_constructor_pass_none(self):
    """None data gives float64 NaNs (or object if requested); GH 7431 index."""
    s = Series(None, index=lrange(5))
    self.assertEqual(s.dtype, np.float64)
    s = Series(None, index=lrange(5), dtype=object)
    self.assertEqual(s.dtype, np.object_)
    # GH 7431
    # inference on the index
    s = Series(index=np.array([None]))
    expected = Series(index=Index([None]))
    assert_series_equal(s,expected)
def test_constructor_cast(self):
    """Non-numeric strings cannot be cast to float at construction."""
    with self.assertRaises(ValueError):
        Series(['a', 'b', 'c'], dtype=float)
def test_constructor_dtype_nocast(self):
    """Same-dtype re-wrap does not copy: mutating the wrapper hits the original."""
    # 1572
    s = Series([1, 2, 3])
    s2 = Series(s, dtype=np.int64)
    s2[1] = 5
    # shared data: the write through s2 is visible in s
    self.assertEqual(s[1], 5)
def test_constructor_datelike_coercion(self):
    """dtype=object suppresses datetimelike inference (GH 9477)."""
    # GH 9477
    # incorrectly inferring on datetimelike-looking data when object dtype
    # is explicitly specified
    s = Series([Timestamp('20130101'),'NOV'],dtype=object)
    self.assertEqual(s.iloc[0],Timestamp('20130101'))
    self.assertEqual(s.iloc[1],'NOV')
    self.assertTrue(s.dtype == object)
    # the dtype was being reset on the slicing and re-inferred to datetime
    # even though the blocks are mixed
    belly = '216 3T19'.split()
    wing1 = '2T15 4H19'.split()
    wing2 = '416 4T20'.split()
    mat = pd.to_datetime('2016-01-22 2019-09-07'.split())
    df = pd.DataFrame({'wing1':wing1, 'wing2':wing2, 'mat':mat}, index=belly)
    result = df.loc['3T19']
    self.assertTrue(result.dtype == object)
    result = df.loc['216']
    self.assertTrue(result.dtype == object)
def test_constructor_dtype_datetime64(self):
    """datetime64 construction: NaT handling, inference, tz and error cases.

    BUG FIX: the two assertRaises calls for GH3414 used ``lambda x: ...``.
    unittest's assertRaises invokes the callable with NO arguments, so the
    TypeError raised was "missing argument x" -- the assertions passed
    without ever executing the Series constructor.  Changed to zero-arg
    lambdas so the intended constructor errors are actually exercised.
    """
    import pandas.tslib as tslib
    s = Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5))
    self.assertTrue(isnull(s).all())
    # in theory this should be all nulls, but since
    # we are not specifying a dtype is ambiguous
    s = Series(tslib.iNaT, index=lrange(5))
    self.assertFalse(isnull(s).all())
    s = Series(nan, dtype='M8[ns]', index=lrange(5))
    self.assertTrue(isnull(s).all())
    s = Series([datetime(2001, 1, 2, 0, 0), tslib.iNaT], dtype='M8[ns]')
    self.assertTrue(isnull(s[1]))
    self.assertEqual(s.dtype, 'M8[ns]')
    s = Series([datetime(2001, 1, 2, 0, 0), nan], dtype='M8[ns]')
    self.assertTrue(isnull(s[1]))
    self.assertEqual(s.dtype, 'M8[ns]')
    # GH3416
    dates = [
        np.datetime64(datetime(2013, 1, 1)),
        np.datetime64(datetime(2013, 1, 2)),
        np.datetime64(datetime(2013, 1, 3)),
    ]
    s = Series(dates)
    self.assertEqual(s.dtype, 'M8[ns]')
    s.ix[0] = np.nan
    self.assertEqual(s.dtype, 'M8[ns]')
    # invalid astypes
    for t in ['s', 'D', 'us', 'ms']:
        self.assertRaises(TypeError, s.astype, 'M8[%s]' % t)
    # GH3414 related
    self.assertRaises(TypeError, lambda: Series(
        Series(dates).astype('int') / 1000000, dtype='M8[ms]'))
    self.assertRaises(
        TypeError, lambda: Series(dates, dtype='datetime64'))
    # invalid dates can be held as object
    result = Series([datetime(2,1,1)])
    self.assertEqual(result[0], datetime(2,1,1,0,0))
    result = Series([datetime(3000,1,1)])
    self.assertEqual(result[0], datetime(3000,1,1,0,0))
    # don't mix types
    result = Series([ Timestamp('20130101'), 1],index=['a','b'])
    self.assertEqual(result['a'], Timestamp('20130101'))
    self.assertEqual(result['b'], 1)
    # GH6529
    # coerce datetime64 non-ns properly
    dates = date_range('01-Jan-2015', '01-Dec-2015', freq='M')
    values2 = dates.view(np.ndarray).astype('datetime64[ns]')
    expected = Series(values2, dates)
    for dtype in ['s', 'D', 'ms', 'us', 'ns']:
        values1 = dates.view(np.ndarray).astype('M8[{0}]'.format(dtype))
        result = Series(values1, dates)
        assert_series_equal(result,expected)
    # leave datetime.date alone
    dates2 = np.array([d.date() for d in dates.to_pydatetime()],
                      dtype=object)
    series1 = Series(dates2, dates)
    self.assert_numpy_array_equal(series1.values,dates2)
    self.assertEqual(series1.dtype,object)
    # these will correctly infer a datetime
    s = Series([None, pd.NaT, '2013-08-05 15:30:00.000001'])
    self.assertEqual(s.dtype,'datetime64[ns]')
    s = Series([np.nan, pd.NaT, '2013-08-05 15:30:00.000001'])
    self.assertEqual(s.dtype,'datetime64[ns]')
    s = Series([pd.NaT, None, '2013-08-05 15:30:00.000001'])
    self.assertEqual(s.dtype,'datetime64[ns]')
    s = Series([pd.NaT, np.nan, '2013-08-05 15:30:00.000001'])
    self.assertEqual(s.dtype,'datetime64[ns]')
    # tz-aware (UTC and other tz's)
    # GH 8411
    dr = date_range('20130101',periods=3)
    self.assertTrue(Series(dr).iloc[0].tz is None)
    dr = date_range('20130101',periods=3,tz='UTC')
    self.assertTrue(str(Series(dr).iloc[0].tz) == 'UTC')
    dr = date_range('20130101',periods=3,tz='US/Eastern')
    self.assertTrue(str(Series(dr).iloc[0].tz) == 'US/Eastern')
    # non-convertible
    s = Series([1479596223000, -1479590, pd.NaT])
    self.assertTrue(s.dtype == 'object')
    self.assertTrue(s[2] is pd.NaT)
    self.assertTrue('NaT' in str(s))
    # if we passed a NaT it remains
    s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), pd.NaT])
    self.assertTrue(s.dtype == 'object')
    self.assertTrue(s[2] is pd.NaT)
    self.assertTrue('NaT' in str(s))
    # if we passed a nan it remains
    s = Series([datetime(2010, 1, 1), datetime(2, 1, 1), np.nan])
    self.assertTrue(s.dtype == 'object')
    self.assertTrue(s[2] is np.nan)
    self.assertTrue('NaN' in str(s))
def test_constructor_with_datetime_tz(self):
    """datetime64[ns, tz] Series: construction, indexing, astype, inference.

    GH 8260: tz-aware DatetimeIndex wrapped in a Series keeps the tz in the
    dtype; .values exports naive UTC ns; indexing/concat/astype round-trip.
    """
    # 8260
    # support datetime64 with tz
    dr = date_range('20130101',periods=3,tz='US/Eastern')
    s = Series(dr)
    self.assertTrue(s.dtype.name == 'datetime64[ns, US/Eastern]')
    self.assertTrue(s.dtype == 'datetime64[ns, US/Eastern]')
    self.assertTrue(com.is_datetime64tz_dtype(s.dtype))
    self.assertTrue('datetime64[ns, US/Eastern]' in str(s))
    # export: .values is a naive ndarray; localizing to UTC and converting
    # back to the Series tz recovers the original index
    result = s.values
    self.assertIsInstance(result, np.ndarray)
    self.assertTrue(result.dtype == 'datetime64[ns]')
    self.assertTrue(dr.equals(pd.DatetimeIndex(result).tz_localize('UTC').tz_convert(tz=s.dt.tz)))
    # indexing
    result = s.iloc[0]
    self.assertEqual(result,Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern', offset='D'))
    result = s[0]
    self.assertEqual(result,Timestamp('2013-01-01 00:00:00-0500', tz='US/Eastern', offset='D'))
    result = s[Series([True,True,False],index=s.index)]
    assert_series_equal(result,s[0:2])
    result = s.iloc[0:1]
    assert_series_equal(result,Series(dr[0:1]))
    # concat
    result = pd.concat([s.iloc[0:1],s.iloc[1:]])
    assert_series_equal(result,s)
    # astype
    result = s.astype(object)
    expected = Series(DatetimeIndex(s._values).asobject)
    assert_series_equal(result, expected)
    result = Series(s.values).dt.tz_localize('UTC').dt.tz_convert(s.dt.tz)
    assert_series_equal(result, s)
    # astype - datetime64[ns, tz]
    result = Series(s.values).astype('datetime64[ns, US/Eastern]')
    assert_series_equal(result, s)
    result = Series(s.values).astype(s.dtype)
    assert_series_equal(result, s)
    result = s.astype('datetime64[ns, CET]')
    expected = Series(date_range('20130101 06:00:00',periods=3,tz='CET'))
    assert_series_equal(result, expected)
    # short str
    self.assertTrue('datetime64[ns, US/Eastern]' in str(s))
    # formatting with NaT
    result = s.shift()
    self.assertTrue('datetime64[ns, US/Eastern]' in str(result))
    self.assertTrue('NaT' in str(result))
    # long str
    t = Series(date_range('20130101',periods=1000,tz='US/Eastern'))
    self.assertTrue('datetime64[ns, US/Eastern]' in str(t))
    result = pd.DatetimeIndex(s,freq='infer')
    tm.assert_index_equal(result, dr)
    # inference: same tz on all elements -> tz dtype; mixed tz -> object
    s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')])
    self.assertTrue(s.dtype == 'datetime64[ns, US/Pacific]')
    self.assertTrue(lib.infer_dtype(s) == 'datetime64')
    s = Series([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Eastern')])
    self.assertTrue(s.dtype == 'object')
    self.assertTrue(lib.infer_dtype(s) == 'datetime')
def test_constructor_periodindex(self):
    """A PeriodIndex put into a Series is stored as its object form (GH7932)."""
    # GH7932
    periods = period_range('20130101', periods=5, freq='D')
    wrapped = Series(periods)
    assert_series_equal(wrapped, Series(periods.asobject))
def test_constructor_dict(self):
    """dict + explicit index: values align by key, missing keys become NaN."""
    d = {'a': 0., 'b': 1., 'c': 2.}
    result = Series(d, index=['b', 'c', 'd', 'a'])
    expected = Series([1, 2, nan, 0], index=['b', 'c', 'd', 'a'])
    assert_series_equal(result, expected)
    # same alignment with Period keys against a PeriodIndex
    pidx = tm.makePeriodIndex(100)
    d = {pidx[0]: 0, pidx[1]: 1}
    result = Series(d, index=pidx)
    expected = Series(np.nan, pidx)
    expected.ix[0] = 0
    expected.ix[1] = 1
    assert_series_equal(result, expected)
def test_constructor_dict_multiindex(self):
    """Tuple-keyed dicts build a MultiIndex; mixed keys fall back to object."""
    check = lambda result, expected: tm.assert_series_equal(
        result, expected, check_dtype=True, check_index_type=True,
        check_series_type=True)
    d = {('a', 'a'): 0., ('b', 'a'): 1., ('b', 'c'): 2.}
    _d = sorted(d.items())
    ser = Series(d)
    expected = Series([x[1] for x in _d],
                      index=MultiIndex.from_tuples([x[0] for x in _d]))
    check(ser, expected)
    # adding a non-tuple key: index can no longer be a MultiIndex
    d['z'] = 111.
    _d.insert(0, ('z', d['z']))
    ser = Series(d)
    expected = Series(
        [x[1] for x in _d],
        index=Index([x[0] for x in _d], tupleize_cols=False))
    ser = ser.reindex(index=expected.index)
    check(ser, expected)
def test_constructor_subclass_dict(self):
    """dict subclasses construct the same Series as an equivalent plain dict."""
    data = tm.TestSubDict((x, 10.0 * x) for x in range(10))
    from_subclass = Series(data)
    from_plain = Series(dict(compat.iteritems(data)))
    assert_series_equal(from_plain, from_subclass)
def test_constructor_dict_datetime64_index(self):
    """Dicts keyed by np.datetime64/datetime/Timestamp all build the same Series.

    GH 9456: every key flavor should coerce to the same Timestamp index.
    """
    # GH 9456
    dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
    values = [42544017.198965244, 1234565, 40512335.181958228, -1]
    def create_data(constructor):
        # map each date string through the given key constructor
        return dict(zip((constructor(x) for x in dates_as_str), values))
    data_datetime64 = create_data(np.datetime64)
    data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
    data_Timestamp = create_data(Timestamp)
    expected = Series(values, (Timestamp(x) for x in dates_as_str))
    result_datetime64 = Series(data_datetime64)
    result_datetime = Series(data_datetime)
    result_Timestamp = Series(data_Timestamp)
    assert_series_equal(result_datetime64, expected)
    assert_series_equal(result_datetime, expected)
    assert_series_equal(result_Timestamp, expected)
def test_orderedDict_ctor(self):
    """An OrderedDict keeps its insertion order in the resulting Series (GH3283)."""
    # GH3283
    import pandas
    import random
    data = OrderedDict(('col%s' % i, random.random()) for i in range(12))
    ser = pandas.Series(data)
    self.assertTrue(all(ser.values == list(data.values())))
def test_orderedDict_subclass_ctor(self):
    """OrderedDict subclasses also keep insertion order (GH3283)."""
    # GH3283
    import pandas
    import random
    class A(OrderedDict):
        pass
    data = A(('col%s' % i, random.random()) for i in range(12))
    ser = pandas.Series(data)
    self.assertTrue(all(ser.values == list(data.values())))
def test_constructor_list_of_tuples(self):
    """Tuples stay intact as elements of an object Series."""
    pairs = [(1, 1), (2, 2), (2, 3)]
    ser = Series(pairs)
    self.assertEqual(list(ser), pairs)
def test_constructor_tuple_of_tuples(self):
    """A tuple of tuples round-trips element-wise through a Series."""
    pairs = ((1, 1), (2, 2), (2, 3))
    ser = Series(pairs)
    self.assertEqual(tuple(ser), pairs)
def test_constructor_set(self):
    """Unordered sets and frozensets are rejected by the constructor."""
    values = set([1, 2, 3, 4, 5])
    with self.assertRaises(TypeError):
        Series(values)
    with self.assertRaises(TypeError):
        Series(frozenset(values))
def test_fromDict(self):
    """dict input: sorted index, dtype inference, explicit dtype casting."""
    data = {'a': 0, 'b': 1, 'c': 2, 'd': 3}
    series = Series(data)
    self.assertTrue(tm.is_sorted(series.index))
    # mixed value types infer object dtype
    data = {'a': 0, 'b': '1', 'c': '2', 'd': datetime.now()}
    series = Series(data)
    self.assertEqual(series.dtype, np.object_)
    data = {'a': 0, 'b': '1', 'c': '2', 'd': '3'}
    series = Series(data)
    self.assertEqual(series.dtype, np.object_)
    # numeric strings cast to float when dtype is given explicitly
    data = {'a': '0', 'b': '1'}
    series = Series(data, dtype=float)
    self.assertEqual(series.dtype, np.float64)
def test_setindex(self):
    """Assigning .index validates type and length; valid arrays are wrapped."""
    # wrong type
    series = self.series.copy()
    self.assertRaises(TypeError, setattr, series, 'index', None)
    # wrong length
    series = self.series.copy()
    self.assertRaises(Exception, setattr, series, 'index',
                      np.arange(len(series) - 1))
    # works
    series = self.series.copy()
    series.index = np.arange(len(series))
    tm.assertIsInstance(series.index, Index)
def test_array_finalize(self):
    # NOTE(review): intentionally empty -- presumably overrides an inherited
    # test that does not apply here; confirm against the base class.
    pass
def test_pop(self):
    """Series.pop removes the label in place and returns its value (GH 6600)."""
    # GH 6600
    df = DataFrame({
        'A': 0,
        'B': np.arange(5,dtype='int64'),
        'C': 0,
    })
    row = df.iloc[4]
    popped = row.pop('B')
    self.assertEqual(popped, 4)
    assert_series_equal(row, Series([0, 0], index=['A', 'C'], name=4))
def test_not_hashable(self):
    """Series objects are unhashable regardless of length."""
    for ser in (Series(), Series([1])):
        self.assertRaises(TypeError, hash, ser)
def test_fromValue(self):
    """Scalar + index broadcasts the scalar, inferring dtype from the value."""
    nans = Series(np.NaN, index=self.ts.index)
    self.assertEqual(nans.dtype, np.float_)
    self.assertEqual(len(nans), len(self.ts))
    strings = Series('foo', index=self.ts.index)
    self.assertEqual(strings.dtype, np.object_)
    self.assertEqual(len(strings), len(self.ts))
    # a datetime scalar broadcasts to datetime64[ns]
    d = datetime.now()
    dates = Series(d, index=self.ts.index)
    self.assertEqual(dates.dtype, 'M8[ns]')
    self.assertEqual(len(dates), len(self.ts))
def test_contains(self):
tm.assert_contains_all(self.ts.index, self.ts)
def test_pickle(self):
unp_series = self._pickle_roundtrip(self.series)
unp_ts = self._pickle_roundtrip(self.ts)
assert_series_equal(unp_series, self.series)
assert_series_equal(unp_ts, self.ts)
def _pickle_roundtrip(self, obj):
with ensure_clean() as path:
obj.to_pickle(path)
unpickled = pd.read_pickle(path)
return unpickled
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
self.assertEqual(self.series[idx1], self.series.get(idx1))
self.assertEqual(self.objSeries[idx2], self.objSeries.get(idx2))
self.assertEqual(self.series[idx1], self.series[5])
self.assertEqual(self.objSeries[idx2], self.objSeries[5])
self.assertEqual(
self.series.get(-1), self.series.get(self.series.index[-1]))
self.assertEqual(self.series[5], self.series.get(self.series.index[5]))
# missing
d = self.ts.index[0] - datetools.bday
self.assertRaises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
self.assertIsNone(result)
def test_iget(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.irow(1)
# 10711, deprecated
with tm.assert_produces_warning(FutureWarning):
s.iget_value(1)
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.ix[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
self.assertTrue((s[1:3] == 0).all())
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iget_nonunique(self):
    # positional access works purely by position, ignoring the fact that
    # the label 0 appears twice in the index
    ser = Series([0, 1, 2], index=[0, 1, 0])
    self.assertEqual(ser.iloc[2], 2)
def test_getitem_regression(self):
    # indexing with a list that covers every label round-trips the Series
    ser = Series(lrange(5), index=lrange(5))
    assert_series_equal(ser[lrange(5)], ser)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
self.assertTrue((s == 0).all())
s[:-12] = 5
self.assertTrue((s == 0).all())
def test_getitem_int64(self):
    # a numpy int64 key must behave exactly like the builtin int key
    key = np.int64(5)
    self.assertEqual(self.ts[key], self.ts[5])
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
self.assertEqual(self.series.index[2], slice1.index[1])
self.assertEqual(self.objSeries.index[2], slice2.index[1])
self.assertEqual(self.series[2], slice1[1])
self.assertEqual(self.objSeries[2], slice2[1])
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
self.assert_numpy_array_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
self.assertEqual(s.index.name, 'index_name')
self.assertEqual(s.dtype, np.int64)
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan,index=['C'],dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
self.assertRaises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
self.assertRaises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
self.assertRaises(Exception, s.__getitem__, omask)
self.assertRaises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=datetools.bday) > ts.median()
# these used to raise...??
self.assertRaises(Exception, ts.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.__setitem__, mask_shifted, 1)
#ts[mask_shifted]
#ts[mask_shifted] = 1
self.assertRaises(Exception, ts.ix.__getitem__, mask_shifted)
self.assertRaises(Exception, ts.ix.__setitem__, mask_shifted, 1)
#ts.ix[mask_shifted]
#ts.ix[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
self.assertTrue((s[:4] == 0).all())
self.assertTrue(not (s[4:] == 0).any())
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
self.assertRaises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
self.assertRaises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
self.assertEqual(s.ix[0], s['a'])
s.ix[0] = 5
self.assertAlmostEqual(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
tm.assertIsInstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
self.assertRaises(KeyError, s.__getitem__, 1)
self.assertRaises(KeyError, s.ix.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
self.assertTrue(np.isscalar(obj['c']))
self.assertEqual(obj['c'], 0)
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .ix internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.ix[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
    # selecting a duplicated label returns every matching row, preserving
    # dtype and the duplicate labels themselves
    ser = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
    expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
    assert_series_equal(ser['C'], expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
self.assertRaises(TypeError, s.__getitem__, df>5)
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5],index=[1]))
assert_series_equal(s2,expected)
s2 = s.copy()
s2.ix[1] = 5
expected = s.append(Series([5],index=[1]))
assert_series_equal(s2,expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.ix[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
self.assertNotIn(self.series.index[9], numSlice.index)
self.assertNotIn(self.objSeries.index[9], objSlice.index)
self.assertEqual(len(numSlice), len(numSlice.index))
self.assertEqual(self.series[numSlice.index[0]],
numSlice[numSlice.index[0]])
self.assertEqual(numSlice.index[1], self.series.index[11])
self.assertTrue(tm.equalContents(numSliceEnd,
np.array(self.series)[-10:]))
# test return view
sl = self.series[10:20]
sl[:] = 0
self.assertTrue((self.series[10:20] == 0).all())
def test_slice_can_reorder_not_uniquely_indexed(self):
    # Reversing a Series whose index has duplicate labels must not raise.
    s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
    result = s[::-1]
    # BUG FIX: the original computed ``result`` but never inspected it, so
    # the test only proved "does not raise".  Verify the slice actually
    # reversed the (non-unique) index.
    expected = Series(1, index=['c', 'b', 'b', 'a', 'a'])
    assert_series_equal(result, expected)
def test_slice_float_get_set(self):
self.assertRaises(TypeError, lambda : self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
self.assertRaises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
self.assertEqual(len(s.ix[12.0:]), 8)
self.assertEqual(len(s.ix[12.5:]), 7)
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
self.assertTrue(np.isnan(self.ts[6]))
self.assertTrue(np.isnan(self.ts[2]))
self.ts[np.isnan(self.ts)] = 5
self.assertFalse(np.isnan(self.ts[2]))
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
self.assertTrue((series[::2] == 0).all())
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan,2,3])
s = Series([1,2,3])
s.iloc[0] = np.nan
assert_series_equal(s,expected)
s = Series([1,2,3])
s.loc[0] = np.nan
assert_series_equal(s,expected)
s = Series([1,2,3])
s[0] = np.nan
assert_series_equal(s,expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s,Series([np.nan]))
s = Series([False,True])
s.loc[0] = np.nan
assert_series_equal(s,Series([np.nan,1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
self.assertIs(res, self.ts)
self.assertEqual(self.ts[idx], 0)
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
self.assertIs(res, s)
self.assertEqual(res.index[-1], 'foobar')
self.assertEqual(res['foobar'], 0)
s = self.series.copy()
s.loc['foobar'] = 0
self.assertEqual(s.index[-1], 'foobar')
self.assertEqual(s['foobar'], 0)
def test_setslice(self):
    # A plain positional slice of a uniquely-indexed Series keeps index
    # and values aligned and preserves index uniqueness.
    sl = self.ts[5:20]
    self.assertEqual(len(sl), len(sl.index))
    self.assertTrue(sl.index.is_unique)
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assertRaisesRegexp(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
self.assertRaises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
self.assertRaises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_reshape_non_2d(self):
# GH 4554
x = Series(np.random.random(201), name='x')
self.assertTrue(x.reshape(x.shape,) is x)
# GH 2719
a = Series([1, 2, 3, 4])
result = a.reshape(2, 2)
expected = a.values.reshape(2, 2)
tm.assert_numpy_array_equal(result, expected)
self.assertTrue(type(result) is type(expected))
def test_reshape_2d_return_array(self):
x = Series(np.random.random(201), name='x')
result = x.reshape((-1, 1))
self.assertNotIsInstance(result, Series)
result2 = np.reshape(x, (-1, 1))
self.assertNotIsInstance(result2, Series)
result = x[:, None]
expected = x.reshape((-1, 1))
assert_almost_equal(result, expected)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.ix[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
def test_basic_setitem_with_labels(self):
    # Setting values through label arrays / label slices must match the
    # equivalent .ix assignment.
    indices = self.ts.index[[5, 10, 15]]

    cp = self.ts.copy()
    exp = self.ts.copy()
    cp[indices] = 0
    exp.ix[indices] = 0
    assert_series_equal(cp, exp)

    cp = self.ts.copy()
    exp = self.ts.copy()
    cp[indices[0]:indices[2]] = 0
    exp.ix[indices[0]:indices[2]] = 0
    assert_series_equal(cp, exp)

    # integer indexes, be careful
    s = Series(np.random.randn(10), index=lrange(0, 20, 2))
    inds = [0, 4, 6]
    arr_inds = np.array([0, 4, 6])

    # BUG FIX: the original assigned into ``s`` here instead of ``cp`` and
    # ``exp``, so both comparisons below compared two untouched copies and
    # were vacuously true.
    cp = s.copy()
    exp = s.copy()
    cp[inds] = 0
    exp.ix[inds] = 0
    assert_series_equal(cp, exp)

    cp = s.copy()
    exp = s.copy()
    cp[arr_inds] = 0
    exp.ix[arr_inds] = 0
    assert_series_equal(cp, exp)

    # labels not present in the index should raise on assignment
    inds_notfound = [0, 4, 5, 6]
    arr_inds_notfound = np.array([0, 4, 5, 6])
    self.assertRaises(Exception, s.__setitem__, inds_notfound, 0)
    self.assertRaises(Exception, s.__setitem__, arr_inds_notfound, 0)
def test_ix_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.ix[inds], self.series.reindex(inds))
assert_series_equal(self.series.ix[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.ix[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.ix[mask], self.series[mask])
# ask for index value
self.assertEqual(self.ts.ix[d1], self.ts[d1])
self.assertEqual(self.ts.ix[d2], self.ts[d2])
def test_ix_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
self.assertRaises(KeyError, ts2.ix.__getitem__, slice(d1, d2))
self.assertRaises(KeyError, ts2.ix.__setitem__, slice(d1, d2), 0)
def test_ix_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.ix[4:10] = 0
self.assertTrue((cp.ix[4:10] == 0).all())
# so is this
cp = s.copy()
cp.ix[3:11] = 0
self.assertTrue((cp.ix[3:11] == 0).values.all())
result = s.ix[4:10]
result2 = s.ix[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
self.assertRaises(KeyError, s2.ix.__getitem__, slice(3, 11))
self.assertRaises(KeyError, s2.ix.__setitem__, slice(3, 11), 0)
def test_ix_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.ix[idx]
assert_series_equal(result, self.series[:10])
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert(s.shape == rs.shape)
assert(rs is not s)
# test alignment
cond = Series([True,False,False,True,False],index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.ix[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
self.assertRaises(ValueError, s.where, 1)
self.assertRaises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
self.assertRaises(
ValueError, s.__setitem__, tuple([[[True, False]]]), [0, 2, 3])
self.assertRaises(
ValueError, s.__setitem__, tuple([[[True, False]]]), [])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
self.assertRaises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
self.assertEqual(s.dtype, expected.dtype)
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5,4,3,2,1]
self.assertRaises(ValueError, f)
def f():
s[mask] = [0] * 5
self.assertRaises(ValueError, f)
# dtype changes
s = Series([1,2,3,4])
result = s.where(s>2,np.nan)
expected = Series([np.nan,np.nan,3,4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
self.assertTrue(isnull(result))
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan,index=[9])
assert_series_equal(result, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
self.assertRaises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0,1,2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0,'b',1,'d','e','f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
self.assertRaises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a','b','c',0,1,'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0,1,2]] = list(range(27))
self.assertRaises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0,1,2]] = list(range(2))
self.assertRaises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)),'b','c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [np.resize([True, False, False, False, False], size), # First element should be set
# Set alternating elements
np.resize([True, False], size),
np.resize([False], size)]: # No element should be set
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item,)]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[i]
for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1,s2])
result = comb.where(comb < 2)
expected = Series([0,1,np.nan,0,1,np.nan],index=[0,1,2,0,1,2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb<1] = 5
expected = Series([5,1,2,5,1,2],index=[0,1,2,0,1,2])
assert_series_equal(comb, expected)
comb[comb<2] += 10
expected = Series([5,11,2,5,11,2],index=[0,1,2,0,1,2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
self.assertRaises(ValueError, s.mask, 1)
self.assertRaises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1,2,3,4])
result = s.mask(s>2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [np.resize([True, False, False, False, False], size), # First element should be set
# Set alternating elements
np.resize([True, False], size),
np.resize([False], size)]: # No element should be set
for item in [2.0, np.nan, np.finfo(np.float).max, np.finfo(np.float).min]:
for arr in [np.array([item]), [item], (item,)]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[i]
for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
    # mask(..., inplace=True) must produce the same result as the
    # out-of-place call, both with and without a replacement value.
    ser = Series(np.random.randn(5))
    cond = ser > 0

    masked = ser.copy()
    masked.mask(cond, inplace=True)
    assert_series_equal(masked.dropna(), ser[~cond])
    assert_series_equal(masked, ser.mask(cond))

    masked = ser.copy()
    masked.mask(cond, -ser, inplace=True)
    assert_series_equal(masked, ser.mask(cond, -ser))
def test_drop(self):
# unique
s = Series([1,2],index=['one','two'])
expected = Series([1],index=['one'])
result = s.drop(['two'])
assert_series_equal(result,expected)
result = s.drop('two', axis='rows')
assert_series_equal(result,expected)
# non-unique
# GH 5248
s = Series([1,1,2],index=['one','two','one'])
expected = Series([1,2],index=['one','one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result,expected)
result = s.drop('two')
assert_series_equal(result,expected)
expected = Series([1],index=['two'])
result = s.drop(['one'])
assert_series_equal(result,expected)
result = s.drop('one')
assert_series_equal(result,expected)
# single string/tuple-like
s = Series(range(3),index=list('abc'))
self.assertRaises(ValueError, s.drop, 'bc')
self.assertRaises(ValueError, s.drop, ('a',))
# errors='ignore'
s = Series(range(3),index=list('abc'))
result = s.drop('bc', errors='ignore')
assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.ix[1:]
assert_series_equal(result, expected)
# bad axis
self.assertRaises(ValueError, s.drop, 'one', axis='columns')
# GH 8522
s = Series([2,3], index=[True, False])
self.assertTrue(s.index.is_object())
result = s.drop(True)
expected = Series([3],index=[False])
assert_series_equal(result,expected)
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.ix[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.ix[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.ix[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.ix[d1] = 4
self.series.ix[d2] = 6
self.assertEqual(self.series[d1], 4)
self.assertEqual(self.series[d2], 6)
def test_where_numeric_with_string(self):
    # GH 9280: replacing masked elements with strings must upcast the
    # result to object dtype.  The untouched elements stay integers, the
    # replaced one becomes a str.  Exercise scalar, list and ndarray
    # replacement forms with identical assertions.
    s = pd.Series([1, 2, 3])
    for other in ('X', ['X', 'Y', 'Z'], np.array(['X', 'Y', 'Z'])):
        w = s.where(s > 1, other)
        self.assertFalse(com.is_integer(w[0]))
        self.assertTrue(com.is_integer(w[1]))
        self.assertTrue(com.is_integer(w[2]))
        self.assertTrue(isinstance(w[0], str))
        self.assertTrue(w.dtype == 'object')
def test_setitem_boolean(self):
mask = self.series > self.series.median()
# similiar indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.ix[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.ix[inds] = 5
self.assertRaises(Exception, self.series.ix.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(tm.randn(1000), index=np.arange(1000)))
str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))
# empty
str(self.empty)
# with NaNs
self.series[5:7] = np.NaN
str(self.series)
# with Nones
ots = self.ts.astype('O')
ots[::2] = None
repr(ots)
# various names
for name in ['', 1, 1.2, 'foo', u('\u03B1\u03B2\u03B3'),
'loooooooooooooooooooooooooooooooooooooooooooooooooooong',
('foo', 'bar', 'baz'),
(1, 2),
('foo', 1, 2.3),
(u('\u03B1'), u('\u03B2'), u('\u03B3')),
(u('\u03B1'), 'bar')]:
self.series.name = name
repr(self.series)
biggie = Series(tm.randn(1000), index=np.arange(1000),
name=('foo', 'bar', 'baz'))
repr(biggie)
# 0 as name
ser = Series(np.random.randn(100), name=0)
rep_str = repr(ser)
self.assertIn("Name: 0", rep_str)
# tidy repr
ser = Series(np.random.randn(1001), name=0)
rep_str = repr(ser)
self.assertIn("Name: 0", rep_str)
ser = Series(["a\n\r\tb"], name=["a\n\r\td"], index=["a\n\r\tf"])
self.assertFalse("\t" in repr(ser))
self.assertFalse("\r" in repr(ser))
self.assertFalse("a\n" in repr(ser))
# with empty series (#4651)
s = Series([], dtype=np.int64, name='foo')
self.assertEqual(repr(s), 'Series([], Name: foo, dtype: int64)')
s = Series([], dtype=np.int64, name=None)
self.assertEqual(repr(s), 'Series([], dtype: int64)')
def test_tidy_repr(self):
a = Series([u("\u05d0")] * 1000)
a.name = 'title1'
repr(a) # should not raise exception
def test_repr_bool_fails(self):
s = Series([DataFrame(np.random.randn(2, 2)) for i in range(5)])
import sys
buf = StringIO()
tmp = sys.stderr
sys.stderr = buf
try:
# it works (with no Cython exception barf)!
repr(s)
finally:
sys.stderr = tmp
self.assertEqual(buf.getvalue(), '')
def test_repr_name_iterable_indexable(self):
s = Series([1, 2, 3], name=np.int64(3))
# it works!
repr(s)
s.name = (u("\u05d0"),) * 2
repr(s)
def test_repr_should_return_str(self):
    # http://docs.python.org/py3k/reference/datamodel.html#object.__repr__
    # http://docs.python.org/reference/datamodel.html#object.__repr__
    # ...The return value must be a string object.
    # (str on py2.x, str (unicode) on py3)
    data = [8, 5, 3, 5]
    index1 = [u("\u03c3"), u("\u03c4"), u("\u03c5"), u("\u03c6")]
    df = Series(data, index=index1)
    # BUG FIX: the original wrote ``type(df.__repr__() == str)`` which
    # evaluates ``type(<bool>)`` -- always truthy, so the type check never
    # actually ran.  Assert the intended isinstance check instead.
    self.assertTrue(isinstance(df.__repr__(), str))  # both py2 / 3
def test_repr_max_rows(self):
# GH 6863
with pd.option_context('max_rows', None):
str(Series(range(1001))) # should not raise exception
def test_unicode_string_with_unicode(self):
df = Series([u("\u05d0")], name=u("\u05d1"))
if compat.PY3:
str(df)
else:
compat.text_type(df)
def test_bytestring_with_unicode(self):
df = Series([u("\u05d0")], name=u("\u05d1"))
if compat.PY3:
bytes(df)
else:
str(df)
def test_timeseries_repr_object_dtype(self):
index = Index([datetime(2000, 1, 1) + timedelta(i)
for i in range(1000)], dtype=object)
ts = Series(np.random.randn(len(index)), index)
repr(ts)
ts = tm.makeTimeSeries(1000)
self.assertTrue(repr(ts).splitlines()[-1].startswith('Freq:'))
ts2 = ts.ix[np.random.randint(0, len(ts) - 1, 400)]
repr(ts2).splitlines()[-1]
def test_timeseries_periodindex(self):
# GH2891
from pandas import period_range
prng = period_range('1/1/2011', '1/1/2012', freq='M')
ts = Series(np.random.randn(len(prng)), prng)
new_ts = self.round_trip_pickle(ts)
self.assertEqual(new_ts.index.freq, 'M')
def test_iter(self):
for i, val in enumerate(self.series):
self.assertEqual(val, self.series[i])
for i, val in enumerate(self.ts):
self.assertEqual(val, self.ts[i])
def test_keys(self):
    # Series.keys() must return the index object itself (identity, not a
    # copy).
    # HACK: By doing this in two stages, we avoid 2to3 wrapping the call
    # to .keys() in a list()
    getkeys = self.ts.keys
    self.assertIs(getkeys(), self.ts.index)
def test_values(self):
    # .values must expose the same contents as the Series itself
    self.assert_numpy_array_equal(self.ts, self.ts.values)
def test_iteritems(self):
for idx, val in compat.iteritems(self.series):
self.assertEqual(val, self.series[idx])
for idx, val in compat.iteritems(self.ts):
self.assertEqual(val, self.ts[idx])
# assert is lazy (genrators don't define reverse, lists do)
self.assertFalse(hasattr(self.series.iteritems(), 'reverse'))
def test_sum(self):
    # Delegate to the shared reduction checker against np.sum;
    # check_allna exercises the all-NaN bottleneck special case (xref 9422).
    self._check_stat_op('sum', np.sum, check_allna=True)
def test_sum_inf(self):
    import pandas.core.nanops as nanops

    s = Series(np.random.randn(10))
    s2 = s.copy()

    s[5:8] = np.inf
    s2[5:8] = np.nan

    # by default inf propagates through sum
    self.assertTrue(np.isinf(s.sum()))

    arr = np.random.randn(100, 100).astype('f4')
    arr[:, 2] = np.inf

    # with use_inf_as_null, inf is treated like NaN and skipped,
    # so the inf-series sums like the NaN-series
    with cf.option_context("mode.use_inf_as_null", True):
        assert_almost_equal(s.sum(), s2.sum())

        # nansum on a raw ndarray still lets inf through
        res = nanops.nansum(arr, axis=1)
        self.assertTrue(np.isinf(res).all())
def test_mean(self):
    # Delegate to the shared reduction checker against np.mean.
    self._check_stat_op('mean', np.mean)
def test_median(self):
    # Delegate to the shared reduction checker against np.median.
    self._check_stat_op('median', np.median)

    # test with integers, test failure
    int_ts = Series(np.ones(10, dtype=int), index=lrange(10))
    self.assertAlmostEqual(np.median(int_ts), int_ts.median())
def test_mode(self):
    # mode() returns the most frequent value(s), sorted ascending,
    # with NaN excluded; dtype is preserved.
    s = Series([12, 12, 11, 10, 19, 11])
    exp = Series([11, 12])
    assert_series_equal(s.mode(), exp)

    # no repeated value -> empty result, int dtype preserved
    assert_series_equal(Series([1, 2, 3]).mode(), Series([], dtype='int64'))

    lst = [5] * 20 + [1] * 10 + [6] * 25
    np.random.shuffle(lst)
    s = Series(lst)
    assert_series_equal(s.mode(), Series([6]))

    s = Series([5] * 10)
    assert_series_equal(s.mode(), Series([5]))

    # assigning NaN upcasts to float, hence the float expectation
    s = Series(lst)
    s[0] = np.nan
    assert_series_equal(s.mode(), Series([6.]))

    s = Series(list('adfasbasfwewefwefweeeeasdfasnbam'))
    assert_series_equal(s.mode(), Series(['e']))

    # datetime64: all-unique -> empty, dtype preserved
    s = Series(['2011-01-03', '2013-01-02', '1900-05-03'], dtype='M8[ns]')
    assert_series_equal(s.mode(), Series([], dtype="M8[ns]"))
    s = Series(['2011-01-03', '2013-01-02', '1900-05-03', '2011-01-03',
                '2013-01-02'], dtype='M8[ns]')
    assert_series_equal(s.mode(), Series(['2011-01-03', '2013-01-02'],
                                         dtype='M8[ns]'))
def test_prod(self):
    # Delegate to the shared reduction checker against np.prod.
    self._check_stat_op('prod', np.prod)
def test_min(self):
    # Delegate to the shared checker; check_objects also exercises dates.
    self._check_stat_op('min', np.min, check_objects=True)
def test_max(self):
    # Delegate to the shared checker; check_objects also exercises dates.
    self._check_stat_op('max', np.max, check_objects=True)
def test_var_std(self):
    # sample (ddof=1) std/var against numpy reference
    alt = lambda x: np.std(x, ddof=1)
    self._check_stat_op('std', alt)

    alt = lambda x: np.var(x, ddof=1)
    self._check_stat_op('var', alt)

    # an explicit ddof is forwarded to numpy semantics
    result = self.ts.std(ddof=4)
    expected = np.std(self.ts.values, ddof=4)
    assert_almost_equal(result, expected)

    result = self.ts.var(ddof=4)
    expected = np.var(self.ts.values, ddof=4)
    assert_almost_equal(result, expected)

    # 1 - element series with ddof=1: undefined -> NaN
    s = self.ts.iloc[[0]]
    result = s.var(ddof=1)
    self.assertTrue(isnull(result))

    result = s.std(ddof=1)
    self.assertTrue(isnull(result))
def test_sem(self):
    # standard error of the mean: std(ddof)/sqrt(n) against numpy
    alt = lambda x: np.std(x, ddof=1)/np.sqrt(len(x))
    self._check_stat_op('sem', alt)

    result = self.ts.sem(ddof=4)
    expected = np.std(self.ts.values, ddof=4)/np.sqrt(len(self.ts.values))
    assert_almost_equal(result, expected)

    # 1 - element series with ddof=1: undefined -> NaN
    s = self.ts.iloc[[0]]
    result = s.sem(ddof=1)
    self.assertTrue(isnull(result))
def test_skew(self):
    tm._skip_if_no_scipy()

    # bias-corrected skewness against scipy
    from scipy.stats import skew
    alt = lambda x: skew(x, bias=False)
    self._check_stat_op('skew', alt)

    # test corner cases, skew() returns NaN unless there's at least 3 values
    min_N = 3
    for i in range(1, min_N + 1):
        s = Series(np.ones(i))
        df = DataFrame(np.ones((i, i)))
        if i < min_N:
            self.assertTrue(np.isnan(s.skew()))
            self.assertTrue(np.isnan(df.skew()).all())
        else:
            # constant data has zero skewness once it is defined
            self.assertEqual(0, s.skew())
            self.assertTrue((df.skew() == 0).all())
def test_kurt(self):
    tm._skip_if_no_scipy()

    # bias-corrected kurtosis against scipy
    from scipy.stats import kurtosis
    alt = lambda x: kurtosis(x, bias=False)
    self._check_stat_op('kurt', alt)

    # level-wise reduction on a MultiIndex must match the full reduction
    # restricted to that level's group
    index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
                       labels=[[0, 0, 0, 0, 0, 0],
                               [0, 1, 2, 0, 1, 2],
                               [0, 1, 0, 1, 0, 1]])
    s = Series(np.random.randn(6), index=index)
    self.assertAlmostEqual(s.kurt(), s.kurt(level=0)['bar'])

    # test corner cases, kurt() returns NaN unless there's at least 4 values
    min_N = 4
    for i in range(1, min_N + 1):
        s = Series(np.ones(i))
        df = DataFrame(np.ones((i, i)))
        if i < min_N:
            self.assertTrue(np.isnan(s.kurt()))
            self.assertTrue(np.isnan(df.kurt()).all())
        else:
            # constant data has zero excess kurtosis once it is defined
            self.assertEqual(0, s.kurt())
            self.assertTrue((df.kurt() == 0).all())
def test_argsort(self):
    # argsort must match numpy's and return an integer dtype
    self._check_accum_op('argsort')
    argsorted = self.ts.argsort()
    self.assertTrue(issubclass(argsorted.dtype.type, np.integer))

    # GH 2967 (introduced bug in 0.11-dev I think)
    s = Series([Timestamp('201301%02d' % (i + 1)) for i in range(5)])
    self.assertEqual(s.dtype, 'datetime64[ns]')
    shifted = s.shift(-1)
    self.assertEqual(shifted.dtype, 'datetime64[ns]')
    self.assertTrue(isnull(shifted[4]))

    result = s.argsort()
    expected = Series(lrange(5), dtype='int64')
    assert_series_equal(result, expected)

    # NaT positions come out as -1 in the argsort output
    result = shifted.argsort()
    expected = Series(lrange(4) + [-1], dtype='int64')
    assert_series_equal(result, expected)
def test_argsort_stable(self):
    # Both the stable (mergesort) and default (quicksort) argsort must
    # match numpy, while producing different permutations for data with
    # many ties.
    data = Series(np.random.randint(0, 100, size=10000))
    stable = data.argsort(kind='mergesort')
    default = data.argsort()
    self.assert_numpy_array_equal(stable,
                                  np.argsort(data.values, kind='mergesort'))
    self.assert_numpy_array_equal(default,
                                  np.argsort(data.values, kind='quicksort'))
    self.assertFalse(np.array_equal(default, stable))
def test_reorder_levels(self):
    # reorder_levels must accept positions or names, permute the
    # MultiIndex levels accordingly, and allow duplicated levels.
    index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
                       labels=[[0, 0, 0, 0, 0, 0],
                               [0, 1, 2, 0, 1, 2],
                               [0, 1, 0, 1, 0, 1]],
                       names=['L0', 'L1', 'L2'])
    s = Series(np.arange(6), index=index)

    # no change, position
    result = s.reorder_levels([0, 1, 2])
    assert_series_equal(s, result)

    # no change, labels
    result = s.reorder_levels(['L0', 'L1', 'L2'])
    assert_series_equal(s, result)

    # rotate, position
    result = s.reorder_levels([1, 2, 0])
    e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],
                       labels=[[0, 1, 2, 0, 1, 2],
                               [0, 1, 0, 1, 0, 1],
                               [0, 0, 0, 0, 0, 0]],
                       names=['L1', 'L2', 'L0'])
    expected = Series(np.arange(6), index=e_idx)
    assert_series_equal(result, expected)

    # the same level may be repeated
    result = s.reorder_levels([0, 0, 0])
    e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],
                       labels=[[0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0],
                               [0, 0, 0, 0, 0, 0]],
                       names=['L0', 'L0', 'L0'])
    expected = Series(range(6), index=e_idx)
    assert_series_equal(result, expected)

    result = s.reorder_levels(['L0', 'L0', 'L0'])
    assert_series_equal(result, expected)
def test_cumsum(self):
    # Delegate to the shared accumulation checker against np.cumsum.
    self._check_accum_op('cumsum')
def test_cumprod(self):
    # Delegate to the shared accumulation checker against np.cumprod.
    self._check_accum_op('cumprod')
def test_cummin(self):
    # cummin must match numpy's minimum.accumulate; NaNs are skipped,
    # so the non-NaN positions equal the accumulate of the valid values.
    dense_expected = np.minimum.accumulate(np.array(self.ts))
    self.assert_numpy_array_equal(self.ts.cummin(), dense_expected)

    masked = self.ts.copy()
    masked[::2] = np.NaN
    got = masked.cummin()[1::2]
    want = np.minimum.accumulate(masked.valid())
    self.assert_numpy_array_equal(got, want)
def test_cummax(self):
    # cummax must match numpy's maximum.accumulate; NaNs are skipped,
    # so the non-NaN positions equal the accumulate of the valid values.
    dense_expected = np.maximum.accumulate(np.array(self.ts))
    self.assert_numpy_array_equal(self.ts.cummax(), dense_expected)

    masked = self.ts.copy()
    masked[::2] = np.NaN
    got = masked.cummax()[1::2]
    want = np.maximum.accumulate(masked.valid())
    self.assert_numpy_array_equal(got, want)
def test_cummin_datetime64(self):
    # cummin on datetime64: skipna=True leaves NaT in place,
    # skipna=False forward-fills the running minimum over NaT gaps.
    s = pd.Series(pd.to_datetime(
        ['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-3']))

    expected = pd.Series(pd.to_datetime(
        ['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-1']))
    result = s.cummin(skipna=True)
    self.assert_series_equal(expected, result)

    expected = pd.Series(pd.to_datetime(
        ['NaT', '2000-1-2', '2000-1-2', '2000-1-1', '2000-1-1', '2000-1-1']))
    result = s.cummin(skipna=False)
    self.assert_series_equal(expected, result)
def test_cummax_datetime64(self):
    # cummax on datetime64: skipna=True leaves NaT in place,
    # skipna=False forward-fills the running maximum over NaT gaps.
    s = pd.Series(pd.to_datetime(
        ['NaT', '2000-1-2', 'NaT', '2000-1-1', 'NaT', '2000-1-3']))

    expected = pd.Series(pd.to_datetime(
        ['NaT', '2000-1-2', 'NaT', '2000-1-2', 'NaT', '2000-1-3']))
    result = s.cummax(skipna=True)
    self.assert_series_equal(expected, result)

    expected = pd.Series(pd.to_datetime(
        ['NaT', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-2', '2000-1-3']))
    result = s.cummax(skipna=False)
    self.assert_series_equal(expected, result)
def test_cummin_timedelta64(self):
    # cummin on timedelta64: skipna=True leaves NaT in place,
    # skipna=False forward-fills the running minimum over NaT gaps.
    s = pd.Series(pd.to_timedelta(
        ['NaT', '2 min', 'NaT', '1 min', 'NaT', '3 min', ]))

    expected = pd.Series(pd.to_timedelta(
        ['NaT', '2 min', 'NaT', '1 min', 'NaT', '1 min', ]))
    result = s.cummin(skipna=True)
    self.assert_series_equal(expected, result)

    expected = pd.Series(pd.to_timedelta(
        ['NaT', '2 min', '2 min', '1 min', '1 min', '1 min', ]))
    result = s.cummin(skipna=False)
    self.assert_series_equal(expected, result)
def test_cummax_timedelta64(self):
    # cummax on timedelta64: skipna=True leaves NaT in place,
    # skipna=False forward-fills the running maximum over NaT gaps.
    s = pd.Series(pd.to_timedelta(
        ['NaT', '2 min', 'NaT', '1 min', 'NaT', '3 min', ]))

    expected = pd.Series(pd.to_timedelta(
        ['NaT', '2 min', 'NaT', '2 min', 'NaT', '3 min', ]))
    result = s.cummax(skipna=True)
    self.assert_series_equal(expected, result)

    expected = pd.Series(pd.to_timedelta(
        ['NaT', '2 min', '2 min', '2 min', '2 min', '3 min', ]))
    result = s.cummax(skipna=False)
    self.assert_series_equal(expected, result)
def test_npdiff(self):
    # Intentionally skipped: np.diff on a Series now returns a plain
    # ndarray, so the assertions below are unreachable and kept only
    # as a record of the previous behavior.
    raise nose.SkipTest("skipping due to Series no longer being an "
                        "ndarray")

    # no longer works as the return type of np.diff is now nd.array
    s = Series(np.arange(5))

    r = np.diff(s)
    assert_series_equal(Series([nan, 0, 0, 0, nan]), r)
def _check_stat_op(self, name, alternate, check_objects=False, check_allna=False):
    # Shared checker for reduction ops (sum/mean/median/...): compares
    # the Series method `name` against the reference function
    # `alternate` and exercises NaN handling, dtype corner cases and
    # invalid arguments.
    # NOTE: mutates self.series in place (injects NaNs), and runs the
    # whole battery twice — with and without bottleneck enabled.
    import pandas.core.nanops as nanops

    def testit():
        f = getattr(Series, name)

        # add some NaNs
        self.series[5:15] = np.NaN

        # idxmax, idxmin, min, and max are valid for dates
        if name not in ['max','min']:
            ds = Series(date_range('1/1/2001', periods=10))
            self.assertRaises(TypeError, f, ds)

        # skipna or no
        self.assertTrue(notnull(f(self.series)))
        self.assertTrue(isnull(f(self.series, skipna=False)))

        # check the result is correct
        nona = self.series.dropna()
        assert_almost_equal(f(nona), alternate(nona.values))
        assert_almost_equal(f(self.series), alternate(nona.values))

        allna = self.series * nan

        if check_allna:
            # xref 9422
            # bottleneck >= 1.0 give 0.0 for an allna Series sum
            try:
                self.assertTrue(nanops._USE_BOTTLENECK)
                import bottleneck as bn
                self.assertTrue(bn.__version__ >= LooseVersion('1.0'))
                self.assertEqual(f(allna),0.0)
            except:
                # NOTE(review): bare except deliberately catches both a
                # missing/old bottleneck (ImportError) and the failed
                # assertions above, falling back to the NaN expectation
                self.assertTrue(np.isnan(f(allna)))

        # dtype=object with None, it works!
        s = Series([1, 2, 3, None, 5])
        f(s)

        # 2888
        l = [0]
        l.extend(lrange(2 ** 40, 2 ** 40+1000))
        s = Series(l, dtype='int64')
        assert_almost_equal(float(f(s)), float(alternate(s.values)))

        # check date range
        if check_objects:
            s = Series(bdate_range('1/1/2000', periods=10))
            res = f(s)
            exp = alternate(s)
            self.assertEqual(res, exp)

        # check on string data
        if name not in ['sum','min','max']:
            self.assertRaises(TypeError, f, Series(list('abc')))

        # Invalid axis.
        self.assertRaises(ValueError, f, self.series, axis=1)

        # Unimplemented numeric_only parameter.
        if 'numeric_only' in getargspec(f).args:
            self.assertRaisesRegexp(NotImplementedError, name, f,
                                    self.series, numeric_only=True)

    testit()

    # rerun with bottleneck disabled (if it is installed at all)
    try:
        import bottleneck as bn
        nanops._USE_BOTTLENECK = False
        testit()
        nanops._USE_BOTTLENECK = True
    except ImportError:
        pass
def _check_accum_op(self, name):
    # Shared checker for accumulation ops (cumsum/cumprod/argsort/...):
    # the Series result must match the numpy function of the same name,
    # including when NaNs are present (compared on the valid positions).
    func = getattr(np, name)
    self.assert_numpy_array_equal(func(self.ts), func(np.array(self.ts)))

    # with missing values
    ts = self.ts.copy()
    ts[::2] = np.NaN

    result = func(ts)[1::2]
    expected = func(np.array(ts.valid()))

    self.assert_numpy_array_equal(result, expected)
def test_round(self):
    # numpy.round doesn't preserve metadata, probably a numpy bug,
    # re: GH #314
    result = np.round(self.ts, 2)
    expected = Series(np.round(self.ts.values, 2), index=self.ts.index,
                      name='ts')
    assert_series_equal(result, expected)
    # the name must survive the round-trip through np.round
    self.assertEqual(result.name, self.ts.name)
def test_prod_numpy16_bug(self):
    # prod() must reduce to a scalar, never a Series
    # (regression guard for a numpy 1.6 quirk).
    ones = Series([1., 1., 1.], index=lrange(3))
    self.assertNotIsInstance(ones.prod(), Series)
def test_quantile(self):
    from numpy import percentile

    # scalar quantile against numpy percentile (0.1 -> 10th percentile)
    q = self.ts.quantile(0.1)
    self.assertEqual(q, percentile(self.ts.valid(), 10))

    q = self.ts.quantile(0.9)
    self.assertEqual(q, percentile(self.ts.valid(), 90))

    # object dtype
    q = Series(self.ts,dtype=object).quantile(0.9)
    self.assertEqual(q, percentile(self.ts.valid(), 90))

    # datetime64[ns] dtype
    dts = self.ts.index.to_series()
    q = dts.quantile(.2)
    self.assertEqual(q, Timestamp('2000-01-10 19:12:00'))

    # timedelta64[ns] dtype
    tds = dts.diff()
    q = tds.quantile(.25)
    self.assertEqual(q, pd.to_timedelta('24:00:00'))

    # GH7661: sum of an all-NaT timedelta series is NaT
    result = Series([np.timedelta64('NaT')]).sum()
    self.assertTrue(result is pd.NaT)

    # out-of-range quantiles must raise with a clear message
    msg = 'percentiles should all be in the interval \\[0, 1\\]'
    for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
        with tm.assertRaisesRegexp(ValueError, msg):
            self.ts.quantile(invalid)
def test_quantile_multi(self):
    from numpy import percentile

    # a list of quantiles returns a Series indexed by those quantiles
    qs = [.1, .9]
    result = self.ts.quantile(qs)
    expected = pd.Series([percentile(self.ts.valid(), 10),
                          percentile(self.ts.valid(), 90)],
                         index=qs, name=self.ts.name)
    assert_series_equal(result, expected)

    # duplicate quantiles are allowed; the name is preserved
    dts = self.ts.index.to_series()
    dts.name = 'xxx'
    result = dts.quantile((.2, .2))
    expected = Series([Timestamp('2000-01-10 19:12:00'),
                       Timestamp('2000-01-10 19:12:00')],
                      index=[.2, .2], name='xxx')
    assert_series_equal(result, expected)

    # an empty quantile list yields an empty float-indexed Series
    result = self.ts.quantile([])
    expected = pd.Series([], name=self.ts.name, index=Index([], dtype=float))
    assert_series_equal(result, expected)
def test_append(self):
    # append concatenates: every label in the result must come from
    # exactly one of the inputs, with its original value
    appendedSeries = self.series.append(self.objSeries)
    for idx, value in compat.iteritems(appendedSeries):
        if idx in self.series.index:
            self.assertEqual(value, self.series[idx])
        elif idx in self.objSeries.index:
            self.assertEqual(value, self.objSeries[idx])
        else:
            self.fail("orphaned index!")

    # overlapping labels with verify_integrity=True must raise
    self.assertRaises(ValueError, self.ts.append, self.ts,
                      verify_integrity=True)
def test_append_many(self):
    # Appending a list of consecutive slices must reassemble the
    # original series exactly.
    chunks = [self.ts[:5], self.ts[5:10], self.ts[10:]]
    reassembled = chunks[0].append(chunks[1:])
    assert_series_equal(reassembled, self.ts)
def test_all_any(self):
    # A random walk crosses zero, so the sign mask is mixed:
    # any() is True while all() is False.
    ts = tm.makeTimeSeries()
    positive = ts > 0
    self.assertFalse(positive.all())
    self.assertTrue(positive.any())

    # Alternative types, with implicit 'object' dtype.
    mixed = Series(['abc', True])
    self.assertEqual('abc', mixed.any())  # 'abc' || True => 'abc'
def test_all_any_params(self):
    # Check skipna, with implicit 'object' dtype.
    s1 = Series([np.nan, True])
    s2 = Series([np.nan, False])
    self.assertTrue(s1.all(skipna=False))  # nan && True => True
    self.assertTrue(s1.all(skipna=True))
    self.assertTrue(np.isnan(s2.any(skipna=False)))  # nan || False => nan
    self.assertFalse(s2.any(skipna=True))

    # Check level.
    s = pd.Series([False, False, True, True, False, True],
                  index=[0, 0, 1, 1, 2, 2])
    assert_series_equal(s.all(level=0), Series([False, True, False]))
    assert_series_equal(s.any(level=0), Series([False, True, True]))

    # bool_only is not implemented with level option.
    self.assertRaises(NotImplementedError, s.any, bool_only=True, level=0)
    self.assertRaises(NotImplementedError, s.all, bool_only=True, level=0)

    # bool_only is not implemented alone.
    self.assertRaises(NotImplementedError, s.any, bool_only=True)
    self.assertRaises(NotImplementedError, s.all, bool_only=True)
def test_op_method(self):
    # The named Series ops (add/sub/mul/...) must agree with the
    # corresponding `operator` module function, and the r<op> variants
    # must agree with the operator applied to swapped operands.
    def check(series, other, check_reverse=False):
        simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
        if not compat.PY3:
            simple_ops.append('div')
        for opname in simple_ops:
            op = getattr(Series, opname)

            # BUG FIX: the original compared the bound method `op`
            # against the string 'div' (always False, so the truediv
            # branch was unreachable); compare the *name* instead so
            # Series.div is checked against true division on Python 2.
            if opname == 'div':
                alt = operator.truediv
            else:
                alt = getattr(operator, opname)

            result = op(series, other)
            expected = alt(series, other)
            tm.assert_almost_equal(result, expected)
            if check_reverse:
                rop = getattr(Series, "r" + opname)
                result = rop(series, other)
                expected = alt(other, series)
                tm.assert_almost_equal(result, expected)

    check(self.ts, self.ts * 2)
    check(self.ts, self.ts[::2])
    check(self.ts, 5, check_reverse=True)
    check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
    # Unary negation must equal multiplication by -1.
    negated = -self.series
    scaled = -1 * self.series
    assert_series_equal(negated, scaled)
def test_invert(self):
    # On a boolean mask, unary minus and bitwise invert agree.
    mask = self.series < 0
    assert_series_equal(-mask, ~mask)
def test_modulo(self):
    # GH3590, modulo as ints
    p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
    result = p['first'] % p['second']
    expected = Series(p['first'].values %
                      p['second'].values, dtype='float64')
    expected.iloc[0:3] = np.nan  # int % 0 -> NaN (upcast to float)
    assert_series_equal(result, expected)

    result = p['first'] % 0
    expected = Series(np.nan, index=p.index, name='first')
    assert_series_equal(result, expected)

    p = p.astype('float64')
    result = p['first'] % p['second']
    expected = Series(p['first'].values % p['second'].values)
    assert_series_equal(result, expected)

    # NOTE(review): this second astype is a no-op (p is already float64)
    p = p.astype('float64')
    result = p['first'] % p['second']
    result2 = p['second'] % p['first']
    self.assertFalse(np.array_equal(result, result2))

    # GH 9144
    s = Series([0, 1])
    result = s % 0
    expected = Series([nan, nan])
    assert_series_equal(result, expected)

    result = 0 % s
    expected = Series([nan, 0.0])
    assert_series_equal(result, expected)
def test_div(self):
    # no longer do integer div for any ops, but deal with the 0's
    p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
    result = p['first'] / p['second']
    expected = Series(p['first'].values.astype(float) / p['second'].values,
                      dtype='float64')
    expected.iloc[0:3] = np.inf  # positive / 0 -> +inf
    assert_series_equal(result, expected)

    result = p['first'] / 0
    expected = Series(np.inf, index=p.index, name='first')
    assert_series_equal(result, expected)

    p = p.astype('float64')
    result = p['first'] / p['second']
    expected = Series(p['first'].values / p['second'].values)
    assert_series_equal(result, expected)

    # division of differing columns drops the name
    p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
    result = p['first'] / p['second']
    assert_series_equal(result, p['first'].astype('float64'), check_names=False)
    self.assertTrue(result.name is None)
    self.assertFalse(np.array_equal(result, p['second'] / p['first']))

    # inf signing
    s = Series([np.nan,1.,-1.])
    result = s / 0
    expected = Series([np.nan,np.inf,-np.inf])
    assert_series_equal(result, expected)

    # float/integer issue
    # GH 7785
    p = DataFrame({'first': (1,0), 'second': (-0.01,-0.02)})
    expected = Series([-0.01,-np.inf])

    result = p['second'].div(p['first'])
    assert_series_equal(result, expected, check_names=False)

    result = p['second'] / p['first']
    assert_series_equal(result, expected)

    # GH 9144
    s = Series([-1, 0, 1])

    result = 0 / s
    expected = Series([0.0, nan, 0.0])
    assert_series_equal(result, expected)

    result = s / 0
    expected = Series([-inf, nan, inf])
    assert_series_equal(result, expected)

    # floor division by zero behaves like true division by zero here
    result = s // 0
    expected = Series([-inf, nan, inf])
    assert_series_equal(result, expected)
def test_operators(self):
    # Arithmetic and comparison operators (fast cython/numpy path) must
    # match the pure-python path through Series.combine.
    def _check_op(series, other, op, pos_only=False):
        # pos_only restricts operands to non-negative values (for pow)
        left = np.abs(series) if pos_only else series
        right = np.abs(other) if pos_only else other

        cython_or_numpy = op(left, right)
        python = left.combine(right, op)
        tm.assert_almost_equal(cython_or_numpy, python)

    def check(series, other):
        simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']

        for opname in simple_ops:
            _check_op(series, other, getattr(operator, opname))

        _check_op(series, other, operator.pow, pos_only=True)

        # reflected variants via explicit argument swap
        _check_op(series, other, lambda x, y: operator.add(y, x))
        _check_op(series, other, lambda x, y: operator.sub(y, x))
        _check_op(series, other, lambda x, y: operator.truediv(y, x))
        _check_op(series, other, lambda x, y: operator.floordiv(y, x))
        _check_op(series, other, lambda x, y: operator.mul(y, x))
        _check_op(series, other, lambda x, y: operator.pow(y, x),
                  pos_only=True)
        _check_op(series, other, lambda x, y: operator.mod(y, x))

    check(self.ts, self.ts * 2)
    check(self.ts, self.ts * 0)
    check(self.ts, self.ts[::2])
    check(self.ts, 5)

    def check_comparators(series, other):
        _check_op(series, other, operator.gt)
        _check_op(series, other, operator.ge)
        _check_op(series, other, operator.eq)
        _check_op(series, other, operator.lt)
        _check_op(series, other, operator.le)

    check_comparators(self.ts, 5)
    check_comparators(self.ts, self.ts + 1)
def test_operators_empty_int_corner(self):
    # Multiplying an empty int series by a one-element float series
    # aligns on the union index and yields NaN for the unmatched label.
    empty = Series([], [], dtype=np.int32)
    other = Series({'x': 0.})
    tm.assert_series_equal(empty * other, Series([np.nan], index=['x']))
def test_constructor_dtype_timedelta64(self):
    # The Series constructor must infer/coerce timedelta64[ns] from
    # python timedeltas, np.timedelta64 and NaT/NaN sentinels.
    # basic
    td = Series([timedelta(days=i) for i in range(3)])
    self.assertEqual(td.dtype, 'timedelta64[ns]')

    td = Series([timedelta(days=1)])
    self.assertEqual(td.dtype, 'timedelta64[ns]')

    td = Series([timedelta(days=1),timedelta(days=2),np.timedelta64(1,'s')])
    self.assertEqual(td.dtype, 'timedelta64[ns]')

    # mixed with NaT
    from pandas import tslib
    td = Series([timedelta(days=1),tslib.NaT ], dtype='m8[ns]' )
    self.assertEqual(td.dtype, 'timedelta64[ns]')

    td = Series([timedelta(days=1),np.nan ], dtype='m8[ns]' )
    self.assertEqual(td.dtype, 'timedelta64[ns]')

    td = Series([np.timedelta64(300000000), pd.NaT],dtype='m8[ns]')
    self.assertEqual(td.dtype, 'timedelta64[ns]')

    # improved inference
    # GH5689
    td = Series([np.timedelta64(300000000), pd.NaT])
    self.assertEqual(td.dtype, 'timedelta64[ns]')

    td = Series([np.timedelta64(300000000), tslib.iNaT])
    self.assertEqual(td.dtype, 'timedelta64[ns]')

    td = Series([np.timedelta64(300000000), np.nan])
    self.assertEqual(td.dtype, 'timedelta64[ns]')

    td = Series([pd.NaT, np.timedelta64(300000000)])
    self.assertEqual(td.dtype, 'timedelta64[ns]')

    td = Series([np.timedelta64(1,'s')])
    self.assertEqual(td.dtype, 'timedelta64[ns]')

    # these are frequency conversion astypes
    #for t in ['s', 'D', 'us', 'ms']:
    #    self.assertRaises(TypeError, td.astype, 'm8[%s]' % t)

    # valid astype
    td.astype('int64')

    # invalid casting
    self.assertRaises(TypeError, td.astype, 'int32')

    # this is an invalid casting
    def f():
        Series([timedelta(days=1), 'foo'],dtype='m8[ns]')
    self.assertRaises(Exception, f)

    # leave as object here
    td = Series([timedelta(days=i) for i in range(3)] + ['foo'])
    self.assertEqual(td.dtype, 'object')

    # these will correctly infer a timedelta
    # (NaN/NaT/None sentinels do not block string parsing)
    s = Series([None, pd.NaT, '1 Day'])
    self.assertEqual(s.dtype,'timedelta64[ns]')
    s = Series([np.nan, pd.NaT, '1 Day'])
    self.assertEqual(s.dtype,'timedelta64[ns]')
    s = Series([pd.NaT, None, '1 Day'])
    self.assertEqual(s.dtype,'timedelta64[ns]')
    s = Series([pd.NaT, np.nan, '1 Day'])
    self.assertEqual(s.dtype,'timedelta64[ns]')
def test_operators_timedelta64(self):
    # Arithmetic between datetime64 series, timedelta series and
    # scalars must produce the expected result dtypes and round-trip.
    # invalid ops
    self.assertRaises(Exception, self.objSeries.__add__, 1)
    self.assertRaises(
        Exception, self.objSeries.__add__, np.array(1, dtype=np.int64))
    self.assertRaises(Exception, self.objSeries.__sub__, 1)
    self.assertRaises(
        Exception, self.objSeries.__sub__, np.array(1, dtype=np.int64))

    # series ops
    v1 = date_range('2012-1-1', periods=3, freq='D')
    v2 = date_range('2012-1-2', periods=3, freq='D')
    rs = Series(v2) - Series(v1)
    xp = Series(1e9 * 3600 * 24, rs.index).astype(
        'int64').astype('timedelta64[ns]')
    assert_series_equal(rs, xp)
    self.assertEqual(rs.dtype, 'timedelta64[ns]')

    df = DataFrame(dict(A=v1))
    td = Series([timedelta(days=i) for i in range(3)])
    self.assertEqual(td.dtype, 'timedelta64[ns]')

    # series on the rhs
    result = df['A'] - df['A'].shift()
    self.assertEqual(result.dtype, 'timedelta64[ns]')

    result = df['A'] + td
    self.assertEqual(result.dtype, 'M8[ns]')

    # scalar Timestamp on rhs
    maxa = df['A'].max()
    tm.assertIsInstance(maxa, Timestamp)

    resultb = df['A'] - df['A'].max()
    self.assertEqual(resultb.dtype, 'timedelta64[ns]')

    # timestamp on lhs
    result = resultb + df['A']
    values = [Timestamp('20111230'), Timestamp('20120101'), Timestamp('20120103')]
    expected = Series(values, name='A')
    assert_series_equal(result, expected)

    # datetimes on rhs
    result = df['A'] - datetime(2001, 1, 1)
    expected = Series([timedelta(days=4017 + i) for i in range(3)], name='A')
    assert_series_equal(result, expected)
    self.assertEqual(result.dtype, 'm8[ns]')

    d = datetime(2001, 1, 1, 3, 4)
    resulta = df['A'] - d
    self.assertEqual(resulta.dtype, 'm8[ns]')

    # roundtrip
    resultb = resulta + d
    assert_series_equal(df['A'], resultb)

    # timedeltas on rhs
    td = timedelta(days=1)
    resulta = df['A'] + td
    resultb = resulta - td
    assert_series_equal(resultb, df['A'])
    self.assertEqual(resultb.dtype, 'M8[ns]')

    # roundtrip
    td = timedelta(minutes=5, seconds=3)
    resulta = df['A'] + td
    resultb = resulta - td
    assert_series_equal(df['A'], resultb)
    self.assertEqual(resultb.dtype, 'M8[ns]')

    # inplace
    value = rs[2] + np.timedelta64(timedelta(minutes=5,seconds=1))
    rs[2] += np.timedelta64(timedelta(minutes=5,seconds=1))
    self.assertEqual(rs[2], value)
def test_timedeltas_with_DateOffset(self):
    # GH 4532
    # operate with pd.offsets
    s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])

    # DateOffset addition is commutative
    result = s + pd.offsets.Second(5)
    result2 = pd.offsets.Second(5) + s
    expected = Series(
        [Timestamp('20130101 9:01:05'), Timestamp('20130101 9:02:05')])
    assert_series_equal(result, expected)
    assert_series_equal(result2, expected)

    result = s + pd.offsets.Milli(5)
    result2 = pd.offsets.Milli(5) + s
    expected = Series(
        [Timestamp('20130101 9:01:00.005'), Timestamp('20130101 9:02:00.005')])
    assert_series_equal(result, expected)
    assert_series_equal(result2, expected)

    # chained offsets accumulate
    result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5)
    expected = Series(
        [Timestamp('20130101 9:06:00.005'), Timestamp('20130101 9:07:00.005')])
    assert_series_equal(result, expected)

    # operate with np.timedelta64 correctly
    result = s + np.timedelta64(1, 's')
    result2 = np.timedelta64(1, 's') + s
    expected = Series(
        [Timestamp('20130101 9:01:01'), Timestamp('20130101 9:02:01')])
    assert_series_equal(result, expected)
    assert_series_equal(result2, expected)

    result = s + np.timedelta64(5, 'ms')
    result2 = np.timedelta64(5, 'ms') + s
    expected = Series(
        [Timestamp('20130101 9:01:00.005'), Timestamp('20130101 9:02:00.005')])
    assert_series_equal(result, expected)
    assert_series_equal(result2, expected)

    # valid DateOffsets: smoke-test both operand orders
    for do in [ 'Hour', 'Minute', 'Second', 'Day', 'Micro',
                'Milli', 'Nano' ]:
        op = getattr(pd.offsets,do)
        s + op(5)
        op(5) + s
def test_timedelta64_operations_with_DateOffset(self):
    # GH 10699
    # timedelta series +/- DateOffset (scalar and element-wise Series)
    td = Series([timedelta(minutes=5, seconds=3)] * 3)
    result = td + pd.offsets.Minute(1)
    expected = Series([timedelta(minutes=6, seconds=3)] * 3)
    assert_series_equal(result, expected)

    result = td - pd.offsets.Minute(1)
    expected = Series([timedelta(minutes=4, seconds=3)] * 3)
    assert_series_equal(result, expected)

    # a Series of offsets applies element-wise
    result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
                          pd.offsets.Hour(2)])
    expected = Series([timedelta(minutes=6, seconds=3),
                       timedelta(minutes=5, seconds=6),
                       timedelta(hours=2, minutes=5, seconds=3)])
    assert_series_equal(result, expected)

    result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
    expected = Series([timedelta(minutes=6, seconds=15)] * 3)
    assert_series_equal(result, expected)

    # valid DateOffsets: smoke-test all four operand orders
    for do in [ 'Hour', 'Minute', 'Second', 'Day', 'Micro',
                'Milli', 'Nano' ]:
        op = getattr(pd.offsets,do)
        td + op(5)
        op(5) + td
        td - op(5)
        op(5) - td
def test_timedelta64_operations_with_timedeltas(self):
    # td operate with td
    td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
    td2 = timedelta(minutes=5, seconds=4)
    result = td1 - td2
    # expected is -1s, built as 0s - 1s to stay in timedelta64 space
    expected = Series([timedelta(seconds=0)] * 3) -Series(
        [timedelta(seconds=1)] * 3)
    self.assertEqual(result.dtype, 'm8[ns]')
    assert_series_equal(result, expected)

    result2 = td2 - td1
    expected = (Series([timedelta(seconds=1)] * 3) -
                Series([timedelta(seconds=0)] * 3))
    assert_series_equal(result2, expected)

    # roundtrip
    assert_series_equal(result + td2,td1)

    # Now again, using pd.to_timedelta, which should build
    # a Series or a scalar, depending on input.
    td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
    td2 = pd.to_timedelta('00:05:04')
    result = td1 - td2
    expected = Series([timedelta(seconds=0)] * 3) -Series(
        [timedelta(seconds=1)] * 3)
    self.assertEqual(result.dtype, 'm8[ns]')
    assert_series_equal(result, expected)

    result2 = td2 - td1
    expected = (Series([timedelta(seconds=1)] * 3) -
                Series([timedelta(seconds=0)] * 3))
    assert_series_equal(result2, expected)

    # roundtrip
    assert_series_equal(result + td2,td1)
def test_timedelta64_operations_with_integers(self):
    # GH 4521
    # divide/multiply by integers
    startdate = Series(date_range('2013-01-01', '2013-01-03'))
    enddate = Series(date_range('2013-03-01', '2013-03-03'))

    s1 = enddate - startdate
    s1[2] = np.nan
    s2 = Series([2, 3, 4])
    expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
    expected[2] = np.nan  # NaN position must survive the op
    result = s1 / s2
    assert_series_equal(result,expected)

    s2 = Series([20, 30, 40])
    expected = Series(s1.values.astype(np.int64) / s2, dtype='m8[ns]')
    expected[2] = np.nan
    result = s1 / s2
    assert_series_equal(result,expected)

    result = s1 / 2
    expected = Series(s1.values.astype(np.int64) / 2, dtype='m8[ns]')
    expected[2] = np.nan
    assert_series_equal(result,expected)

    s2 = Series([20, 30, 40])
    expected = Series(s1.values.astype(np.int64) * s2, dtype='m8[ns]')
    expected[2] = np.nan
    result = s1 * s2
    assert_series_equal(result,expected)

    # every integer dtype must behave the same under multiplication
    for dtype in ['int32','int16','uint32','uint64','uint32','uint16','uint8']:
        s2 = Series([20, 30, 40],dtype=dtype)
        expected = Series(s1.values.astype(np.int64) * s2.astype(np.int64), dtype='m8[ns]')
        expected[2] = np.nan
        result = s1 * s2
        assert_series_equal(result,expected)

    result = s1 * 2
    expected = Series(s1.values.astype(np.int64) * 2, dtype='m8[ns]')
    expected[2] = np.nan
    assert_series_equal(result,expected)

    result = s1 * -1
    expected = Series(s1.values.astype(np.int64) * -1, dtype='m8[ns]')
    expected[2] = np.nan
    assert_series_equal(result,expected)

    # invalid ops
    # NOTE(review): '__true_div__' looks like a typo for '__truediv__';
    # getattr(..., None) silently skips the missing name, so that case
    # is never actually exercised — confirm intent.
    for op in ['__true_div__','__div__','__mul__']:
        sop = getattr(s1,op,None)
        if sop is not None:
            self.assertRaises(TypeError, sop, s2.astype(float))
            self.assertRaises(TypeError, sop, 2.)

    for op in ['__add__','__sub__']:
        sop = getattr(s1,op,None)
        if sop is not None:
            self.assertRaises(TypeError, sop, 1)
            self.assertRaises(TypeError, sop, s2.values)
def test_timedelta64_conversions(self):
    # Dividing a timedelta series by np.timedelta64 scalars of various
    # units, and frequency-conversion astypes.
    startdate = Series(date_range('2013-01-01', '2013-01-03'))
    enddate = Series(date_range('2013-03-01', '2013-03-03'))

    s1 = enddate - startdate
    s1[2] = np.nan

    for m in [1, 3, 10]:
        for unit in ['D','h','m','s','ms','us','ns']:
            # op
            expected = s1.apply(lambda x: x / np.timedelta64(m,unit))
            result = s1 / np.timedelta64(m,unit)
            assert_series_equal(result, expected)

            if m == 1 and unit != 'ns':
                # astype to a coarser unit equals dividing by one unit
                result = s1.astype("timedelta64[{0}]".format(unit))
                assert_series_equal(result, expected)

            # reverse op
            # NOTE(review): this result is computed but never asserted —
            # looks like a missing assert_series_equal; confirm intent.
            expected = s1.apply(lambda x: np.timedelta64(m,unit) / x)
            result = np.timedelta64(m,unit) / s1

    # astype to object keeps per-element datetime/timedelta types
    s = Series(date_range('20130101',periods=3))
    result = s.astype(object)
    self.assertIsInstance(result.iloc[0],datetime)
    self.assertTrue(result.dtype == np.object_)

    result = s1.astype(object)
    self.assertIsInstance(result.iloc[0],timedelta)
    self.assertTrue(result.dtype == np.object_)
def test_timedelta64_equal_timedelta_supported_ops(self):
    # Adding/subtracting an equivalent duration expressed as
    # np.timedelta64 vs datetime.timedelta must give identical results.
    ser = Series([Timestamp('20130301'), Timestamp('20130228 23:00:00'),
                  Timestamp('20130228 22:00:00'),
                  Timestamp('20130228 21:00:00')])

    intervals = 'D', 'h', 'm', 's', 'us'
    # scale factors to microseconds for each interval
    npy16_mappings = {'D': 24 * 60 * 60 * 1000000, 'h': 60 * 60 * 1000000,
                      'm': 60 * 1000000, 's': 1000000, 'us': 1}

    def timedelta64(*args):
        # build a np.timedelta64 total from per-unit counts
        return sum(starmap(np.timedelta64, zip(args, intervals)))

    # exhaustively combine 0/1 counts of each unit with add and sub
    for op, d, h, m, s, us in product([operator.add, operator.sub],
                                      *([range(2)] * 5)):
        nptd = timedelta64(d, h, m, s, us)
        pytd = timedelta(days=d, hours=h, minutes=m, seconds=s,
                         microseconds=us)
        lhs = op(ser, nptd)
        rhs = op(ser, pytd)

        try:
            assert_series_equal(lhs, rhs)
        except:
            # re-raise with the failing parameter combination attached
            raise AssertionError(
                "invalid comparsion [op->{0},d->{1},h->{2},m->{3},s->{4},us->{5}]\n{6}\n{7}\n".format(op, d, h, m, s, us, lhs, rhs))
def test_timedelta_assignment(self):
    # GH 8209
    # label assignment of a timedelta into an empty Series, then
    # reindexing and fillna must preserve the timedelta64 dtype
    s = Series([])
    s.loc['B'] = timedelta(1)
    tm.assert_series_equal(s,Series(Timedelta('1 days'),index=['B']))

    s = s.reindex(s.index.insert(0, 'A'))
    tm.assert_series_equal(s,Series([np.nan,Timedelta('1 days')],index=['A','B']))

    result = s.fillna(timedelta(1))
    expected = Series(Timedelta('1 days'),index=['A','B'])
    tm.assert_series_equal(result, expected)

    s.loc['A'] = timedelta(1)
    tm.assert_series_equal(s, expected)
def test_operators_datetimelike(self):
    """Arithmetic between datetime64/timedelta64 Series and scalars.

    Undefined operators must raise TypeError (the 'operate' message from
    core/ops.py); supported +/-// combinations must evaluate without
    error, including tz-aware datetime64 with timedelta64 (GH 8260,
    GH 10763).
    """

    def run_ops(ops, get_ser, test_ser):
        # check that we are getting a TypeError
        # with 'operate' (from core/ops.py) for the ops that are not defined
        for op_str in ops:
            op = getattr(get_ser, op_str, None)
            with tm.assertRaisesRegexp(TypeError, 'operate'):
                op(test_ser)

    ### timedelta64 ###
    td1 = Series([timedelta(minutes=5,seconds=3)]*3)
    td1.iloc[2] = np.nan
    td2 = timedelta(minutes=5,seconds=4)
    # mul/floordiv/pow (and reflected forms) are undefined for timedeltas
    ops = ['__mul__','__floordiv__','__pow__',
           '__rmul__','__rfloordiv__','__rpow__']
    run_ops(ops, td1, td2)
    # the remaining arithmetic must evaluate without raising
    td1 + td2
    td2 + td1
    td1 - td2
    td2 - td1
    td1 / td2
    td2 / td1

    ### datetime64 ###
    dt1 = Series([Timestamp('20111230'), Timestamp('20120101'),
                  Timestamp('20120103')])
    dt1.iloc[2] = np.nan
    dt2 = Series([Timestamp('20111231'), Timestamp('20120102'),
                  Timestamp('20120104')])
    # only subtraction is defined between two datetime64 Series
    ops = ['__add__', '__mul__', '__floordiv__', '__truediv__', '__div__',
           '__pow__', '__radd__', '__rmul__', '__rfloordiv__',
           '__rtruediv__', '__rdiv__', '__rpow__']
    run_ops(ops, dt1, dt2)
    dt1 - dt2
    dt2 - dt1

    ### datetime64 with timetimedelta ###
    ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
           '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
           '__rpow__']
    run_ops(ops, dt1, td1)
    dt1 + td1
    td1 + dt1
    dt1 - td1
    # TODO: Decide if this ought to work.
    # td1 - dt1

    ### timetimedelta with datetime64 ###
    ops = ['__sub__', '__mul__', '__floordiv__', '__truediv__', '__div__',
           '__pow__', '__rsub__', '__rmul__', '__rfloordiv__',
           '__rtruediv__', '__rdiv__', '__rpow__']
    run_ops(ops, td1, dt1)
    td1 + dt1
    dt1 + td1

    # 8260, 10763
    # datetime64 with tz
    ops = ['__mul__', '__floordiv__', '__truediv__', '__div__', '__pow__',
           '__rmul__', '__rfloordiv__', '__rtruediv__', '__rdiv__',
           '__rpow__']
    dt1 = Series(date_range('2000-01-01 09:00:00',periods=5,tz='US/Eastern'),name='foo')
    dt2 = dt1.copy()
    dt2.iloc[2] = np.nan
    td1 = Series(timedelta_range('1 days 1 min',periods=5, freq='H'))
    td2 = td1.copy()
    td2.iloc[1] = np.nan
    run_ops(ops, dt1, td1)

    # tz-aware +/- timedelta scalar: must equal operating on the
    # tz-naive values and re-localizing afterwards
    result = dt1 + td1[0]
    expected = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize('US/Eastern')
    assert_series_equal(result, expected)

    result = dt2 + td2[0]
    expected = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize('US/Eastern')
    assert_series_equal(result, expected)

    # odd numpy behavior with scalar timedeltas
    if not _np_version_under1p8:
        result = td1[0] + dt1
        expected = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize('US/Eastern')
        assert_series_equal(result, expected)

        result = td2[0] + dt2
        expected = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize('US/Eastern')
        assert_series_equal(result, expected)

    result = dt1 - td1[0]
    expected = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize('US/Eastern')
    assert_series_equal(result, expected)
    # a timedelta scalar cannot have a datetime Series subtracted from it
    self.assertRaises(TypeError, lambda: td1[0] - dt1)

    result = dt2 - td2[0]
    expected = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize('US/Eastern')
    assert_series_equal(result, expected)
    self.assertRaises(TypeError, lambda: td2[0] - dt2)

    # tz-aware +/- a whole timedelta Series
    result = dt1 + td1
    expected = (dt1.dt.tz_localize(None) + td1).dt.tz_localize('US/Eastern')
    assert_series_equal(result, expected)

    result = dt2 + td2
    expected = (dt2.dt.tz_localize(None) + td2).dt.tz_localize('US/Eastern')
    assert_series_equal(result, expected)

    result = dt1 - td1
    expected = (dt1.dt.tz_localize(None) - td1).dt.tz_localize('US/Eastern')
    assert_series_equal(result, expected)

    result = dt2 - td2
    expected = (dt2.dt.tz_localize(None) - td2).dt.tz_localize('US/Eastern')
    assert_series_equal(result, expected)

    self.assertRaises(TypeError, lambda: td1 - dt1)
    self.assertRaises(TypeError, lambda: td2 - dt2)
def test_ops_datetimelike_align(self):
# GH 7500
# datetimelike ops need to align
dt = Series(date_range('2012-1-1', periods=3, freq='D'))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
assert_series_equal(result, expected)
def test_timedelta64_functions(self):
    """idxmin/idxmax, abs, and max/min on a timedelta64 Series (GH 2982)."""
    from datetime import timedelta
    from pandas import date_range

    # index min/max
    td = Series(date_range('2012-1-1', periods=3, freq='D')) - \
        Timestamp('20120101')

    result = td.idxmin()
    self.assertEqual(result, 0)

    result = td.idxmax()
    self.assertEqual(result, 2)

    # GH 2982
    # with NaT: null entries are skipped by idxmin/idxmax
    td[0] = np.nan

    result = td.idxmin()
    self.assertEqual(result, 1)

    result = td.idxmax()
    self.assertEqual(result, 2)

    # abs
    s1 = Series(date_range('20120101', periods=3))
    s2 = Series(date_range('20120102', periods=3))
    expected = Series(s2 - s1)

    # this fails as numpy returns timedelta64[us]
    #result = np.abs(s1-s2)
    # assert_frame_equal(result,expected)

    result = (s1 - s2).abs()
    assert_series_equal(result, expected)

    # max/min (NaT at position 0 is ignored)
    result = td.max()
    expected = Timedelta('2 days')
    self.assertEqual(result, expected)

    result = td.min()
    expected = Timedelta('1 days')
    self.assertEqual(result, expected)
def test_ops_consistency_on_empty(self):
    """Reductions on empty Series: sum is 0/Timedelta(0); mean and
    median are missing (NaN / NaT).  GH 7869.
    """
    # empty float Series
    empty_float = Series(dtype=float)
    self.assertEqual(empty_float.sum(), 0)
    self.assertTrue(isnull(empty_float.mean()))
    self.assertTrue(isnull(empty_float.median()))

    # empty timedelta64[ns] Series
    empty_td = Series(dtype='m8[ns]')
    self.assertEqual(empty_td.sum(), Timedelta(0))
    self.assertTrue(empty_td.mean() is pd.NaT)
    self.assertTrue(empty_td.median() is pd.NaT)
def test_timedelta_fillna(self):
    """fillna on timedelta64: scalars are coerced to timedeltas (GH 3371)."""
    #GH 3371
    s = Series([Timestamp('20130101'), Timestamp('20130101'),
                Timestamp('20130102'), Timestamp('20130103 9:01:01')])
    td = s.diff()

    # reg fillna
    result = td.fillna(0)
    expected = Series([timedelta(0), timedelta(0), timedelta(1),
                       timedelta(days=1, seconds=9*3600+60+1)])
    assert_series_equal(result, expected)

    # a plain integer is interpreted as seconds
    result = td.fillna(1)
    expected = Series([timedelta(seconds=1), timedelta(0),
                       timedelta(1), timedelta(days=1, seconds=9*3600+60+1)])
    assert_series_equal(result, expected)

    result = td.fillna(timedelta(days=1, seconds=1))
    expected = Series([timedelta(days=1, seconds=1), timedelta(0),
                       timedelta(1), timedelta(days=1, seconds=9*3600+60+1)])
    assert_series_equal(result, expected)

    # np.timedelta64 of 1e9 ns == one second
    result = td.fillna(np.timedelta64(int(1e9)))
    expected = Series([timedelta(seconds=1), timedelta(0), timedelta(1),
                       timedelta(days=1, seconds=9*3600+60+1)])
    assert_series_equal(result, expected)

    from pandas import tslib
    # filling with NaT keeps the timedelta64[ns] dtype
    result = td.fillna(tslib.NaT)
    expected = Series([tslib.NaT, timedelta(0), timedelta(1),
                       timedelta(days=1, seconds=9*3600+60+1)], dtype='m8[ns]')
    assert_series_equal(result, expected)

    # ffill: position 0 stays NaT (nothing before it to propagate)
    td[2] = np.nan
    result = td.ffill()
    expected = td.fillna(0)
    expected[0] = np.nan
    assert_series_equal(result, expected)

    # bfill: position 2 takes the following value
    td[2] = np.nan
    result = td.bfill()
    expected = td.fillna(0)
    expected[2] = timedelta(days=1, seconds=9*3600+60+1)
    assert_series_equal(result, expected)
def test_datetime64_fillna(self):
    """fillna / ffill / bfill on a datetime64 Series with a NaT hole."""
    s = Series([Timestamp('20130101'), Timestamp('20130101'),
                Timestamp('20130102'), Timestamp('20130103 9:01:01')])
    s[2] = np.nan

    # reg fillna
    result = s.fillna(Timestamp('20130104'))
    expected = Series([Timestamp('20130101'), Timestamp('20130101'),
                       Timestamp('20130104'), Timestamp('20130103 9:01:01')])
    assert_series_equal(result, expected)

    from pandas import tslib
    # filling with NaT leaves the Series unchanged
    result = s.fillna(tslib.NaT)
    expected = s
    assert_series_equal(result, expected)

    # ffill
    result = s.ffill()
    expected = Series([Timestamp('20130101'), Timestamp('20130101'),
                       Timestamp('20130101'), Timestamp('20130103 9:01:01')])
    assert_series_equal(result, expected)

    # bfill
    result = s.bfill()
    expected = Series([Timestamp('20130101'), Timestamp('20130101'),
                       Timestamp('20130103 9:01:01'),
                       Timestamp('20130103 9:01:01')])
    assert_series_equal(result, expected)

    # GH 6587
    # make sure that we are treating as integer when filling
    # this also tests inference of a datetime-like with NaT's
    s = Series([pd.NaT, pd.NaT, '2013-08-05 15:30:00.000001'])
    expected = Series(['2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001', '2013-08-05 15:30:00.000001'], dtype='M8[ns]')
    result = s.fillna(method='backfill')
    assert_series_equal(result, expected)
def test_datetime64_tz_fillna(self):
    """fillna on naive and tz-aware datetime64 Series.

    Filling with a matching-tz Timestamp preserves the tz dtype; filling
    with a naive/other-tz/non-datetime value coerces to object.
    """
    for tz in ['US/Eastern', 'Asia/Tokyo']:
        # DatetimeBlock (tz-naive values with NaT holes)
        s = Series([Timestamp('2011-01-01 10:00'), pd.NaT,
                    Timestamp('2011-01-03 10:00'), pd.NaT])
        result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
        expected = Series([Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00'),
                           Timestamp('2011-01-03 10:00'), Timestamp('2011-01-02 10:00')])
        self.assert_series_equal(expected, result)

        # filling a naive block with a tz-aware Timestamp coerces to object
        result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
        expected = Series([Timestamp('2011-01-01 10:00'),
                           Timestamp('2011-01-02 10:00', tz=tz),
                           Timestamp('2011-01-03 10:00'),
                           Timestamp('2011-01-02 10:00', tz=tz)])
        self.assert_series_equal(expected, result)

        # non-datetime filler coerces to object
        result = s.fillna('AAA')
        expected = Series([Timestamp('2011-01-01 10:00'), 'AAA',
                           Timestamp('2011-01-03 10:00'), 'AAA'], dtype=object)
        self.assert_series_equal(expected, result)

        # dict filler: per-position values
        result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
                           3: pd.Timestamp('2011-01-04 10:00')})
        expected = Series([Timestamp('2011-01-01 10:00'),
                           Timestamp('2011-01-02 10:00', tz=tz),
                           Timestamp('2011-01-03 10:00'),
                           Timestamp('2011-01-04 10:00')])
        self.assert_series_equal(expected, result)

        result = s.fillna({1: pd.Timestamp('2011-01-02 10:00'),
                           3: pd.Timestamp('2011-01-04 10:00')})
        expected = Series([Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00'),
                           Timestamp('2011-01-03 10:00'), Timestamp('2011-01-04 10:00')])
        self.assert_series_equal(expected, result)

        # DatetimeBlockTZ (tz-aware values with NaT holes)
        idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
                                '2011-01-03 10:00', pd.NaT], tz=tz)
        s = pd.Series(idx)
        # filling a tz-aware block with a naive Timestamp coerces to object
        result = s.fillna(pd.Timestamp('2011-01-02 10:00'))
        expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
                           Timestamp('2011-01-02 10:00'),
                           Timestamp('2011-01-03 10:00', tz=tz),
                           Timestamp('2011-01-02 10:00')])
        self.assert_series_equal(expected, result)

        # matching tz keeps the datetime64tz dtype
        result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz))
        idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
                                '2011-01-03 10:00', '2011-01-02 10:00'],
                               tz=tz)
        expected = Series(idx)
        self.assert_series_equal(expected, result)

        # a tz-aware pydatetime behaves like the Timestamp above
        result = s.fillna(pd.Timestamp('2011-01-02 10:00', tz=tz).to_pydatetime())
        idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-02 10:00',
                                '2011-01-03 10:00', '2011-01-02 10:00'],
                               tz=tz)
        expected = Series(idx)
        self.assert_series_equal(expected, result)

        result = s.fillna('AAA')
        expected = Series([Timestamp('2011-01-01 10:00', tz=tz), 'AAA',
                           Timestamp('2011-01-03 10:00', tz=tz), 'AAA'],
                          dtype=object)
        self.assert_series_equal(expected, result)

        result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
                           3: pd.Timestamp('2011-01-04 10:00')})
        expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
                           Timestamp('2011-01-02 10:00', tz=tz),
                           Timestamp('2011-01-03 10:00', tz=tz),
                           Timestamp('2011-01-04 10:00')])
        self.assert_series_equal(expected, result)

        result = s.fillna({1: pd.Timestamp('2011-01-02 10:00', tz=tz),
                           3: pd.Timestamp('2011-01-04 10:00', tz=tz)})
        expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
                           Timestamp('2011-01-02 10:00', tz=tz),
                           Timestamp('2011-01-03 10:00', tz=tz),
                           Timestamp('2011-01-04 10:00', tz=tz)])
        self.assert_series_equal(expected, result)

        # filling with a naive/other zone, coerce to object
        result = s.fillna(Timestamp('20130101'))
        expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
                           Timestamp('2013-01-01'),
                           Timestamp('2011-01-03 10:00', tz=tz),
                           Timestamp('2013-01-01')])
        self.assert_series_equal(expected, result)

        result = s.fillna(Timestamp('20130101',tz='US/Pacific'))
        expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
                           Timestamp('2013-01-01',tz='US/Pacific'),
                           Timestamp('2011-01-03 10:00', tz=tz),
                           Timestamp('2013-01-01',tz='US/Pacific')])
        self.assert_series_equal(expected, result)
def test_fillna_int(self):
    """fillna on an int Series: inplace and copy variants agree."""
    ints = Series(np.random.randint(-100, 100, 50))
    # there are no NaNs, so the ffill is a no-op either way
    ints.fillna(method='ffill', inplace=True)
    assert_series_equal(ints.fillna(method='ffill', inplace=False), ints)
def test_fillna_raise(self):
    """fillna rejects list and tuple fill values with TypeError."""
    values = Series(np.random.randint(-100, 100, 50))
    for bad in ([1, 2], (1, 2)):
        self.assertRaises(TypeError, values.fillna, bad)
def test_raise_on_info(self):
    """Series has no .info() method; accessing it raises AttributeError."""
    ser = Series(np.random.randn(10))
    with tm.assertRaises(AttributeError):
        ser.info()
def test_isnull_for_inf(self):
    """With mode.use_inf_as_null, inf counts as missing for isnull/dropna."""
    ser = Series(['a', np.inf, np.nan, 1.0])
    with pd.option_context('mode.use_inf_as_null', True):
        null_mask = ser.isnull()
        kept = ser.dropna()
    tm.assert_series_equal(null_mask, Series([False, True, True, False]))
    tm.assert_series_equal(kept, Series(['a', 1.0], index=[0, 3]))
# TimeSeries-specific
def test_fillna(self):
    """fillna with method/value/dict/Series fillers and the limit keyword."""
    ts = Series([0., 1., 2., 3., 4.], index=tm.makeDateIndex(5))

    # no NaNs present: ffill is a no-op
    self.assert_numpy_array_equal(ts, ts.fillna(method='ffill'))

    ts[2] = np.NaN

    self.assert_numpy_array_equal(ts.fillna(method='ffill'),
                                  [0., 1., 1., 3., 4.])
    self.assert_numpy_array_equal(ts.fillna(method='backfill'),
                                  [0., 1., 3., 3., 4.])

    self.assert_numpy_array_equal(ts.fillna(value=5), [0., 1., 5., 3., 4.])

    # must supply either a value or a method, and not both
    self.assertRaises(ValueError, ts.fillna)
    self.assertRaises(ValueError, self.ts.fillna, value=0, method='ffill')

    # GH 5703
    s1 = Series([np.nan])
    s2 = Series([1])
    result = s1.fillna(s2)
    expected = Series([1.])
    assert_series_equal(result,expected)
    # empty dict / empty Series fillers leave the data unchanged
    result = s1.fillna({})
    assert_series_equal(result,s1)
    result = s1.fillna(Series(()))
    assert_series_equal(result,s1)
    result = s2.fillna(s1)
    assert_series_equal(result,s2)
    result = s1.fillna({ 0 : 1})
    assert_series_equal(result,expected)
    # dict keys absent from the index are ignored
    result = s1.fillna({ 1 : 1})
    assert_series_equal(result,Series([np.nan]))
    result = s1.fillna({ 0 : 1, 1 : 1})
    assert_series_equal(result,expected)
    result = s1.fillna(Series({ 0 : 1, 1 : 1}))
    assert_series_equal(result,expected)
    # a Series filler aligns on the index: disjoint labels fill nothing
    result = s1.fillna(Series({ 0 : 1, 1 : 1},index=[4,5]))
    assert_series_equal(result,s1)

    s1 = Series([0, 1, 2], list('abc'))
    s2 = Series([0, np.nan, 2], list('bac'))
    result = s2.fillna(s1)
    expected = Series([0,0,2.], list('bac'))
    assert_series_equal(result,expected)

    # limit caps how many NaNs are filled (front to back)
    s = Series(np.nan,index=[0,1,2])
    result = s.fillna(999,limit=1)
    expected = Series([999,np.nan,np.nan],index=[0,1,2])
    assert_series_equal(result,expected)

    result = s.fillna(999,limit=2)
    expected = Series([999,999,np.nan],index=[0,1,2])
    assert_series_equal(result,expected)

    # GH 9043
    # make sure a string representation of int/float values can be filled
    # correctly without raising errors or being converted
    vals = ['0', '1.5', '-0.3']
    for val in vals:
        s = Series([0, 1, np.nan, np.nan, 4], dtype='float64')
        result = s.fillna(val)
        expected = Series([0, 1, val, val, 4], dtype='object')
        assert_series_equal(result, expected)
def test_fillna_bug(self):
    """ffill/bfill propagate in the right direction only."""
    orig = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])

    forward = orig.fillna(method='ffill')
    assert_series_equal(forward,
                        Series([nan, 1., 1., 3., 3.], orig.index))

    backward = orig.fillna(method='bfill')
    assert_series_equal(backward,
                        Series([1., 1., 3., 3., nan], orig.index))
def test_fillna_inplace(self):
x = Series([nan, 1., nan, 3., nan], ['z', 'a', 'b', 'c', 'd'])
y = x.copy()
y.fillna(value=0, inplace=True)
expected = x.fillna(value=0)
assert_series_equal(y, expected)
def test_fillna_invalid_method(self):
    """An unknown fill method must raise ValueError naming the method."""
    try:
        self.ts.fillna(method='ffil')
    except ValueError as inst:
        self.assertIn('ffil', str(inst))
    else:
        # previously the test passed silently when no error was raised
        self.fail("fillna with invalid method 'ffil' did not raise ValueError")
def test_ffill(self):
    """Series.ffill is equivalent to fillna(method='ffill')."""
    series = Series(np.arange(5, dtype=float), index=tm.makeDateIndex(5))
    series[2] = np.NaN
    assert_series_equal(series.ffill(), series.fillna(method='ffill'))
def test_bfill(self):
    """Series.bfill is equivalent to fillna(method='bfill')."""
    series = Series(np.arange(5, dtype=float), index=tm.makeDateIndex(5))
    series[2] = np.NaN
    assert_series_equal(series.bfill(), series.fillna(method='bfill'))
def test_sub_of_datetime_from_TimeSeries(self):
    """to_timedelta of an absolute datetime difference yields timedelta64[ns]."""
    from pandas.tseries.timedeltas import to_timedelta
    from datetime import datetime
    left = Series([Timestamp(datetime(1993, 1, 7, 13, 30, 0))])
    right = datetime(1993, 6, 22, 13, 30)
    delta = to_timedelta(np.abs(left - right))
    self.assertEqual(delta.dtype, 'timedelta64[ns]')
def test_datetime64_with_index(self):
    """Arithmetic between a Series and its own index (GH 4629)."""
    # arithmetic integer ops with an index
    s = Series(np.random.randn(5))
    expected = s - s.index.to_series()
    result = s - s.index
    assert_series_equal(result, expected)

    # GH 4629
    # arithmetic datetime64 ops with an index
    s = Series(date_range('20130101', periods=5),
               index=date_range('20130101', periods=5))
    expected = s - s.index.to_series()
    result = s - s.index
    assert_series_equal(result, expected)

    # subtracting the PeriodIndex form gives the same result
    result = s - s.index.to_period()
    assert_series_equal(result, expected)

    # same arithmetic driven through DataFrame columns
    df = DataFrame(np.random.randn(5,2),
                   index=date_range('20130101', periods=5))
    df['date'] = Timestamp('20130102')
    df['expected'] = df['date'] - df.index.to_series()
    df['result'] = df['date'] - df.index
    assert_series_equal(df['result'], df['expected'], check_names=False)
def test_timedelta64_nan(self):
    """np.nan / iNaT / NaT assignment into a timedelta64 Series all
    produce a null entry stored as the iNaT sentinel."""
    from pandas import tslib
    td = Series([timedelta(days=i) for i in range(10)])

    # nan ops on timedeltas
    td1 = td.copy()
    td1[0] = np.nan
    self.assertTrue(isnull(td1[0]))
    # nulls are stored as the iNaT integer sentinel
    self.assertEqual(td1[0].value, tslib.iNaT)
    td1[0] = td[0]
    self.assertFalse(isnull(td1[0]))

    # assigning the raw iNaT integer behaves the same
    td1[1] = tslib.iNaT
    self.assertTrue(isnull(td1[1]))
    self.assertEqual(td1[1].value, tslib.iNaT)
    td1[1] = td[1]
    self.assertFalse(isnull(td1[1]))

    # assigning NaT behaves the same
    td1[2] = tslib.NaT
    self.assertTrue(isnull(td1[2]))
    self.assertEqual(td1[2].value, tslib.iNaT)
    td1[2] = td[2]
    self.assertFalse(isnull(td1[2]))

    # boolean setting
    # this doesn't work, not sure numpy even supports it
    #result = td[(td>np.timedelta64(timedelta(days=3))) & (td<np.timedelta64(timedelta(days=7)))] = np.nan
    #self.assertEqual(isnull(result).sum(), 7)
# NumPy limitation =(
# def test_logical_range_select(self):
#     np.random.seed(12345)
#     selector = -0.5 <= self.ts <= 0.5
#     expected = (self.ts >= -0.5) & (self.ts <= 0.5)
#     assert_series_equal(selector, expected)
def test_operators_na_handling(self):
    """NaN propagation through shifted arithmetic and string concatenation."""
    from decimal import Decimal
    from datetime import date
    dec = Series([Decimal('1.3'), Decimal('2.3')],
                 index=[date(2012, 1, 1), date(2012, 1, 2)])

    # shift introduces a NaN which propagates through + in both orders
    shifted_sum = dec + dec.shift(1)
    shifted_sum_rev = dec.shift(1) + dec
    self.assertTrue(isnull(shifted_sum[0]))
    self.assertTrue(isnull(shifted_sum_rev[0]))

    strs = Series(['foo', 'bar', 'baz', np.nan])

    # prefix / suffix concatenation skip the NaN slot
    prefixed = 'prefix_' + strs
    assert_series_equal(
        prefixed, Series(['prefix_foo', 'prefix_bar', 'prefix_baz', np.nan]))

    suffixed = strs + '_suffix'
    assert_series_equal(
        suffixed, Series(['foo_suffix', 'bar_suffix', 'baz_suffix', np.nan]))
def test_object_comparisons(self):
    """Elementwise ==, <, != against a scalar on an object Series."""
    strs = Series(['a', 'b', np.nan, 'c', 'a'])

    assert_series_equal(strs == 'a',
                        Series([True, False, False, False, True]))
    assert_series_equal(strs < 'a',
                        Series([False, False, False, False, False]))
    # != must be the exact negation of ==
    assert_series_equal(strs != 'a', -(strs == 'a'))
def test_comparison_tuples(self):
    """Tuples and frozensets compare as scalars against a Series (GH11339)."""
    tup_ser = Series([(1, 1), (1, 2)])

    assert_series_equal(tup_ser == (1, 2), Series([False, True]))
    assert_series_equal(tup_ser != (1, 2), Series([True, False]))

    # a tuple matching no element compares all-False / all-True
    assert_series_equal(tup_ser == (0, 0), Series([False, False]))
    assert_series_equal(tup_ser != (0, 0), Series([True, True]))

    same_ser = Series([(1, 1), (1, 1)])
    assert_series_equal(same_ser == (1, 1), Series([True, True]))
    assert_series_equal(same_ser != (1, 1), Series([False, False]))

    # frozensets are scalars for comparison purposes too
    fset_ser = Series([frozenset([1]), frozenset([1, 2])])
    assert_series_equal(fset_ser == frozenset([1]), Series([True, False]))
def test_comparison_operators_with_nas(self):
    """Comparison and logical ops on an object Series containing NaNs."""
    s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
    s[::2] = np.nan

    # test that comparisons work
    ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
    for op in ops:
        val = s[5]

        f = getattr(operator, op)
        result = f(s, val)

        expected = f(s.dropna(), val).reindex(s.index)

        # NaN comparisons: != is True, everything else is False
        if op == 'ne':
            expected = expected.fillna(True).astype(bool)
        else:
            expected = expected.fillna(False).astype(bool)

        assert_series_equal(result, expected)

        # reversed comparisons (val op s) do not reindex correctly; disabled
        # result = f(val, s)
        # expected = f(val, s.dropna()).reindex(s.index)
        # assert_series_equal(result, expected)

    # boolean &, |, ^ should work with object arrays and propagate NAs
    ops = ['and_', 'or_', 'xor']
    mask = s.isnull()
    for bool_op in ops:
        f = getattr(operator, bool_op)

        filled = s.fillna(s[0])

        result = f(s < s[9], s > s[3])

        expected = f(filled < filled[9], filled > filled[3])
        # NA positions come out False after the logical op
        expected[mask] = False
        assert_series_equal(result, expected)
def test_comparison_object_numeric_nas(self):
s = Series(np.random.randn(10), dtype=object)
shifted = s.shift(2)
ops = ['lt', 'le', 'gt', 'ge', 'eq', 'ne']
for op in ops:
f = getattr(operator, op)
result = f(s, shifted)
expected = f(s.astype(float), shifted.astype(float))
assert_series_equal(result, expected)
def test_comparison_invalid(self):
    """Every comparison between ints and datetimes raises TypeError (GH4968)."""
    ints = Series(range(5))
    dates = Series(date_range('20010101', periods=5))
    for left, right in [(ints, dates), (dates, ints)]:
        for cmp in [operator.eq, operator.ne, operator.ge,
                    operator.gt, operator.lt, operator.le]:
            self.assertRaises(TypeError, cmp, left, right)
def test_more_na_comparisons(self):
left = Series(['a', np.nan, 'c'])
right = Series(['a', np.nan, 'd'])
result = left == right
expected = Series([True, False, False])
assert_series_equal(result, expected)
result = left != right
expected = Series([False, True, True])
assert_series_equal(result, expected)
result = left == np.nan
expected = Series([False, False, False])
assert_series_equal(result, expected)
result = left != np.nan
expected = Series([True, True, True])
assert_series_equal(result, expected)
def test_comparison_different_length(self):
    """Comparing Series of different lengths raises ValueError."""
    short = Series(['a', 'b', 'c'])
    self.assertRaises(ValueError, short.__lt__, Series(['b', 'a']))
    self.assertRaises(ValueError, Series([1, 2]).__eq__, Series([2, 3, 4]))
def test_comparison_label_based(self):
    """Logical ops (&, |, ^) between boolean Series align on labels (GH 4947)."""
    # GH 4947
    # comparisons should be label based

    a = Series([True, False, True], list('bca'))
    b = Series([False, True, False], list('abc'))

    expected = Series([True, False, False], list('bca'))
    result = a & b
    assert_series_equal(result,expected)

    expected = Series([True, False, True], list('bca'))
    result = a | b
    assert_series_equal(result,expected)

    expected = Series([False, False, True], list('bca'))
    result = a ^ b
    assert_series_equal(result,expected)

    # rhs is bigger: extra labels on the right are dropped from the result
    a = Series([True, False, True], list('bca'))
    b = Series([False, True, False, True], list('abcd'))

    expected = Series([True, False, False], list('bca'))
    result = a & b
    assert_series_equal(result,expected)

    expected = Series([True, False, True], list('bca'))
    result = a | b
    assert_series_equal(result,expected)

    # filling

    # vs empty: & is all-False, | preserves a
    result = a & Series([])
    expected = Series([False, False, False], list('bca'))
    assert_series_equal(result,expected)

    result = a | Series([])
    expected = Series([True, False, True], list('bca'))
    assert_series_equal(result,expected)

    # vs non-matching labels: behaves like the empty case
    result = a & Series([1],['z'])
    expected = Series([False, False, False], list('bca'))
    assert_series_equal(result,expected)

    result = a | Series([1],['z'])
    expected = Series([True, False, True], list('bca'))
    assert_series_equal(result,expected)

    # identity
    # we would like s[s|e] == s to hold for any e, whether empty or not
    for e in [Series([]),Series([1],['z']),Series(['z']),Series(np.nan,b.index),Series(np.nan,a.index)]:
        result = a[a | e]
        assert_series_equal(result,a[a])

    # vs scalars
    index = list('bca')
    t = Series([True,False,True])

    # truthy scalars make | all-True
    for v in [True,1,2]:
        result = Series([True,False,True],index=index) | v
        expected = Series([True,True,True],index=index)
        assert_series_equal(result,expected)

    # NaN and strings are invalid | operands
    for v in [np.nan,'foo']:
        self.assertRaises(TypeError, lambda : t | v)

    # falsy scalars leave | unchanged
    for v in [False,0]:
        result = Series([True,False,True],index=index) | v
        expected = Series([True,False,True],index=index)
        assert_series_equal(result,expected)

    # truthy scalars leave & unchanged
    for v in [True,1]:
        result = Series([True,False,True],index=index) & v
        expected = Series([True,False,True],index=index)
        assert_series_equal(result,expected)

    # falsy scalars make & all-False
    for v in [False,0]:
        result = Series([True,False,True],index=index) & v
        expected = Series([False,False,False],index=index)
        assert_series_equal(result,expected)

    # NaN is an invalid & operand
    for v in [np.nan]:
        self.assertRaises(TypeError, lambda : t & v)
def test_operators_bitwise(self):
    """Bitwise &, |, ^ across bool and integer Series of mixed dtypes (GH 9016)."""
    # GH 9016: support bitwise op for integer types
    index = list('bca')

    s_tft = Series([True, False, True], index=index)
    s_fff = Series([False, False, False], index=index)
    s_tff = Series([True, False, False], index=index)
    s_empty = Series([])
    s_0101 = Series([0,1,0,1])
    s_0123 = Series(range(4),dtype='int64')
    s_3333 = Series([3] * 4)
    s_4444 = Series([4] * 4)

    # bool vs empty: & is all-False, | preserves the left operand
    res = s_tft & s_empty
    expected = s_fff
    assert_series_equal(res, expected)

    res = s_tft | s_empty
    expected = s_tft
    assert_series_equal(res, expected)

    # int & int / int | int operate on the underlying bits
    res = s_0123 & s_3333
    expected = Series(range(4),dtype='int64')
    assert_series_equal(res, expected)

    res = s_0123 | s_4444
    expected = Series(range(4, 8),dtype='int64')
    assert_series_equal(res, expected)

    # bool vs a partially-overlapping integer Series
    s_a0b1c0 = Series([1], list('b'))

    res = s_tft & s_a0b1c0
    expected = s_tff
    assert_series_equal(res, expected)

    res = s_tft | s_a0b1c0
    expected = s_tft
    assert_series_equal(res, expected)

    # integer scalar operands
    n0 = 0
    res = s_tft & n0
    expected = s_fff
    assert_series_equal(res, expected)

    res = s_0123 & n0
    expected = Series([0] * 4)
    assert_series_equal(res, expected)

    n1 = 1
    res = s_tft & n1
    expected = s_tft
    assert_series_equal(res, expected)

    res = s_0123 & n1
    expected = Series([0, 1, 0, 1])
    assert_series_equal(res, expected)

    # mixed integer widths upcast to the wider dtype
    s_1111 = Series([1]*4, dtype='int8')
    res = s_0123 & s_1111
    expected = Series([0, 1, 0, 1], dtype='int64')
    assert_series_equal(res, expected)

    res = s_0123.astype(np.int16) | s_1111.astype(np.int32)
    expected = Series([1, 1, 3, 3], dtype='int32')
    assert_series_equal(res, expected)

    # invalid operand types raise
    self.assertRaises(TypeError, lambda: s_1111 & 'a')
    self.assertRaises(TypeError, lambda: s_1111 & ['a','b','c','d'])
    self.assertRaises(TypeError, lambda: s_0123 & np.NaN)
    self.assertRaises(TypeError, lambda: s_0123 & 3.14)
    self.assertRaises(TypeError, lambda: s_0123 & [0.1, 4, 3.14, 2])

    # s_0123 will be all false now because of reindexing like s_tft
    assert_series_equal(s_tft & s_0123, Series([False] * 3, list('bca')))
    # s_tft will be all false now because of reindexing like s_0123
    assert_series_equal(s_0123 & s_tft, Series([False] * 4))

    # scalar / single-element-list / boolean-with-NaN operands
    assert_series_equal(s_0123 & False, Series([False] * 4))
    assert_series_equal(s_0123 ^ False, Series([False, True, True, True]))
    assert_series_equal(s_0123 & [False], Series([False] * 4))
    assert_series_equal(s_0123 & (False), Series([False] * 4))
    assert_series_equal(s_0123 & Series([False, np.NaN, False, False]), Series([False] * 4))

    # float operand: truthiness of each element decides the result
    s_ftft = Series([False, True, False, True])
    assert_series_equal(s_0123 & Series([0.1, 4, -3.14, 2]), s_ftft)

    # object operand: elementwise truthiness, NaN counts as False
    s_abNd = Series(['a','b',np.NaN,'d'])
    res = s_0123 & s_abNd
    expected = s_ftft
    assert_series_equal(res, expected)
def test_between(self):
    """Series.between with inclusive and exclusive bounds."""
    dates = Series(bdate_range('1/1/2000', periods=20).asobject)
    dates[::2] = np.nan

    # inclusive bounds keep both endpoints
    inclusive = dates[dates.between(dates[3], dates[17])]
    assert_series_equal(inclusive, dates[3:18].dropna())

    # exclusive bounds drop both endpoints
    exclusive = dates[dates.between(dates[3], dates[17], inclusive=False)]
    assert_series_equal(exclusive, dates[5:16].dropna())
def test_setitem_na(self):
    """Setting NaN by slice upcasts int/bool Series to float."""
    # int -> float with NaNs at the even positions
    ints = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
    ints[::2] = np.nan
    assert_series_equal(
        ints,
        Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan]))

    # get's coerced to float, right? True/False become 1/0
    bools = Series([True, True, False, False])
    bools[::2] = np.nan
    assert_series_equal(bools, Series([np.nan, 1, np.nan, 0]))

    # contiguous slice assignment on an integer range
    rng = Series(np.arange(10))
    rng[:5] = np.nan
    assert_series_equal(
        rng,
        Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8, 9]))
def test_scalar_na_cmp_corners(self):
    """Corner cases for & between a Series and scalars/lists/frames."""
    s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])

    def tester(a, b):
        return a & b

    # int Series & datetime scalar is undefined
    self.assertRaises(TypeError, tester, s, datetime(2005, 1, 1))

    s = Series([2, 3, 4, 5, 6, 7, 8, 9, datetime(2005, 1, 1)])
    s[::2] = np.nan

    # & against a list of the same values: NaN slots come out False
    expected = Series(True,index=s.index)
    expected[::2] = False
    assert_series_equal(tester(s, list(s)), expected)

    d = DataFrame({'A': s})
    # TODO: Fix this exception - needs to be fixed! (see GH5035)
    # (previously this was a TypeError because series returned
    # NotImplemented
    self.assertRaises(ValueError, tester, s, d)
def test_idxmin(self):
    """idxmin with NaNs, skipna=False, no NaNs, all-NaN, and datetime64."""
    # test idxmin
    # _check_stat_op approach can not be used here because of isnull check.

    # add some NaNs
    self.series[5:15] = np.NaN

    # skipna or no: skipping returns the true min's label, not skipping
    # yields a null result
    self.assertEqual(self.series[self.series.idxmin()], self.series.min())
    self.assertTrue(isnull(self.series.idxmin(skipna=False)))

    # no NaNs: label agrees with the positional argmin
    nona = self.series.dropna()
    self.assertEqual(nona[nona.idxmin()], nona.min())
    self.assertEqual(nona.index.values.tolist().index(nona.idxmin()),
                     nona.values.argmin())

    # all NaNs
    allna = self.series * nan
    self.assertTrue(isnull(allna.idxmin()))

    # datetime64[ns]: NaT entries are skipped
    from pandas import date_range
    s = Series(date_range('20130102', periods=6))
    result = s.idxmin()
    self.assertEqual(result, 0)

    s[0] = np.nan
    result = s.idxmin()
    self.assertEqual(result, 1)
def test_idxmax(self):
    """idxmax with NaNs, skipna=False, datetime64, and a Float64Index."""
    # test idxmax
    # _check_stat_op approach can not be used here because of isnull check.

    # add some NaNs
    self.series[5:15] = np.NaN

    # skipna or no: skipping returns the true max's label, not skipping
    # yields a null result
    self.assertEqual(self.series[self.series.idxmax()], self.series.max())
    self.assertTrue(isnull(self.series.idxmax(skipna=False)))

    # no NaNs: label agrees with the positional argmax
    nona = self.series.dropna()
    self.assertEqual(nona[nona.idxmax()], nona.max())
    self.assertEqual(nona.index.values.tolist().index(nona.idxmax()),
                     nona.values.argmax())

    # all NaNs
    allna = self.series * nan
    self.assertTrue(isnull(allna.idxmax()))

    from pandas import date_range
    s = Series(date_range('20130102', periods=6))
    result = s.idxmax()
    self.assertEqual(result, 5)

    # NaT entries are skipped
    s[5] = np.nan
    result = s.idxmax()
    self.assertEqual(result, 4)

    # Float64Index
    # GH 5914
    s = pd.Series([1,2,3],[1.1,2.1,3.1])
    result = s.idxmax()
    self.assertEqual(result, 3.1)
    result = s.idxmin()
    self.assertEqual(result, 1.1)

    # float labels also work as the values themselves
    s = pd.Series(s.index, s.index)
    result = s.idxmax()
    self.assertEqual(result, 3.1)
    result = s.idxmin()
    self.assertEqual(result, 1.1)
def test_ndarray_compat(self):
    """numpy-style interop: apply/argmax, item(), ones_like, ravel, compress."""
    # test numpy compat with Series as sub-class of NDFrame
    tsdf = DataFrame(np.random.randn(1000, 3), columns=['A', 'B', 'C'],
                     index=date_range('1/1/2000', periods=1000))

    def f(x):
        # value at each column's argmax position == the column max
        return x[x.argmax()]
    result = tsdf.apply(f)
    expected = tsdf.max()
    assert_series_equal(result,expected)

    # .item()
    s = Series([1])
    result = s.item()
    self.assertEqual(result, 1)
    self.assertEqual(s.item(), s.iloc[0])

    # using an ndarray like function
    s = Series(np.random.randn(10))
    result = np.ones_like(s)
    expected = Series(1,index=range(10),dtype='float64')
    #assert_series_equal(result,expected)

    # ravel
    s = Series(np.random.randn(10))
    tm.assert_almost_equal(s.ravel(order='F'),s.values.ravel(order='F'))

    # compress
    # GH 6658
    s = Series([0, 1., -1], index=list('abc'))
    result = np.compress(s > 0, s)
    assert_series_equal(result, Series([1.], index=['b']))

    result = np.compress(s < -1, s)
    # result empty Index(dtype=object) as the same as original
    exp = Series([], dtype='float64', index=Index([], dtype='object'))
    assert_series_equal(result, exp)

    s = Series([0, 1., -1], index=[.1, .2, .3])
    result = np.compress(s > 0, s)
    assert_series_equal(result, Series([1.], index=[.2]))

    result = np.compress(s < -1, s)
    # result empty Float64Index as the same as original
    exp = Series([], dtype='float64', index=Index([], dtype='float64'))
    assert_series_equal(result, exp)
def test_complexx(self):
    """ndarray-style .real/.imag access and .real assignment (GH4819)."""
    base = np.arange(5)
    cplx = Series(base + 4j * base)

    tm.assert_almost_equal(base, cplx.real)
    tm.assert_almost_equal(4 * base, cplx.imag)

    # the real component is writable in place; imag is untouched
    cplx.real = np.arange(5) + 5
    tm.assert_almost_equal(base + 5, cplx.real)
    tm.assert_almost_equal(4 * base, cplx.imag)
def test_underlying_data_conversion(self):
    """Updating a column view writes through to the underlying DataFrame (GH 4080/3970/3217)."""
    # GH 4080
    df = DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c']))
    df.set_index(['a', 'b', 'c'], inplace=True)
    s = Series([1], index=[(2, 2, 2)])
    df['val'] = 0
    # NOTE(review): the bare `df` expression presumably forces evaluation of
    # the block layout before the chained update — confirm before removing.
    df
    df['val'].update(s)

    expected = DataFrame(dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
    expected.set_index(['a', 'b', 'c'], inplace=True)
    tm.assert_frame_equal(df, expected)

    # GH 3970
    # these are chained assignments as well
    pd.set_option('chained_assignment', None)
    df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
    df["cc"] = 0.0
    ck = [True] * len(df)
    df["bb"].iloc[0] = .13
    # NOTE(review): df_tmp appears unused but the intermediate take may be
    # what triggers the copy being tested — confirm before removing.
    df_tmp = df.iloc[ck]
    df["bb"].iloc[0] = .15
    self.assertEqual(df['bb'].iloc[0], 0.15)
    pd.set_option('chained_assignment', 'raise')

    # GH 3217
    df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
    df['c'] = np.nan
    df['c'].update(pd.Series(['foo'], index=[0]))

    expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=['foo', np.nan]))
    tm.assert_frame_equal(df, expected)
def test_operators_corner(self):
    """Arithmetic corner cases: empty operands propagate NaN/emptiness; int+float upcasts."""
    series = self.ts

    empty = Series([], index=Index([]))

    # non-empty + empty: union index, all values NaN
    result = series + empty
    self.assertTrue(np.isnan(result).all())

    result = empty + Series([], index=Index([]))
    self.assertEqual(len(result), 0)

    # TODO: this returned NotImplemented earlier, what to do?
    # deltas = Series([timedelta(1)] * 5, index=np.arange(5))
    # sub_deltas = deltas[::2]
    # deltas5 = deltas * 5
    # deltas = deltas + sub_deltas

    # float + int
    int_ts = self.ts.astype(int)[:-5]
    added = self.ts + int_ts
    expected = self.ts.values[:-5] + int_ts.values
    self.assert_numpy_array_equal(added[:-5], expected)
def test_operators_reverse_object(self):
    """Reflected arithmetic (scalar op Series) on object dtype matches the float result (GH 56)."""
    # GH 56
    arr = Series(np.random.randn(10), index=np.arange(10),
                 dtype=object)

    def _check_op(arr, op):
        # scalar-on-left exercises the reflected (r-) operator path
        result = op(1., arr)
        expected = op(1., arr.astype(float))
        assert_series_equal(result.astype(float), expected)

    _check_op(arr, operator.add)
    _check_op(arr, operator.sub)
    _check_op(arr, operator.mul)
    _check_op(arr, operator.truediv)
    _check_op(arr, operator.floordiv)
def test_series_frame_radd_bug(self):
    """String radd ('foo_' + Series/DataFrame) prepends element-wise; datetime radd raises (GH 353)."""
    import operator

    # GH 353
    vals = Series(tm.rands_array(5, 10))
    result = 'foo_' + vals
    expected = vals.map(lambda x: 'foo_' + x)
    assert_series_equal(result, expected)

    frame = DataFrame({'vals': vals})
    result = 'foo_' + frame
    expected = DataFrame({'vals': vals.map(lambda x: 'foo_' + x)})
    tm.assert_frame_equal(result, expected)

    # really raise this time
    self.assertRaises(TypeError, operator.add, datetime.now(), self.ts)
def test_operators_frame(self):
    """Series op DataFrame-column matches Series op Series for +, **, <, /."""
    # rpow does not work with DataFrame
    df = DataFrame({'A': self.ts})
    tm.assert_almost_equal(self.ts + self.ts, self.ts + df['A'])
    tm.assert_almost_equal(self.ts ** self.ts, self.ts ** df['A'])
    tm.assert_almost_equal(self.ts < self.ts, self.ts < df['A'])
    tm.assert_almost_equal(self.ts / self.ts, self.ts / df['A'])
def test_operators_combine(self):
    """Flex arithmetic (add/sub/... and r-variants) honors fill_value on union-aligned indexes."""
    def _check_fill(meth, op, a, b, fill_value=0):
        # Build the expected result by hand: op over the union index,
        # substituting fill_value where exactly one side is missing and
        # NaN where both sides are missing.
        exp_index = a.index.union(b.index)
        a = a.reindex(exp_index)
        b = b.reindex(exp_index)

        amask = isnull(a)
        bmask = isnull(b)

        exp_values = []
        for i in range(len(exp_index)):
            if amask[i]:
                if bmask[i]:
                    exp_values.append(nan)
                    continue
                exp_values.append(op(fill_value, b[i]))
            elif bmask[i]:
                if amask[i]:
                    exp_values.append(nan)
                    continue
                exp_values.append(op(a[i], fill_value))
            else:
                exp_values.append(op(a[i], b[i]))

        result = meth(a, b, fill_value=fill_value)
        expected = Series(exp_values, exp_index)
        assert_series_equal(result, expected)

    a = Series([nan, 1., 2., 3., nan], index=np.arange(5))
    b = Series([nan, 1, nan, 3, nan, 4.], index=np.arange(6))

    pairings = []
    for op in ['add', 'sub', 'mul', 'pow', 'truediv', 'floordiv']:
        fv = 0
        lop = getattr(Series, op)
        lequiv = getattr(operator, op)
        rop = getattr(Series, 'r' + op)
        # bind op at definition time...
        requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
        pairings.append((lop, lequiv, fv))
        pairings.append((rop, requiv, fv))

    if compat.PY3:
        pairings.append((Series.div, operator.truediv, 1))
        pairings.append((Series.rdiv, lambda x, y: operator.truediv(y, x), 1))
    else:
        pairings.append((Series.div, operator.div, 1))
        pairings.append((Series.rdiv, lambda x, y: operator.div(y, x), 1))

    for op, equiv_op, fv in pairings:
        result = op(a, b)
        exp = equiv_op(a, b)
        assert_series_equal(result, exp)
        _check_fill(op, equiv_op, a, b, fill_value=fv)
        # should accept axis=0 or axis='rows'
        op(a, b, axis=0)
def test_combine_first(self):
    """combine_first fills holes from the other Series; mixed dtypes and empty other handled."""
    values = tm.makeIntIndex(20).values.astype(float)
    series = Series(values, index=tm.makeIntIndex(20))

    series_copy = series * 2
    series_copy[::2] = np.NaN

    # nothing used from the input
    combined = series.combine_first(series_copy)
    self.assert_numpy_array_equal(combined, series)

    # Holes filled from input
    combined = series_copy.combine_first(series)
    self.assertTrue(np.isfinite(combined).all())
    self.assert_numpy_array_equal(combined[::2], series[::2])
    self.assert_numpy_array_equal(combined[1::2], series_copy[1::2])

    # mixed types
    index = tm.makeStringIndex(20)
    floats = Series(tm.randn(20), index=index)
    strings = Series(tm.makeStringIndex(10), index=index[::2])

    combined = strings.combine_first(floats)
    tm.assert_dict_equal(strings, combined, compare_keys=False)
    tm.assert_dict_equal(floats[1::2], combined, compare_keys=False)

    # corner case
    s = Series([1., 2, 3], index=[0, 1, 2])
    result = s.combine_first(Series([], index=[]))
    assert_series_equal(s, result)
def test_update(self):
    """Series.update overwrites with non-NaN values from the other Series, in place."""
    s = Series([1.5, nan, 3., 4., nan])
    s2 = Series([nan, 3.5, nan, 5.])
    s.update(s2)

    expected = Series([1.5, 3.5, 3., 5., np.nan])
    assert_series_equal(s, expected)

    # GH 3217
    df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
    df['c'] = np.nan

    # this will fail as long as series is a sub-class of ndarray
    # df['c'].update(Series(['foo'],index=[0])) #####
def test_corr(self):
    """Pearson correlation: full/partial/no overlap, min_periods, all-NA, vs scipy.stats."""
    tm._skip_if_no_scipy()

    import scipy.stats as stats

    # full overlap
    self.assertAlmostEqual(self.ts.corr(self.ts), 1)

    # partial overlap
    self.assertAlmostEqual(self.ts[:15].corr(self.ts[5:]), 1)

    # too few overlapping observations -> NaN
    self.assertTrue(isnull(self.ts[:15].corr(self.ts[5:], min_periods=12)))

    ts1 = self.ts[:15].reindex(self.ts.index)
    ts2 = self.ts[5:].reindex(self.ts.index)
    self.assertTrue(isnull(ts1.corr(ts2, min_periods=12)))

    # No overlap
    self.assertTrue(np.isnan(self.ts[::2].corr(self.ts[1::2])))

    # all NA
    cp = self.ts[:10].copy()
    cp[:] = np.nan
    self.assertTrue(isnull(cp.corr(cp)))

    A = tm.makeTimeSeries()
    B = tm.makeTimeSeries()
    result = A.corr(B)
    expected, _ = stats.pearsonr(A, B)
    self.assertAlmostEqual(result, expected)
def test_corr_rank(self):
    """Kendall/Spearman correlations match scipy.stats; fixed values cross-checked against R."""
    tm._skip_if_no_scipy()

    import scipy
    import scipy.stats as stats

    # kendall and spearman
    A = tm.makeTimeSeries()
    B = tm.makeTimeSeries()
    A[-5:] = A[:5]  # introduce ties to exercise the rank methods
    result = A.corr(B, method='kendall')
    expected = stats.kendalltau(A, B)[0]
    self.assertAlmostEqual(result, expected)

    result = A.corr(B, method='spearman')
    expected = stats.spearmanr(A, B)[0]
    self.assertAlmostEqual(result, expected)

    # these methods got rewritten in 0.8
    if scipy.__version__ < LooseVersion('0.9'):
        raise nose.SkipTest("skipping corr rank because of scipy version "
                            "{0}".format(scipy.__version__))

    # results from R
    A = Series([-0.89926396, 0.94209606, -1.03289164, -0.95445587,
                0.76910310, -0.06430576, -2.09704447, 0.40660407,
                -0.89926396, 0.94209606])
    B = Series([-1.01270225, -0.62210117, -1.56895827, 0.59592943,
                -0.01680292, 1.17258718, -1.06009347, -0.10222060,
                -0.89076239, 0.89372375])
    kexp = 0.4319297
    sexp = 0.5853767
    self.assertAlmostEqual(A.corr(B, method='kendall'), kexp)
    self.assertAlmostEqual(A.corr(B, method='spearman'), sexp)
def test_cov(self):
    """Covariance: full/partial/no overlap, all-NA, and min_periods handling."""
    # full overlap
    self.assertAlmostEqual(self.ts.cov(self.ts), self.ts.std() ** 2)

    # partial overlap
    self.assertAlmostEqual(
        self.ts[:15].cov(self.ts[5:]), self.ts[5:15].std() ** 2)

    # No overlap
    self.assertTrue(np.isnan(self.ts[::2].cov(self.ts[1::2])))

    # all NA
    cp = self.ts[:10].copy()
    cp[:] = np.nan
    self.assertTrue(isnull(cp.cov(cp)))

    # min_periods
    self.assertTrue(isnull(self.ts[:15].cov(self.ts[5:], min_periods=12)))

    ts1 = self.ts[:15].reindex(self.ts.index)
    ts2 = self.ts[5:].reindex(self.ts.index)
    self.assertTrue(isnull(ts1.cov(ts2, min_periods=12)))
def test_copy(self):
    """Mutating a copy leaves the original Series untouched."""
    ts = self.ts.copy()
    ts[::2] = np.NaN

    # Did not modify original Series
    self.assertFalse(np.isnan(self.ts[0]))
def test_count(self):
    """count() excludes NaN, both overall and per MultiIndex level."""
    self.assertEqual(self.ts.count(), len(self.ts))

    self.ts[::2] = np.NaN

    self.assertEqual(self.ts.count(), np.isfinite(self.ts).sum())

    # level-wise counting over a MultiIndex (NaN is a valid level label here)
    mi = MultiIndex.from_arrays([list('aabbcc'), [1, 2, 2, nan, 1, 2]])
    ts = Series(np.arange(len(mi)), index=mi)

    left = ts.count(level=1)
    right = Series([2, 3, 1], index=[1, 2, nan])
    assert_series_equal(left, right)

    ts.iloc[[0, 3, 5]] = nan
    assert_series_equal(ts.count(level=1), right - 1)
def test_dtype(self):
    """dtype/dtypes/ftype/ftypes and the dtype-count accessors agree for a float64 Series."""
    self.assertEqual(self.ts.dtype, np.dtype('float64'))
    self.assertEqual(self.ts.dtypes, np.dtype('float64'))
    self.assertEqual(self.ts.ftype, 'float64:dense')
    self.assertEqual(self.ts.ftypes, 'float64:dense')
    assert_series_equal(self.ts.get_dtype_counts(), Series(1, ['float64']))
    assert_series_equal(self.ts.get_ftype_counts(), Series(1, ['float64:dense']))
def test_dot(self):
    """Series.dot: product with a DataFrame, index alignment, ndarray/Series
    arguments, and shape-mismatch errors.
    """
    a = Series(np.random.randn(4), index=['p', 'q', 'r', 's'])
    b = DataFrame(np.random.randn(3, 4), index=['1', '2', '3'],
                  columns=['p', 'q', 'r', 's']).T

    result = a.dot(b)
    expected = Series(np.dot(a.values, b.values),
                      index=['1', '2', '3'])
    assert_series_equal(result, expected)

    # Check index alignment: dotting with a row-reversed frame must give the
    # same answer because Series.dot aligns on the index first.
    # BUG FIX: this previously computed a.dot(b) again, leaving b2 unused and
    # the alignment path untested.
    b2 = b.reindex(index=reversed(b.index))
    result = a.dot(b2)
    assert_series_equal(result, expected)

    # Check ndarray argument (positional, no alignment)
    result = a.dot(b.values)
    self.assertTrue(np.all(result == expected.values))
    assert_almost_equal(a.dot(b['2'].values), expected['2'])

    # Check series argument (aligned, so reversed b2 column works too)
    assert_almost_equal(a.dot(b['1']), expected['1'])
    assert_almost_equal(a.dot(b2['1']), expected['1'])

    # length mismatch / misaligned shapes raise
    self.assertRaises(Exception, a.dot, a.values[:3])
    self.assertRaises(ValueError, a.dot, b.T)
def test_value_counts_nunique(self):
    """nunique counts distinct non-NaN values (10 random + one repeated constant = 11)."""
    # basics.rst doc example
    series = Series(np.random.randn(500))
    series[20:500] = np.nan
    series[10:20] = 5000
    result = series.nunique()
    self.assertEqual(result, 11)
def test_unique(self):
    """unique() across float64, float32 and object dtypes; NaN collapses, None is kept (GH 714)."""
    # 714 also, dtype=float
    s = Series([1.2345] * 100)
    s[::2] = np.nan
    result = s.unique()
    self.assertEqual(len(result), 2)

    s = Series([1.2345] * 100, dtype='f4')
    s[::2] = np.nan
    result = s.unique()
    self.assertEqual(len(result), 2)

    # NAs in object arrays #714
    s = Series(['foo'] * 100, dtype='O')
    s[::2] = np.nan
    result = s.unique()
    self.assertEqual(len(result), 2)

    # decision about None
    s = Series([1, 2, 3, None, None, None], dtype=object)
    result = s.unique()
    expected = np.array([1, 2, 3, None], dtype=object)
    self.assert_numpy_array_equal(result, expected)
def test_dropna_empty(self):
    """dropna on an empty Series is a no-op; axis=1 is invalid for a Series."""
    s = Series([])
    self.assertEqual(len(s.dropna()), 0)
    s.dropna(inplace=True)
    self.assertEqual(len(s), 0)

    # invalid axis
    self.assertRaises(ValueError, s.dropna, axis=1)
def test_datetime64_tz_dropna(self):
    """dropna preserves dtype for both naive and tz-aware datetime64 Series."""
    # DatetimeBlock
    s = Series([Timestamp('2011-01-01 10:00'), pd.NaT,
                Timestamp('2011-01-03 10:00'), pd.NaT])
    result = s.dropna()
    expected = Series([Timestamp('2011-01-01 10:00'),
                       Timestamp('2011-01-03 10:00')], index=[0, 2])
    self.assert_series_equal(result, expected)

    # DatetimeBlockTZ
    idx = pd.DatetimeIndex(['2011-01-01 10:00', pd.NaT,
                            '2011-01-03 10:00', pd.NaT],
                           tz='Asia/Tokyo')
    s = pd.Series(idx)
    self.assertEqual(s.dtype, 'datetime64[ns, Asia/Tokyo]')
    result = s.dropna()
    expected = Series([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
                       Timestamp('2011-01-03 10:00', tz='Asia/Tokyo')],
                      index=[0, 2])
    self.assertEqual(result.dtype, 'datetime64[ns, Asia/Tokyo]')
    self.assert_series_equal(result, expected)
def test_dropna_no_nan(self):
    """dropna without NaNs returns an equal copy, never the same object."""
    for s in [Series([1, 2, 3], name='x'),
              Series([False, True, False], name='x')]:
        result = s.dropna()
        self.assert_series_equal(result, s)
        self.assertFalse(result is s)

        s2 = s.copy()
        s2.dropna(inplace=True)
        self.assert_series_equal(s2, s)
def test_axis_alias(self):
    """'rows' is accepted as an alias for axis 0 / 'index'."""
    s = Series([1, 2, np.nan])
    assert_series_equal(s.dropna(axis='rows'), s.dropna(axis='index'))
    self.assertEqual(s.dropna().sum('rows'), 3)
    self.assertEqual(s._get_axis_number('rows'), 0)
    self.assertEqual(s._get_axis_name('rows'), 'index')
def test_drop_duplicates(self):
    """duplicated/drop_duplicates with keep='first'/'last'/False, inplace, and the
    deprecated take_last alias, for int and string dtypes.
    """
    # check both int and object
    for s in [Series([1, 2, 3, 3]), Series(['1', '2', '3', '3'])]:
        expected = Series([False, False, False, True])
        assert_series_equal(s.duplicated(), expected)
        assert_series_equal(s.drop_duplicates(), s[~expected])
        sc = s.copy()
        sc.drop_duplicates(inplace=True)
        assert_series_equal(sc, s[~expected])

        expected = Series([False, False, True, False])
        assert_series_equal(s.duplicated(keep='last'), expected)
        assert_series_equal(s.drop_duplicates(keep='last'), s[~expected])
        sc = s.copy()
        sc.drop_duplicates(keep='last', inplace=True)
        assert_series_equal(sc, s[~expected])

        # deprecate take_last
        with tm.assert_produces_warning(FutureWarning):
            assert_series_equal(s.duplicated(take_last=True), expected)
        with tm.assert_produces_warning(FutureWarning):
            assert_series_equal(s.drop_duplicates(take_last=True), s[~expected])
        sc = s.copy()
        with tm.assert_produces_warning(FutureWarning):
            sc.drop_duplicates(take_last=True, inplace=True)
        assert_series_equal(sc, s[~expected])

        expected = Series([False, False, True, True])
        assert_series_equal(s.duplicated(keep=False), expected)
        assert_series_equal(s.drop_duplicates(keep=False), s[~expected])
        sc = s.copy()
        sc.drop_duplicates(keep=False, inplace=True)
        assert_series_equal(sc, s[~expected])

    for s in [Series([1, 2, 3, 5, 3, 2, 4]),
              Series(['1', '2', '3', '5', '3', '2', '4'])]:
        expected = Series([False, False, False, False, True, True, False])
        assert_series_equal(s.duplicated(), expected)
        assert_series_equal(s.drop_duplicates(), s[~expected])
        sc = s.copy()
        sc.drop_duplicates(inplace=True)
        assert_series_equal(sc, s[~expected])

        expected = Series([False, True, True, False, False, False, False])
        assert_series_equal(s.duplicated(keep='last'), expected)
        assert_series_equal(s.drop_duplicates(keep='last'), s[~expected])
        sc = s.copy()
        sc.drop_duplicates(keep='last', inplace=True)
        assert_series_equal(sc, s[~expected])

        # deprecate take_last
        with tm.assert_produces_warning(FutureWarning):
            assert_series_equal(s.duplicated(take_last=True), expected)
        with tm.assert_produces_warning(FutureWarning):
            assert_series_equal(s.drop_duplicates(take_last=True), s[~expected])
        sc = s.copy()
        with tm.assert_produces_warning(FutureWarning):
            sc.drop_duplicates(take_last=True, inplace=True)
        assert_series_equal(sc, s[~expected])

        expected = Series([False, True, True, False, True, True, False])
        assert_series_equal(s.duplicated(keep=False), expected)
        assert_series_equal(s.drop_duplicates(keep=False), s[~expected])
        sc = s.copy()
        sc.drop_duplicates(keep=False, inplace=True)
        assert_series_equal(sc, s[~expected])
def test_sort_values(self):
    """sort_values: deprecation of .sort(), inplace vs copy semantics, and the
    view-protection ValueError (GH 9816, GH 5856/5853, GH 6859).
    """
    ts = self.ts.copy()

    # 9816 deprecated
    with tm.assert_produces_warning(FutureWarning):
        ts.sort()  # sorts in place as a side effect

    self.assert_numpy_array_equal(ts, self.ts.sort_values())
    self.assert_numpy_array_equal(ts.index, self.ts.sort_values().index)

    ts.sort_values(ascending=False, inplace=True)
    self.assert_numpy_array_equal(ts, self.ts.sort_values(ascending=False))
    self.assert_numpy_array_equal(ts.index,
                                  self.ts.sort_values(ascending=False).index)

    # GH 5856/5853
    # Series.sort_values operating on a view must raise
    df = DataFrame(np.random.randn(10, 4))
    s = df.iloc[:, 0]

    def f():
        s.sort_values(inplace=True)

    self.assertRaises(ValueError, f)

    # test order/sort inplace
    # GH6859
    ts1 = self.ts.copy()
    ts1.sort_values(ascending=False, inplace=True)
    ts2 = self.ts.copy()
    ts2.sort_values(ascending=False, inplace=True)
    assert_series_equal(ts1, ts2)

    ts1 = self.ts.copy()
    ts1 = ts1.sort_values(ascending=False, inplace=False)
    ts2 = self.ts.copy()
    # BUG FIX: this previously sorted the already-sorted `ts` from above,
    # leaving the fresh `ts2` copy unused and the inplace=False path untested.
    ts2 = ts2.sort_values(ascending=False)
    assert_series_equal(ts1, ts2)
def test_sort_index(self):
    """sort_index restores order after a shuffle, both ascending and descending."""
    rindex = list(self.ts.index)
    random.shuffle(rindex)

    random_order = self.ts.reindex(rindex)
    sorted_series = random_order.sort_index()
    assert_series_equal(sorted_series, self.ts)

    # descending
    sorted_series = random_order.sort_index(ascending=False)
    assert_series_equal(sorted_series,
                        self.ts.reindex(self.ts.index[::-1]))
def test_sort_index_inplace(self):
    """sort_index(inplace=True) mutates in place and returns None (GH 11402)."""
    # For #11402
    rindex = list(self.ts.index)
    random.shuffle(rindex)

    # descending
    random_order = self.ts.reindex(rindex)
    result = random_order.sort_index(ascending=False, inplace=True)
    self.assertIs(result, None,
                  msg='sort_index() inplace should return None')
    assert_series_equal(random_order,
                        self.ts.reindex(self.ts.index[::-1]))

    # ascending
    random_order = self.ts.reindex(rindex)
    result = random_order.sort_index(ascending=True, inplace=True)
    self.assertIs(result, None,
                  msg='sort_index() inplace should return None')
    assert_series_equal(random_order, self.ts)
def test_sort_API(self):
    """Sorting API compat (GH 9816): level= and axis= arguments; axis=1 is invalid."""
    # API for 9816

    # sortlevel
    mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
    s = Series([1, 2], mi)
    backwards = s.iloc[[1, 0]]

    res = s.sort_index(level='A')
    assert_series_equal(backwards, res)

    # sort_index
    rindex = list(self.ts.index)
    random.shuffle(rindex)

    random_order = self.ts.reindex(rindex)
    sorted_series = random_order.sort_index(level=0)
    assert_series_equal(sorted_series, self.ts)

    # compat on axis
    sorted_series = random_order.sort_index(axis=0)
    assert_series_equal(sorted_series, self.ts)

    self.assertRaises(ValueError, lambda: random_order.sort_values(axis=1))

    sorted_series = random_order.sort_index(level=0, axis=0)
    assert_series_equal(sorted_series, self.ts)

    self.assertRaises(ValueError, lambda: random_order.sort_index(level=0, axis=1))
def test_order(self):
    """Deprecated .order() warns; sort_values NaN placement (last by default, na_position='first')."""
    # 9816 deprecated
    with tm.assert_produces_warning(FutureWarning):
        self.ts.order()

    ts = self.ts.copy()
    ts[:5] = np.NaN
    vals = ts.values

    result = ts.sort_values()
    self.assertTrue(np.isnan(result[-5:]).all())
    self.assert_numpy_array_equal(result[:-5], np.sort(vals[5:]))

    result = ts.sort_values(na_position='first')
    self.assertTrue(np.isnan(result[:5]).all())
    self.assert_numpy_array_equal(result[5:], np.sort(vals[5:]))

    # something object-type
    ser = Series(['A', 'B'], [1, 2])
    # no failure
    ser.sort_values()

    # ascending=False
    ordered = ts.sort_values(ascending=False)
    expected = np.sort(ts.valid().values)[::-1]
    assert_almost_equal(expected, ordered.valid().values)
    ordered = ts.sort_values(ascending=False, na_position='first')
    assert_almost_equal(expected, ordered.valid().values)
def test_nsmallest_nlargest(self):
    """nsmallest/nlargest across numeric/datetime dtypes; unsupported dtypes raise;
    keep='first'/'last', deprecated take_last, n clamping, NaN exclusion, bad keep values.
    """
    # float, int, datetime64 (use i8), timedelts64 (same),
    # object that are numbers, object that are strings
    base = [3, 2, 1, 2, 5]

    s_list = [
        Series(base, dtype='int8'),
        Series(base, dtype='int16'),
        Series(base, dtype='int32'),
        Series(base, dtype='int64'),
        Series(base, dtype='float32'),
        Series(base, dtype='float64'),
        Series(base, dtype='uint8'),
        Series(base, dtype='uint16'),
        Series(base, dtype='uint32'),
        Series(base, dtype='uint64'),
        Series(base).astype('timedelta64[ns]'),
        Series(pd.to_datetime(['2003', '2002', '2001', '2002', '2005'])),
    ]

    raising = [
        Series([3., 2, 1, 2, '5'], dtype='object'),
        Series([3., 2, 1, 2, 5], dtype='object'),
        # not supported on some archs
        # Series([3., 2, 1, 2, 5], dtype='complex256'),
        Series([3., 2, 1, 2, 5], dtype='complex128'),
    ]

    for r in raising:
        dt = r.dtype
        msg = "Cannot use method 'n(larg|small)est' with dtype %s" % dt
        args = 2, len(r), 0, -1
        methods = r.nlargest, r.nsmallest
        for method, arg in product(methods, args):
            with tm.assertRaisesRegexp(TypeError, msg):
                method(arg)

    for s in s_list:
        assert_series_equal(s.nsmallest(2), s.iloc[[2, 1]])
        assert_series_equal(s.nsmallest(2, keep='last'), s.iloc[[2, 3]])
        with tm.assert_produces_warning(FutureWarning):
            assert_series_equal(s.nsmallest(2, take_last=True), s.iloc[[2, 3]])

        assert_series_equal(s.nlargest(3), s.iloc[[4, 0, 1]])
        assert_series_equal(s.nlargest(3, keep='last'), s.iloc[[4, 0, 3]])
        with tm.assert_produces_warning(FutureWarning):
            assert_series_equal(s.nlargest(3, take_last=True), s.iloc[[4, 0, 3]])

        # n <= 0 yields empty; n >= len(s) returns everything sorted
        empty = s.iloc[0:0]
        assert_series_equal(s.nsmallest(0), empty)
        assert_series_equal(s.nsmallest(-1), empty)
        assert_series_equal(s.nlargest(0), empty)
        assert_series_equal(s.nlargest(-1), empty)

        assert_series_equal(s.nsmallest(len(s)), s.sort_values())
        assert_series_equal(s.nsmallest(len(s) + 1), s.sort_values())
        assert_series_equal(s.nlargest(len(s)), s.iloc[[4, 0, 1, 3, 2]])
        assert_series_equal(s.nlargest(len(s) + 1),
                            s.iloc[[4, 0, 1, 3, 2]])

    s = Series([3., np.nan, 1, 2, 5])
    assert_series_equal(s.nlargest(), s.iloc[[4, 0, 3, 2]])
    assert_series_equal(s.nsmallest(), s.iloc[[2, 3, 0, 4]])

    msg = 'keep must be either "first", "last"'
    with tm.assertRaisesRegexp(ValueError, msg):
        s.nsmallest(keep='invalid')
    with tm.assertRaisesRegexp(ValueError, msg):
        s.nlargest(keep='invalid')
def test_rank(self):
    """rank() vs scipy.stats.rankdata; pct=True normalization; NaN propagation;
    tiny-magnitude values ranked by true ordering.
    """
    tm._skip_if_no_scipy()
    from scipy.stats import rankdata

    self.ts[::2] = np.nan
    self.ts[:10][::3] = 4.

    ranks = self.ts.rank()
    oranks = self.ts.astype('O').rank()

    assert_series_equal(ranks, oranks)

    mask = np.isnan(self.ts)
    filled = self.ts.fillna(np.inf)

    # rankdata returns a ndarray
    exp = Series(rankdata(filled), index=filled.index)
    exp[mask] = np.nan

    assert_almost_equal(ranks, exp)

    iseries = Series(np.arange(5).repeat(2))

    iranks = iseries.rank()
    exp = iseries.astype(float).rank()
    assert_series_equal(iranks, exp)
    iseries = Series(np.arange(5)) + 1.0
    exp = iseries / 5.0
    iranks = iseries.rank(pct=True)

    assert_series_equal(iranks, exp)

    iseries = Series(np.repeat(1, 100))
    exp = Series(np.repeat(0.505, 100))
    iranks = iseries.rank(pct=True)
    assert_series_equal(iranks, exp)

    iseries[1] = np.nan
    exp = Series(np.repeat(50.0 / 99.0, 100))
    exp[1] = np.nan
    iranks = iseries.rank(pct=True)
    assert_series_equal(iranks, exp)

    iseries = Series(np.arange(5)) + 1.0
    iseries[4] = np.nan
    exp = iseries / 4.0
    iranks = iseries.rank(pct=True)
    assert_series_equal(iranks, exp)

    iseries = Series(np.repeat(np.nan, 100))
    exp = iseries.copy()
    iranks = iseries.rank(pct=True)
    assert_series_equal(iranks, exp)

    iseries = Series(np.arange(5)) + 1
    iseries[4] = np.nan
    exp = iseries / 4.0
    iranks = iseries.rank(pct=True)
    assert_series_equal(iranks, exp)

    rng = date_range('1/1/1990', periods=5)
    iseries = Series(np.arange(5), rng) + 1
    iseries.ix[4] = np.nan
    exp = iseries / 4.0
    iranks = iseries.rank(pct=True)
    assert_series_equal(iranks, exp)

    iseries = Series([1e-50, 1e-100, 1e-20, 1e-2, 1e-20 + 1e-30, 1e-1])
    exp = Series([2, 1, 3, 5, 4, 6.0])
    iranks = iseries.rank()
    assert_series_equal(iranks, exp)

    values = np.array([-50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40], dtype='float64')
    random_order = np.random.permutation(len(values))
    iseries = Series(values[random_order])
    exp = Series(random_order + 1.0, dtype='float64')
    iranks = iseries.rank()
    assert_series_equal(iranks, exp)
def test_rank_inf(self):
    """Disabled: rank() does not yet order +/-inf correctly; body is unreachable until fixed."""
    raise nose.SkipTest('DataFrame.rank does not currently rank np.inf and -np.inf properly')

    values = np.array([-np.inf, -50, -1, -1e-20, -1e-25, -1e-50, 0, 1e-40, 1e-20, 1e-10, 2, 40, np.inf], dtype='float64')
    random_order = np.random.permutation(len(values))
    iseries = Series(values[random_order])
    exp = Series(random_order + 1.0, dtype='float64')
    iranks = iseries.rank()
    assert_series_equal(iranks, exp)
def test_from_csv(self):
    """to_csv/from_csv round trip: names, header handling (GH 10483), custom sep,
    and parse_dates=False keeping string index.
    """
    with ensure_clean() as path:
        self.ts.to_csv(path)
        ts = Series.from_csv(path)
        assert_series_equal(self.ts, ts, check_names=False)
        self.assertTrue(ts.name is None)
        self.assertTrue(ts.index.name is None)

        # GH10483
        self.ts.to_csv(path, header=True)
        ts_h = Series.from_csv(path, header=0)
        self.assertTrue(ts_h.name == 'ts')

        self.series.to_csv(path)
        series = Series.from_csv(path)
        self.assertIsNone(series.name)
        self.assertIsNone(series.index.name)
        assert_series_equal(self.series, series, check_names=False)
        self.assertTrue(series.name is None)
        self.assertTrue(series.index.name is None)

        self.series.to_csv(path, header=True)
        series_h = Series.from_csv(path, header=0)
        self.assertTrue(series_h.name == 'series')

        # custom separator, dates parsed by default
        outfile = open(path, 'w')
        outfile.write('1998-01-01|1.0\n1999-01-01|2.0')
        outfile.close()
        series = Series.from_csv(path, sep='|')
        checkseries = Series(
            {datetime(1998, 1, 1): 1.0, datetime(1999, 1, 1): 2.0})
        assert_series_equal(checkseries, series)

        series = Series.from_csv(path, sep='|', parse_dates=False)
        checkseries = Series({'1998-01-01': 1.0, '1999-01-01': 2.0})
        assert_series_equal(checkseries, series)
def test_to_csv(self):
    """to_csv writes one record per line; index=False output round-trips via np.loadtxt."""
    import io

    with ensure_clean() as path:
        self.ts.to_csv(path)

        lines = io.open(path, newline=None).readlines()
        assert(lines[1] != '\n')

        self.ts.to_csv(path, index=False)
        arr = np.loadtxt(path)
        assert_almost_equal(arr, self.ts.values)
def test_to_csv_unicode_index(self):
    """Unicode values and index survive a UTF-8 to_csv/from_csv round trip."""
    buf = StringIO()
    s = Series([u("\u05d0"), "d2"], index=[u("\u05d0"), u("\u05d1")])

    s.to_csv(buf, encoding='UTF-8')
    buf.seek(0)

    s2 = Series.from_csv(buf, index_col=0, encoding='UTF-8')

    assert_series_equal(s, s2)
def test_tolist(self):
    """tolist matches ndarray.tolist; datetime64 entries come back as Timestamps equal to index."""
    rs = self.ts.tolist()
    xp = self.ts.values.tolist()
    assert_almost_equal(rs, xp)

    # datetime64
    s = Series(self.ts.index)
    rs = s.tolist()
    self.assertEqual(self.ts.index[0], rs[0])
def test_to_frame(self):
    """to_frame uses the Series name (or 0 when unnamed) as the column, or an explicit name."""
    self.ts.name = None
    rs = self.ts.to_frame()
    xp = pd.DataFrame(self.ts.values, index=self.ts.index)
    assert_frame_equal(rs, xp)

    self.ts.name = 'testname'
    rs = self.ts.to_frame()
    xp = pd.DataFrame(dict(testname=self.ts.values), index=self.ts.index)
    assert_frame_equal(rs, xp)

    rs = self.ts.to_frame(name='testdifferent')
    xp = pd.DataFrame(dict(testdifferent=self.ts.values), index=self.ts.index)
    assert_frame_equal(rs, xp)
def test_to_dict(self):
    """to_dict round-trips through the Series constructor."""
    self.assert_numpy_array_equal(Series(self.ts.to_dict()), self.ts)
def test_to_csv_float_format(self):
    """float_format controls the precision written by to_csv."""
    with ensure_clean() as filename:
        ser = Series([0.123456, 0.234567, 0.567567])
        ser.to_csv(filename, float_format='%.2f')

        rs = Series.from_csv(filename)
        xp = Series([0.12, 0.23, 0.57])
        assert_series_equal(rs, xp)
def test_to_csv_list_entries(self):
    """to_csv on a Series of lists (from str.split) does not raise."""
    s = Series(['jack and jill', 'jesse and frank'])

    split = s.str.split(r'\s+and\s+')

    buf = StringIO()
    split.to_csv(buf)
def test_to_csv_path_is_none(self):
    """to_csv(path=None) returns the CSV as a string, like DataFrame.to_csv (GH 8215)."""
    # GH 8215
    # Series.to_csv() was returning None, inconsistent with
    # DataFrame.to_csv() which returned string
    s = Series([1, 2, 3])
    csv_str = s.to_csv(path=None)
    self.assertIsInstance(csv_str, str)
def test_str_attribute(self):
    """.str accessor applies string methods element-wise and rejects non-string dtypes (GH 9068)."""
    # GH9068
    methods = ['strip', 'rstrip', 'lstrip']
    s = Series([' jack', 'jill ', ' jesse ', 'frank'])
    for method in methods:
        expected = Series([getattr(str, method)(x) for x in s.values])
        assert_series_equal(getattr(Series.str, method)(s.str), expected)

    # str accessor only valid with string values
    s = Series(range(5))
    with self.assertRaisesRegexp(AttributeError, 'only use .str accessor'):
        s.str.repeat(2)
def test_clip(self):
    """clip/clip_lower/clip_upper bound values and match np.clip."""
    val = self.ts.median()

    self.assertEqual(self.ts.clip_lower(val).min(), val)
    self.assertEqual(self.ts.clip_upper(val).max(), val)

    self.assertEqual(self.ts.clip(lower=val).min(), val)
    self.assertEqual(self.ts.clip(upper=val).max(), val)

    result = self.ts.clip(-0.5, 0.5)
    expected = np.clip(self.ts, -0.5, 0.5)
    assert_series_equal(result, expected)
    tm.assertIsInstance(expected, Series)
def test_clip_types_and_nulls(self):
    """clip_lower/clip_upper bound float, object and datetime Series while preserving nulls."""
    sers = [Series([np.nan, 1.0, 2.0, 3.0]),
            Series([None, 'a', 'b', 'c']),
            Series(pd.to_datetime([np.nan, 1, 2, 3], unit='D'))]

    for s in sers:
        thresh = s[2]
        l = s.clip_lower(thresh)
        u = s.clip_upper(thresh)
        self.assertEqual(l[notnull(l)].min(), thresh)
        self.assertEqual(u[notnull(u)].max(), thresh)
        # null positions are unchanged by clipping
        self.assertEqual(list(isnull(s)), list(isnull(l)))
        self.assertEqual(list(isnull(s)), list(isnull(u)))
def test_clip_against_series(self):
    """clip accepts Series thresholds, element-wise, and mixed scalar/Series bounds (GH 6966)."""
    # GH #6966

    s = Series([1.0, 1.0, 4.0])
    threshold = Series([1.0, 2.0, 3.0])

    assert_series_equal(s.clip_lower(threshold), Series([1.0, 2.0, 4.0]))
    assert_series_equal(s.clip_upper(threshold), Series([1.0, 1.0, 3.0]))

    lower = Series([1.0, 2.0, 3.0])
    upper = Series([1.5, 2.5, 3.5])
    assert_series_equal(s.clip(lower, upper), Series([1.0, 2.0, 3.5]))
    assert_series_equal(s.clip(1.5, upper), Series([1.5, 1.5, 3.5]))
def test_valid(self):
    """valid() drops NaN entries and keeps the surviving values intact."""
    ts = self.ts.copy()
    ts[::2] = np.NaN

    result = ts.valid()
    self.assertEqual(len(result), ts.count())

    tm.assert_dict_equal(result, ts, compare_keys=False)
def test_isnull(self):
    """isnull flags NaN; empty strings are NOT considered null."""
    ser = Series([0, 5.4, 3, nan, -0.001])
    # BUG FIX: np.array_equal's boolean result was previously discarded, so
    # this test asserted nothing; wrap it in an explicit assertion.
    self.assertTrue(np.array_equal(
        ser.isnull(), Series([False, False, False, True, False]).values))
    ser = Series(["hi", "", nan])
    self.assertTrue(np.array_equal(
        ser.isnull(), Series([False, False, True]).values))
def test_notnull(self):
    """notnull is the complement of isnull; empty strings count as non-null."""
    ser = Series([0, 5.4, 3, nan, -0.001])
    # BUG FIX: np.array_equal's boolean result was previously discarded, so
    # this test asserted nothing; wrap it in an explicit assertion.
    self.assertTrue(np.array_equal(
        ser.notnull(), Series([True, True, True, False, True]).values))
    ser = Series(["hi", "", nan])
    self.assertTrue(np.array_equal(
        ser.notnull(), Series([True, True, False]).values))
def test_shift(self):
    """shift by positions and by freq; zero shift; PeriodIndex shifting; numpy-int
    periods (GH 8129); tz-aware differencing and incompatible-tz errors (GH 8260).
    """
    shifted = self.ts.shift(1)
    unshifted = shifted.shift(-1)

    tm.assert_dict_equal(unshifted.valid(), self.ts, compare_keys=False)

    offset = datetools.bday
    shifted = self.ts.shift(1, freq=offset)
    unshifted = shifted.shift(-1, freq=offset)

    assert_series_equal(unshifted, self.ts)

    unshifted = self.ts.shift(0, freq=offset)
    assert_series_equal(unshifted, self.ts)

    shifted = self.ts.shift(1, freq='B')
    unshifted = shifted.shift(-1, freq='B')

    assert_series_equal(unshifted, self.ts)

    # corner case
    unshifted = self.ts.shift(0)
    assert_series_equal(unshifted, self.ts)

    # Shifting with PeriodIndex
    ps = tm.makePeriodSeries()
    shifted = ps.shift(1)
    unshifted = shifted.shift(-1)
    tm.assert_dict_equal(unshifted.valid(), ps, compare_keys=False)

    shifted2 = ps.shift(1, 'B')
    shifted3 = ps.shift(1, datetools.bday)
    assert_series_equal(shifted2, shifted3)
    assert_series_equal(ps, shifted2.shift(-1, 'B'))

    self.assertRaises(ValueError, ps.shift, freq='D')

    # legacy support
    shifted4 = ps.shift(1, freq='B')
    assert_series_equal(shifted2, shifted4)

    shifted5 = ps.shift(1, freq=datetools.bday)
    assert_series_equal(shifted5, shifted4)

    # 32-bit taking
    # GH 8129
    index = date_range('2000-01-01', periods=5)
    for dtype in ['int32', 'int64']:
        s1 = Series(np.arange(5, dtype=dtype), index=index)
        # periods comes from the Series itself, so it is a numpy integer
        p = s1.iloc[1]
        result = s1.shift(periods=p)
        expected = Series([np.nan, 0, 1, 2, 3], index=index)
        assert_series_equal(result, expected)

    # xref 8260
    # with tz
    s = Series(date_range('2000-01-01 09:00:00', periods=5, tz='US/Eastern'), name='foo')
    result = s - s.shift()
    assert_series_equal(result, Series(TimedeltaIndex(['NaT'] + ['1 days'] * 4), name='foo'))

    # incompat tz
    s2 = Series(date_range('2000-01-01 09:00:00', periods=5, tz='CET'), name='foo')
    self.assertRaises(ValueError, lambda: s - s2)
def test_tshift(self):
    """tshift moves the index by freq for Period/Datetime indexes; no-freq index raises."""
    # PeriodIndex
    ps = tm.makePeriodSeries()
    shifted = ps.tshift(1)
    unshifted = shifted.tshift(-1)

    assert_series_equal(unshifted, ps)

    shifted2 = ps.tshift(freq='B')
    assert_series_equal(shifted, shifted2)

    shifted3 = ps.tshift(freq=datetools.bday)
    assert_series_equal(shifted, shifted3)

    self.assertRaises(ValueError, ps.tshift, freq='M')

    # DatetimeIndex
    shifted = self.ts.tshift(1)
    unshifted = shifted.tshift(-1)

    assert_series_equal(self.ts, unshifted)

    shifted2 = self.ts.tshift(freq=self.ts.index.freq)
    assert_series_equal(shifted, shifted2)

    # freq inferred from an unlabelled DatetimeIndex
    inferred_ts = Series(self.ts.values, Index(np.asarray(self.ts.index)),
                         name='ts')
    shifted = inferred_ts.tshift(1)
    unshifted = shifted.tshift(-1)
    assert_series_equal(shifted, self.ts.tshift(1))
    assert_series_equal(unshifted, inferred_ts)

    no_freq = self.ts[[0, 5, 7]]
    self.assertRaises(ValueError, no_freq.tshift)
def test_shift_int(self):
    """Shifting an int Series upcasts to float (to hold the introduced NaN)."""
    ts = self.ts.astype(int)
    shifted = ts.shift(1)
    expected = ts.astype(float).shift(1)
    assert_series_equal(shifted, expected)
def test_shift_categorical(self):
    """Shifting a categorical Series preserves index and categories; vacated slots get code -1 (GH 9416)."""
    # GH 9416
    s = pd.Series(['a', 'b', 'c', 'd'], dtype='category')

    assert_series_equal(s.iloc[:-1], s.shift(1).shift(-1).valid())

    sp1 = s.shift(1)
    assert_index_equal(s.index, sp1.index)
    self.assertTrue(np.all(sp1.values.codes[:1] == -1))
    self.assertTrue(np.all(s.values.codes[:-1] == sp1.values.codes[1:]))

    sn2 = s.shift(-2)
    assert_index_equal(s.index, sn2.index)
    self.assertTrue(np.all(sn2.values.codes[-2:] == -1))
    self.assertTrue(np.all(s.values.codes[2:] == sn2.values.codes[:-2]))

    assert_index_equal(s.values.categories, sp1.values.categories)
    assert_index_equal(s.values.categories, sn2.values.categories)
def test_truncate(self):
    """truncate(before, after) slices by label, inclusive on both ends."""
    offset = datetools.bday
    ts = self.ts[::3]
    # labels present in ts vs labels that fall between its entries
    start, end = self.ts.index[3], self.ts.index[6]
    start_missing, end_missing = self.ts.index[2], self.ts.index[7]
    # neither specified: no-op copy
    truncated = ts.truncate()
    assert_series_equal(truncated, ts)
    # both specified
    expected = ts[1:3]
    truncated = ts.truncate(start, end)
    assert_series_equal(truncated, expected)
    # missing boundary labels behave like the nearest interior labels
    truncated = ts.truncate(start_missing, end_missing)
    assert_series_equal(truncated, expected)
    # start specified
    expected = ts[1:]
    truncated = ts.truncate(before=start)
    assert_series_equal(truncated, expected)
    truncated = ts.truncate(before=start_missing)
    assert_series_equal(truncated, expected)
    # end specified
    expected = ts[:3]
    truncated = ts.truncate(after=end)
    assert_series_equal(truncated, expected)
    truncated = ts.truncate(after=end_missing)
    assert_series_equal(truncated, expected)
    # corner case, empty series returned
    truncated = ts.truncate(after=self.ts.index[0] - offset)
    assert(len(truncated) == 0)
    truncated = ts.truncate(before=self.ts.index[-1] + offset)
    assert(len(truncated) == 0)
    # before > after is an error
    self.assertRaises(ValueError, ts.truncate,
                      before=self.ts.index[-1] + offset,
                      after=self.ts.index[0] - offset)
def test_ptp(self):
    """ptp (max - min) matches numpy, handles NaN/skipna/level/axis."""
    N = 1000
    arr = np.random.randn(N)
    ser = Series(arr)
    self.assertEqual(np.ptp(ser), np.ptp(arr))
    # GH11163: NaN is skipped by default, propagated with skipna=False
    s = Series([3, 5, np.nan, -3, 10])
    self.assertEqual(s.ptp(), 13)
    self.assertTrue(pd.isnull(s.ptp(skipna=False)))
    # level= computes ptp per group of the MultiIndex
    mi = pd.MultiIndex.from_product([['a','b'], [1,2,3]])
    s = pd.Series([1, np.nan, 7, 3, 5, np.nan], index=mi)
    expected = pd.Series([6, 2], index=['a', 'b'], dtype=np.float64)
    self.assert_series_equal(s.ptp(level=0), expected)
    expected = pd.Series([np.nan, np.nan], index=['a', 'b'])
    self.assert_series_equal(s.ptp(level=0, skipna=False), expected)
    # a Series has no axis=1
    with self.assertRaises(ValueError):
        s.ptp(axis=1)
    # strings have no ordering arithmetic for ptp
    s = pd.Series(['a', 'b', 'c', 'd', 'e'])
    with self.assertRaises(TypeError):
        s.ptp()
    # numeric_only is not implemented for Series.ptp
    with self.assertRaises(NotImplementedError):
        s.ptp(numeric_only=True)
def test_asof(self):
    """asof returns the last valid value at or before each requested label."""
    # array or list or dates
    N = 50
    rng = date_range('1/1/1990', periods=N, freq='53s')
    ts = Series(np.random.randn(N), index=rng)
    ts[15:30] = np.nan
    dates = date_range('1/1/1990', periods=N * 3, freq='25s')
    result = ts.asof(dates)
    self.assertTrue(notnull(result).all())
    lb = ts.index[14]
    ub = ts.index[30]
    # same query with a plain list instead of a DatetimeIndex
    # (lb/ub recomputed identically below — kept for symmetry)
    result = ts.asof(list(dates))
    self.assertTrue(notnull(result).all())
    lb = ts.index[14]
    ub = ts.index[30]
    # inside the NaN stretch, asof should keep reporting ts[lb]
    mask = (result.index >= lb) & (result.index < ub)
    rs = result[mask]
    self.assertTrue((rs == ts[lb]).all())
    # at/after ub the underlying values resume
    val = result[result.index[result.index >= ub][0]]
    self.assertEqual(ts[ub], val)
    self.ts[5:10] = np.NaN
    self.ts[15:20] = np.NaN
    val1 = self.ts.asof(self.ts.index[7])
    val2 = self.ts.asof(self.ts.index[19])
    self.assertEqual(val1, self.ts[4])
    self.assertEqual(val2, self.ts[14])
    # accepts strings
    val1 = self.ts.asof(str(self.ts.index[7]))
    self.assertEqual(val1, self.ts[4])
    # in there
    self.assertEqual(self.ts.asof(self.ts.index[3]), self.ts[3])
    # no as of value
    d = self.ts.index[0] - datetools.bday
    self.assertTrue(np.isnan(self.ts.asof(d)))
def test_getitem_setitem_datetimeindex(self):
    """String, datetime, and partial-date indexing on a tz-aware DatetimeIndex."""
    from pandas import date_range
    N = 50
    # testing with timezone, GH #2785
    rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
    ts = Series(np.random.randn(N), index=rng)
    # scalar string lookup
    result = ts["1990-01-01 04:00:00"]
    expected = ts[4]
    self.assertEqual(result, expected)
    # setting via string label (zero first so the restore is observable)
    result = ts.copy()
    result["1990-01-01 04:00:00"] = 0
    result["1990-01-01 04:00:00"] = ts[4]
    assert_series_equal(result, ts)
    # string slice (inclusive of both endpoints)
    result = ts["1990-01-01 04:00:00":"1990-01-01 07:00:00"]
    expected = ts[4:8]
    assert_series_equal(result, expected)
    result = ts.copy()
    result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = 0
    result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = ts[4:8]
    assert_series_equal(result, ts)
    # boolean mask built from string labels matches the slice
    lb = "1990-01-01 04:00:00"
    rb = "1990-01-01 07:00:00"
    result = ts[(ts.index >= lb) & (ts.index <= rb)]
    expected = ts[4:8]
    assert_series_equal(result, expected)
    # repeat all the above with naive datetimes
    result = ts[datetime(1990, 1, 1, 4)]
    expected = ts[4]
    self.assertEqual(result, expected)
    result = ts.copy()
    result[datetime(1990, 1, 1, 4)] = 0
    result[datetime(1990, 1, 1, 4)] = ts[4]
    assert_series_equal(result, ts)
    result = ts[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)]
    expected = ts[4:8]
    assert_series_equal(result, expected)
    result = ts.copy()
    result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = 0
    result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = ts[4:8]
    assert_series_equal(result, ts)
    lb = datetime(1990, 1, 1, 4)
    rb = datetime(1990, 1, 1, 7)
    result = ts[(ts.index >= lb) & (ts.index <= rb)]
    expected = ts[4:8]
    assert_series_equal(result, expected)
    # indexing with Timestamp objects taken from the index itself
    result = ts[ts.index[4]]
    expected = ts[4]
    self.assertEqual(result, expected)
    result = ts[ts.index[4:8]]
    expected = ts[4:8]
    assert_series_equal(result, expected)
    result = ts.copy()
    result[ts.index[4:8]] = 0
    result[4:8] = ts[4:8]
    assert_series_equal(result, ts)
    # also test partial date slicing
    result = ts["1990-01-02"]
    expected = ts[24:48]
    assert_series_equal(result, expected)
    result = ts.copy()
    result["1990-01-02"] = 0
    result["1990-01-02"] = ts[24:48]
    assert_series_equal(result, ts)
def test_getitem_setitem_datetime_tz_pytz(self):
    """Setting via labels in other timezones resolves to the same instant (pytz)."""
    tm._skip_if_no_pytz()
    from pytz import timezone as tz
    from pandas import date_range
    N = 50
    # testing with timezone, GH #2785
    rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
    ts = Series(np.random.randn(N), index=rng)
    # also test Timestamp tz handling, GH #2789
    # UTC string label for the same instant as ts[4]
    result = ts.copy()
    result["1990-01-01 09:00:00+00:00"] = 0
    result["1990-01-01 09:00:00+00:00"] = ts[4]
    assert_series_equal(result, ts)
    # fixed-offset string label for the same instant
    result = ts.copy()
    result["1990-01-01 03:00:00-06:00"] = 0
    result["1990-01-01 03:00:00-06:00"] = ts[4]
    assert_series_equal(result, ts)
    # repeat with datetimes
    result = ts.copy()
    result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0
    result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]
    assert_series_equal(result, ts)
    result = ts.copy()
    # comparison dates with datetime MUST be localized!
    date = tz('US/Central').localize(datetime(1990, 1, 1, 3))
    result[date] = 0
    result[date] = ts[4]
    assert_series_equal(result, ts)
def test_getitem_setitem_datetime_tz_dateutil(self):
    """Same cross-timezone get/set checks as the pytz variant, using dateutil."""
    tm._skip_if_no_dateutil()
    from dateutil.tz import tzutc
    from pandas.tslib import _dateutil_gettz as gettz
    tz = lambda x: tzutc() if x == 'UTC' else gettz(x)  # handle special case for utc in dateutil
    from pandas import date_range
    N = 50
    # testing with timezone, GH #2785
    rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
    ts = Series(np.random.randn(N), index=rng)
    # also test Timestamp tz handling, GH #2789
    # UTC string label addressing the same instant as ts[4]
    result = ts.copy()
    result["1990-01-01 09:00:00+00:00"] = 0
    result["1990-01-01 09:00:00+00:00"] = ts[4]
    assert_series_equal(result, ts)
    result = ts.copy()
    result["1990-01-01 03:00:00-06:00"] = 0
    result["1990-01-01 03:00:00-06:00"] = ts[4]
    assert_series_equal(result, ts)
    # repeat with datetimes
    result = ts.copy()
    result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0
    result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]
    assert_series_equal(result, ts)
    result = ts.copy()
    result[datetime(1990, 1, 1, 3, tzinfo=tz('US/Central'))] = 0
    result[datetime(1990, 1, 1, 3, tzinfo=tz('US/Central'))] = ts[4]
    assert_series_equal(result, ts)
def test_getitem_setitem_periodindex(self):
    """String and Period label get/set on a PeriodIndex-backed Series."""
    from pandas import period_range
    N = 50
    rng = period_range('1/1/1990', periods=N, freq='H')
    ts = Series(np.random.randn(N), index=rng)
    # scalar string label
    result = ts["1990-01-01 04"]
    expected = ts[4]
    self.assertEqual(result, expected)
    # set via string label (zero, then restore, must round-trip)
    result = ts.copy()
    result["1990-01-01 04"] = 0
    result["1990-01-01 04"] = ts[4]
    assert_series_equal(result, ts)
    # string slice (inclusive)
    result = ts["1990-01-01 04":"1990-01-01 07"]
    expected = ts[4:8]
    assert_series_equal(result, expected)
    result = ts.copy()
    result["1990-01-01 04":"1990-01-01 07"] = 0
    result["1990-01-01 04":"1990-01-01 07"] = ts[4:8]
    assert_series_equal(result, ts)
    # boolean mask from string comparisons matches the slice
    lb = "1990-01-01 04"
    rb = "1990-01-01 07"
    result = ts[(ts.index >= lb) & (ts.index <= rb)]
    expected = ts[4:8]
    assert_series_equal(result, expected)
    # GH 2782
    result = ts[ts.index[4]]
    expected = ts[4]
    self.assertEqual(result, expected)
    result = ts[ts.index[4:8]]
    expected = ts[4:8]
    assert_series_equal(result, expected)
    result = ts.copy()
    result[ts.index[4:8]] = 0
    result[4:8] = ts[4:8]
    assert_series_equal(result, ts)
def test_asof_periodindex(self):
    """asof semantics on a PeriodIndex mirror the DatetimeIndex behavior."""
    from pandas import period_range, PeriodIndex
    # array or list or dates
    N = 50
    rng = period_range('1/1/1990', periods=N, freq='H')
    ts = Series(np.random.randn(N), index=rng)
    ts[15:30] = np.nan
    dates = date_range('1/1/1990', periods=N * 3, freq='37min')
    result = ts.asof(dates)
    self.assertTrue(notnull(result).all())
    lb = ts.index[14]
    ub = ts.index[30]
    # same query with a plain list of dates
    result = ts.asof(list(dates))
    self.assertTrue(notnull(result).all())
    lb = ts.index[14]
    ub = ts.index[30]
    # convert the result index back to periods to compare with lb/ub
    pix = PeriodIndex(result.index.values, freq='H')
    mask = (pix >= lb) & (pix < ub)
    rs = result[mask]
    # inside the NaN stretch, asof keeps returning the last valid value
    self.assertTrue((rs == ts[lb]).all())
    ts[5:10] = np.NaN
    ts[15:20] = np.NaN
    val1 = ts.asof(ts.index[7])
    val2 = ts.asof(ts.index[19])
    self.assertEqual(val1, ts[4])
    self.assertEqual(val2, ts[14])
    # accepts strings
    val1 = ts.asof(str(ts.index[7]))
    self.assertEqual(val1, ts[4])
    # in there
    self.assertEqual(ts.asof(ts.index[3]), ts[3])
    # no as of value
    d = ts.index[0].to_timestamp() - datetools.bday
    self.assertTrue(np.isnan(ts.asof(d)))
def test_asof_more(self):
    """asof with out-of-order query labels and a leading-NaN series."""
    from pandas import date_range
    leading_nan = Series([nan, nan, 1, 2, nan, nan, 3, 4, 5],
                         index=date_range('1/1/2000', periods=9))
    query = leading_nan.index[[4, 5, 6, 2, 1]]
    # positions 4/5 fall back to 2, position 1 precedes any valid value
    assert_series_equal(leading_nan.asof(query),
                        Series([2, 2, 3, 1, np.nan], index=query))
    dense = Series([1.5, 2.5, 1, 2, nan, nan, 3, 4, 5],
                   index=date_range('1/1/2000', periods=9))
    # first label is valid, so asof returns it directly
    self.assertEqual(dense.asof(dense.index[0]), dense[0])
def test_cast_on_putmask(self):
    """Boolean-mask assignment keeps int64 when no upcast is needed (GH 2746)."""
    series = Series([1, 2], index=[1, 2], dtype='int64')
    series[[True, False]] = Series([0], index=[1], dtype='int64')
    expected = Series([0, 2], index=[1, 2], dtype='int64')
    assert_series_equal(series, expected)
def test_type_promote_putmask(self):
    """Masked assignment that changes dtype must stay aligned (GH 8387)."""
    ts = Series(np.random.randn(100), index=np.arange(100, 0, -1)).round(5)
    left, mask = ts.copy(), ts > 0
    # replace the positive entries with their string form
    left[mask] = ts[mask].copy().map(str)
    assert_series_equal(left, ts.map(lambda t: str(t) if t > 0 else t))
    numeric = Series([0, 1, 2, 0])
    positive = numeric > 0
    numeric[positive] = numeric[positive].map(str)
    assert_series_equal(numeric, Series([0, '1', '2', 0]))
    mixed = Series([0, 'foo', 'bar', 0])
    string_mask = Series([False, True, True, False])
    # self-assignment through a mask is a no-op but must not misalign
    mixed[string_mask] = mixed[string_mask]
    assert_series_equal(mixed, Series([0, 'foo', 'bar', 0]))
def test_astype_cast_nan_int(self):
    """Casting a float Series containing NaN to int64 must raise."""
    with_nan = Series([1.0, 2.0, 3.0, np.nan])
    self.assertRaises(ValueError, with_nan.astype, np.int64)
def test_astype_cast_object_int(self):
    """Non-numeric object strings reject int casts; numeric strings convert."""
    non_numeric = Series(["car", "house", "tree", "1"])
    for int_dtype in (int, np.int64, np.int8):
        self.assertRaises(ValueError, non_numeric.astype, int_dtype)
    numeric_strings = Series(['1', '2', '3', '4'], dtype=object)
    converted = numeric_strings.astype(int)
    self.assert_numpy_array_equal(converted, np.arange(1, 5))
def test_astype_datetimes(self):
    """astype('O') on datetime64 data yields object dtype, NaT included."""
    import pandas.tslib as tslib
    # all-NaT datetime64 series
    s = Series(tslib.iNaT, dtype='M8[ns]', index=lrange(5))
    s = s.astype('O')
    self.assertEqual(s.dtype, np.object_)
    s = Series([datetime(2001, 1, 2, 0, 0)])
    s = s.astype('O')
    self.assertEqual(s.dtype, np.object_)
    # assigning NaN into a datetime series keeps M8[ns] dtype
    s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
    s[1] = np.nan
    self.assertEqual(s.dtype, 'M8[ns]')
    s = s.astype('O')
    self.assertEqual(s.dtype, np.object_)
def test_astype_str(self):
    """astype(str/unicode) matches mapping text_type element-wise (GH 4405)."""
    digits = string.digits
    # long strings of varying lengths, with and without NaN/float mixed in
    s1 = Series([digits * 10, tm.rands(63), tm.rands(64),
                 tm.rands(1000)])
    s2 = Series([digits * 10, tm.rands(63), tm.rands(64), nan, 1.0])
    types = (compat.text_type, np.str_)
    for typ in types:
        for s in (s1, s2):
            res = s.astype(typ)
            expec = s.map(compat.text_type)
            assert_series_equal(res, expec)
    # GH9757
    # Test str and unicode on python 2.x and just str on python 3.x
    for tt in set([str, compat.text_type]):
        # naive Timestamp renders without time when it is midnight
        ts = Series([Timestamp('2010-01-04 00:00:00')])
        s = ts.astype(tt)
        expected = Series([tt('2010-01-04')])
        assert_series_equal(s, expected)
        # tz-aware Timestamp keeps the offset in its string form
        ts = Series([Timestamp('2010-01-04 00:00:00', tz='US/Eastern')])
        s = ts.astype(tt)
        expected = Series([tt('2010-01-04 00:00:00-05:00')])
        assert_series_equal(s, expected)
        td = Series([Timedelta(1, unit='d')])
        s = td.astype(tt)
        expected = Series([tt('1 days 00:00:00.000000000')])
        assert_series_equal(s, expected)
def test_astype_unicode(self):
    """astype('unicode') equals mapping text_type, incl. non-ASCII (GH 7758)."""
    # a bit of magic is required to set default encoding encoding to utf-8
    digits = string.digits
    test_series = [
        Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
        Series([u('データーサイエンス、お前はもう死んでいる')]),
    ]
    former_encoding = None
    if not compat.PY3:
        # in python we can force the default encoding
        # for this test
        former_encoding = sys.getdefaultencoding()
        reload(sys)
        sys.setdefaultencoding("utf-8")
    if sys.getdefaultencoding() == "utf-8":
        # raw utf-8 bytes only decode correctly under the forced encoding
        test_series.append(Series([u('野菜食べないとやばい').encode("utf-8")]))
    for s in test_series:
        res = s.astype("unicode")
        expec = s.map(compat.text_type)
        assert_series_equal(res, expec)
    # restore the former encoding
    if former_encoding is not None and former_encoding != "utf-8":
        reload(sys)
        sys.setdefaultencoding(former_encoding)
def test_map(self):
    """map accepts Series, dict, and callables; categorical handling (GH 10324)."""
    index, data = tm.getMixedTypeDict()
    source = Series(data['B'], index=data['C'])
    target = Series(data['C'][:4], index=data['D'][:4])
    # mapping through a Series: lookup target's values in source's index
    merged = target.map(source)
    for k, v in compat.iteritems(merged):
        self.assertEqual(v, source[target[k]])
    # input could be a dict
    merged = target.map(source.to_dict())
    for k, v in compat.iteritems(merged):
        self.assertEqual(v, source[target[k]])
    # function
    result = self.ts.map(lambda x: x * 2)
    self.assert_numpy_array_equal(result, self.ts * 2)
    # GH 10324
    a = Series([1, 2, 3, 4])
    b = Series(["even", "odd", "even", "odd"], dtype="category")
    c = Series(["even", "odd", "even", "odd"])
    # mapping through a categorical Series preserves the category dtype
    exp = Series(["odd", "even", "odd", np.nan], dtype="category")
    self.assert_series_equal(a.map(b), exp)
    exp = Series(["odd", "even", "odd", np.nan])
    self.assert_series_equal(a.map(c), exp)
    # CategoricalIndex on the lookup table behaves like a plain Index
    a = Series(['a', 'b', 'c', 'd'])
    b = Series([1, 2, 3, 4], index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
    c = Series([1, 2, 3, 4], index=Index(['b', 'c', 'd', 'e']))
    exp = Series([np.nan, 1, 2, 3])
    self.assert_series_equal(a.map(b), exp)
    exp = Series([np.nan, 1, 2, 3])
    self.assert_series_equal(a.map(c), exp)
    # categorical values + CategoricalIndex together
    a = Series(['a', 'b', 'c', 'd'])
    b = Series(['B', 'C', 'D', 'E'], dtype='category',
               index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
    c = Series(['B', 'C', 'D', 'E'], index=Index(['b', 'c', 'd', 'e']))
    exp = Series([np.nan, 'B', 'C', 'D'], dtype='category')
    self.assert_series_equal(a.map(b), exp)
    exp = Series([np.nan, 'B', 'C', 'D'])
    self.assert_series_equal(a.map(c), exp)
def test_map_compat(self):
    """Mapping booleans through a dict of bool keys (related GH 8024)."""
    flags = Series([True, True, False], index=[1, 2, 3])
    mapped = flags.map({True: 'foo', False: 'bar'})
    expected = Series(['foo', 'foo', 'bar'], index=[1, 2, 3])
    assert_series_equal(mapped, expected)
def test_map_int(self):
    """Mapping float values through an int table keeps float dtype; misses -> NaN."""
    left = Series({'a': 1., 'b': 2., 'c': 3., 'd': 4})
    right = Series({1: 11, 2: 22, 3: 33})
    self.assertEqual(left.dtype, np.float_)
    self.assertTrue(issubclass(right.dtype.type, np.integer))
    mapped = left.map(right)
    self.assertEqual(mapped.dtype, np.float_)
    # 'd' -> 4, which is absent from the lookup table, hence NaN
    self.assertTrue(isnull(mapped['d']))
    self.assertTrue(not isnull(mapped['c']))
def test_map_type_inference(self):
    """map results that are 0-d integer arrays should infer an integer dtype."""
    source = Series(lrange(3))
    mapped = source.map(lambda value: np.where(value == 0, 0, 1))
    self.assertTrue(issubclass(mapped.dtype.type, np.integer))
def test_divide_decimal(self):
    """True and floor division by a Decimal keep Decimal values (GH 9787)."""
    from decimal import Decimal
    expected = Series([Decimal(5)])
    true_div = Series([Decimal(10)]) / Decimal(2)
    tm.assert_series_equal(expected, true_div)
    floor_div = Series([Decimal(10)]) // Decimal(2)
    tm.assert_series_equal(expected, floor_div)
def test_map_decimal(self):
    """Mapping to Decimal produces object dtype with Decimal elements."""
    from decimal import Decimal
    mapped = self.series.map(lambda value: Decimal(str(value)))
    self.assertEqual(mapped.dtype, np.object_)
    tm.assertIsInstance(mapped[0], Decimal)
def test_map_na_exclusion(self):
    """na_action='ignore' skips NaN entries during map."""
    source = Series([1.5, np.nan, 3, np.nan, 5])
    doubled = source.map(lambda value: value * 2, na_action='ignore')
    assert_series_equal(doubled, source * 2)
def test_map_dict_with_tuple_keys(self):
    """Tuple-keyed dicts passed to map must not be coerced to a MultiIndex.

    Due to new MultiIndex-ing behaviour in v0.14.0, dicts with tuple keys
    passed to map were being converted to a multi-index, preventing tuple
    values from being mapped properly.
    """
    df = pd.DataFrame({'a': [(1,), (2,), (3, 4), (5, 6)]})
    label_mappings = {(1,): 'A', (2,): 'B', (3, 4): 'A', (5, 6): 'B'}
    df['labels'] = df['a'].map(label_mappings)
    df['expected_labels'] = pd.Series(['A', 'B', 'A', 'B'], index=df.index)
    # All labels should be filled now
    tm.assert_series_equal(df['labels'], df['expected_labels'],
                           check_names=False)
def test_apply(self):
    """apply: element-wise callables, Series-returning callables, empty input."""
    assert_series_equal(self.ts.apply(np.sqrt), np.sqrt(self.ts))
    # elementwise-apply
    import math
    assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts))
    # how to handle Series result, #2316
    # a Series-returning function expands into a DataFrame
    result = self.ts.apply(lambda x: Series([x, x ** 2],
                                            index=['x', 'x^2']))
    expected = DataFrame({'x': self.ts, 'x^2': self.ts ** 2})
    tm.assert_frame_equal(result, expected)
    # empty series
    s = Series(dtype=object, name='foo', index=pd.Index([], name='bar'))
    rs = s.apply(lambda x: x)
    tm.assert_series_equal(s, rs)
    # check all metadata (GH 9322)
    self.assertIsNot(s, rs)
    self.assertIs(s.index, rs.index)
    self.assertEqual(s.dtype, rs.dtype)
    self.assertEqual(s.name, rs.name)
    # index but no data
    s = Series(index=[1, 2, 3])
    rs = s.apply(lambda x: x)
    tm.assert_series_equal(s, rs)
def test_apply_same_length_inference_bug(self):
    """apply of a tuple-returning function must match map's output."""
    pair_up = lambda value: (value, value + 1)
    # the two-element case once mis-inferred the result shape
    series = Series([1, 2])
    assert_series_equal(series.apply(pair_up), series.map(pair_up))
    series = Series([1, 2, 3])
    assert_series_equal(series.apply(pair_up), series.map(pair_up))
def test_apply_dont_convert_dtype(self):
    """convert_dtype=False keeps apply's result as object dtype."""
    series = Series(np.random.randn(10))
    clipped = series.apply(lambda value: value if value > 0 else np.nan,
                           convert_dtype=False)
    self.assertEqual(clipped.dtype, object)
def test_convert_objects(self):
    """Deprecated convert_objects: numeric/date coercion paths (FutureWarning)."""
    s = Series([1., 2, 3], index=['a', 'b', 'c'])
    with tm.assert_produces_warning(FutureWarning):
        result = s.convert_objects(convert_dates=False, convert_numeric=True)
    assert_series_equal(result, s)
    # force numeric conversion
    r = s.copy().astype('O')
    r['a'] = '1'
    with tm.assert_produces_warning(FutureWarning):
        result = r.convert_objects(convert_dates=False, convert_numeric=True)
    assert_series_equal(result, s)
    r = s.copy().astype('O')
    r['a'] = '1.'
    with tm.assert_produces_warning(FutureWarning):
        result = r.convert_objects(convert_dates=False, convert_numeric=True)
    assert_series_equal(result, s)
    # unparseable entries become NaN under numeric conversion
    r = s.copy().astype('O')
    r['a'] = 'garbled'
    expected = s.copy()
    expected['a'] = np.nan
    with tm.assert_produces_warning(FutureWarning):
        result = r.convert_objects(convert_dates=False, convert_numeric=True)
    assert_series_equal(result, expected)
    # GH 4119, not converting a mixed type (e.g.floats and object)
    s = Series([1, 'na', 3, 4])
    with tm.assert_produces_warning(FutureWarning):
        result = s.convert_objects(convert_numeric=True)
    expected = Series([1, np.nan, 3, 4])
    assert_series_equal(result, expected)
    s = Series([1, '', 3, 4])
    with tm.assert_produces_warning(FutureWarning):
        result = s.convert_objects(convert_numeric=True)
    expected = Series([1, np.nan, 3, 4])
    assert_series_equal(result, expected)
    # dates
    s = Series(
        [datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(2001, 1, 3, 0, 0)])
    s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(
        2001, 1, 3, 0, 0), 'foo', 1.0, 1, Timestamp('20010104'), '20010105'], dtype='O')
    with tm.assert_produces_warning(FutureWarning):
        result = s.convert_objects(convert_dates=True, convert_numeric=False)
    expected = Series(
        [Timestamp('20010101'), Timestamp('20010102'), Timestamp('20010103')], dtype='M8[ns]')
    assert_series_equal(result, expected)
    # 'coerce' forces conversion; numeric flag must not interfere
    with tm.assert_produces_warning(FutureWarning):
        result = s.convert_objects(convert_dates='coerce',
                                   convert_numeric=False)
    with tm.assert_produces_warning(FutureWarning):
        result = s.convert_objects(convert_dates='coerce',
                                   convert_numeric=True)
    assert_series_equal(result, expected)
    # mixed garbage coerces to NaT where unparseable as dates
    expected = Series(
        [Timestamp(
            '20010101'), Timestamp('20010102'), Timestamp('20010103'),
            lib.NaT, lib.NaT, lib.NaT, Timestamp('20010104'), Timestamp('20010105')], dtype='M8[ns]')
    with tm.assert_produces_warning(FutureWarning):
        result = s2.convert_objects(convert_dates='coerce',
                                    convert_numeric=False)
    assert_series_equal(result, expected)
    with tm.assert_produces_warning(FutureWarning):
        result = s2.convert_objects(convert_dates='coerce',
                                    convert_numeric=True)
    assert_series_equal(result, expected)
    # preserver all-nans (if convert_dates='coerce')
    s = Series(['foo', 'bar', 1, 1.0], dtype='O')
    with tm.assert_produces_warning(FutureWarning):
        result = s.convert_objects(convert_dates='coerce',
                                   convert_numeric=False)
    assert_series_equal(result, s)
    # preserver if non-object
    s = Series([1], dtype='float32')
    with tm.assert_produces_warning(FutureWarning):
        result = s.convert_objects(convert_dates='coerce',
                                   convert_numeric=False)
    assert_series_equal(result, s)
    #r = s.copy()
    #r[0] = np.nan
    #result = r.convert_objects(convert_dates=True,convert_numeric=False)
    #self.assertEqual(result.dtype, 'M8[ns]')
    # dateutil parses some single letters into today's value as a date
    for x in 'abcdefghijklmnopqrstuvwxyz':
        s = Series([x])
        with tm.assert_produces_warning(FutureWarning):
            result = s.convert_objects(convert_dates='coerce')
        assert_series_equal(result, s)
        s = Series([x.upper()])
        with tm.assert_produces_warning(FutureWarning):
            result = s.convert_objects(convert_dates='coerce')
        assert_series_equal(result, s)
def test_convert_objects_preserve_bool(self):
    """Numeric conversion of mixed ints/bools coerces bools to ints."""
    mixed = Series([1, True, 3, 5], dtype=object)
    with tm.assert_produces_warning(FutureWarning):
        converted = mixed.convert_objects(convert_numeric=True)
    expected = Series([1, 1, 3, 5], dtype='i8')
    tm.assert_series_equal(converted, expected)
def test_convert_objects_preserve_all_bool(self):
    """An all-bool object Series keeps bool dtype under numeric conversion."""
    flags = Series([False, True, False, False], dtype=object)
    with tm.assert_produces_warning(FutureWarning):
        converted = flags.convert_objects(convert_numeric=True)
    expected = Series([False, True, False, False], dtype=bool)
    tm.assert_series_equal(converted, expected)
# GH 10265
def test_convert(self):
    """_convert: datetime/numeric/timedelta flags with and without coerce (GH 10265)."""
    # Tests: All to nans, coerce, true
    # Test coercion returns correct type
    s = Series(['a', 'b', 'c'])
    results = s._convert(datetime=True, coerce=True)
    expected = Series([lib.NaT] * 3)
    assert_series_equal(results, expected)
    results = s._convert(numeric=True, coerce=True)
    expected = Series([np.nan] * 3)
    assert_series_equal(results, expected)
    expected = Series([lib.NaT] * 3, dtype=np.dtype('m8[ns]'))
    results = s._convert(timedelta=True, coerce=True)
    assert_series_equal(results, expected)
    dt = datetime(2001, 1, 1, 0, 0)
    td = dt - datetime(2000, 1, 1, 0, 0)
    # Test coercion with mixed types
    s = Series(['a', '3.1415', dt, td])
    # each flag keeps only its own type; everything else coerces to NaT/NaN
    results = s._convert(datetime=True, coerce=True)
    expected = Series([lib.NaT, lib.NaT, dt, lib.NaT])
    assert_series_equal(results, expected)
    results = s._convert(numeric=True, coerce=True)
    expected = Series([nan, 3.1415, nan, nan])
    assert_series_equal(results, expected)
    results = s._convert(timedelta=True, coerce=True)
    expected = Series([lib.NaT, lib.NaT, lib.NaT, td],
                      dtype=np.dtype('m8[ns]'))
    assert_series_equal(results, expected)
    # Test standard conversion returns original
    results = s._convert(datetime=True)
    assert_series_equal(results, s)
    results = s._convert(numeric=True)
    expected = Series([nan, 3.1415, nan, nan])
    assert_series_equal(results, expected)
    results = s._convert(timedelta=True)
    assert_series_equal(results, s)
    # test pass-through and non-conversion when other types selected
    s = Series(['1.0','2.0','3.0'])
    results = s._convert(datetime=True, numeric=True, timedelta=True)
    expected = Series([1.0,2.0,3.0])
    assert_series_equal(results, expected)
    # positional: datetime=True, numeric=False, timedelta=True -> no-op here
    results = s._convert(True,False,True)
    assert_series_equal(results, s)
    s = Series([datetime(2001, 1, 1, 0, 0),datetime(2001, 1, 1, 0, 0)],
               dtype='O')
    results = s._convert(datetime=True, numeric=True, timedelta=True)
    expected = Series([datetime(2001, 1, 1, 0, 0),datetime(2001, 1, 1, 0, 0)])
    assert_series_equal(results, expected)
    results = s._convert(datetime=False,numeric=True,timedelta=True)
    assert_series_equal(results, s)
    td = datetime(2001, 1, 1, 0, 0) - datetime(2000, 1, 1, 0, 0)
    s = Series([td, td], dtype='O')
    results = s._convert(datetime=True, numeric=True, timedelta=True)
    expected = Series([td, td])
    assert_series_equal(results, expected)
    # timedelta flag off -> object dtype untouched
    results = s._convert(True,True,False)
    assert_series_equal(results, s)
    s = Series([1., 2, 3], index=['a', 'b', 'c'])
    result = s._convert(numeric=True)
    assert_series_equal(result, s)
    # force numeric conversion
    r = s.copy().astype('O')
    r['a'] = '1'
    result = r._convert(numeric=True)
    assert_series_equal(result, s)
    r = s.copy().astype('O')
    r['a'] = '1.'
    result = r._convert(numeric=True)
    assert_series_equal(result, s)
    # unparseable entry becomes NaN
    r = s.copy().astype('O')
    r['a'] = 'garbled'
    result = r._convert(numeric=True)
    expected = s.copy()
    expected['a'] = nan
    assert_series_equal(result, expected)
    # GH 4119, not converting a mixed type (e.g.floats and object)
    s = Series([1, 'na', 3, 4])
    result = s._convert(datetime=True, numeric=True)
    expected = Series([1, nan, 3, 4])
    assert_series_equal(result, expected)
    s = Series([1, '', 3, 4])
    result = s._convert(datetime=True, numeric=True)
    assert_series_equal(result, expected)
    # dates
    s = Series(
        [datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(2001, 1, 3, 0, 0)])
    s2 = Series([datetime(2001, 1, 1, 0, 0), datetime(2001, 1, 2, 0, 0), datetime(
        2001, 1, 3, 0, 0), 'foo', 1.0, 1, Timestamp('20010104'), '20010105'], dtype='O')
    result = s._convert(datetime=True)
    expected = Series(
        [Timestamp('20010101'), Timestamp('20010102'), Timestamp('20010103')], dtype='M8[ns]')
    assert_series_equal(result, expected)
    result = s._convert(datetime=True, coerce=True)
    assert_series_equal(result, expected)
    # mixed garbage coerces to NaT where unparseable as dates
    expected = Series(
        [Timestamp(
            '20010101'), Timestamp('20010102'), Timestamp('20010103'),
            lib.NaT, lib.NaT, lib.NaT, Timestamp('20010104'), Timestamp('20010105')], dtype='M8[ns]')
    result = s2._convert(datetime=True,
                         numeric=False,
                         timedelta=False,
                         coerce=True)
    assert_series_equal(result, expected)
    result = s2._convert(datetime=True, coerce=True)
    assert_series_equal(result, expected)
    # nothing parseable as a date -> all NaT under coerce
    s = Series(['foo', 'bar', 1, 1.0], dtype='O')
    result = s._convert(datetime=True, coerce=True)
    expected = Series([lib.NaT]*4)
    assert_series_equal(result, expected)
    # preserver if non-object
    s = Series([1], dtype='float32')
    result = s._convert(datetime=True, coerce=True)
    assert_series_equal(result, s)
    #r = s.copy()
    #r[0] = np.nan
    #result = r._convert(convert_dates=True,convert_numeric=False)
    #self.assertEqual(result.dtype, 'M8[ns]')
    # dateutil parses some single letters into today's value as a date
    expected = Series([lib.NaT])
    for x in 'abcdefghijklmnopqrstuvwxyz':
        s = Series([x])
        result = s._convert(datetime=True, coerce=True)
        assert_series_equal(result, expected)
        s = Series([x.upper()])
        result = s._convert(datetime=True, coerce=True)
        assert_series_equal(result, expected)
def test_convert_no_arg_error(self):
    """Calling _convert with no conversion flags raises ValueError."""
    series = Series(['1.0', '2'])
    self.assertRaises(ValueError, series._convert)
def test_convert_preserve_bool(self):
    """_convert coerces bools mixed with ints down to an int dtype."""
    mixed = Series([1, True, 3, 5], dtype=object)
    converted = mixed._convert(datetime=True, numeric=True)
    expected = Series([1, 1, 3, 5], dtype='i8')
    tm.assert_series_equal(converted, expected)
def test_convert_preserve_all_bool(self):
    """_convert keeps bool dtype when every element is a bool."""
    flags = Series([False, True, False, False], dtype=object)
    converted = flags._convert(datetime=True, numeric=True)
    expected = Series([False, True, False, False], dtype=bool)
    tm.assert_series_equal(converted, expected)
def test_apply_args(self):
    """Positional args= are forwarded to the applied function."""
    series = Series(['foo,bar'])
    split = series.apply(str.split, args=(',',))
    self.assertEqual(split[0], ['foo', 'bar'])
    tm.assertIsInstance(split[0], list)
def test_align(self):
    """align across all join types, with and without a fill_value."""
    def _check_align(a, b, how='left', fill=None):
        # align, then rebuild the expected result via join + reindex
        aa, ab = a.align(b, join=how, fill_value=fill)
        join_index = a.index.join(b.index, how=how)
        if fill is not None:
            # labels outside the join must carry the fill value
            diff_a = aa.index.difference(join_index)
            diff_b = ab.index.difference(join_index)
            if len(diff_a) > 0:
                self.assertTrue((aa.reindex(diff_a) == fill).all())
            if len(diff_b) > 0:
                self.assertTrue((ab.reindex(diff_b) == fill).all())
        ea = a.reindex(join_index)
        eb = b.reindex(join_index)
        if fill is not None:
            ea = ea.fillna(fill)
            eb = eb.fillna(fill)
        assert_series_equal(aa, ea)
        assert_series_equal(ab, eb)
        # name metadata survives alignment
        self.assertEqual(aa.name, 'ts')
        self.assertEqual(ea.name, 'ts')
        self.assertEqual(ab.name, 'ts')
        self.assertEqual(eb.name, 'ts')
    for kind in JOIN_TYPES:
        _check_align(self.ts[2:], self.ts[:-5], how=kind)
        _check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)
        # empty left
        _check_align(self.ts[:0], self.ts[:-5], how=kind)
        _check_align(self.ts[:0], self.ts[:-5], how=kind, fill=-1)
        # empty right
        _check_align(self.ts[:-5], self.ts[:0], how=kind)
        _check_align(self.ts[:-5], self.ts[:0], how=kind, fill=-1)
        # both empty
        _check_align(self.ts[:0], self.ts[:0], how=kind)
        _check_align(self.ts[:0], self.ts[:0], how=kind, fill=-1)
def test_align_fill_method(self):
    """align with method='pad'/'bfill' and a limit, for all join types."""
    def _check_align(a, b, how='left', method='pad', limit=None):
        # align with filling, then rebuild expectation via reindex + fillna
        aa, ab = a.align(b, join=how, method=method, limit=limit)
        join_index = a.index.join(b.index, how=how)
        ea = a.reindex(join_index)
        eb = b.reindex(join_index)
        ea = ea.fillna(method=method, limit=limit)
        eb = eb.fillna(method=method, limit=limit)
        assert_series_equal(aa, ea)
        assert_series_equal(ab, eb)
    for kind in JOIN_TYPES:
        for meth in ['pad', 'bfill']:
            _check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)
            _check_align(self.ts[2:], self.ts[:-5], how=kind,
                         method=meth, limit=1)
            # empty left
            _check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth)
            _check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,
                         limit=1)
            # empty right
            _check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)
            _check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,
                         limit=1)
            # both empty
            _check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)
            _check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,
                         limit=1)
def test_align_nocopy(self):
    """copy=False aligned views share data with their source; default copies."""
    b = self.ts[:5].copy()
    # do copy: mutating the aligned result must not touch the source
    a = self.ts.copy()
    ra, _ = a.align(b, join='left')
    ra[:5] = 5
    self.assertFalse((a[:5] == 5).any())
    # do not copy: mutation flows back to the source
    a = self.ts.copy()
    ra, _ = a.align(b, join='left', copy=False)
    ra[:5] = 5
    self.assertTrue((a[:5] == 5).all())
    # do copy (right-hand result this time)
    a = self.ts.copy()
    b = self.ts[:5].copy()
    _, rb = a.align(b, join='right')
    rb[:3] = 5
    self.assertFalse((b[:3] == 5).any())
    # do not copy
    a = self.ts.copy()
    b = self.ts[:5].copy()
    _, rb = a.align(b, join='right', copy=False)
    rb[:2] = 5
    self.assertTrue((b[:2] == 5).all())
def test_align_sameindex(self):
    """Aligning a Series with itself (copy=False) reuses the same index object."""
    a, b = self.ts.align(self.ts, copy=False)
    self.assertIs(a.index, self.ts.index)
    self.assertIs(b.index, self.ts.index)
    # copy=True index-identity behavior left unasserted intentionally:
    # a, b = self.ts.align(self.ts, copy=True)
    # self.assertIsNot(a.index, self.ts.index)
    # self.assertIsNot(b.index, self.ts.index)
def test_align_multiindex(self):
    """Aligning a MultiIndex Series against a flat level index (GH 10665)."""
    midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
                                      names=('a', 'b', 'c'))
    idx = pd.Index(range(2), name='b')
    s1 = pd.Series(np.arange(12,dtype='int64'), index=midx)
    s2 = pd.Series(np.arange(2,dtype='int64'), index=idx)
    # these must be the same results (but flipped)
    res1l, res1r = s1.align(s2, join='left')
    res2l, res2r = s2.align(s1, join='right')
    # left join on s1: s1 unchanged, s2 broadcast over matching 'b' labels
    expl = s1
    tm.assert_series_equal(expl, res1l)
    tm.assert_series_equal(expl, res2r)
    expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
    tm.assert_series_equal(expr, res1r)
    tm.assert_series_equal(expr, res2l)
    res1l, res1r = s1.align(s2, join='right')
    res2l, res2r = s2.align(s1, join='left')
    # right join restricts 'b' to s2's labels {0, 1}
    exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
                                         names=('a', 'b', 'c'))
    expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
    tm.assert_series_equal(expl, res1l)
    tm.assert_series_equal(expl, res2r)
    expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)
    tm.assert_series_equal(expr, res1r)
    tm.assert_series_equal(expr, res2l)
def test_reindex(self):
    """reindex basics: identity, subsets, disjoint labels, non-contiguous, no-arg copy."""
    identity = self.series.reindex(self.series.index)
    # __array_interface__ is not defined for older numpies
    # and on some pythons
    try:
        self.assertTrue(np.may_share_memory(self.series.index, identity.index))
    except (AttributeError):
        pass
    self.assertTrue(identity.index.is_(self.series.index))
    self.assertTrue(identity.index.identical(self.series.index))
    subIndex = self.series.index[10:20]
    subSeries = self.series.reindex(subIndex)
    for idx, val in compat.iteritems(subSeries):
        self.assertEqual(val, self.series[idx])
    subIndex2 = self.ts.index[10:20]
    subTS = self.ts.reindex(subIndex2)
    for idx, val in compat.iteritems(subTS):
        self.assertEqual(val, self.ts[idx])
    # reindexing a datetime-indexed series onto string labels -> all NaN
    stuffSeries = self.ts.reindex(subIndex)
    self.assertTrue(np.isnan(stuffSeries).all())
    # This is extremely important for the Cython code to not screw up
    nonContigIndex = self.ts.index[::2]
    subNonContig = self.ts.reindex(nonContigIndex)
    for idx, val in compat.iteritems(subNonContig):
        self.assertEqual(val, self.ts[idx])
    # return a copy the same index here
    result = self.ts.reindex()
    self.assertFalse((result is self.ts))
def test_reindex_nan(self):
    """reindex handles NaN labels; NaN positions map like iloc lookups."""
    ts = Series([2, 3, 5, 7], index=[1, 4, nan, 8])
    i, j = [nan, 1, nan, 8, 4, nan], [2, 0, 2, 3, 1, 2]
    assert_series_equal(ts.reindex(i), ts.iloc[j])
    ts.index = ts.index.astype('object')
    # reindex coerces index.dtype to float, loc/iloc doesn't
    assert_series_equal(ts.reindex(i), ts.iloc[j], check_index_type=False)
def test_reindex_corner(self):
    """Corner cases: pad an empty series, reindex with a plain list, bad method.

    The original body called ``self.empty.reindex(..., method='pad')`` twice,
    binding the first result to an unused ``reindexed_dep`` local; the
    duplicate call and dead variable are removed.
    """
    # corner case: pad empty series (must not raise)
    self.empty.reindex(self.ts.index, method='pad')
    # pass non-Index (a plain list of labels)
    reindexed = self.ts.reindex(list(self.ts.index))
    assert_series_equal(self.ts, reindexed)
    # bad fill method should raise
    ts = self.ts[::2]
    self.assertRaises(Exception, ts.reindex, self.ts.index, method='foo')
def test_reindex_pad(self):
    """Forward-fill reindexing: 'pad'/'ffill' aliases, downcasting, bool dtype."""
    s = Series(np.arange(10),dtype='int64')
    s2 = s[::2]
    reindexed = s2.reindex(s.index, method='pad')
    reindexed2 = s2.reindex(s.index, method='ffill')
    # 'pad' and 'ffill' are aliases
    assert_series_equal(reindexed, reindexed2)
    expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10))
    assert_series_equal(reindexed, expected)
    # GH4604
    s = Series([1,2,3,4,5], index=['a', 'b', 'c', 'd', 'e'])
    new_index = ['a','g','c','f']
    expected = Series([1,1,3,3],index=new_index)
    # this changes dtype because the ffill happens after
    result = s.reindex(new_index).ffill()
    assert_series_equal(result, expected.astype('float64'))
    result = s.reindex(new_index).ffill(downcast='infer')
    assert_series_equal(result, expected)
    expected = Series([1, 5, 3, 5], index=new_index)
    result = s.reindex(new_index, method='ffill')
    assert_series_equal(result, expected)
    # inferrence of new dtype
    s = Series([True,False,False,True],index=list('abcd'))
    new_index='agc'
    result = s.reindex(list(new_index)).ffill()
    expected = Series([True,True,False],index=list(new_index))
    assert_series_equal(result, expected)
    # GH4618 shifted series downcasting
    s = Series(False,index=lrange(0,5))
    result = s.shift(1).fillna(method='bfill')
    expected = Series(False,index=lrange(0,5))
    assert_series_equal(result, expected)
def test_reindex_nearest(self):
    """reindex with method='nearest', with and without a tolerance."""
    s = Series(np.arange(10, dtype='int64'))
    target = [0.1, 0.9, 1.5, 2.0]
    actual = s.reindex(target, method='nearest')
    expected = Series(np.around(target).astype('int64'), target)
    assert_series_equal(expected, actual)
    actual = s.reindex_like(actual, method='nearest')
    assert_series_equal(expected, actual)
    actual = s.reindex_like(actual, method='nearest', tolerance=1)
    assert_series_equal(expected, actual)
    # labels farther than the tolerance become NaN (1.5 is 0.5 from both)
    actual = s.reindex(target, method='nearest', tolerance=0.2)
    expected = Series([0, 1, np.nan, 2], target)
    assert_series_equal(expected, actual)
def test_reindex_backfill(self):
pass
def test_reindex_int(self):
    """Int series upcasts to float when reindexing introduces NaNs, else stays int."""
    ts = self.ts[::2]
    int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index)
    # this should work fine
    reindexed_int = int_ts.reindex(self.ts.index)
    # if NaNs introduced
    self.assertEqual(reindexed_int.dtype, np.float_)
    # NO NaNs introduced
    reindexed_int = int_ts.reindex(int_ts.index[::2])
    self.assertEqual(reindexed_int.dtype, np.int_)
def test_reindex_bool(self):
    """Bool series upcasts to object when reindexing introduces NaNs, else stays bool."""
    # A series other than float, int, string, or object
    ts = self.ts[::2]
    bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
    # this should work fine
    reindexed_bool = bool_ts.reindex(self.ts.index)
    # if NaNs introduced
    self.assertEqual(reindexed_bool.dtype, np.object_)
    # NO NaNs introduced
    reindexed_bool = bool_ts.reindex(bool_ts.index[::2])
    self.assertEqual(reindexed_bool.dtype, np.bool_)
def test_reindex_bool_pad(self):
    """Padding a bool series leaves leading labels (with nothing to pad from) null."""
    # fail
    ts = self.ts[5:]
    bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
    filled_bool = bool_ts.reindex(self.ts.index, method='pad')
    self.assertTrue(isnull(filled_bool[:5]).all())
def test_reindex_like(self):
    """reindex_like matches reindex(other.index) and forwards the method arg (GH 7179)."""
    other = self.ts[::2]
    assert_series_equal(self.ts.reindex(other.index),
                        self.ts.reindex_like(other))
    # GH 7179
    day1 = datetime(2013,3,5)
    day2 = datetime(2013,5,5)
    day3 = datetime(2014,3,5)
    series1 = Series([5, None, None],[day1, day2, day3])
    series2 = Series([None, None], [day1, day3])
    result = series1.reindex_like(series2, method='pad')
    expected = Series([5, np.nan], index=[day1, day3])
    assert_series_equal(result, expected)
def test_reindex_fill_value(self):
    """fill_value on reindex across float/int/object/bool dtypes."""
    #------------------------------------------------------------
    # floats
    floats = Series([1., 2., 3.])
    result = floats.reindex([1, 2, 3])
    expected = Series([2., 3., np.nan], index=[1, 2, 3])
    assert_series_equal(result, expected)
    result = floats.reindex([1, 2, 3], fill_value=0)
    expected = Series([2., 3., 0], index=[1, 2, 3])
    assert_series_equal(result, expected)
    #------------------------------------------------------------
    # ints
    ints = Series([1, 2, 3])
    result = ints.reindex([1, 2, 3])
    expected = Series([2., 3., np.nan], index=[1, 2, 3])
    assert_series_equal(result, expected)
    # don't upcast
    result = ints.reindex([1, 2, 3], fill_value=0)
    expected = Series([2, 3, 0], index=[1, 2, 3])
    self.assertTrue(issubclass(result.dtype.type, np.integer))
    assert_series_equal(result, expected)
    #------------------------------------------------------------
    # objects
    objects = Series([1, 2, 3], dtype=object)
    result = objects.reindex([1, 2, 3])
    expected = Series([2, 3, np.nan], index=[1, 2, 3], dtype=object)
    assert_series_equal(result, expected)
    result = objects.reindex([1, 2, 3], fill_value='foo')
    expected = Series([2, 3, 'foo'], index=[1, 2, 3], dtype=object)
    assert_series_equal(result, expected)
    #------------------------------------------------------------
    # bools
    bools = Series([True, False, True])
    result = bools.reindex([1, 2, 3])
    expected = Series([False, True, np.nan], index=[1, 2, 3], dtype=object)
    assert_series_equal(result, expected)
    result = bools.reindex([1, 2, 3], fill_value=False)
    expected = Series([False, True, False], index=[1, 2, 3])
    assert_series_equal(result, expected)
def test_rename(self):
    """rename with a callable, a full dict, a partial dict, and an empty dict."""
    renamer = lambda x: x.strftime('%Y%m%d')
    renamed = self.ts.rename(renamer)
    self.assertEqual(renamed.index[0], renamer(self.ts.index[0]))
    # dict
    rename_dict = dict(zip(self.ts.index, renamed.index))
    renamed2 = self.ts.rename(rename_dict)
    assert_series_equal(renamed, renamed2)
    # partial dict: unmapped labels are kept as-is
    s = Series(np.arange(4), index=['a', 'b', 'c', 'd'], dtype='int64')
    renamed = s.rename({'b': 'foo', 'd': 'bar'})
    self.assert_numpy_array_equal(renamed.index, ['a', 'foo', 'c', 'bar'])
    # index with name: empty mapping must preserve the index name
    renamer = Series(
        np.arange(4), index=Index(['a', 'b', 'c', 'd'], name='name'), dtype='int64')
    renamed = renamer.rename({})
    self.assertEqual(renamed.index.name, renamer.index.name)
def test_rename_inplace(self):
    """rename(..., inplace=True) mutates the series' own index."""
    renamer = lambda x: x.strftime('%Y%m%d')
    expected = renamer(self.ts.index[0])
    self.ts.rename(renamer, inplace=True)
    self.assertEqual(self.ts.index[0], expected)
def test_preserveRefs(self):
    """Fancy-indexing returns a copy: mutating it must not touch the source."""
    seq = self.ts[[5, 10, 15]]
    seq[1] = np.NaN
    self.assertFalse(np.isnan(self.ts[10]))
def test_ne(self):
    """Index != scalar matches the negation of Index == scalar."""
    ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
    expected = [True, True, False, True, True]
    self.assertTrue(tm.equalContents(ts.index != 5, expected))
    self.assertTrue(tm.equalContents(~(ts.index == 5), expected))
def test_pad_nan(self):
    """fillna(method='pad', inplace=True) forward-fills; a leading NaN stays NaN."""
    x = Series([np.nan, 1., np.nan, 3., np.nan],
               ['z', 'a', 'b', 'c', 'd'], dtype=float)
    x.fillna(method='pad', inplace=True)
    expected = Series([np.nan, 1.0, 1.0, 3.0, 3.0],
                      ['z', 'a', 'b', 'c', 'd'], dtype=float)
    assert_series_equal(x[1:], expected[1:])
    self.assertTrue(np.isnan(x[0]), np.isnan(expected[0]))
def test_unstack(self):
    """unstack() of a MultiIndexed series, including NaN labels (GH5873).

    Removes leftover ``print(left)`` / ``print(right)`` debug statements
    that polluted test output; assertions are unchanged.
    """
    from numpy import nan
    from pandas.util.testing import assert_frame_equal
    index = MultiIndex(levels=[['bar', 'foo'], ['one', 'three', 'two']],
                       labels=[[1, 1, 0, 0], [0, 1, 0, 2]])
    s = Series(np.arange(4.), index=index)
    unstacked = s.unstack()
    expected = DataFrame([[2., nan, 3.], [0., 1., nan]],
                         index=['bar', 'foo'],
                         columns=['one', 'three', 'two'])
    assert_frame_equal(unstacked, expected)
    # unstacking the first level is the transpose of the default
    unstacked = s.unstack(level=0)
    assert_frame_equal(unstacked, expected.T)
    index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
                       labels=[[0, 0, 0, 0, 0, 0],
                               [0, 1, 2, 0, 1, 2],
                               [0, 1, 0, 1, 0, 1]])
    s = Series(np.random.randn(6), index=index)
    exp_index = MultiIndex(levels=[['one', 'two', 'three'], [0, 1]],
                           labels=[[0, 1, 2, 0, 1, 2],
                                   [0, 1, 0, 1, 0, 1]])
    expected = DataFrame({'bar': s.values}, index=exp_index).sortlevel(0)
    unstacked = s.unstack(0)
    assert_frame_equal(unstacked, expected)
    # GH5873: NaN in the unstacked level becomes a NaN column label
    idx = pd.MultiIndex.from_arrays([[101, 102], [3.5, np.nan]])
    ts = pd.Series([1,2], index=idx)
    left = ts.unstack()
    right = DataFrame([[nan, 1], [2, nan]], index=[101, 102],
                      columns=[nan, 3.5])
    assert_frame_equal(left, right)
    idx = pd.MultiIndex.from_arrays([['cat', 'cat', 'cat', 'dog', 'dog'],
                                     ['a', 'a', 'b', 'a', 'b'], [1, 2, 1, 1, np.nan]])
    ts = pd.Series([1.0, 1.1, 1.2, 1.3, 1.4], index=idx)
    right = DataFrame([[1.0, 1.3], [1.1, nan], [nan, 1.4], [1.2, nan]],
                      columns=['cat', 'dog'])
    tpls = [('a', 1), ('a', 2), ('b', nan), ('b', 1)]
    right.index = pd.MultiIndex.from_tuples(tpls)
    assert_frame_equal(ts.unstack(level=0), right)
def test_sortlevel(self):
    """sortlevel on a MultiIndexed series, with and without sort_remaining."""
    mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))
    s = Series([1, 2], mi)
    backwards = s.iloc[[1, 0]]
    res = s.sortlevel('A')
    assert_series_equal(backwards, res)
    res = s.sortlevel(['A', 'B'])
    assert_series_equal(backwards, res)
    # A and B are ties; without sort_remaining, C is left alone
    res = s.sortlevel('A', sort_remaining=False)
    assert_series_equal(s, res)
    res = s.sortlevel(['A', 'B'], sort_remaining=False)
    assert_series_equal(s, res)
def test_head_tail(self):
    """head()/tail() default to the first/last five rows."""
    expected_head = self.series[:5]
    expected_tail = self.series[-5:]
    assert_series_equal(self.series.head(), expected_head)
    assert_series_equal(self.series.tail(), expected_tail)
def test_isin(self):
    """isin() is elementwise, case-sensitive membership."""
    values = ['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C']
    s = Series(values)
    result = s.isin(['A', 'C'])
    expected = Series([v in ('A', 'C') for v in values])
    assert_series_equal(result, expected)
def test_isin_with_string_scalar(self):
    """isin() rejects a bare string argument with TypeError (GH4763)."""
    # GH4763
    s = Series(['A', 'B', 'C', 'a', 'B', 'B', 'A', 'C'])
    with tm.assertRaises(TypeError):
        s.isin('a')
    with tm.assertRaises(TypeError):
        s = Series(['aaa', 'b', 'c'])
        s.isin('aaa')
def test_isin_with_i8(self):
    """isin() against datetime64/timedelta64 values in various forms (GH 5021)."""
    # GH 5021
    expected = Series([True,True,False,False,False])
    expected2 = Series([False,True,False,False,False])
    # datetime64[ns]
    s = Series(date_range('jan-01-2013','jan-05-2013'))
    result = s.isin(s[0:2])
    assert_series_equal(result, expected)
    result = s.isin(s[0:2].values)
    assert_series_equal(result, expected)
    # fails on dtype conversion in the first place
    result = s.isin(s[0:2].values.astype('datetime64[D]'))
    assert_series_equal(result, expected)
    result = s.isin([s[1]])
    assert_series_equal(result, expected2)
    result = s.isin([np.datetime64(s[1])])
    assert_series_equal(result, expected2)
    # timedelta64[ns]
    s = Series(pd.to_timedelta(lrange(5),unit='d'))
    result = s.isin(s[0:2])
    assert_series_equal(result, expected)
#------------------------------------------------------------------------------
# TimeSeries-specific
def test_cummethods_bool(self):
    """Cumulative methods on bool series, with and without NaNs (GH 6270)."""
    # GH 6270
    # looks like a buggy np.maximum.accumulate for numpy 1.6.1, py 3.2
    def cummin(x):
        return np.minimum.accumulate(x)
    def cummax(x):
        return np.maximum.accumulate(x)
    a = pd.Series([False, False, False, True, True, False, False])
    b = ~a
    c = pd.Series([False] * len(b))
    d = ~c
    methods = {'cumsum': np.cumsum, 'cumprod': np.cumprod,
               'cummin': cummin, 'cummax': cummax}
    # every method against every series: result matches numpy on raw values
    args = product((a, b, c, d), methods)
    for s, method in args:
        expected = Series(methods[method](s.values))
        result = getattr(s, method)()
        assert_series_equal(result, expected)
    # NaN propagates through each cumulative method
    e = pd.Series([False, True, nan, False])
    cse = pd.Series([0, 1, nan, 1], dtype=object)
    cpe = pd.Series([False, 0, nan, 0])
    cmin = pd.Series([False, False, nan, False])
    cmax = pd.Series([False, True, nan, True])
    expecteds = {'cumsum': cse, 'cumprod': cpe, 'cummin': cmin,
                 'cummax': cmax}
    for method in methods:
        res = getattr(e, method)()
        assert_series_equal(res, expecteds[method])
def test_replace(self):
    """Series.replace: lists, dicts, inplace, mixed dtypes, and error cases."""
    N = 100
    ser = Series(np.random.randn(N))
    ser[0:4] = np.nan
    ser[6:10] = 0
    # replace list with a single value
    ser.replace([np.nan], -1, inplace=True)
    exp = ser.fillna(-1)
    assert_series_equal(ser, exp)
    rs = ser.replace(0., np.nan)
    ser[ser == 0.] = np.nan
    assert_series_equal(rs, ser)
    # object series mixing floats and strings
    ser = Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
                 dtype=object)
    ser[:5] = np.nan
    ser[6:10] = 'foo'
    ser[20:30] = 'bar'
    # replace list with a single value
    rs = ser.replace([np.nan, 'foo', 'bar'], -1)
    self.assertTrue((rs[:5] == -1).all())
    self.assertTrue((rs[6:10] == -1).all())
    self.assertTrue((rs[20:30] == -1).all())
    self.assertTrue((isnull(ser[:5])).all())
    # replace with different values
    rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})
    self.assertTrue((rs[:5] == -1).all())
    self.assertTrue((rs[6:10] == -2).all())
    self.assertTrue((rs[20:30] == -3).all())
    self.assertTrue((isnull(ser[:5])).all())
    # replace with different values with 2 lists
    rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
    assert_series_equal(rs, rs2)
    # replace inplace
    ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
    self.assertTrue((ser[:5] == -1).all())
    self.assertTrue((ser[6:10] == -1).all())
    self.assertTrue((ser[20:30] == -1).all())
    ser = Series([np.nan, 0, np.inf])
    assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
    ser = Series([np.nan, 0, 'foo', 'bar', np.inf, None, lib.NaT])
    assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
    filled = ser.copy()
    filled[4] = 0
    assert_series_equal(ser.replace(np.inf, 0), filled)
    ser = Series(self.ts.index)
    assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
    # malformed: to_replace and value lists of different lengths
    self.assertRaises(ValueError, ser.replace, [1, 2, 3], [np.nan, 0])
    # make sure that we aren't just masking a TypeError because bools don't
    # implement indexing
    with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
        ser.replace([1, 2], [np.nan, 0])
    ser = Series([0, 1, 2, 3, 4])
    result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
    assert_series_equal(result, Series([4, 3, 2, 1, 0]))
    # API change from 0.12?
    # GH 5319: replace with no value forward-fills
    ser = Series([0, np.nan, 2, 3, 4])
    expected = ser.ffill()
    result = ser.replace([np.nan])
    assert_series_equal(result, expected)
    ser = Series([0, np.nan, 2, 3, 4])
    expected = ser.ffill()
    result = ser.replace(np.nan)
    assert_series_equal(result, expected)
    # GH 5797: replacing Timestamps
    ser = Series(date_range('20130101', periods=5))
    expected = ser.copy()
    expected.loc[2] = Timestamp('20120101')
    result = ser.replace({Timestamp('20130103'):
                          Timestamp('20120101')})
    assert_series_equal(result, expected)
    result = ser.replace(Timestamp('20130103'), Timestamp('20120101'))
    assert_series_equal(result, expected)
def test_replace_with_single_list(self):
    """replace with a single list (no value) forward-fills; bad method leaves data intact."""
    ser = Series([0, 1, 2, 3, 4])
    result = ser.replace([1,2,3])
    assert_series_equal(result, Series([0,0,0,0,4]))
    s = ser.copy()
    s.replace([1,2,3],inplace=True)
    assert_series_equal(s, Series([0,0,0,0,4]))
    # make sure things don't get corrupted when fillna call fails
    s = ser.copy()
    with tm.assertRaises(ValueError):
        s.replace([1,2,3],inplace=True,method='crash_cymbal')
    assert_series_equal(s, ser)
def test_replace_mixed_types(self):
    """replace upcasting rules: int stays int, float upcasts, mixed goes to object."""
    s = Series(np.arange(5),dtype='int64')
    def check_replace(to_rep, val, expected):
        # verify both the copy and the inplace path give the same result
        sc = s.copy()
        r = s.replace(to_rep, val)
        sc.replace(to_rep, val, inplace=True)
        assert_series_equal(expected, r)
        assert_series_equal(expected, sc)
    # should NOT upcast to float
    e = Series([0,1,2,3,4])
    tr, v = [3], [3.0]
    check_replace(tr, v, e)
    # MUST upcast to float
    e = Series([0,1,2,3.5,4])
    tr, v = [3], [3.5]
    check_replace(tr, v, e)
    # casts to object
    e = Series([0,1,2,3.5,'a'])
    tr, v = [3,4], [3.5,'a']
    check_replace(tr, v, e)
    # again casts to object
    e = Series([0,1,2,3.5,Timestamp('20130101')])
    tr, v = [3,4],[3.5,Timestamp('20130101')]
    check_replace(tr, v, e)
    # casts to float
    e = Series([0,1,2,3.5,1])
    tr, v = [3,4],[3.5,True]
    check_replace(tr, v, e)
    # test an object with dates + floats + integers + strings
    dr = date_range('1/1/2001', '1/10/2001',
                    freq='D').to_series().reset_index(drop=True)
    result = dr.astype(object).replace([dr[0],dr[1],dr[2]], [1.0,2,'a'])
    expected = Series([1.0,2,'a'] + dr[3:].tolist(),dtype=object)
    assert_series_equal(result, expected)
def test_replace_bool_with_string_no_op(self):
    """Replacing a value absent from a bool series is a no-op."""
    original = Series([True, False, True])
    replaced = original.replace('fun', 'in-the-sun')
    tm.assert_series_equal(original, replaced)
def test_replace_bool_with_string(self):
    """Replacing True with a string upcasts the bool series."""
    # nonexistent elements
    s = Series([True, False, True])
    result = s.replace(True, '2u')
    expected = Series(['2u', False, '2u'])
    tm.assert_series_equal(expected, result)
def test_replace_bool_with_bool(self):
    """Replacing a bool with a bool keeps the bool dtype."""
    s = Series([True, False, True])
    result = s.replace(True, False)
    expected = Series([False] * len(s))
    tm.assert_series_equal(expected, result)
def test_replace_with_dict_with_bool_keys(self):
    """A replace dict mixing bool and non-bool keys raises TypeError."""
    s = Series([True, False, True])
    with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):
        s.replace({'asdf': 'asdb', True: 'yes'})
def test_asfreq(self):
    """asfreq round-trips through a business-day/business-month resample."""
    ts = Series([0., 1., 2.], index=[datetime(2009, 10, 30),
                                     datetime(2009, 11, 30),
                                     datetime(2009, 12, 31)])
    daily_ts = ts.asfreq('B')
    monthly_ts = daily_ts.asfreq('BM')
    self.assert_numpy_array_equal(monthly_ts, ts)
    daily_ts = ts.asfreq('B', method='pad')
    monthly_ts = daily_ts.asfreq('BM')
    self.assert_numpy_array_equal(monthly_ts, ts)
    # offset objects instead of frequency strings
    daily_ts = ts.asfreq(datetools.bday)
    monthly_ts = daily_ts.asfreq(datetools.bmonthEnd)
    self.assert_numpy_array_equal(monthly_ts, ts)
    # empty series: asfreq returns a new empty series
    result = ts[:0].asfreq('M')
    self.assertEqual(len(result), 0)
    self.assertIsNot(result, ts)
def test_diff(self):
    """diff: big ints, negative/zero periods, datetime and timedelta dtypes."""
    # Just run the function
    self.ts.diff()
    # int dtype: large values must not lose precision
    a = 10000000000000000
    b = a + 1
    s = Series([a, b])
    rs = s.diff()
    self.assertEqual(rs[1], 1)
    # neg n
    rs = self.ts.diff(-1)
    xp = self.ts - self.ts.shift(-1)
    assert_series_equal(rs, xp)
    # 0
    rs = self.ts.diff(0)
    xp = self.ts - self.ts
    assert_series_equal(rs, xp)
    # datetime diff (GH3100)
    s = Series(date_range('20130102', periods=5))
    rs = s - s.shift(1)
    xp = s.diff()
    assert_series_equal(rs, xp)
    # timedelta diff
    nrs = rs - rs.shift(1)
    nxp = xp.diff()
    assert_series_equal(nrs, nxp)
    # with tz: diff of tz-aware datetimes yields timedeltas
    s = Series(date_range('2000-01-01 09:00:00',periods=5,tz='US/Eastern'), name='foo')
    result = s.diff()
    assert_series_equal(result,Series(TimedeltaIndex(['NaT'] + ['1 days']*4),name='foo'))
def test_pct_change(self):
    """pct_change with periods, fill_method, limit, and freq arguments."""
    rs = self.ts.pct_change(fill_method=None)
    assert_series_equal(rs, self.ts / self.ts.shift(1) - 1)
    rs = self.ts.pct_change(2)
    filled = self.ts.fillna(method='pad')
    assert_series_equal(rs, filled / filled.shift(2) - 1)
    rs = self.ts.pct_change(fill_method='bfill', limit=1)
    filled = self.ts.fillna(method='bfill', limit=1)
    assert_series_equal(rs, filled / filled.shift(1) - 1)
    rs = self.ts.pct_change(freq='5D')
    filled = self.ts.fillna(method='pad')
    assert_series_equal(rs, filled / filled.shift(freq='5D') - 1)
def test_pct_change_shift_over_nas(self):
    """pct_change pads over interior NaNs before computing the change."""
    s = Series([1., 1.5, np.nan, 2.5, 3.])
    chg = s.pct_change()
    expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])
    assert_series_equal(chg, expected)
def test_autocorr(self):
    """autocorr defaults to lag=1 and matches corr() against the shifted series."""
    # Just run the function
    corr1 = self.ts.autocorr()
    # Now run it with the lag parameter
    corr2 = self.ts.autocorr(lag=1)
    # corr() with lag needs Series of at least length 2
    if len(self.ts) <= 2:
        self.assertTrue(np.isnan(corr1))
        self.assertTrue(np.isnan(corr2))
    else:
        self.assertEqual(corr1, corr2)
    # Choose a random lag between 1 and length of Series - 2
    # and compare the result with the Series corr() function
    n = 1 + np.random.randint(max(1, len(self.ts) - 2))
    corr1 = self.ts.corr(self.ts.shift(n))
    corr2 = self.ts.autocorr(lag=n)
    # corr() with lag needs Series of at least length 2
    if len(self.ts) <= 2:
        self.assertTrue(np.isnan(corr1))
        self.assertTrue(np.isnan(corr2))
    else:
        self.assertEqual(corr1, corr2)
def test_first_last_valid(self):
    """first/last_valid_index skip NaNs and return None for all-NaN or empty series."""
    ts = self.ts.copy()
    ts[:5] = np.NaN
    index = ts.first_valid_index()
    self.assertEqual(index, ts.index[5])
    ts[-5:] = np.NaN
    index = ts.last_valid_index()
    self.assertEqual(index, ts.index[-6])
    # all-NaN series has no valid index
    ts[:] = np.nan
    self.assertIsNone(ts.last_valid_index())
    self.assertIsNone(ts.first_valid_index())
    # empty series likewise
    ser = Series([], index=[])
    self.assertIsNone(ser.last_valid_index())
    self.assertIsNone(ser.first_valid_index())
def test_mpl_compat_hack(self):
    """Series[:, np.newaxis] mirrors the same operation on the raw values (matplotlib compat)."""
    result = self.ts[:, np.newaxis]
    expected = self.ts.values[:, np.newaxis]
    assert_almost_equal(result, expected)
#------------------------------------------------------------------------------
# GroupBy
def test_select(self):
    """select(crit) keeps labels for which the criterion is truthy."""
    n = len(self.ts)
    result = self.ts.select(lambda x: x >= self.ts.index[n // 2])
    expected = self.ts.reindex(self.ts.index[n // 2:])
    assert_series_equal(result, expected)
    result = self.ts.select(lambda x: x.weekday() == 2)
    expected = self.ts[self.ts.index.weekday == 2]
    assert_series_equal(result, expected)
#------------------------------------------------------------------------------
# Misc not safe for sparse
def test_dropna_preserve_name(self):
    """dropna preserves the series name, both copying and inplace."""
    self.ts[:5] = np.nan
    result = self.ts.dropna()
    self.assertEqual(result.name, self.ts.name)
    name = self.ts.name
    ts = self.ts.copy()
    ts.dropna(inplace=True)
    self.assertEqual(ts.name, name)
def test_numpy_unique(self):
    """np.unique accepts a Series without raising (smoke test)."""
    # it works!
    result = np.unique(self.ts)
def test_concat_empty_series_dtypes_roundtrips(self):
    """Concatenating empty Series preserves/combines dtypes predictably.

    Fix: ``dtypes`` was a bare ``map(...)`` object, which on Python 3 is a
    one-shot iterator — the first loop exhausted it and the cross-dtype
    nested loops below silently ran zero iterations. Materialize a list so
    both passes iterate, matching the Python 2 behavior the test assumed.
    """
    # round-tripping with self & like self
    dtypes = list(map(np.dtype,['float64','int8','uint8','bool','m8[ns]','M8[ns]']))
    for dtype in dtypes:
        self.assertEqual(pd.concat([Series(dtype=dtype)]).dtype, dtype)
        self.assertEqual(pd.concat([Series(dtype=dtype),
                                    Series(dtype=dtype)]).dtype, dtype)

    def int_result_type(dtype, dtype2):
        # int/uint/bool combinations remain integral; 'i' wins over 'u'
        typs = set([dtype.kind,dtype2.kind])
        if not len(typs-set(['i','u','b'])) and (dtype.kind == 'i' or dtype2.kind == 'i'):
            return 'i'
        elif not len(typs-set(['u','b'])) and (dtype.kind == 'u' or dtype2.kind == 'u'):
            return 'u'
        return None

    def float_result_type(dtype, dtype2):
        # float combined with float/int/uint stays float
        typs = set([dtype.kind,dtype2.kind])
        if not len(typs-set(['f','i','u'])) and (dtype.kind == 'f' or dtype2.kind == 'f'):
            return 'f'
        return None

    def get_result_type(dtype, dtype2):
        # expected result kind for concat; anything unhandled upcasts to object
        result = float_result_type(dtype, dtype2)
        if result is not None:
            return result
        result = int_result_type(dtype, dtype2)
        if result is not None:
            return result
        return 'O'

    for dtype in dtypes:
        for dtype2 in dtypes:
            if dtype == dtype2:
                continue
            expected = get_result_type(dtype, dtype2)
            result = pd.concat([Series(dtype=dtype),
                                Series(dtype=dtype2)]).dtype
            self.assertEqual(result.kind, expected)
def test_concat_empty_series_dtypes(self):
    """Result dtypes of concatenating empty Series of mixed dtypes.

    Fix: replace the deprecated ``np.bool`` alias (removed in NumPy 1.20+)
    with ``np.bool_``, which this method already uses elsewhere.
    """
    # bools
    self.assertEqual(pd.concat([Series(dtype=np.bool_),
                                Series(dtype=np.int32)]).dtype, np.int32)
    self.assertEqual(pd.concat([Series(dtype=np.bool_),
                                Series(dtype=np.float32)]).dtype, np.object_)
    # datetimelike: mixing with non-datetimelike upcasts to object
    self.assertEqual(pd.concat([Series(dtype='m8[ns]'),
                                Series(dtype=np.bool_)]).dtype, np.object_)
    self.assertEqual(pd.concat([Series(dtype='m8[ns]'),
                                Series(dtype=np.int64)]).dtype, np.object_)
    self.assertEqual(pd.concat([Series(dtype='M8[ns]'),
                                Series(dtype=np.bool_)]).dtype, np.object_)
    self.assertEqual(pd.concat([Series(dtype='M8[ns]'),
                                Series(dtype=np.int64)]).dtype, np.object_)
    self.assertEqual(pd.concat([Series(dtype='M8[ns]'),
                                Series(dtype=np.bool_),
                                Series(dtype=np.int64)]).dtype, np.object_)
    # categorical
    self.assertEqual(pd.concat([Series(dtype='category'),
                                Series(dtype='category')]).dtype, 'category')
    self.assertEqual(pd.concat([Series(dtype='category'),
                                Series(dtype='float64')]).dtype, np.object_)
    self.assertEqual(pd.concat([Series(dtype='category'),
                                Series(dtype='object')]).dtype, 'category')
    # sparse
    result = pd.concat([Series(dtype='float64').to_sparse(),
                        Series(dtype='float64').to_sparse()])
    self.assertEqual(result.dtype,np.float64)
    self.assertEqual(result.ftype,'float64:sparse')
    result = pd.concat([Series(dtype='float64').to_sparse(),
                        Series(dtype='float64')])
    self.assertEqual(result.dtype,np.float64)
    self.assertEqual(result.ftype,'float64:sparse')
    result = pd.concat([Series(dtype='float64').to_sparse(),
                        Series(dtype='object')])
    self.assertEqual(result.dtype,np.object_)
    self.assertEqual(result.ftype,'object:dense')
def test_searchsorted_numeric_dtypes_scalar(self):
    """searchsorted with a scalar returns a scalar; with a 1-list, an array."""
    s = Series([1, 2, 90, 1000, 3e9])
    r = s.searchsorted(30)
    e = 2
    tm.assert_equal(r, e)
    r = s.searchsorted([30])
    e = np.array([2])
    tm.assert_numpy_array_equal(r, e)
def test_searchsorted_numeric_dtypes_vector(self):
    """searchsorted with a list of values returns an array of positions."""
    s = Series([1, 2, 90, 1000, 3e9])
    r = s.searchsorted([91, 2e6])
    e = np.array([3, 4])
    tm.assert_numpy_array_equal(r, e)
def test_search_sorted_datetime64_scalar(self):
    """searchsorted accepts a Timestamp scalar on a datetime64 series."""
    s = Series(pd.date_range('20120101', periods=10, freq='2D'))
    v = pd.Timestamp('20120102')
    r = s.searchsorted(v)
    e = 1
    tm.assert_equal(r, e)
def test_search_sorted_datetime64_list(self):
    """searchsorted accepts a list of Timestamps on a datetime64 series."""
    s = Series(pd.date_range('20120101', periods=10, freq='2D'))
    v = [pd.Timestamp('20120102'), pd.Timestamp('20120104')]
    r = s.searchsorted(v)
    e = np.array([1, 2])
    tm.assert_numpy_array_equal(r, e)
def test_searchsorted_sorter(self):
    """searchsorted forwards the sorter argument for unsorted data (GH8490)."""
    # GH8490
    s = Series([3, 1, 2])
    r = s.searchsorted([0, 3], sorter=np.argsort(s))
    e = np.array([0, 2])
    tm.assert_numpy_array_equal(r, e)
def test_to_frame_expanddim(self):
    """to_frame honors _constructor_expanddim on Series subclasses (GH 9762)."""
    # GH 9762
    class SubclassedSeries(Series):
        @property
        def _constructor_expanddim(self):
            return SubclassedFrame
    class SubclassedFrame(DataFrame):
        pass
    s = SubclassedSeries([1, 2, 3], name='X')
    result = s.to_frame()
    self.assertTrue(isinstance(result, SubclassedFrame))
    expected = SubclassedFrame({'X': [1, 2, 3]})
    assert_frame_equal(result, expected)
class TestSeriesNonUnique(tm.TestCase):
    """Series behavior with non-unique indexes, plus misc regression tests."""

    _multiprocess_can_split_ = True

    def setUp(self):
        # no shared fixtures; each test builds its own data
        pass

    def test_basic_indexing(self):
        """Positional overflow raises IndexError; missing labels raise KeyError."""
        s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
        self.assertRaises(IndexError, s.__getitem__, 5)
        self.assertRaises(IndexError, s.__setitem__, 5, 0)
        self.assertRaises(KeyError, s.__getitem__, 'c')
        s = s.sort_index()
        self.assertRaises(IndexError, s.__getitem__, 5)
        self.assertRaises(IndexError, s.__setitem__, 5, 0)

    def test_int_indexing(self):
        """With an integer index, lookup is by label: missing ints raise KeyError."""
        s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
        self.assertRaises(KeyError, s.__getitem__, 5)
        self.assertRaises(KeyError, s.__getitem__, 'c')
        # not monotonic
        s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
        self.assertRaises(KeyError, s.__getitem__, 5)
        self.assertRaises(KeyError, s.__getitem__, 'c')

    def test_datetime_indexing(self):
        """Setting a missing Timestamp label appends it, even on repeated indexes."""
        from pandas import date_range
        index = date_range('1/1/2000', '1/7/2000')
        index = index.repeat(3)
        s = Series(len(index), index=index)
        stamp = Timestamp('1/8/2000')
        self.assertRaises(KeyError, s.__getitem__, stamp)
        s[stamp] = 0
        self.assertEqual(s[stamp], 0)
        # not monotonic
        s = Series(len(index), index=index)
        s = s[::-1]
        self.assertRaises(KeyError, s.__getitem__, stamp)
        s[stamp] = 0
        self.assertEqual(s[stamp], 0)

    def test_reset_index(self):
        """reset_index: column naming, drop/inplace, and level selection."""
        df = tm.makeDataFrame()[:5]
        ser = df.stack()
        ser.index.names = ['hash', 'category']
        ser.name = 'value'
        df = ser.reset_index()
        self.assertIn('value', df)
        df = ser.reset_index(name='value2')
        self.assertIn('value2', df)
        # check inplace
        s = ser.reset_index(drop=True)
        s2 = ser
        s2.reset_index(drop=True, inplace=True)
        assert_series_equal(s, s2)
        # level
        index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
                           labels=[[0, 0, 0, 0, 0, 0],
                                   [0, 1, 2, 0, 1, 2],
                                   [0, 1, 0, 1, 0, 1]])
        s = Series(np.random.randn(6), index=index)
        rs = s.reset_index(level=1)
        self.assertEqual(len(rs.columns), 2)
        rs = s.reset_index(level=[0, 2], drop=True)
        self.assertTrue(rs.index.equals(Index(index.get_level_values(1))))
        tm.assertIsInstance(rs, Series)

    def test_set_index_makes_timeseries(self):
        """Assigning a DatetimeIndex turns the series into a time series."""
        idx = tm.makeDateIndex(10)
        s = Series(lrange(10))
        s.index = idx
        with tm.assert_produces_warning(FutureWarning):
            # is_time_series is deprecated, hence the expected warning
            self.assertTrue(s.is_time_series == True)
        self.assertTrue(s.index.is_all_dates == True)

    def test_timeseries_coercion(self):
        """An object index of dates is coerced to a DatetimeIndex."""
        idx = tm.makeDateIndex(10000)
        ser = Series(np.random.randn(len(idx)), idx.astype(object))
        with tm.assert_produces_warning(FutureWarning):
            self.assertTrue(ser.is_time_series)
        self.assertTrue(ser.index.is_all_dates)
        self.assertIsInstance(ser.index, DatetimeIndex)

    def test_replace(self):
        """replace on an object series: list, dict, list-pair, and inplace forms."""
        N = 100
        ser = Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
                     dtype=object)
        ser[:5] = np.nan
        ser[6:10] = 'foo'
        ser[20:30] = 'bar'
        # replace list with a single value
        rs = ser.replace([np.nan, 'foo', 'bar'], -1)
        self.assertTrue((rs[:5] == -1).all())
        self.assertTrue((rs[6:10] == -1).all())
        self.assertTrue((rs[20:30] == -1).all())
        self.assertTrue((isnull(ser[:5])).all())
        # replace with different values
        rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})
        self.assertTrue((rs[:5] == -1).all())
        self.assertTrue((rs[6:10] == -2).all())
        self.assertTrue((rs[20:30] == -3).all())
        self.assertTrue((isnull(ser[:5])).all())
        # replace with different values with 2 lists
        rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
        assert_series_equal(rs, rs2)
        # replace inplace
        ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
        self.assertTrue((ser[:5] == -1).all())
        self.assertTrue((ser[6:10] == -1).all())
        self.assertTrue((ser[20:30] == -1).all())

    def test_repeat(self):
        """repeat with a scalar and with a per-element repeat count."""
        s = Series(np.random.randn(3), index=['a', 'b', 'c'])
        reps = s.repeat(5)
        exp = Series(s.values.repeat(5), index=s.index.values.repeat(5))
        assert_series_equal(reps, exp)
        to_rep = [2, 3, 4]
        reps = s.repeat(to_rep)
        exp = Series(s.values.repeat(to_rep),
                     index=s.index.values.repeat(to_rep))
        assert_series_equal(reps, exp)

    def test_unique_data_ownership(self):
        """Sorting a Series built from unique() must not corrupt data (GH 1807)."""
        # it works! #1807
        Series(Series(["a", "c", "b"]).unique()).sort_values()

    def test_datetime_timedelta_quantiles(self):
        """quantile on empty datetime64/timedelta64 series returns null (GH 9694)."""
        # covers #9694
        self.assertTrue(pd.isnull(Series([],dtype='M8[ns]').quantile(.5)))
        self.assertTrue(pd.isnull(Series([],dtype='m8[ns]').quantile(.5)))

    def test_empty_timeseries_redections_return_nat(self):
        """min/max of empty datetimelike series return NaT (GH 11245).

        NOTE(review): method name has a typo ('redections' for 'reductions')
        and the dtype tuple repeats 'm8[ns]' and 'M8[ns]'; left unchanged to
        keep the discovered test name stable.
        """
        # covers #11245
        for dtype in ('m8[ns]', 'm8[ns]', 'M8[ns]', 'M8[ns, UTC]'):
            self.assertIs(Series([], dtype=dtype).min(), pd.NaT)
            self.assertIs(Series([], dtype=dtype).max(), pd.NaT)
if __name__ == '__main__':
    # Run this module's tests under nose when executed directly; -x stops at
    # the first failure and --pdb/--pdb-failure drop into the debugger.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| gpl-2.0 |
alphaBenj/zipline | zipline/utils/security_list.py | 6 | 5399 | import warnings
from datetime import datetime
from os import listdir
import os.path
import pandas as pd
import pytz
import zipline
from zipline.errors import SymbolNotFound
from zipline.finance.asset_restrictions import SecurityListRestrictions
from zipline.zipline_warnings import ZiplineDeprecationWarning
DATE_FORMAT = "%Y%m%d"
zipline_dir = os.path.dirname(zipline.__file__)
SECURITY_LISTS_DIR = os.path.join(zipline_dir, 'resources', 'security_lists')
class SecurityList(object):
    """Point-in-time set of restricted sids.

    ``data`` maps knowledge dates (when a change became known) to effective
    dates, each carrying ``add``/``delete`` symbol lists. A query at ``dt``
    folds in every change whose knowledge date is on or before ``dt``.

    Fixes over the original:
    - ``current_securities`` cached the *live* ``_current_set`` object, so
      processing a later knowledge date mutated every earlier cached entry
      (all cache values aliased one set). Snapshots are now stored/loaded.
    - A query earlier than all knowledge dates returned stale state left
      over from a previous query; the working set is now reset per call.
    """

    def __init__(self, data, current_date_func, asset_finder):
        """
        data: a nested dictionary:
            knowledge_date -> lookup_date ->
              {add: [symbol list], 'delete': []}, delete: [symbol list]}

        current_date_func: function taking no parameters, returning
        current datetime
        """
        self.data = data
        self._cache = {}  # knowledge_date -> frozen snapshot of the sid set
        self._knowledge_dates = self.make_knowledge_dates(self.data)
        self.current_date = current_date_func
        self.count = 0
        self._current_set = set()
        self.asset_finder = asset_finder

    def make_knowledge_dates(self, data):
        # Sorted so current_securities() can replay changes chronologically.
        knowledge_dates = sorted(
            [pd.Timestamp(k) for k in data.keys()])
        return knowledge_dates

    def __iter__(self):
        warnings.warn(
            'Iterating over security_lists is deprecated. Use '
            '`for sid in <security_list>.current_securities(dt)` instead.',
            category=ZiplineDeprecationWarning,
            stacklevel=2
        )
        return iter(self.current_securities(self.current_date()))

    def __contains__(self, item):
        warnings.warn(
            'Evaluating inclusion in security_lists is deprecated. Use '
            '`sid in <security_list>.current_securities(dt)` instead.',
            category=ZiplineDeprecationWarning,
            stacklevel=2
        )
        return item in self.current_securities(self.current_date())

    def current_securities(self, dt):
        """Return the set of sids restricted as of ``dt``."""
        # Start from a clean slate so a dt earlier than every knowledge date
        # yields an empty set rather than a previous query's leftovers.
        self._current_set = set()
        for kd in self._knowledge_dates:
            if dt < kd:
                break
            if kd in self._cache:
                # Copy on load so later mutation cannot corrupt the cache.
                self._current_set = set(self._cache[kd])
                continue
            for effective_date, changes in iter(self.data[kd].items()):
                self.update_current(
                    effective_date,
                    changes['add'],
                    self._current_set.add
                )
                self.update_current(
                    effective_date,
                    changes['delete'],
                    self._current_set.remove
                )
            # Copy on store: caching the live set object let processing of a
            # later knowledge date mutate this entry in place.
            self._cache[kd] = set(self._current_set)
        return self._current_set

    def update_current(self, effective_date, symbols, change_func):
        """Resolve each symbol as of effective_date and apply change_func to its sid."""
        for symbol in symbols:
            try:
                asset = self.asset_finder.lookup_symbol(
                    symbol,
                    as_of_date=effective_date
                )
            # Pass if no Asset exists for the symbol
            except SymbolNotFound:
                continue
            change_func(asset.sid)
class SecurityListSet(object):
    # provide a cut point to substitute other security
    # list implementations.
    security_list_type = SecurityList

    def __init__(self, current_date_func, asset_finder):
        self.current_date_func = current_date_func
        self.asset_finder = asset_finder
        self._leveraged_etf = None  # lazily constructed below

    @property
    def leveraged_etf_list(self):
        # Build the leveraged-ETF list on first access and memoize it.
        if self._leveraged_etf is None:
            list_data = load_from_directory('leveraged_etf_list')
            self._leveraged_etf = self.security_list_type(
                list_data,
                self.current_date_func,
                asset_finder=self.asset_finder
            )
        return self._leveraged_etf

    @property
    def restrict_leveraged_etfs(self):
        # Restriction object wrapping the (lazily built) leveraged-ETF list.
        etf_list = self.leveraged_etf_list
        return SecurityListRestrictions(etf_list)
def load_from_directory(list_name):
    """
    To resolve the symbol in the LEVERAGED_ETF list,
    the date on which the symbol was in effect is needed.
    Furthermore, to maintain a point in time record of our own maintenance
    of the restricted list, we need a knowledge date. Thus, restricted lists
    are dictionaries of datetime->symbol lists.
    new symbols should be entered as a new knowledge date entry.
    This method assumes a directory structure of:
    SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/add.txt
    SECURITY_LISTS_DIR/listname/knowledge_date/lookup_date/delete.txt
    The return value is a dictionary with:
    knowledge_date -> lookup_date ->
    {add: [symbol list], 'delete': [symbol list]}
    """
    result = {}
    list_dir = os.path.join(SECURITY_LISTS_DIR, list_name)
    for knowledge_name in listdir(list_dir):
        # directory names encode dates as YYYYMMDD; parse as UTC
        knowledge_date = datetime.strptime(
            knowledge_name, DATE_FORMAT).replace(tzinfo=pytz.utc)
        knowledge_path = os.path.join(list_dir, knowledge_name)
        lookup_map = {}
        for lookup_name in listdir(knowledge_path):
            lookup_date = datetime.strptime(
                lookup_name, DATE_FORMAT).replace(tzinfo=pytz.utc)
            lookup_path = os.path.join(knowledge_path, lookup_name)
            changes = {}
            for change_name in listdir(lookup_path):
                # keyed by filename, e.g. 'add' / 'delete'
                with open(os.path.join(lookup_path, change_name)) as change_file:
                    changes[change_name] = change_file.read().splitlines()
            lookup_map[lookup_date] = changes
        result[knowledge_date] = lookup_map
    return result
| apache-2.0 |
psathyrella/partis | test/cf-tree-metrics.py | 1 | 70896 | #!/usr/bin/env python
import argparse
import operator
import os
import sys
import yaml
import json
import colored_traceback.always
import collections
import numpy
import math
import subprocess
import multiprocessing
# ----------------------------------------------------------------------------------------
# per-metric line styles/widths for the comparison plots below
linestyles = {'lbi' : '-', 'lbr' : '-', 'dtr' : '--'}
linewidths = {'lbi' : 2.5, 'lbr' : 2.5, 'dtr' : 3}
# fixed color overrides, consulted before the palette cycle in metric_color()
hard_colors = {'dtr' : '#626262',
               'aa-lbi' : '#e043b9',
               'aa-lbr' : '#e043b9'} # don't like the cycle colors these end up with
def metric_color(metric):  # as a fcn to avoid import if we're not plotting
    """Return the plotting color for <metric>: hard-coded overrides first, then the frozen palette, else red."""
    if metric in hard_colors:
        return hard_colors[metric]
    palette = plotting.frozen_pltcolors
    color_map = {}
    # later groups overwrite earlier ones for metrics appearing in both
    for joined in ['shm:lbi:cons-dist-aa:cons-dist-nuc:dtr:aa-lbi', 'delta-lbi:lbr:dtr:aa-lbr']:
        for index, name in enumerate(joined.split(':')):
            color_map[name] = palette[index % len(palette)]
    return color_map.get(metric, 'red')
# ----------------------------------------------------------------------------------------
def ura_vals(xvar):  # list of whether we're using relative affinity values
    """Return the use-relative-affinity flags to loop over for <xvar>."""
    if xvar != 'affinity' or not args.include_relative_affy_plots:
        return [False]
    return [False, True]  # i.e. both plain and relative affinity
# ----------------------------------------------------------------------------------------
def get_n_generations(ntl, tau):  # NOTE duplicates code in treeutils.calculate_max_lbi()
    """Number of generations corresponding to <ntl> tau-lengths at lb tau <tau> (at least 1)."""
    n_gen = int(args.seq_len * tau * ntl)
    return max(1, n_gen)
# ----------------------------------------------------------------------------------------
def get_outfname(outdir):
    """Path of the lb-bound yaml file inside <outdir>."""
    return '{}/vals.yaml'.format(outdir)
# ----------------------------------------------------------------------------------------
def make_lb_bound_plots(args, outdir, metric, btype, parsed_info, print_results=False):
    """Plot <btype> ('min'/'max') bound of <metric> (lbi/lbr) vs N generations.

    <parsed_info> is keyed metric -> btype -> tau -> {n_gen : bound value};
    one line per tau, written to <outdir> in linear- and log-y versions.
    """
    def make_plot(log, parsed_info):
        fig, ax = plotting.mpl_init()
        for lbt in sorted(parsed_info[metric][btype], reverse=True):
            # x: n generations, y: bound values, sorted by n generations
            n_gen_list, lb_list = zip(*sorted(parsed_info[metric][btype][lbt].items(), key=operator.itemgetter(0)))
            if lbt == 1. / args.seq_len: # add a horizontal line corresponding to the asymptote for tau = 1/seq_len
                ax.plot(ax.get_xlim(), (lb_list[-1], lb_list[-1]), linewidth=3, alpha=0.7, color='darkred', linestyle='--') #, label='1/seq len')
            ax.plot(n_gen_list, lb_list, label='%.4f' % lbt, alpha=0.7, linewidth=4)
            if print_results and log == '':  # only print once, on the linear-scale pass
                print ' %7.4f %6.4f' % (lbt, lb_list[-1])
        plotname = 'tau-vs-n-gen-vs-%s-%s' % (btype, metric)
        if log == '':
            ybounds = None
            leg_loc = (0.1, 0.5)
        else:
            plotname += '-log'
            if metric == 'lbi':
                # keep log-y axis away from zero, with headroom above
                ybounds = (2*min(parsed_info[metric][btype]), 3*ax.get_ylim()[1])
            else:
                ybounds = None
            leg_loc = (0.04, 0.57)
        plotting.mpl_finish(ax, outdir, plotname, log=log, xbounds=(min(n_gen_list), max(n_gen_list)), ybounds=ybounds,
                            xlabel='N generations', ylabel='%s %s' % (btype.capitalize(), metric.upper()), leg_title='tau', leg_prop={'size' : 12}, leg_loc=leg_loc)
    if print_results:
        print '%s: tau %s %s' % (btype, btype, metric)
    for log in ['', 'y']:
        make_plot(log, parsed_info)
# ----------------------------------------------------------------------------------------
def calc_lb_bounds(args, n_max_gen_to_plot=4, lbt_bounds=(0.001, 0.005), print_results=False):
    """Calculate (or, with --make-plots, read back and plot) min/max lb metric bounds.

    Loops over each (lb tau, n generations) pair, writing results to a yaml file
    per pair; in plotting mode the yaml files are read into <parsed_info> and
    summarized with make_lb_bound_plots().
    """
    print_results = True  # NOTE(review): unconditionally overrides the parameter above -- looks like leftover debug; confirm before removing
    btypes = ['min', 'max']
    outdir = '%s/lb-tau-normalization/%s' % (args.base_outdir, args.label)
    parsed_info = {m : {b : {} for b in btypes} for m in args.only_metrics}
    for lbt in args.lb_tau_list:
        if args.make_plots and (lbt < lbt_bounds[0] or lbt > lbt_bounds[1]):
            print ' skipping tau %.4f outside of bounds [%.4f, %.4f] for bound plotting' % (lbt, lbt_bounds[0], lbt_bounds[1])
            continue
        # generation counts: either given directly, or derived from tau lengths
        gen_list = args.n_generations_list
        if gen_list is None:
            gen_list = [get_n_generations(ntl, lbt) for ntl in args.n_tau_lengths_list]
        if args.lb_tau_list.index(lbt) == 0 or args.n_tau_lengths_list is not None: # if --n-tau-lengths-list is set, they could be different for each tau
            print ' seq len: %d N gen list: %s' % (args.seq_len, ' '.join(str(n) for n in gen_list))
        print ' %s %.4f' % (utils.color('green', 'lb tau'), lbt)
        for n_gen in gen_list:
            if args.debug:
                print ' %s %d %s %.4f' % (utils.color('purple', 'N gen'), n_gen, utils.color('purple', 'lb tau'), lbt)
            this_outdir = '%s/n_gen-%d-lbt-%.4f' % (outdir, n_gen, lbt) # if for some reason I want to specify --n-tau-lengths-list instead of --n-generations-list, this makes the output path structure still correspond to n generations, but that's ok since that's what the trees do
            if args.make_plots: # just let it crash if you forgot to actually run it first
                with open(get_outfname(this_outdir)) as outfile:
                    info = yaml.load(outfile, Loader=yaml.Loader)
                for metric in args.only_metrics:
                    for btype in btypes:
                        if lbt not in parsed_info[metric][btype]:
                            parsed_info[metric][btype][lbt] = {}
                        parsed_info[metric][btype][lbt][n_gen] = info[metric][btype][metric] # it's weird to have metric as the key twice, but it actually makes sense since we're subverting the normal calculation infrastructure to only run one metric or the other at a time (i.e. the righthand key is pulling out the metric we want from the lb info that, in principle, usually has both; while the lefthand key is identifying a run during which we were only caring about that metric)
                continue
            elif utils.output_exists(args, get_outfname(this_outdir)):
                continue
            print ' running n gen %d' % n_gen
            if not os.path.exists(this_outdir):
                os.makedirs(this_outdir)
            lbvals = treeutils.calculate_lb_bounds(args.seq_len, lbt, n_generations=n_gen, n_offspring=args.max_lb_n_offspring, only_metrics=args.only_metrics, btypes=btypes, debug=args.debug)
            with open(get_outfname(this_outdir), 'w') as outfile:
                # drop the 'vals' entries (full trees) -- they're slow to write and only needed for plotting
                yamlfo = {m : {b : {k : v for k, v in lbvals[m][b].items() if k != 'vals'} for b in btypes} for m in args.only_metrics} # writing these to yaml is really slow, and they're only used for plotting below
                yaml.dump(yamlfo, outfile)
            if n_gen > n_max_gen_to_plot:
                continue
            # this is really slow on large trees
            plotdir = this_outdir + '/plots'
            utils.prep_dir(plotdir, wildlings='*.svg')
            for metric in args.only_metrics:
                for btype in btypes:
                    if lbvals[metric][btype]['vals'] is None:
                        continue
                    cmdfos = [lbplotting.get_lb_tree_cmd(lbvals[metric][btype]['vals']['tree'], '%s/%s-%s-tree.svg' % (plotdir, metric, btype), metric, 'affinities', args.ete_path, args.workdir, metafo=lbvals[metric][btype]['vals'], tree_style='circular')]
                    utils.run_cmds(cmdfos, clean_on_success=True, shell=True, debug='print')
    if args.make_plots:
        print ' writing plots to %s' % outdir
        for metric in args.only_metrics:
            for btype in btypes:
                if 'lbr' in metric and btype == 'min': # it's just zero, and confuses the log plots
                    continue
                if len(parsed_info[metric][btype]) == 0:
                    print 'nothing to do (<parsed_info> empty)'
                    continue
                make_lb_bound_plots(args, outdir, metric, btype, parsed_info, print_results=print_results)
# ----------------------------------------------------------------------------------------
def get_outdir(varnames, vstr, svtype):
    """Output dir for one scan point.

    varnames: names of the scanned variables
    vstr: corresponding value strings (same length/order as <varnames>)
    svtype: scan-variable group (e.g. 'simu', 'get-tree-metrics') -- only
        variables belonging to this group contribute a path component
    """
    assert len(varnames) == len(vstr)
    outdir = [args.base_outdir, args.label]
    # NOTE: loop variable renamed from <vstr> -- the original shadowed the
    # parameter it was iterating over, which worked but was fragile/confusing.
    for vname, vval in zip(varnames, vstr):
        if vname not in args.scan_vars[svtype]: # e.g. lb tau, which is only for lb calculation
            continue
        outdir.append('%s-%s' % (vname, vval))
    return '/'.join(outdir)
# ----------------------------------------------------------------------------------------
def get_bcr_phylo_outdir(varnames, vstr):
    """bcr-phylo subdirectory of this scan point's simulation output dir."""
    return '%s/bcr-phylo' % get_outdir(varnames, vstr, 'simu')
# ----------------------------------------------------------------------------------------
def get_simfname(varnames, vstr):
    """Path of the mutated-simulation yaml for this scan point."""
    bdir = get_bcr_phylo_outdir(varnames, vstr)
    return bdir + '/selection/simu/mutated-simu.yaml'
# ----------------------------------------------------------------------------------------
def get_parameter_dir(varnames, vstr):
    """Partis parameter dir under the bcr-phylo output dir."""
    bdir = get_bcr_phylo_outdir(varnames, vstr)
    return bdir + '/selection/partis/params'
# ----------------------------------------------------------------------------------------
def get_tree_metric_outdir(varnames, vstr, metric_method=None):  # metric_method is only set if it's neither lbi nor lbr
    """Tree-metric output dir: 'partis' subdir for lb metrics, else named after <metric_method>."""
    subdir = 'partis' if metric_method is None else metric_method
    return '%s/%s' % (get_outdir(varnames, vstr, 'get-tree-metrics'), subdir)
# ----------------------------------------------------------------------------------------
def get_partition_fname(varnames, vstr, action, metric_method=None):
    """Path of the partition yaml for this scan point.

    For 'bcr-phylo' (or any non-lb metric, i.e. <metric_method> set) the
    original partition output under the bcr-phylo dir is used, since those
    runs don't modify the partition file; otherwise the file in the
    tree-metric dir is used.
    """
    use_bcr_phylo_dir = action == 'bcr-phylo' or metric_method is not None
    if use_bcr_phylo_dir:
        outdir = '%s/selection/partis' % get_bcr_phylo_outdir(varnames, vstr)
    else:
        outdir = get_tree_metric_outdir(varnames, vstr)
    return outdir + '/partition.yaml'
# ----------------------------------------------------------------------------------------
def get_tree_metric_plotdir(varnames, vstr, metric_method=None, extra_str=''):
    """Plot dir under the tree-metric output dir, optionally prefixed '<extra_str>-'."""
    prefix = '' if extra_str == '' else extra_str + '-'
    return '%s/%splots' % (get_tree_metric_outdir(varnames, vstr, metric_method=metric_method), prefix)
# ----------------------------------------------------------------------------------------
def get_dtr_model_dir(varnames, vstr, extra_str=''):
    """Dir for trained dtr models, derived from the dtr plot dir by
    swapping the trailing 'plots' component for 'dtr-models'."""
    plotdir = get_tree_metric_plotdir(varnames, vstr, metric_method='dtr', extra_str=extra_str)
    if plotdir.split('/')[-1] == 'plots':  # plain '.../plots' dir
        assert extra_str == ''  # i think?
        delim = '/'
    elif plotdir.split('-')[-1] == 'plots':  # i.e. args.extra_plotstr was set when we trained
        assert extra_str != ''  # i think?
        delim = '-'
    else:
        assert False
    suffix = delim + 'plots'
    assert plotdir.count(suffix) == 1
    return plotdir.replace(suffix, delim + 'dtr-models')
# ----------------------------------------------------------------------------------------
def rel_affy_str(use_relative_affy, metric):
    """'-relative' filename suffix, used only for relative-affinity lbi plots."""
    if use_relative_affy and metric == 'lbi':
        return '-relative'
    return ''
# ----------------------------------------------------------------------------------------
def get_tm_fname(varnames, vstr, metric, x_axis_label, use_relative_affy=False, cg=None, tv=None, extra_str=''): # note that there are separate svg files for each iclust, but info for all clusters are written to the same yaml file (but split apart with separate info for each cluster)
    """Path of the ptile yaml for <metric> vs <x_axis_label> at this scan point."""
    if metric == 'dtr':
        # dtr files are additionally keyed by choice group <cg> and target var <tv>
        assert cg is not None and tv is not None
    if metric in ['lbi', 'lbr']: # NOTE using <metric> and <metric_method> for slightly different but overlapping things: former is the actual metric name, whereas setting the latter says we want a non-lb metric (necessary because by default we want to calculate lbi and lbr, but also be able treat lbi and lbr separately when plotting)
        plotdir = get_tree_metric_plotdir(varnames, vstr, extra_str=extra_str)
        old_path = '%s/true-tree-metrics/%s-vs-%s-true-tree-ptiles%s.yaml' % (plotdir, metric, x_axis_label, rel_affy_str(use_relative_affy, metric)) # just for backwards compatibility, could probably remove at some point (note: not updating this when I'm adding non-lb metrics like shm)
        if os.path.exists(old_path):
            return old_path
    else:
        plotdir = get_tree_metric_plotdir(varnames, vstr, metric_method=metric, extra_str=extra_str)
    return treeutils.tmfname(plotdir, metric, x_axis_label, cg=cg, tv=tv, use_relative_affy=use_relative_affy)
# ----------------------------------------------------------------------------------------
def get_all_tm_fnames(varnames, vstr, metric_method=None, extra_str=''):
    """All tree-metric output file paths expected for this scan point.

    With <metric_method> None, the lb metric files; 'dtr' gives either model
    files (training) or ptile files (testing); anything else gives the single
    ptile file for that metric.
    """
    if metric_method is None:
        # lb metrics: one file per (metric, x axis, relative-affy flag) combo
        return [get_tm_fname(varnames, vstr, mtmp, xatmp, use_relative_affy=use_relative_affy, extra_str=extra_str)
                for mtmp, cfglist in lbplotting.lb_metric_axis_cfg(args.metric_method)
                for xatmp, _ in cfglist
                for use_relative_affy in ura_vals(xatmp)] # arg wow that's kind of complicated and ugly
    elif metric_method == 'dtr':
        if args.train_dtr: # training
            return [treeutils.dtrfname(get_dtr_model_dir(varnames, vstr, extra_str=extra_str), cg, tvar) for cg in treeutils.cgroups for tvar in treeutils.dtr_targets[cg]]
        else: # testing
            return [get_tm_fname(varnames, vstr, metric_method, lbplotting.getptvar(tv), cg=cg, tv=tv, use_relative_affy=use_relative_affy, extra_str=extra_str) for cg in treeutils.cgroups for tv in treeutils.dtr_targets[cg] for use_relative_affy in ura_vals(tv)]
    else:
        return [get_tm_fname(varnames, vstr, metric_method, 'n-ancestor' if metric_method in ['delta-lbi', 'aa-lbr'] else 'affinity', extra_str=extra_str)] # this hard coding sucks, and it has to match some stuff in treeutils.calculate_non_lb_tree_metrics()
# ----------------------------------------------------------------------------------------
def get_comparison_plotdir(metric, per_x, extra_str=''):  # both <metric> and <per_x> can be None, in which case we return the parent dir
    """Comparison plot dir; <metric>, <extra_str> and <per_x> components are appended when given."""
    plotdir = '/'.join([args.base_outdir, args.label, 'plots'])
    if metric is not None:
        metric_part = metric
        if metric == 'combined' and args.combo_extra_str is not None:
            metric_part += '-' + args.combo_extra_str
        plotdir += '/' + metric_part
    if extra_str != '':
        assert metric is not None
        plotdir += '_' + extra_str
    if per_x is not None:
        plotdir += '/' + per_x
    return plotdir
# ----------------------------------------------------------------------------------------
def getsargval(sv):  # ick this name sucks
    """Return the list of values for scan variable <sv> from the argparse namespace."""
    if sv == 'seed':
        # one seed per replicate, offset from --random-seed (or only the requested iseeds)
        indices = range(args.n_replicates) if args.iseeds is None else args.iseeds
        return [args.random_seed + i for i in indices]
    # e.g. 'lb-tau' -> args.lb_tau_list
    return args.__dict__[sv.replace('-', '_') + '_list']
# ----------------------------------------------------------------------------------------
def get_vlval(vlists, varnames, vname):  # ok this name also sucks, but they're doing complicated things while also needing really short names...
    # NOTE I think <vlist> would be more appropriate than <vlists>
    """Value of <vname> at this scan point: from <vlists> if it's scanned, else its single configured value."""
    if vname not in varnames:
        assert len(getsargval(vname)) # um, I think?
        return getsargval(vname)[0]
    return vlists[varnames.index(vname)]
# ----------------------------------------------------------------------------------------
def get_var_info(args, scan_vars):
    """Work out which of <scan_vars> actually vary, and enumerate scan points.

    Returns (base_args, varnames, val_lists, valstrs): <base_args> holds
    command-line args for single-valued variables; <varnames> the multi-valued
    ones; <val_lists>/<valstrs> the per-scan-point value combinations (as
    values / as strings).  If --zip-vars is set, only combinations whose
    zipped values were specified together are kept.
    """
    def handle_var(svar, val_lists, valstrs):
        # single values are str()'d; list-valued vars are colon-joined
        convert_fcn = str if svar in ['carry-cap', 'seed', 'metric-for-target-distance', 'paratope-positions', 'parameter-variances', 'selection-strength', 'leaf-sampling-scheme', 'target-count', 'n-target-clusters', 'min-target-distance', 'lb-tau'] else lambda vlist: ':'.join(str(v) for v in vlist)
        sargv = getsargval(svar)
        if sargv is None: # no default value, and it wasn't set on the command line
            pass
        elif len(sargv) > 1 or (svar == 'seed' and args.iseeds is not None): # if --iseeds is set, then we know there must be more than one replicate, but/and we also know the fcn will only be returning one of 'em
            varnames.append(svar)
            # cartesian product: extend every existing combo with each value of this var
            val_lists = [vlist + [sv] for vlist in val_lists for sv in sargv]
            valstrs = [vlist + [convert_fcn(sv)] for vlist in valstrs for sv in sargv]
        else:
            base_args.append('--%s %s' % (svar, convert_fcn(sargv[0])))
        return val_lists, valstrs
    base_args = []
    varnames = []
    val_lists, valstrs = [[]], [[]]
    for svar in scan_vars:
        val_lists, valstrs = handle_var(svar, val_lists, valstrs)
    if args.zip_vars is not None:
        if args.debug:
            print ' zipping values for %s' % ' '.join(args.zip_vars)
        assert len(args.zip_vars) == 2 # nothing wrong with more, but I don't feel like testing it right now
        assert len(getsargval(args.zip_vars[0])) == len(getsargval(args.zip_vars[1])) # doesn't make sense unless you provide a corresponding value for each
        ok_zipvals = zip(getsargval(args.zip_vars[0]), getsargval(args.zip_vars[1]))
        zval_lists, zvalstrs = [], [] # new ones, only containing zipped values
        for vlist, vstrlist in zip(val_lists, valstrs):
            zvals = tuple([get_vlval(vlist, varnames, zv) for zv in args.zip_vars]) # values for this combo of the vars we want to zip
            if zvals in ok_zipvals and vlist not in zval_lists: # second clause is to avoid duplicates (duh), which we get because when we're zipping vars we have to allow duplicate vals in each zip'd vars arg list, and then (above) we make combos including all those duplicate combos
                zval_lists.append(vlist)
                zvalstrs.append(vstrlist)
        val_lists = zval_lists
        valstrs = zvalstrs
    if any(valstrs.count(vstrs) > 1 for vstrs in valstrs):
        raise Exception('duplicate combinations for %s' % ' '.join(':'.join(vstr) for vstr in valstrs if valstrs.count(vstr) > 1))
    return base_args, varnames, val_lists, valstrs
# ----------------------------------------------------------------------------------------
def make_plots(args, action, metric, per_x, choice_grouping, ptilestr, ptilelabel, xvar, min_ptile_to_plot=75., use_relative_affy=False, metric_extra_str='', xdelim='_XTRA_', debug=False):
if metric == 'lbr' and args.dont_observe_common_ancestors:
print ' skipping lbr when only observing leaves'
return
affy_key_str = 'relative-' if (use_relative_affy and ptilestr=='affinity') else '' # NOTE somewhat duplicates lbplotting.rel_affy_str()
treat_clusters_together = args.n_sim_events_per_proc is None or (per_x == 'per-seq' and choice_grouping == 'among-families') # if either there's only one family per proc, or we're choosing cells among all the clusters in a proc together, then things here generally work as if there were only one family per proc (note that I think I don't need the 'per-seq' since it shouldn't be relevant for 'per-cluster', but it makes it clearer what's going on)
vlabels = {
'obs_frac' : 'fraction sampled',
'n-sim-seqs-per-gen' : 'N/gen',
'obs-times' : 't obs',
'carry-cap' : 'carry cap',
}
treeutils.legtexts.update(lbplotting.metric_for_target_distance_labels)
def legstr(label, title=False):
if label is None: return None
jstr = '\n' if title else '; '
tmplist = [treeutils.legtexts.get(l, l.replace('-', ' ')) for l in label.split('; ')]
if title and args.pvks_to_plot is not None: # if we're only plotting specific values, put them in the legend str (typically we're just plotting one value)
assert isinstance(args.pvks_to_plot, list) # don't really need this
for il in range(len(tmplist)):
subpvks = [pvk.split('; ')[il] for pvk in args.pvks_to_plot]
tmplist[il] += ': %s' % ' '.join(treeutils.legtexts.get(spvk, spvk) for spvk in subpvks)
lstr = jstr.join(tmplist)
return lstr
pvlabel = ['?'] # arg, this is ugly (but it does work...)
# ----------------------------------------------------------------------------------------
def get_obs_frac(vlists, varnames):
obs_times = get_vlval(vlists, varnames, 'obs-times')
n_per_gen_vals = get_vlval(vlists, varnames, 'n-sim-seqs-per-gen')
if len(obs_times) == len(n_per_gen_vals): # note that this duplicates logic in bcr-phylo simulator.py
n_sampled = sum(n_per_gen_vals)
elif len(n_per_gen_vals) == 1:
n_sampled = len(obs_times) * n_per_gen_vals[0]
else:
assert False
n_total = get_vlval(vlists, varnames, 'carry-cap') # note that this is of course the number alive at a given time, and very different from the total number that ever lived
obs_frac = n_sampled / float(n_total)
dbgstr = ' %-12s %-12s %-5d %8s / %-4d = %.3f' % (' '.join(str(o) for o in obs_times), ' '.join(str(n) for n in n_per_gen_vals), n_total,
('(%s)' % ' + '.join(str(n) for n in n_per_gen_vals)) if len(obs_times) == len(n_per_gen_vals) else ('%d * %d' % (len(obs_times), n_per_gen_vals[0])),
n_total, n_sampled / float(n_total))
return obs_frac, dbgstr
# ----------------------------------------------------------------------------------------
def pvkeystr(vlists, varnames, obs_frac):
def valstr(vname):
vval = obs_frac if vname == 'obs_frac' else get_vlval(vlists, varnames, vname)
if vname == 'obs_frac':
return '%.4f' % obs_frac
else:
def strfcn(x):
return str(x) # TODO
if isinstance(vval, list):
return ', '.join(strfcn(v) for v in vval)
else:
return strfcn(vval)
pvnames = sorted(set(varnames) - set(['seed', xvar]))
if args.legend_var is not None: # pvnames == ['n-sim-seqs-per-gen']: # if this is the only thing that's different between different runs (except for the x variable and seed/replicate) then we want to use obs_frac
pvnames = [args.legend_var] # ['obs_frac']
pvkey = '; '.join(valstr(vn) for vn in pvnames) # key identifying each line of a different color
pvlabel[0] = '; '.join(vlabels.get(vn, vn) for vn in pvnames)
return pvkey
# ----------------------------------------------------------------------------------------
def get_ytmpfo(yamlfo, iclust=None):
if 'percentiles' in yamlfo: # new-style files
ytmpfo = yamlfo['percentiles']
if per_x == 'per-seq':
ytmpfo = ytmpfo['per-seq']['all-clusters' if iclust is None else 'iclust-%d' % iclust]
else:
ytmpfo = ytmpfo['per-cluster'][choice_grouping]
else: # old-style files
ytmpfo = yamlfo
if iclust is not None:
if 'iclust-%d' % iclust not in ytmpfo:
print ' %s requested per-cluster ptile vals, but they\'re not in the yaml file (probably just an old file)' % utils.color('yellow', 'warning') # I think it's just going to crash on the next line anyway
ytmpfo = ytmpfo['iclust-%d' % iclust]
return ytmpfo
# ----------------------------------------------------------------------------------------
def yval_key(ytmpfo):
if ptilestr == 'affinity' and 'mean_affy_ptiles' in ytmpfo: # old-style files used shortened version
return 'mean_affy_ptiles'
else:
return 'mean_%s_ptiles' % ptilestr
# ----------------------------------------------------------------------------------------
def get_diff_vals(ytmpfo, iclust=None):
ytmpfo = get_ytmpfo(ytmpfo, iclust=iclust)
return [abs(pafp - afp) for lbp, afp, pafp in zip(ytmpfo['lb_ptiles'], ytmpfo[yval_key(ytmpfo)], ytmpfo['perfect_vals']) if lbp > min_ptile_to_plot]
# ----------------------------------------------------------------------------------------
def get_varname_str():
return ''.join('%10s' % vlabels.get(v, v) for v in varnames)
def get_varval_str(vstrs):
return ''.join(' %9s' % v for v in vstrs)
# ----------------------------------------------------------------------------------------
def read_plot_info():
# ----------------------------------------------------------------------------------------
def add_plot_vals(ytmpfo, vlists, varnames, obs_frac, iclust=None):
def getikey():
if args.n_replicates == 1 and treat_clusters_together:
ikey = None
def initfcn(): return [] # i swear it initially made more sense for this to be such a special case
elif args.n_replicates == 1: # but more than one event per proc
ikey = iclust
def initfcn(): return {i : [] for i in range(args.n_sim_events_per_proc)}
elif treat_clusters_together: # but more than one replicate/seed
ikey = vlists[varnames.index('seed')]
def initfcn(): return {i : [] for i in getsargval('seed')}
else: # both of 'em non-trivial
ikey = '%d-%d' % (vlists[varnames.index('seed')], iclust)
def initfcn(): return {('%d-%d' % (i, j)) : [] for i in getsargval('seed') for j in range(args.n_sim_events_per_proc)}
return ikey, initfcn
diff_vals = get_diff_vals(ytmpfo, iclust=iclust)
if len(diff_vals) == 0:
missing_vstrs['empty'].append((iclust, vstrs)) # empty may be from empty list in yaml file, or may be from none of them being above <min_ptile_to_plot>
return
diff_to_perfect = numpy.mean(diff_vals)
tau = get_vlval(vlists, varnames, xvar) # not necessarily tau anymore
ikey, initfcn = getikey()
pvkey = pvkeystr(vlists, varnames, obs_frac) # key identifying each line in the plot, each with a different color, (it's kind of ugly to get the label here but not use it til we plot, but oh well)
if pvkey not in plotvals:
plotvals[pvkey] = initfcn()
plotlist = plotvals[pvkey][ikey] if ikey is not None else plotvals[pvkey] # it would be nice if the no-replicate-families-together case wasn't treated so differently
plotlist.append((tau, diff_to_perfect)) # NOTE this appends to plotvals, the previous line is just to make sure we append to the right place
# ----------------------------------------------------------------------------------------
if debug:
print '%s | obs times N/gen carry cap fraction sampled' % get_varname_str()
missing_vstrs = {'missing' : [], 'empty' : []}
for vlists, vstrs in zip(val_lists, valstrs): # why is this called vstrs rather than vstr?
obs_frac, dbgstr = get_obs_frac(vlists, varnames)
if debug:
print '%s | %s' % (get_varval_str(vstrs), dbgstr)
yfname = get_tm_fname(varnames, vstrs, metric, ptilestr, cg=choice_grouping, tv=lbplotting.ungetptvar(ptilestr), use_relative_affy=use_relative_affy, extra_str=metric_extra_str)
try:
with open(yfname) as yfile:
yamlfo = json.load(yfile) # too slow with yaml
except IOError: # os.path.exists() is too slow with this many files
missing_vstrs['missing'].append((None, vstrs))
continue
# the perfect line is higher for lbi, but lower for lbr, hence the abs(). Occasional values can go past/better than perfect, so maybe it would make sense to reverse sign for lbi/lbr rather than taking abs(), but I think this is better
if treat_clusters_together:
add_plot_vals(yamlfo, vlists, varnames, obs_frac)
else:
iclusts_in_file = []
if 'percentiles' in yamlfo:
iclusts_in_file = sorted([int(k.split('-')[1]) for k in yamlfo['percentiles']['per-seq'] if 'iclust-' in k]) # if there's info for each cluster, it's in sub-dicts under 'iclust-N' (older files won't have it)
else:
iclusts_in_file = sorted([int(k.split('-')[1]) for k in yamlfo if 'iclust-' in k]) # if there's info for each cluster, it's in sub-dicts under 'iclust-N' (older files won't have it)
missing_iclusts = [i for i in range(args.n_sim_events_per_proc) if i not in iclusts_in_file]
if len(missing_iclusts) > 0:
print ' %s missing %d/%d iclusts (i = %s) from file %s' % (utils.color('red', 'error'), len(missing_iclusts), args.n_sim_events_per_proc, ' '.join(str(i) for i in missing_iclusts), yfname)
# assert iclusts_in_file == list(range(args.n_sim_events_per_proc)) # I'm not sure why I added this (presumably because I thought I might not see missing ones any more), but I'm seeing missing ones now (because clusters were smaller than min_selection_metric_cluster_size)
for iclust in iclusts_in_file:
add_plot_vals(yamlfo, vlists, varnames, obs_frac, iclust=iclust)
# print info about missing and empty results
n_printed, n_max_print = 0, 5
for mkey, vstrs_list in missing_vstrs.items(): # ok now it's iclust and vstrs list, but what tf am I going to name that
if len(vstrs_list) == 0:
continue
print ' %s: %d families' % (mkey, len(vstrs_list))
print ' %s iclust' % get_varname_str()
for iclust, vstrs in vstrs_list:
print ' %s %4s %s' % (get_varval_str(vstrs), iclust, get_tm_fname(varnames, vstrs, metric, ptilestr, cg=choice_grouping, tv=lbplotting.ungetptvar(ptilestr), use_relative_affy=use_relative_affy, extra_str=metric_extra_str))
n_printed += 1
if n_printed >= n_max_print:
print ' [...]'
print ' skipping %d more lines' % (len(vstrs_list) - n_max_print)
break
# average over the replicates/clusters
if (args.n_replicates > 1 or not treat_clusters_together) and len(plotvals) > 0:
if debug:
print ' averaging over %d replicates' % args.n_replicates,
if args.n_sim_events_per_proc is not None:
if treat_clusters_together:
print '(treating %d clusters per proc together)' % args.n_sim_events_per_proc,
else:
print 'times %d clusters per proc:' % args.n_sim_events_per_proc,
print ''
tmplen = str(max(len(pvkey) for pvkey in plotvals))
print (' %'+tmplen+'s N used N expected') % 'pvkey'
for pvkey, ofvals in plotvals.items():
mean_vals, err_vals = [], []
ofvals = {i : vals for i, vals in ofvals.items() if len(vals) > 0} # remove zero-length ones (which should [edit: maybe?] correspond to 'missing'). Note that this only removes one where *all* the vals are missing, whereas if they're partially missing they values they do have will get added as usual below
n_used = [] # just for dbg
tmpvaldict = collections.OrderedDict() # rearrange them into a dict keyed by the appropriate tau/xval
for ikey in ofvals: # <ikey> is an amalgamation of iseeds and icluster, e.g. '20-0'
for pairvals in ofvals[ikey]:
tau, tval = pairvals # reminder: tau is not in general (any more) tau, but is the variable values fulfilling the original purpose of tau (i think x values?) in the plot
tkey = tuple(tau) if isinstance(tau, list) else tau # if it's actually tau, it will be a single value, but if xvar is set to, say, n-sim-seqs-per-gen then it will be a list
if tkey not in tmpvaldict: # these will usually get added in order, except when there's missing ones in some ikeys
tmpvaldict[tkey] = []
tmpvaldict[tkey].append(tval)
tvd_keys = sorted(tmpvaldict) if xvar != 'parameter-variances' else tmpvaldict.keys() # for parameter-variances we want to to keep the original ordering from the command line
for tau in tvd_keys: # note that the <ltmp> for each <tau> are in general different if some replicates/clusters are missing or empty
ltmp = tmpvaldict[tau]
mean_vals.append((tau, numpy.mean(ltmp)))
err_vals.append((tau, numpy.std(ltmp, ddof=1) / math.sqrt(len(ltmp)))) # standard error on mean (for standard deviation, comment out denominator)
n_used.append(len(ltmp))
plotvals[pvkey] = mean_vals
errvals[pvkey] = err_vals
if debug:
n_expected = args.n_replicates
if not treat_clusters_together:
n_expected *= args.n_sim_events_per_proc
print (' %'+tmplen+'s %s %4d%s') % (pvkey, ('%4d' % n_used[0]) if len(set(n_used)) == 1 else utils.color('red', ' '.join(str(n) for n in set(n_used))), n_expected, '' if n_used[0] == n_expected else utils.color('red', ' <--'))
# ----------------------------------------------------------------------------------------
def plotcall(pvkey, xticks, diffs_to_perfect, yerrs, mtmp, ipv=None, imtmp=None, label=None, dummy_leg=False, alpha=0.5, estr=''):
    # Draw one line for metric <mtmp> on the enclosing axis <ax> (closure variable), with error
    # bars when <yerrs> is set. <ipv> (parameter-value index) selects a fixed per-line color;
    # <imtmp> instead signals metric-based coloring (combine-plots mode). <dummy_leg> additionally
    # draws an empty line so the metric name appears exactly once in the legend.
    markersize = 15  # 1 if len(xticks) > 1 else 15
    linestyle = linestyles.get(mtmp, '-')
    if args.plot_metrics.count(mtmp) > 1 and estr != '':  # same metric plotted under several extra strs: distinguish the extra-str versions by line style
        linestyle = 'dotted'
    linewidth = linewidths.get(mtmp, 3)
    color = None  # None lets matplotlib pick; overridden below when an index is supplied
    if ipv is not None:
        color = plotting.frozen_pltcolors[ipv % len(plotting.frozen_pltcolors)]
    elif imtmp is not None:  # used to us <imtmp> to directly get color, but now we want to get the same colors no matter the matplotlib version and order on the command line, so now it just indicates that we should add the metric str
        # color = plotting.frozen_pltcolors[imtmp % len(plotting.pltcolors)]
        color = metric_color(mtmp)
    if yerrs is not None:
        ax.errorbar(xticks, diffs_to_perfect, yerr=yerrs, label=legstr(label), color=color, alpha=alpha, linewidth=linewidth, markersize=markersize, marker='.', linestyle=linestyle)  #, title='position ' + str(position))
    else:
        ax.plot(xticks, diffs_to_perfect, label=legstr(label), color=color, alpha=alpha, linewidth=linewidth)
    if dummy_leg:  # zero-marker, zero-data line whose only purpose is its legend entry
        dlabel = mtmp
        if not args.dont_plot_extra_strs and estr != '':
            dlabel += ' %s' % estr
        ax.plot([], [], label=legstr(dlabel), alpha=alpha, linewidth=linewidth, linestyle=linestyle, color='grey' if ipv is not None else color, marker='.', markersize=0)
    # elif estr != '':
    #     fig.text(0.5, 0.7, estr, color='red', fontweight='bold')
# ----------------------------------------------------------------------------------------
def getplotname(mtmp):
    # Base file name (no extension) for this comparison plot, built from the closure
    # variables describing what's being plotted (per_x, xvar, choice_grouping, etc.).
    if per_x != 'per-seq':
        return '%s-ptiles-vs-%s' % (choice_grouping.replace('-vs', ''), xvar)
    metric_part = 'combined' if mtmp is None else mtmp
    return '%s%s-%s-ptiles-vs-%s-%s' % (affy_key_str, ptilestr, metric_part, xvar, choice_grouping)
# ----------------------------------------------------------------------------------------
def yfname(mtmp, estr):
    # Path of the yaml file holding this metric's plot values (written by the 'plot'
    # action, read back by 'combine-plots').
    comparison_dir = get_comparison_plotdir(mtmp, per_x, extra_str=estr)
    return '%s/%s.yaml' % (comparison_dir, getplotname(mtmp))
# ----------------------------------------------------------------------------------------
def getxticks(xvals):
    # Convert the raw x values for one plotted line into (xticks, xticklabels, xlabel).
    # Three cases: 'parameter-variances' strings are parsed specially; tuple/list x values
    # get arbitrary integer tick positions with descriptive labels; plain scalars are used directly.
    xlabel = treeutils.legtexts.get(xvar, xvar.replace('-', ' '))
    if xvar == 'parameter-variances':  # special case cause we don't parse this into lists and whatnot here
        xticks, xticklabels = [], []
        global_pv_vars = None  # parameter names from the first x value, to verify all runs vary the same parameters
        for ipv, pv_cft_str in enumerate(xvals):  # <pv_cft_str> corresponds to one bcr-phylo run, but can contain more than one parameter variance specification
            xticks.append(ipv)
            pv_vars, xtl_strs = [], []
            for pvar_str in pv_cft_str.split('_c_'):  # '_c_' separates per-parameter specs within one run
                assert '..' in pvar_str  # don't handle the uniform-distribution-with-variance method a.t.m.
                pvar, pvals = pvar_str.split(',')
                def fmt(v, single=False):  # format one parameter value for the tick label (precision depends on which parameter)
                    if pvar == 'selection-strength':
                        if v == 1.: fstr = '%.0f'
                        else: fstr = '%.2f' if single else '%.1f'
                    else:
                        fstr = '%d'
                    return fstr % v
                pv_vars.append(pvar)
                pvlist = [float(pv) for pv in pvals.split('..')]
                # range label 'min-max' when the values actually vary, otherwise just the single value
                xtlstr = '%s-%s'%(fmt(min(pvlist)), fmt(max(pvlist))) if min(pvlist) != max(pvlist) else fmt(pvlist[0], single=True)
                xtl_strs.append(xtlstr)
            xticklabels.append('\n'.join(xtl_strs))
            if global_pv_vars is None:
                global_pv_vars = pv_vars
            if pv_vars != global_pv_vars:
                raise Exception('each bcr-phylo run has to have the same variables with parameter variances, but got %s and %s' % (global_pv_vars, pv_vars))
        xlabel = ', '.join(treeutils.legtexts.get(p, p.replace('-', ' ')) for p in global_pv_vars)
    elif isinstance(xvals[0], tuple) or isinstance(xvals[0], list):  # if it's a tuple/list (not sure why it's sometimes one vs other times the other), use (more or less arbitrary) integer x axis values
        def tickstr(t):  # short label for one tuple/list x value: list all entries when short, else first/last/count
            if len(t) < 4:
                return ', '.join(str(v) for v in t)
            else:
                return '%s -\n %s\n(%d)' % (t[0], t[-1], len(t))  #, t[1] - t[0])
        xticks = list(range(len(xvals)))
        xticklabels = [tickstr(t) for t in xvals]
    else:  # plain scalar x values: use them as-is
        xticks = xvals
        xticklabels = [str(t) for t in xvals]
    return xticks, xticklabels, xlabel
# ----------------------------------------------------------------------------------------
_, varnames, val_lists, valstrs = get_var_info(args, args.scan_vars['get-tree-metrics'])
plotvals, errvals = collections.OrderedDict(), collections.OrderedDict()
fig, ax = plotting.mpl_init()
xticks, xlabel = None, None
if action == 'plot':
read_plot_info()
outfo = []
if len(plotvals) == 0:
print ' %s no plotvals for %s %s %s' % (utils.color('yellow', 'warning'), metric, per_x, choice_grouping)
return
for ipv, pvkey in enumerate(plotvals):
xvals, diffs_to_perfect = zip(*plotvals[pvkey])
xticks, xticklabels, xlabel = getxticks(xvals)
# assert xvals == tuple(sorted(xvals)) # this definitely can happen, but maybe not atm? and maybe not a big deal if it does. So maybe should remove this
yerrs = zip(*errvals[pvkey])[1] if pvkey in errvals else None # each is pairs tau, err
plotcall(pvkey, xticks, diffs_to_perfect, yerrs, metric, ipv=ipv, label=pvkey, estr=metric_extra_str)
outfo.append((pvkey, {'xvals' : xvals, 'yvals' : diffs_to_perfect, 'yerrs' : yerrs}))
with open(yfname(metric, metric_extra_str), 'w') as yfile: # write json file to be read by 'combine-plots'
json.dump(outfo, yfile)
title = lbplotting.mtitlestr(per_x, metric, short=True, max_len=7) + ': '
plotdir = get_comparison_plotdir(metric, per_x, extra_str=metric_extra_str)
ylabelstr = metric.upper()
elif action == 'combine-plots':
pvks_from_args = set([pvkeystr(vlists, varnames, get_obs_frac(vlists, varnames)[0]) for vlists in val_lists]) # have to call this fcn at least once just to set pvlabel (see above) [but now we're also using the results below UPDATE nvmd didn't end up doing it that way, but I'm leaving the return value there in case I want it later]
plotfos = collections.OrderedDict()
for mtmp, estr in zip(args.plot_metrics, args.plot_metric_extra_strs):
if ptilestr not in [v for v, l in lbplotting.single_lbma_cfg_vars(mtmp, final_plots=True)]: # i.e. if the <ptilestr> (variable name) isn't in any of the (variable name, label) pairs (e.g. n-ancestor for lbi; we need this here because of the set() in the calling block)
continue
if not os.path.exists(yfname(mtmp, estr)):
print ' %s missing %s' % (utils.color('yellow', 'warning'), yfname(mtmp, estr))
continue
with open(yfname(mtmp, estr)) as yfile:
mkey = mtmp
if estr != '':
mkey = '%s%s%s' % (mtmp, xdelim, estr) # this is ugly, but we need to be able to split it apart in the loop just below here
plotfos[mkey] = collections.OrderedDict(json.load(yfile))
if len(plotfos[mkey]) == 0:
raise Exception('read zero length info from %s' % yfname(mtmp, estr)) # if this happens when we're writing the file (above), we can skip it, but I think we have to crash here (just rerun without this metric/extra_str). It probably means you were missing the dtr files for this per_x/cgroup
if len(plotfos) == 0:
print ' nothing to plot'
return
pvks_from_file = set([tuple(pfo.keys()) for pfo in plotfos.values()]) # list of lists of pv keys (to make sure the ones from each metric's file are the same)
if len(pvks_from_file) > 1: # eh, they can be different now if I ran different metrics with different argument lists
print ' %s different lists of pv keys for different metrics: %s' % (utils.color('yellow', 'warning'), pvks_from_file)
pvk_list = sorted(pvks_from_file, key=len)[0] # use the shortest one
else:
pvk_list = list(pvks_from_file)[0]
if args.pvks_to_plot is not None:
# pvk_list = [p for p in list(pvks_from_file)[0] if p in pvks_from_args] # don't do it this way since if you only ask it to plot one value it'll get the wrong file path (since it'll no longer make a subdir level for that variable)
ptmp = [p for p in pvk_list if p in args.pvks_to_plot]
if len(ptmp) == 0:
raise Exception('requirement in --pvks-to-plot \'%s\' didn\'t give us any from the list %s' % (args.pvks_to_plot, pvk_list))
pvk_list = ptmp
for ipv, pvkey in enumerate(pvk_list):
for imtmp, (mkey, pfo) in enumerate(plotfos.items()):
mtmp, estr = (mkey, '') if xdelim not in mkey else mkey.split(xdelim)
xticks, xticklabels, xlabel = getxticks(pfo[pvkey]['xvals'])
plotcall(pvkey, xticks, pfo[pvkey]['yvals'], pfo[pvkey]['yerrs'], mtmp, label=pvkey if (imtmp == 0 and len(pvk_list) > 1) else None, ipv=ipv if len(pvk_list) > 1 else None, imtmp=imtmp, dummy_leg=ipv==0, estr=estr)
# if ''.join(args.plot_metric_extra_strs) == '': # no extra strs
# title = '+'.join(plotfos) + ': '
# else:
# title = '+'.join(set(args.plot_metrics)) + ': '
title = ''
plotdir = get_comparison_plotdir('combined', per_x)
ylabelstr = 'metric'
else:
assert False
ymin, ymax = ax.get_ylim()
# if ptilestr == 'affinity':
# ymin, ymax = 0, max(ymax, 25)
# elif ptilestr == 'n-ancestors':
# ymin, ymax = 0, max(ymax, 1.5)
log, adjust = '', {}
if xvar == 'lb-tau' and len(xticks) > 1:
ax.plot([1./args.seq_len, 1./args.seq_len], (ymin, ymax), linewidth=3, alpha=0.7, color='darkred', linestyle='--') #, label='1/seq len')
if xvar == 'carry-cap':
log = 'x'
if ax.get_ylim()[1] < 1:
adjust['left'] = 0.21
if ax.get_ylim()[1] < 0.01:
adjust['left'] = 0.26
adjust['bottom'] = 0.25
adjust['top'] = 0.9
if xticklabels is not None and '\n' in xticklabels[0]:
adjust['bottom'] = 0.3
import matplotlib.pyplot as plt
plt.xlabel('xlabel', fontsize=14)
n_ticks = 4
dy = (ymax - ymin) / float(n_ticks - 1)
yticks, yticklabels = None, None
# if ptilestr != 'affinity':
# yticks = [int(y) if ptilestr == 'affinity' else utils.round_to_n_digits(y, 3) for y in numpy.arange(ymin, ymax + 0.5*dy, dy)]
# yticklabels = ['%s'%y for y in yticks]
if per_x == 'per-seq':
title += 'choosing %s' % (choice_grouping.replace('within-families', 'within each family').replace('among-', 'among all '))
if use_relative_affy:
fig.text(0.5, 0.87, 'relative %s' % ptilestr, fontsize=15, color='red', fontweight='bold')
leg_loc = [0.04, 0.6]
# if metric != 'lbi' and len(title) < 17:
# leg_loc[0] = 0.7
plotting.mpl_finish(ax, plotdir, getplotname(metric),
xlabel=xlabel,
# ylabel='%s to perfect\nfor %s ptiles in [%.0f, 100]' % ('percentile' if ptilelabel == 'affinity' else ptilelabel, ylabelstr, min_ptile_to_plot),
ylabel='%s to perfect' % ('percentile' if ptilelabel == 'affinity' else ptilelabel),
title=title, leg_title=legstr(pvlabel[0], title=True), leg_prop={'size' : 12}, leg_loc=leg_loc,
xticks=xticks, xticklabels=xticklabels, xticklabelsize=12 if xticklabels is not None and '\n' in xticklabels[0] else 16,
yticks=yticks, yticklabels=yticklabels,
ybounds=(ymin, ymax), log=log, adjust=adjust,
)
# ----------------------------------------------------------------------------------------
def run_bcr_phylo(args):  # also caches parameters
    # Launch one bin/bcr-phylo-run.py job for each combination of the scanned simulation
    # variables (see args.scan_vars['simu']), skipping any combo whose partition output
    # already exists. Jobs are collected as cmd-spec dicts and run via utils.run_cmds().
    base_args, varnames, _, valstrs = get_var_info(args, args.scan_vars['simu'])
    cmdfos = []  # one cmd-spec dict per job
    print ' bcr-phylo: running %d combinations of: %s' % (len(valstrs), ' '.join(varnames))
    if args.debug:
        print ' %s' % ' '.join(varnames)
    n_already_there = 0  # count of combos skipped because output exists
    for icombo, vstrs in enumerate(valstrs):
        if args.debug:
            print ' %s' % ' '.join(vstrs)
        outdir = get_bcr_phylo_outdir(varnames, vstrs)
        if utils.output_exists(args, get_partition_fname(varnames, vstrs, 'bcr-phylo'), offset=8, debug=args.debug):
            n_already_there += 1
            continue
        cmd = './bin/bcr-phylo-run.py --actions %s --dont-get-tree-metrics --base-outdir %s %s' % (args.bcr_phylo_actions, outdir, ' '.join(base_args))
        for vname, vstr in zip(varnames, vstrs):  # add one --<var> <val> pair per scanned variable
            vstr_for_cmd = vstr
            if vname == 'parameter-variances':
                vstr_for_cmd = vstr_for_cmd.replace('_c_', ':')  # necessary so we can have multiple different parameters with variances for each bcr-phylo-run.py cmd
            cmd += ' --%s %s' % (vname, vstr_for_cmd)
            if 'context' in vname:
                cmd += ' --restrict-available-genes'
        if args.no_scan_parameter_variances is not None:
            cmd += ' --parameter-variances %s' % args.no_scan_parameter_variances  # we don't parse through this at all here, which means it's the same for all combos of variables (which I think makes sense -- we probably don't even really want to vary most variables if this is set)
        if args.n_sim_events_per_proc is not None:
            cmd += ' --n-sim-events %d' % args.n_sim_events_per_proc
        if args.n_max_queries is not None:
            cmd += ' --n-max-queries %d' % args.n_max_queries
        if args.dont_observe_common_ancestors:
            cmd += ' --dont-observe-common-ancestors'
        if args.overwrite:
            cmd += ' --overwrite'
        if args.only_csv_plots:
            cmd += ' --only-csv-plots'
        if args.n_sub_procs > 1:
            cmd += ' --n-procs %d' % args.n_sub_procs
        if args.sub_slurm:
            cmd += ' --slurm'
        # cmd += ' --debug 2'
        cmdfos += [{
            'cmd_str' : cmd,
            'outfname' : get_partition_fname(varnames, vstrs, 'bcr-phylo'),
            'logdir' : outdir,
            'workdir' : '%s/bcr-phylo-work/%d' % (args.workdir, icombo),
        }]
    if n_already_there > 0:
        print ' %d / %d skipped (outputs exist, e.g. %s)' % (n_already_there, len(valstrs), get_partition_fname(varnames, vstrs, 'bcr-phylo'))
    if len(cmdfos) > 0:
        if args.dry:  # dry run: just show the commands that would be run
            print ' %s' % '\n '.join(cfo['cmd_str'] for cfo in cmdfos)
        else:
            print ' starting %d jobs' % len(cmdfos)
            utils.run_cmds(cmdfos, debug='write:bcr-phylo.log', batch_system='slurm' if args.slurm else None, n_max_procs=args.n_max_procs, allow_failure=True)
# ----------------------------------------------------------------------------------------
def get_tree_metrics(args):
    # Run selection-metric calculation for every combination of the scanned variables:
    # either partis get-tree-metrics (lb metrics, when --metric-method isn't set) or
    # bin/dtr-run.py (shm, dtr, etc.), skipping combos whose outputs all exist already.
    _, varnames, _, valstrs = get_var_info(args, args.scan_vars['get-tree-metrics'])  # can't use base_args a.t.m. since it has the simulation/bcr-phylo args in it
    cmdfos = []  # one cmd-spec dict per job, passed to utils.run_cmds()
    print ' get-tree-metrics (%s): running %d combinations of: %s' % (args.metric_method, len(valstrs), ' '.join(varnames))
    n_already_there = 0  # count of combos skipped because all outputs exist
    for icombo, vstrs in enumerate(valstrs):
        if args.debug:
            print ' %s' % ' '.join(vstrs)
        if utils.all_outputs_exist(args, get_all_tm_fnames(varnames, vstrs, metric_method=args.metric_method, extra_str=args.extra_plotstr), outlabel='get-tree-metrics', offset=8, debug=args.debug):
            n_already_there += 1
            continue
        if not args.dry:
            tmpoutdir = get_tree_metric_outdir(varnames, vstrs, metric_method=args.metric_method)
            if not os.path.isdir(tmpoutdir):
                os.makedirs(tmpoutdir)
        # it would probably be better to use dtr-run.py for everything, but then i'd be nervous i wasn't testing the partitiondriver version of the code enough
        if args.metric_method is None:  # lb metrics, i.e. actually running partis and getting tree metrics
            cmd = './bin/partis get-tree-metrics --is-simu --infname %s --plotdir %s --outfname %s --selection-metric-fname %s' % (get_simfname(varnames, vstrs), get_tree_metric_plotdir(varnames, vstrs, extra_str=args.extra_plotstr),
                                                                                                                                  get_partition_fname(varnames, vstrs, 'bcr-phylo'), utils.insert_before_suffix('-selection-metrics', get_partition_fname(varnames, vstrs, 'get-tree-metrics')))  # we don't actually use the --selection-metric-fname for anything, but if we don't set it then all the different get-tree-metric jobs write their output files to the same selection metric file in the bcr-phylo dir
            cmd += ' --seed %s' % args.random_seed  # NOTE second/commented version this is actually wrong: vstrs[varnames.index('seed')] # there isn't actually a reason for different seeds here (we want the different seeds when running bcr-phylo), but oh well, maybe it's a little clearer this way
            if args.no_tree_plots:
                cmd += ' --ete-path None'
            # if args.n_sub_procs > 1:  # TODO get-tree-metrics doesn't paralellize anything atm
            #     cmd += ' --n-procs %d' % args.n_sub_procs
        else:  # non-lb metrics, i.e. trying to predict with shm, etc.
            cmd = './bin/dtr-run.py --metric-method %s --infname %s --base-plotdir %s' % (args.metric_method,
                                                                                          get_simfname(varnames, vstrs),
                                                                                          get_tree_metric_plotdir(varnames, vstrs, metric_method=args.metric_method, extra_str=args.extra_plotstr))
            if args.metric_method == 'dtr':
                if args.train_dtr and args.overwrite:  # make sure no training files exist, since we don\'t want treeutils.train_dtr_model() to overwrite existing ones (since training can be really slow)
                    assert set([os.path.exists(f) for f in get_all_tm_fnames(varnames, vstrs, metric_method=args.metric_method, extra_str=args.extra_plotstr)]) == set([False])
                cmd += ' --action %s' % ('train' if args.train_dtr else 'test')
                cmd += ' --dtr-path %s' % (args.dtr_path if args.dtr_path is not None else get_dtr_model_dir(varnames, vstrs, extra_str=args.extra_plotstr))  # if --dtr-path is set, we're reading the model from there; otherwise we write a new model to the normal/auto location for these parameters (i.e. the point of --dtr-path is to point at the location from a different set of parameters)
                if args.dtr_cfg is not None:
                    cmd += ' --dtr-cfg %s' % args.dtr_cfg
        # NOTE(review): indentation reconstructed -- the following flags look like they apply to
        # both the partis and dtr-run.py branches; confirm against the original file's layout.
        cmd += ' --lb-tau %s' % get_vlval(vstrs, varnames, 'lb-tau')
        if len(args.lb_tau_list) > 1:
            cmd += ' --lbr-tau-factor 1 --dont-normalize-lbi'
        if args.only_csv_plots:
            cmd += ' --only-csv-plots'
        if args.n_max_queries is not None:
            cmd += ' --n-max-queries %d' % args.n_max_queries
        cmd += ' --min-selection-metric-cluster-size 5'  # if n per gen is small, sometimes the clusters are a bit smaller than 10, but we don't really want to skip any clusters here (especially because it confuses the plotting above)
        if args.include_relative_affy_plots:
            cmd += ' --include-relative-affy-plots'
        cmdfos += [{
            'cmd_str' : cmd,
            'outfname' : get_all_tm_fnames(varnames, vstrs, metric_method=args.metric_method, extra_str=args.extra_plotstr)[0],
            'workdir' : get_tree_metric_plotdir(varnames, vstrs, metric_method=args.metric_method, extra_str=args.extra_plotstr),
        }]
    if n_already_there > 0:
        print ' %d / %d skipped (outputs exist, e.g. %s)' % (n_already_there, len(valstrs), get_all_tm_fnames(varnames, vstrs, metric_method=args.metric_method, extra_str=args.extra_plotstr)[0])
    if len(cmdfos) > 0:
        print ' %s %d jobs' % ('--dry: would start' if args.dry else 'starting', len(cmdfos))
        if args.dry:
            print ' first command: %s' % cmdfos[0]['cmd_str']
        else:
            logstr = 'get-tree-metrics'  # log file name, distinguishing dtr train vs test runs
            if args.metric_method == 'dtr':
                logstr += '-train' if args.train_dtr else '-test'
            utils.run_cmds(cmdfos, debug='write:%s.log'%logstr, batch_system='slurm' if args.slurm else None, n_max_procs=args.n_max_procs, allow_failure=True)
# ----------------------------------------------------------------------------------------
# top-level script body: build the argument parser, post-process the parsed args, then
# dispatch each requested action in order
all_actions = ['get-lb-bounds', 'bcr-phylo', 'get-tree-metrics', 'plot', 'combine-plots']
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--actions', default=':'.join(a for a in all_actions if a not in ['get-lb-bounds', 'combine-plots']))
parser.add_argument('--bcr-phylo-actions', default='simu:cache-parameters:partition')
parser.add_argument('--test', action='store_true')
# scanned simulation variables: one colon-separated --<var>-list argument per variable
parser.add_argument('--carry-cap-list', default='1000')
parser.add_argument('--n-sim-seqs-per-gen-list', default='30:50:75:100:150:200', help='colon-separated list of comma-separated lists of the number of sequences for bcr-phylo to sample at the times specified by --obs-times-list')
parser.add_argument('--n-sim-events-per-proc', type=int, help='number of rearrangement events to simulate in each process (default is set in bin/bcr-phylo-run.py)')
parser.add_argument('--obs-times-list', default='125,150', help='colon-separated list of comma-separated lists of bcr-phylo observation times')
parser.add_argument('--lb-tau-list', default='0.0005:0.001:0.002:0.003:0.004:0.008:0.012')
parser.add_argument('--metric-for-target-distance-list', default='aa')  # it would be nice to not set defaults here, since it clutters up the bcr-phylo simulator.py calls, but this insulates us against the defaults in bcr-phylo simulator.py changing at some point
parser.add_argument('--leaf-sampling-scheme-list', default='uniform-random')
parser.add_argument('--target-count-list', default='1')
parser.add_argument('--n-target-clusters-list')  # NOTE do *not* set a default here, since in bcr-phylo simulator.py the default is None
parser.add_argument('--min-target-distance-list')
parser.add_argument('--context-depend-list')
parser.add_argument('--paratope-positions-list')
# metric/plotting configuration
parser.add_argument('--metric-method', choices=['shm', 'fay-wu-h', 'cons-dist-nuc', 'cons-dist-aa', 'delta-lbi', 'aa-lbi', 'aa-lbr', 'dtr', 'cons-lbi'], help='method/metric to compare to/correlate with affinity (for use with get-tree-metrics action). If not set, run partis to get lb metrics.')
parser.add_argument('--plot-metrics', default='lbi:lbr')  # don't add dtr until it can really run with default options (i.e. model files are really settled)
parser.add_argument('--plot-metric-extra-strs', help='extra strs for each metric in --plot-metrics (i.e. corresponding to what --extra-plotstr was set to during get-tree-metrics for that metric)')
parser.add_argument('--dont-plot-extra-strs', action='store_true', help='while we still use the strings in --plot-metric-extra-strs to find the right dir to get the plot info from, we don\'t actually put the str in the plot (i.e. final plot versions where we don\'t want to see which dtr version it is)')
parser.add_argument('--combo-extra-str', help='extra label for combine-plots action i.e. write to combined-%s/ subdir instead of combined/')
parser.add_argument('--pvks-to-plot', help='only plot these line/legend values when combining plots')
parser.add_argument('--train-dtr', action='store_true')
parser.add_argument('--dtr-path', help='Path from which to read decision tree regression training data. If not set (and --metric-method is dtr), we use a default (see --train-dtr).')
parser.add_argument('--dtr-cfg', help='yaml file with dtr training parameters (read by treeutils.calculate_non_lb_tree_metrics()). If not set, default parameters are taken from treeutils.py')
parser.add_argument('--selection-strength-list', default='1.0')
parser.add_argument('--no-scan-parameter-variances', help='Configures parameter variance among families (see bcr-phylo-run.py help for details). Use this version if you only want *one* combination, i.e. if you\'re not scanning across variable combinations: all the different variances go into one bcr-phylo-run.py run (this could be subsumed into the next arg, but for backwards compatibility/cmd line readability it\'s nice to keep it).')
parser.add_argument('--parameter-variances-list', help='Configures parameter variance among families (see bcr-phylo-run.py help for details). Use this version for scanning several combinations. Colons \':\' separate different bcr-phylo-run.py runs, while \'_c_\' separate parameter-variances for multiple variables within each bcr-phylo-run.py run.')
parser.add_argument('--dont-observe-common-ancestors', action='store_true')
parser.add_argument('--zip-vars', help='colon-separated list of variables for which to pair up values sequentially, rather than doing all combinations')
parser.add_argument('--seq-len', default=400, type=int)
parser.add_argument('--n-replicates', default=1, type=int)
parser.add_argument('--iseeds', help='if set, only run these replicate indices (i.e. these corresponds to the increment *above* the random seed)')
# process control / environment
parser.add_argument('--n-max-procs', type=int, help='Max number of *child* procs (see --n-sub-procs). Default (None) results in no limit.')
parser.add_argument('--n-sub-procs', type=int, default=1, help='Max number of *grandchild* procs (see --n-max-procs)')
parser.add_argument('--n-max-queries', type=int, help='stop after reading this many queries from whatever is input file for this step (NOTE doesn\'t necessarily work for every action)')
parser.add_argument('--random-seed', default=0, type=int, help='note that if --n-replicates is greater than 1, this is only the random seed of the first replicate')
parser.add_argument('--base-outdir', default='%s/partis/tree-metrics' % os.getenv('fs', default=os.getenv('HOME')))
parser.add_argument('--label', default='test')
parser.add_argument('--extra-plotstr', default='', help='if set, put plots resulting from \'get-tree-metrics\' into a separate subdir using this string, rather than just plots/ (e.g. for plotting with many different dtr versions)')
parser.add_argument('--include-relative-affy-plots', action='store_true')
parser.add_argument('--only-csv-plots', action='store_true', help='only write csv/yaml versions of plots (for future parsing), and not the actual svg files (which is slow)')
parser.add_argument('--no-tree-plots', action='store_true', help='don\'t make any of the tree plots, which are slow (this just sets --ete-path to None)')
parser.add_argument('--overwrite', action='store_true')  # not really propagated to everything I think
parser.add_argument('--debug', action='store_true')
parser.add_argument('--dry', action='store_true')
parser.add_argument('--slurm', action='store_true', help='run child procs with slurm')
parser.add_argument('--sub-slurm', action='store_true', help='run grandchild procs with slurm')
parser.add_argument('--workdir')  # default set below
parser.add_argument('--final-plot-xvar', default='lb-tau')
parser.add_argument('--legend-var')
parser.add_argument('--partis-dir', default=os.getcwd(), help='path to main partis install dir')
parser.add_argument('--ete-path', default=('/home/%s/anaconda_ete/bin' % os.getenv('USER')) if os.getenv('USER') is not None else None)
# specific to get-lb-bounds:
parser.add_argument('--n-tau-lengths-list', help='set either this or --n-generations-list')
parser.add_argument('--n-generations-list', default='4:5:6:7:8:9:10:12', help='set either this or --n-tau-lengths-list')  # going to 20 uses a ton of memory, not really worth waiting for
parser.add_argument('--max-lb-n-offspring', default=2, type=int, help='multifurcation number for max lb calculation')
parser.add_argument('--only-metrics', default='lbi:lbr', help='which (of lbi, lbr) metrics to do lb bound calculation')
parser.add_argument('--make-plots', action='store_true', help='note: only for get-lb-bounds')
args = parser.parse_args()
# which variables are scanned for each stage: simulation vars, plus lb-tau for tree metrics
args.scan_vars = {'simu' : ['carry-cap', 'n-sim-seqs-per-gen', 'obs-times', 'seed', 'metric-for-target-distance', 'selection-strength', 'leaf-sampling-scheme', 'target-count', 'n-target-clusters', 'min-target-distance', 'context-depend', 'paratope-positions', 'parameter-variances'],}
args.scan_vars['get-tree-metrics'] = args.scan_vars['simu'] + ['lb-tau']
# import the partis python modules from --partis-dir (must happen after parsing, since the dir is an arg)
sys.path.insert(1, args.partis_dir + '/python')
try:
    import utils
    import treeutils
    import plotting
    import lbplotting
except ImportError as e:
    print e
    raise Exception('couldn\'t import from main partis dir \'%s\' (set with --partis-dir)' % args.partis_dir)
# split the colon-separated list arguments into real lists (duplicates are only allowed for zipped vars,
# since zipping pairs values positionally; duplicates are re-checked after combos in get_var_info())
args.actions = utils.get_arg_list(args.actions, choices=all_actions)
args.zip_vars = utils.get_arg_list(args.zip_vars)
args.carry_cap_list = utils.get_arg_list(args.carry_cap_list, intify=True, forbid_duplicates=args.zip_vars is None or 'carry-cap' not in args.zip_vars)  # if we're zipping the var, we have to allow duplicates, but then check for them again after we've done combos in get_var_info()
args.n_sim_seqs_per_gen_list = utils.get_arg_list(args.n_sim_seqs_per_gen_list, list_of_lists=True, intify=True, forbid_duplicates=args.zip_vars is None or 'n-sim-seqs-per-gen' not in args.zip_vars)
args.obs_times_list = utils.get_arg_list(args.obs_times_list, list_of_lists=True, intify=True, forbid_duplicates=args.zip_vars is None or 'obs-times' not in args.zip_vars)
args.lb_tau_list = utils.get_arg_list(args.lb_tau_list, floatify=True, forbid_duplicates=True)
args.metric_for_target_distance_list = utils.get_arg_list(args.metric_for_target_distance_list, forbid_duplicates=True, choices=['aa', 'nuc', 'aa-sim-ascii', 'aa-sim-blosum'])
args.leaf_sampling_scheme_list = utils.get_arg_list(args.leaf_sampling_scheme_list, forbid_duplicates=True, choices=['uniform-random', 'affinity-biased', 'high-affinity'])  # WARNING 'high-affinity' gets called 'perfect' in the legends and 'affinity-biased' gets called 'high affinity'
args.target_count_list = utils.get_arg_list(args.target_count_list, forbid_duplicates=True)
args.n_target_clusters_list = utils.get_arg_list(args.n_target_clusters_list, forbid_duplicates=True)
args.min_target_distance_list = utils.get_arg_list(args.min_target_distance_list, forbid_duplicates=True)
args.context_depend_list = utils.get_arg_list(args.context_depend_list, forbid_duplicates=True)
args.paratope_positions_list = utils.get_arg_list(args.paratope_positions_list, forbid_duplicates=True, choices=['all', 'cdrs'])
args.parameter_variances_list = utils.get_arg_list(args.parameter_variances_list, forbid_duplicates=True)
args.plot_metrics = utils.get_arg_list(args.plot_metrics)
args.plot_metric_extra_strs = utils.get_arg_list(args.plot_metric_extra_strs)
if args.plot_metric_extra_strs is None:  # default: one empty extra str per metric
    args.plot_metric_extra_strs = ['' for _ in args.plot_metrics]
if len(args.plot_metrics) != len(args.plot_metric_extra_strs):
    raise Exception('--plot-metrics %d not same length as --plot-metric-extra-strs %d' % (len(args.plot_metrics), len(args.plot_metric_extra_strs)))
args.pvks_to_plot = utils.get_arg_list(args.pvks_to_plot)
args.selection_strength_list = utils.get_arg_list(args.selection_strength_list, floatify=True, forbid_duplicates=True)
args.n_tau_lengths_list = utils.get_arg_list(args.n_tau_lengths_list, floatify=True)
args.n_generations_list = utils.get_arg_list(args.n_generations_list, intify=True)
args.only_metrics = utils.get_arg_list(args.only_metrics)
args.iseeds = utils.get_arg_list(args.iseeds, intify=True)
if [args.n_tau_lengths_list, args.n_generations_list].count(None) != 1:
    raise Exception('have to set exactly one of --n-tau-lengths, --n-generations')
import random
random.seed(args.random_seed)  # somehow this is necessary to get the same results, even though I'm not using the module anywhere directly
numpy.random.seed(args.random_seed)
if args.workdir is None:
    args.workdir = utils.choose_random_subdir('/tmp/%s/hmms' % (os.getenv('USER', default='partis-work')))
# ----------------------------------------------------------------------------------------
# dispatch each requested action in order
for action in args.actions:
    if action == 'get-lb-bounds':
        calc_lb_bounds(args)
    elif action == 'bcr-phylo':
        run_bcr_phylo(args)
    elif action == 'get-tree-metrics':
        get_tree_metrics(args)
    elif action in ['plot', 'combine-plots'] and not args.dry:
        assert args.extra_plotstr == ''  # only use --extra-plotstr for get-tree-metrics, for this use --plot-metric-extra-strs (because we in general have multiple --plot-metrics when we're here)
        assert args.metric_method is None  # when plotting, you should only be using --plot-metrics
        _, varnames, val_lists, valstrs = get_var_info(args, args.scan_vars['get-tree-metrics'])
        print 'plotting %d combinations of %d variable%s (%s) with %d families per combination to %s' % (len(valstrs), len(varnames), utils.plural(len(varnames)), ', '.join(varnames), 1 if args.n_sim_events_per_proc is None else args.n_sim_events_per_proc, get_comparison_plotdir(None, None))
        procs = []  # make_plots() sub-processes (run in parallel unless --test)
        pchoice = 'per-seq'
        if action == 'plot':
            for metric, estr in zip(args.plot_metrics, args.plot_metric_extra_strs):
                utils.prep_dir(get_comparison_plotdir(metric, None, extra_str=estr), subdirs=[pchoice], wildlings=['*.html', '*.svg', '*.yaml'])
                cfg_list = lbplotting.single_lbma_cfg_vars(metric)
                cfg_list = lbplotting.add_use_relative_affy_stuff(cfg_list, include_relative_affy_plots=args.include_relative_affy_plots)
                for ptvar, ptlabel, use_relative_affy in cfg_list:
                    print ' %s %-s %-13s%-s' % (utils.color('blue', metric), utils.color('purple', estr, width=20, padside='right') if estr != '' else 20*' ', ptvar, utils.color('green', '(relative)') if use_relative_affy else '')
                    for cgroup in treeutils.cgroups:
                        print ' %-12s %15s %s' % (pchoice, cgroup, ptvar)
                        arglist, kwargs = (args, action, metric, pchoice, cgroup, ptvar, ptlabel, args.final_plot_xvar), {'use_relative_affy' : use_relative_affy, 'metric_extra_str' : estr}
                        if args.test:  # run serially in this process (easier debugging)
                            make_plots(*arglist, **kwargs)
                        else:
                            procs.append(multiprocessing.Process(target=make_plots, args=arglist, kwargs=kwargs))
            if not args.test:
                utils.run_proc_functions(procs)
            for metric, estr in zip(args.plot_metrics, args.plot_metric_extra_strs):
                plotting.make_html(get_comparison_plotdir(metric, pchoice, extra_str=estr), n_columns=2)
        elif action == 'combine-plots':
            utils.prep_dir(get_comparison_plotdir('combined', None), subdirs=[pchoice], wildlings=['*.html', '*.svg'])
            cfg_list = set([ppair for mtmp in args.plot_metrics for ppair in lbplotting.single_lbma_cfg_vars(mtmp)])  # I don't think we care about the order
            cfg_list = lbplotting.add_use_relative_affy_stuff(cfg_list, include_relative_affy_plots=args.include_relative_affy_plots)
            for ptvar, ptlabel, use_relative_affy in cfg_list:
                print ptvar
                for cgroup in treeutils.cgroups:
                    print ' ', cgroup
                    make_plots(args, action, None, pchoice, cgroup, ptvar, ptlabel, args.final_plot_xvar, use_relative_affy=use_relative_affy)
            plotting.make_html(get_comparison_plotdir('combined', pchoice), n_columns=2)
        else:
            assert False
| gpl-3.0 |
flightgong/scikit-learn | sklearn/linear_model/tests/test_base.py | 8 | 10058 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data
from sklearn.utils import check_random_state
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
    """
    Test LinearRegression on a simple dataset.
    """
    # (X, y, expected coef_, expected intercept_, expected predictions)
    cases = [
        # a simple, perfectly linear dataset
        ([[1], [2]], [1, 2], [1], [0], [1, 2]),
        # degenerate input: a single sample
        ([[1]], [0], [0], [0], [0]),
    ]
    for X, y, exp_coef, exp_intercept, exp_pred in cases:
        model = LinearRegression()
        model.fit(X, y)
        assert_array_almost_equal(model.coef_, exp_coef)
        assert_array_almost_equal(model.intercept_, exp_intercept)
        assert_array_almost_equal(model.predict(X), exp_pred)
def test_fit_intercept():
    """
    Test assertions on betas shape.
    """
    # Two fixed design matrices: 2 samples x 2 features and 2 samples x 3
    # features, with a constant target.
    X2 = np.array([[0.38349978, 0.61650022],
                   [0.58853682, 0.41146318]])
    X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
                   [0.08385139, 0.20692515, 0.70922346]])
    y = np.array([1, 1])

    lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
    lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)

    lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
    lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)

    # coef_ shape must not depend on whether an intercept is fitted ...
    assert_equal(lr2_with_intercept.coef_.shape,
                 lr2_without_intercept.coef_.shape)
    assert_equal(lr3_with_intercept.coef_.shape,
                 lr3_without_intercept.coef_.shape)
    # ... and its dimensionality must be the same for 2 or 3 features
    assert_equal(lr2_without_intercept.coef_.ndim,
                 lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
    "Test that linear regression also works with sparse data"
    random_state = check_random_state(random_state)
    n = 100
    # identity design matrix: the fit should recover beta exactly
    # (up to the fitted intercept)
    X = sparse.eye(n, n)
    beta = random_state.rand(n)
    y = X * beta[:, np.newaxis]

    ols = LinearRegression()
    ols.fit(X, y.ravel())
    assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
    # a perfect fit leaves zero residues
    assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
    "Test multiple-outcome linear regressions"
    X, y = make_regression(random_state=random_state)

    # stack the same target twice -> (n_samples, 2) multi-output problem
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]

    clf = LinearRegression(fit_intercept=True)
    clf.fit((X), Y)
    # one row of coefficients per output
    assert_equal(clf.coef_.shape, (2, n_features))
    Y_pred = clf.predict(X)
    # refit on the single output; predictions must agree with the
    # multi-output fit (both outputs are identical by construction)
    clf.fit(X, y)
    y_pred = clf.predict(X)
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
    "Test multiple-outcome linear regressions with sparse data"
    random_state = check_random_state(random_state)
    X, y = make_sparse_uncorrelated(random_state=random_state)
    X = sparse.coo_matrix(X)
    # duplicate the target to get a two-output problem
    Y = np.vstack((y, y)).T
    n_features = X.shape[1]

    ols = LinearRegression()
    ols.fit(X, Y)
    # one row of coefficients per output
    assert_equal(ols.coef_.shape, (2, n_features))
    Y_pred = ols.predict(X)
    # the single-output fit must predict the same values as each column
    # of the multi-output fit
    ols.fit(X, y.ravel())
    y_pred = ols.predict(X)
    assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
    """center_data: pass-through without intercept; centers (and optionally
    normalizes) X and y when an intercept is fitted."""
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    expected_X_mean = np.mean(X, axis=0)
    # XXX: currently scaled to variance=n_samples
    expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
    expected_y_mean = np.mean(y, axis=0)

    # fit_intercept=False: data must pass through completely unchanged
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
                                                normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X)
    assert_array_almost_equal(yt, y)

    # fit_intercept=True: X and y are centered, X not rescaled
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=False)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)

    # normalize=True: X is additionally divided by its (scaled) std
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=True)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
    """Both centering helpers must center multi-output y consistently."""
    n_samples = 200
    n_features = 3
    n_outputs = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_outputs)
    expected_y_mean = np.mean(y, axis=0)

    # run the same checks against the dense and the sparse variant
    args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
    for center, X in args:
        # no intercept: y passes through unchanged
        _, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
                                     normalize=False)
        assert_array_almost_equal(y_mean, np.zeros(n_outputs))
        assert_array_almost_equal(yt, y)

        # with intercept: y is centered column-wise
        _, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
                                     normalize=False)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)

        # normalize only affects X; y is still just centered
        _, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
                                     normalize=True)
        assert_array_almost_equal(y_mean, expected_y_mean)
        assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
    """center_data with sample_weight uses weighted means for centering."""
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples)
    sample_weight = rng.rand(n_samples)
    expected_X_mean = np.average(X, axis=0, weights=sample_weight)
    expected_y_mean = np.average(y, axis=0, weights=sample_weight)

    # XXX: if normalize=True, should we expect a weighted standard deviation?
    #      Currently not weighted, but calculated with respect to weighted mean
    # XXX: currently scaled to variance=n_samples
    expected_X_std = (np.sqrt(X.shape[0]) *
                      np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)

    # weighted centering without normalization
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=False,
                                                sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt, X - expected_X_mean)
    assert_array_almost_equal(yt, y - expected_y_mean)

    # weighted centering with normalization
    Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
                                                normalize=True,
                                                sample_weight=sample_weight)
    assert_array_almost_equal(X_mean, expected_X_mean)
    assert_array_almost_equal(y_mean, expected_y_mean)
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
    assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
    """sparse_center_data reports means/stds but never densifies X by
    actually subtracting the mean (to preserve sparsity)."""
    n_samples = 200
    n_features = 2
    rng = check_random_state(0)
    # random_state not supported yet in sparse.rand
    X = sparse.rand(n_samples, n_features, density=.5)  # , random_state=rng
    X = X.tolil()
    y = rng.rand(n_samples)
    XA = X.toarray()
    # XXX: currently scaled to variance=n_samples
    expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])

    # no intercept: everything passes through unchanged
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=False,
                                                       normalize=False)
    assert_array_almost_equal(X_mean, np.zeros(n_features))
    assert_array_almost_equal(y_mean, 0)
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y)

    # with intercept: means are reported and y is centered, but Xt stays
    # equal to X (no in-place centering of the sparse matrix)
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=True,
                                                       normalize=False)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_std, np.ones(n_features))
    assert_array_almost_equal(Xt.A, XA)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))

    # normalize=True: X is scaled by its std, still not mean-centered
    Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
                                                       fit_intercept=True,
                                                       normalize=True)
    assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
    assert_array_almost_equal(y_mean, np.mean(y, axis=0))
    assert_array_almost_equal(X_std, expected_X_std)
    assert_array_almost_equal(Xt.A, XA / expected_X_std)
    assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
    """Test output format of sparse_center_data, when input is csr"""
    X, y = make_regression()
    # sparsify the dense matrix before converting to CSR
    X[X < 2.5] = 0.0
    csr = sparse.csr_matrix(X)
    csr_, y, _, _, _ = sparse_center_data(csr, y, True)
    # the sparse format must be preserved through centering
    assert_equal(csr_.getformat(), 'csr')
| bsd-3-clause |
glos/glos-qartod | glos_qartod/run.py | 1 | 4052 | import os
import sys
import glob
import pandas as pd
from redis import Redis
from rq import Queue
from netCDF4 import Dataset
from glos_qartod import cli
from glos_qartod import get_logger
def main():
    """Enqueue a QC job for every data file referenced by the config.

    Usage: <prog> <config.xlsx> <processing_dir>

    Reads the Excel workbook (sheet 'Variable Config' holds per-variable
    QC settings, sheet 'Mappings' maps variable names to directory names),
    computes the subset of files needing QC, and pushes one rq job per
    file onto a Redis-backed queue.
    """
    # rq queue backed by a Redis server on the default host/port
    q = Queue(connection=Redis())
    conf_file, proc_dir = sys.argv[1:3]
    # sheet_name=None -> dict of all sheets as DataFrames
    sheets = pd.read_excel(conf_file, None)
    conf = sheets['Variable Config']
    mappings = sheets['Mappings'].set_index('var_name').to_dict()['var_dir']
    files = qc_subset(proc_dir, conf, mappings)
    for f in files:
        q.enqueue(cli.run_qc_str_lock, conf_file, f)
def qc_subset(dir_root, conf, mappings):
    """Returns a subset of the files to QC based on whether there are
    defined keys, etc.

    Parameters
    ----------
    dir_root : str
        Root of the processing directory tree.
    conf : pandas.DataFrame
        'Variable Config' sheet; first three columns are station/variable/
        units, remaining columns are QC test settings.
    mappings : dict
        Maps a variable name to the directory name it is stored under.

    Returns
    -------
    set of absolute file paths still needing QC.
    """
    files = []
    for row in conf.iterrows():
        vals = row[1]
        # get all QC keys, i.e. not station, variable, units
        qc_keys = vals.iloc[3:]
        # get unique tests that are defined for this station/variable
        # (keys look like "<test>.<param>", so take the prefix)
        qc_nn = {k.split('.')[0] for k in qc_keys[qc_keys.notnull()].keys()}
        # if empty set, i.e. all keys are null, skip processing the batch of
        # files
        if not qc_nn:
            continue
        var = vals['variable']
        station = str(vals['station_id'])
        # get any remapped directory for this variable name, or if none
        # exists, use the variable name as the target directory
        var_dir = mappings.get(var, var)
        # form a set of the expected QC flag variable names
        qc_varnames = {"qartod_{}_{}_flag".format(var, n) for n in qc_nn}
        # some of the variables are named inconsistently, so fall back
        # to the directory names if need be
        qc_varnames_bkp = {"qartod_{}_{}_flag".format(var_dir, n) for n in
                           qc_nn}
        # get the directory matching this station name, or all of them if a
        # '*' glob is used in the station_id column
        station_dirs = glob.glob(os.path.join(dir_root, var_dir, station))
        for dest_dir in station_dirs:
            files.extend(find_files(dest_dir, qc_varnames, qc_varnames_bkp))
    # de-duplicate: several config rows can reference the same file
    return set(files)
def find_files(dest_dir, qc_varnames, qc_varnames_bkp):
    """Return the .nc files under `dest_dir` that still need QC applied.

    A file needs QC when its companion .ncq file is missing or does not yet
    contain all expected QC flag variables (primary or fallback names).

    Parameters
    ----------
    dest_dir : str
        Directory tree to search (may not exist; then a warning is logged).
    qc_varnames, qc_varnames_bkp : set of str
        Expected QC variable names (primary and fallback naming schemes).

    Returns
    -------
    list of absolute paths to .nc files lacking complete QC.
    """
    nc_files = []
    if os.path.exists(dest_dir):
        # BUG FIX: os.walk yields (dirpath, dirnames, filenames) where
        # `filenames` is a *list*; the original code unpacked it as a single
        # `fname` and called fname.endswith(...), raising AttributeError.
        # Iterate over the filenames list instead.
        for root, subdirs, fnames in os.walk(dest_dir):
            for fname in fnames:
                # only netCDF data files are candidates for QC
                if not fname.endswith('.nc'):
                    continue
                full_path = os.path.join(root, fname)
                if not check_if_qc_vars_exist(full_path, qc_varnames,
                                              qc_varnames_bkp):
                    nc_files.append(full_path)
    else:
        get_logger().warn("Directory '{}' does not exist but was referenced in config".format(dest_dir))
    return nc_files
def check_if_qc_vars_exist(file_path, qc_varnames, qc_varnames_bkp):
    """
    Checks that QC variables exist in the corresponding QC file based on
    data file's filename.

    Returns False if not all the QC variables are present, and True
    if they are.
    """
    # the QC companion file lives next to the data file with an .ncq suffix
    qc_filepath = file_path.rsplit('.', 1)[0] + '.ncq'
    # if the QC file does not exist, no QC has been applied and it must be
    # created later
    if not os.path.exists(qc_filepath):
        return False
    try:
        with Dataset(qc_filepath) as f:
            qc_vars = f.variables.keys()
            # accept either the primary or the fallback naming scheme;
            # if neither is complete the file will be (re)processed
            return (qc_varnames.issubset(qc_vars) or
                    qc_varnames_bkp.issubset(qc_vars))
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception.  An unreadable QC file is
    # deliberately treated as "needs QC" (best effort, logged).
    except Exception:
        get_logger().exception('Failed to open file {}'.format(qc_filepath))
        return False
# standard script entry point guard
if __name__ == '__main__':
    main()
| apache-2.0 |
walterst/qiime | scripts/print_qiime_config.py | 15 | 35150 | #!/usr/bin/env python
from __future__ import division
__author__ = "Jens Reeder"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Jens Reeder", "Dan Knights", "Antonio Gonzalez Pena",
"Justin Kuczynski", "Jai Ram Rideout", "Greg Caporaso",
"Emily TerAvest"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
import re
from os import access, X_OK, R_OK, W_OK, getenv, environ, remove, devnull
from os.path import isdir, exists, split, join
from sys import platform, version as python_version, executable, stdout
from unittest import TestLoader, TextTestRunner, TestCase
from shutil import rmtree
from subprocess import Popen, PIPE, STDOUT
from optparse import SUPPRESS_HELP
core_dependency_missing_msg = "See the QIIME Installation Guide: http://qiime.org/install/install.html"
try:
from numpy import __version__ as numpy_lib_version
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from scipy import __version__ as scipy_lib_version
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from tempfile import mkdtemp
from skbio.util import remove_files
from burrito.util import ApplicationNotFoundError, ApplicationError
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from qiime.parse import parse_qiime_config_file
from qiime.util import (load_qiime_config,
get_qiime_project_dir,
get_qiime_library_version,
get_qiime_scripts_dir,
get_rdp_jarpath,
get_java_version,
get_pynast_version,
parse_command_line_parameters,
make_option,
qiime_system_call,
get_qiime_temp_dir)
from qiime.denoiser.utils import check_flowgram_ali_exe
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from biom import __version__ as biom_lib_version
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from qcli import __version__ as qcli_lib_version
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from pyqi import __version__ as pyqi_lib_version
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from qiime_default_reference import __version__ as qdr_lib_version
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from skbio import __version__ as skbio_lib_version
from burrito.util import which
except ImportError as e:
raise ImportError("%s\n%s" % (e, core_dependency_missing_msg))
try:
from pandas import __version__ as pandas_lib_version
except ImportError:
pandas_lib_version = "Not installed."
try:
from matplotlib import __version__ as matplotlib_lib_version
except ImportError:
matplotlib_lib_version = "Not installed."
try:
from emperor import __version__ as emperor_lib_version
except ImportError:
emperor_lib_version = "Not installed."
try:
from burrito import __version__ as burrito_lib_version
except ImportError:
burrito_lib_version = "Not installed."
# current release of bfillings doesn't have __version__. if it gets added in
# future releases, display that info, otherwise just indicate whether it's
# installed or not
try:
import bfillings
bfillings_lib_version = bfillings.__version__
except ImportError:
bfillings_lib_version = "Not installed."
except AttributeError:
bfillings_lib_version = "Installed."
# gdata doesn't have __version__ and adding that is outside of our control.
# just indicate whether it's installed or not
try:
import gdata
except ImportError:
gdata_installed = "Not installed."
else:
gdata_installed = "Installed."
try:
import h5py
h5py_lib_version = (
h5py.__version__ + ' (HDF5 version: %s)' % h5py.version.hdf5_version)
except ImportError:
h5py_lib_version = "Not installed."
# ---- best-effort detection of optional external tools (never fatal) ----

# PyNAST: helper returns None when the library is not importable
pynast_lib_version = get_pynast_version()
if pynast_lib_version is None:
    pynast_lib_version = "Not installed."

# sortmerna prints its version banner on stderr
if which('sortmerna') is None:
    sortmerna_lib_version = "Not installed."
else:
    _, serr, _ = qiime_system_call("sortmerna --version")
    sortmerna_lib_version = serr.strip()

# sumaclust has no version flag; scrape the banner line from its help text
if which('sumaclust') is None:
    sumaclust_lib_version = "Not installed."
else:
    sout, _, _ = qiime_system_call("sumaclust --help")
    sout_lines = sout.split('\n')
    sumaclust_lib_version = "Installed, but can't identify version."
    for e in sout_lines:
        e = e.strip()
        if e.startswith('SUMACLUST Version'):
            sumaclust_lib_version = e
            break

# swarm reports its version on stderr; take the first line
if which('swarm') is None:
    swarm_lib_version = "Not installed."
else:
    _, serr, return_value = qiime_system_call("swarm --version")
    serr = serr.strip()
    if serr:
        swarm_lib_version = serr.split('\n')[0]
    else:
        swarm_lib_version = "Installed, but can't identify version."
# script_info drives QIIME's standard command-line interface
# (parse_command_line_parameters reads this dict for usage/help text).
script_info = {}
script_info['brief_description'] = ("Print and optionally test QIIME "
                                    "configuration details")
script_info['script_description'] = ("Print QIIME configuration details and "
                                     "optionally perform tests of the QIIME "
                                     "base or full install.")
script_info['script_usage'] = []
script_info['script_usage'].append(
    ("Example 1",
     "Print basic QIIME configuration details:", """%prog"""))
script_info['script_usage'].append(
    ("Example 2",
     "Print basic QIIME configuration details and test the base QIIME installation:",
     "%prog -t"))
script_info['script_usage'].append(
    ("Example 3",
     "Print basic QIIME configuration details and test the full QIIME installation:",
     "%prog -tf"))
script_info['output_description'] = ("Prints QIIME configuration details to "
                                     "standard output.")
script_info['version'] = __version__
script_info['help_on_no_arguments'] = False
script_info['required_options'] = []
script_info['optional_options'] = [
    make_option('-t', '--test', action='store_true', default=False,
                help='Test the QIIME install and configuration '
                '[default: %default]'),
    # -b is kept for backwards compatibility but hidden from --help
    make_option('-b', '--qiime_base_install', action='store_true',
                default=True, help=SUPPRESS_HELP),
    make_option('-f', '--qiime_full_install', action='store_true',
                default=False, help='If passed, report on dependencies required for the '
                'QIIME full install. To perform tests of the QIIME '
                'full install, you must also pass -t. '
                '[default: %default]'),
    # hidden easter-egg flag
    make_option('--haiku',
                action='store_true',
                default=False,
                help=SUPPRESS_HELP)
]
class QIIMEConfig(TestCase):

    """Sanity checks for the values found in the active QIIME config."""

    def setUp(self):
        # merges the packaged defaults with $QIIME_CONFIG_FP and
        # $HOME/.qiime_config
        self.config = load_qiime_config()

    def test_cluster_jobs_fp(self):
        """cluster_jobs_fp is set to a valid path and is executable"""
        fp = self.config["cluster_jobs_fp"]

        if fp:
            # resolve a bare command name through $PATH
            full_path = which(fp)
            if full_path:
                fp = full_path

            # test if file exists or is in $PATH
            self.assertTrue(exists(fp),
                            "cluster_jobs_fp set to an invalid file path or is not in $PATH: %s" % fp)

            modes = {R_OK: "readable",
                     W_OK: "writable",
                     X_OK: "executable"}
            # test if file executable
            self.assertTrue(access(fp, X_OK),
                            "cluster_jobs_fp is not %s: %s" % (modes[X_OK], fp))

    def test_blastmat_dir(self):
        """blastmat_dir is set to a valid path."""
        test_qiime_config_variable("blastmat_dir", self.config, self)

    def test_pynast_template_alignment_fp(self):
        """pynast_template_alignment, if set, is set to a valid path"""
        test_qiime_config_variable("pynast_template_alignment_fp",
                                   self.config, self)

    def test_pynast_template_alignment_blastdb_fp(self):
        """pynast_template_alignment_blastdb, if set, is set to a valid path"""
        # NOTE(review): this test method was defined twice verbatim in the
        # original file; the second definition silently shadowed the first.
        # The duplicate has been removed (behavior is unchanged, since both
        # bodies were identical).
        test_qiime_config_variable("pynast_template_alignment_blastdb_fp",
                                   self.config, self)

    def test_get_qiime_scripts_dir(self):
        """Test that we can find the directory containing QIIME scripts."""
        # get_qiime_scripts_dir will raise an error if it can't find a scripts
        # directory.
        scripts_dir = get_qiime_scripts_dir()
        self.assertTrue(isdir(scripts_dir), "The QIIME scripts directory does "
                        "not exist: %s" % scripts_dir)

    def test_temp_dir(self):
        """temp_dir is set to a valid path"""
        temp_dir = get_qiime_temp_dir()

        self.assertTrue(exists(temp_dir),
                        "temp_dir does not exist: %s" % temp_dir)
        self.assertTrue(isdir(temp_dir),
                        "temp_dir is not a directory: %s" % temp_dir)
        self.assertTrue(access(temp_dir, W_OK),
                        "temp_dir is not writable: %s" % temp_dir)

    # we are not testing these values from the qiime_config:
    #   jobs_to_start     1
    #   seconds_to_sleep  60

    def test_for_unrecognized_values(self):
        """qiime_config has no extra values"""
        error_msg_fragment = (" contains unrecognized values:\n%s\nYou can "
                              "safely remove these values from your QIIME "
                              "config file as they will be ignored by QIIME.")

        qiime_project_dir = get_qiime_project_dir()
        # the packaged config defines the set of recognized keys
        orig_config = parse_qiime_config_file(open(qiime_project_dir +
                                                   '/qiime/support_files/qiime_config'))

        # check the env qiime_config
        qiime_config_env_filepath = getenv('QIIME_CONFIG_FP')
        if qiime_config_env_filepath:
            qiime_config_via_env = parse_qiime_config_file(
                open(qiime_config_env_filepath))
            extra_vals = []
            for key in qiime_config_via_env:
                if key not in orig_config:
                    extra_vals.append(key)
            if extra_vals:
                self.fail("The QIIME config file set via the QIIME_CONFIG_FP "
                          "environment variable" +
                          error_msg_fragment % ", ".join(extra_vals))
        # check the qiime_config in $HOME/.qiime_config
        home_dir = getenv('HOME')
        if (exists(home_dir + "/.qiime_config")):
            qiime_config_home = parse_qiime_config_file(
                open(home_dir + "/.qiime_config"))
            extra_vals = []
            for key in qiime_config_home:
                if key not in orig_config:
                    extra_vals.append(key)
            if extra_vals:
                self.fail("The .qiime_config in your HOME" +
                          error_msg_fragment % ", ".join(extra_vals))
class QIIMEDependencyBase(QIIMEConfig):

    # Version checks for tools required by the QIIME *base* install.

    def test_uclust_supported_version(self):
        """uclust is in path and version is supported """
        acceptable_version = (1, 2, 22)
        self.assertTrue(which('uclust'),
                        "uclust not found. This may or may not be a problem depending on " +
                        "which components of QIIME you plan to use.")
        command = 'uclust --version'
        proc = Popen(command, shell=True, universal_newlines=True,
                     stdout=PIPE, stderr=STDOUT)
        stdout = proc.stdout.read()
        # banner looks like "uclust v1.2.22q": take the text after the last
        # 'v' and drop the trailing 'q' build tag
        version_string = stdout.strip().split('v')[-1].strip('q')
        try:
            version = tuple(map(int, version_string.split('.')))
            pass_test = version == acceptable_version
        except ValueError:
            # non-numeric version text: fail and show the raw output
            pass_test = False
            version_string = stdout

        self.assertTrue(pass_test,
                        "Unsupported uclust version. %s is required, but running %s."
                        % ('.'.join(map(str, acceptable_version)), version_string))

    def test_FastTree_supported_version(self):
        """FastTree is in path and version is supported """
        acceptable_version = (2, 1, 3)
        self.assertTrue(which('FastTree'),
                        "FastTree not found. This may or may not be a problem depending on " +
                        "which components of QIIME you plan to use.")

        # If FastTree is run interactively, it outputs the following line:
        #   Usage for FastTree version 2.1.3 SSE3:
        #
        # If run non-interactively:
        #   FastTree Version 2.1.3 SSE3
        command = "FastTree 2>&1 > %s | grep -i version" % devnull
        proc = Popen(command, shell=True, universal_newlines=True,
                     stdout=PIPE, stderr=STDOUT)
        stdout = proc.stdout.read().strip()
        # match either "version X" or "Version X" via the 'ersion' suffix
        version_str_matches = re.findall('ersion\s+(\S+)\s+', stdout)
        self.assertEqual(len(version_str_matches), 1,
                         "Could not find FastTree version info in usage text "
                         "'%s'." % stdout)

        version_str = version_str_matches[0]
        try:
            version = tuple(map(int, version_str.split('.')))
            pass_test = version == acceptable_version
        except ValueError:
            pass_test = False

        acceptable_version_str = '.'.join(map(str, acceptable_version))
        self.assertTrue(pass_test,
                        "Unsupported FastTree version. %s is required, but "
                        "running %s." % (acceptable_version_str, version_str))
class QIIMEDependencyFull(QIIMEDependencyBase):
    def test_ampliconnoise_install(self):
        """ AmpliconNoise install looks sane."""
        url = "http://qiime.org/install/install.html#ampliconnoise-install-notes"

        # both lookup-table environment variables must point at real files
        pyro_lookup_file = getenv('PYRO_LOOKUP_FILE')
        self.assertTrue(pyro_lookup_file is not None,
                        "$PYRO_LOOKUP_FILE variable is not set. See %s for help." % url)
        self.assertTrue(exists(pyro_lookup_file),
                        "$PYRO_LOOKUP_FILE variable is not set to an existing filepath.")

        seq_lookup_file = getenv('SEQ_LOOKUP_FILE')
        self.assertTrue(seq_lookup_file is not None,
                        "$SEQ_LOOKUP_FILE variable is not set. See %s for help." % url)
        self.assertTrue(exists(seq_lookup_file),
                        "$SEQ_LOOKUP_FILE variable is not set to an existing filepath.")

        # representative executables from the Scripts and bin directories
        self.assertTrue(which("SplitKeys.pl"),
                        "Couldn't find SplitKeys.pl. " +
                        "Perhaps AmpliconNoise Scripts directory isn't in $PATH?" +
                        " See %s for help." % url)

        self.assertTrue(which("FCluster"),
                        "Couldn't find FCluster. " +
                        "Perhaps the AmpliconNoise bin directory isn't in $PATH?" +
                        " See %s for help." % url)

        self.assertTrue(which("Perseus"),
                        "Couldn't find Perseus. " +
                        "Perhaps the AmpliconNoise bin directory isn't in $PATH?" +
                        " See %s for help." % url)
    def test_sourcetracker_installed(self):
        """sourcetracker is installed"""
        # SourceTracker is located through an environment variable rather
        # than $PATH
        sourcetracker_path = getenv('SOURCETRACKER_PATH')
        self.assertNotEqual(sourcetracker_path, None,
                            ("SOURCETRACKER_PATH is not set. This is "
                             "only important if you plan to use SourceTracker."))
        self.assertTrue(exists(sourcetracker_path),
                        "SOURCETRACKER_PATH is not set to a valid path: %s" %
                        sourcetracker_path)
    def test_chimeraSlayer_install(self):
        """no obvious problems with ChimeraSlayer install """
        # The ChimeraSlayer app requires that all its components are installed
        # relative to the main program ChimeraSlayer.pl.
        # We therefore check that at least one of the files is there.
        # However, if the directory structure of ChimeraSlayer changes, this
        # test will most likely fail as well and need to be updated.
        # Tested with the version of microbiomeutil_2010-04-29
        chim_slay = which("ChimeraSlayer.pl")
        self.assertTrue(chim_slay, "ChimeraSlayer was not found in your $PATH")
        dir, app_name = split(chim_slay)
        self.assertTrue(
            exists(dir + "/ChimeraParentSelector/chimeraParentSelector.pl"),
            "ChimeraSlayer depends on external files in directoryies relative to its "
            "install directory. These do not appear to be present.")
    def test_blastall_fp(self):
        """blastall_fp is set to a valid path"""
        blastall = self.config["blastall_fp"]
        if not self.config["blastall_fp"].startswith("/"):
            # path is relative, figure out absolute path via $PATH
            blast_all = which(blastall)
            if not blast_all:
                raise ApplicationNotFoundError(
                    "blastall_fp set to %s, but is not in your PATH. Either use an absolute path to or put it in your PATH." %
                    blastall)
            self.config["blastall_fp"] = blast_all

        # verify the (possibly resolved) path exists and is executable
        test_qiime_config_variable("blastall_fp", self.config, self, X_OK)
    def test_blast_supported_version(self):
        """blast is in path and version is supported """
        acceptable_version = (2, 2, 22)
        self.assertTrue(which('blastall'),
                        "blast not found. This may or may not be a problem depending on " +
                        "which components of QIIME you plan to use.")
        # running blastall with no args prints a usage banner containing the
        # version as the second whitespace-separated token
        command = 'blastall | grep blastall'
        proc = Popen(command, shell=True, universal_newlines=True,
                     stdout=PIPE, stderr=STDOUT)
        stdout = proc.stdout.read()
        version_string = stdout.strip().split(' ')[1].strip()
        try:
            version = tuple(map(int, version_string.split('.')))
            pass_test = version == acceptable_version
        except ValueError:
            pass_test = False
            version_string = stdout

        self.assertTrue(pass_test,
                        "Unsupported blast version. %s is required, but running %s."
                        % ('.'.join(map(str, acceptable_version)), version_string))
    def test_cdbtools_supported_version(self):
        """cdbtools is in path and version is supported """
        acceptable_version = (0, 99)
        self.assertTrue(which('cdbfasta'),
                        "cdbtools not found. This may or may not be a problem depending on " +
                        "which components of QIIME you plan to use.")
        # version is the third whitespace-separated token of `cdbfasta -v`
        command = "cdbfasta -v"
        proc = Popen(command, shell=True, universal_newlines=True,
                     stdout=PIPE, stderr=STDOUT)
        stdout = proc.stdout.read()
        version_string = stdout.strip().split(' ')[2].strip()
        try:
            version = tuple(map(int, version_string.split('.')))
            pass_test = version == acceptable_version
        except ValueError:
            pass_test = False
            version_string = stdout

        self.assertTrue(pass_test,
                        "Unsupported cdbtools version. %s is required, but running %s."
                        % ('.'.join(map(str, acceptable_version)), version_string))
    def test_INFERNAL_supported_version(self):
        """INFERNAL is in path and version is supported """
        acceptable_version = (1, 0, 2)
        self.assertTrue(which('cmbuild'),
                        "Infernal not found. This may or may not be a problem depending on " +
                        "which components of QIIME you plan to use.")
        # version is the third token of cmbuild's INFERNAL banner line
        command = "cmbuild -h | grep INF"
        proc = Popen(command, shell=True, universal_newlines=True,
                     stdout=PIPE, stderr=STDOUT)
        stdout = proc.stdout.read()
        version_string = stdout.strip().split(' ')[2].strip()
        try:
            version = tuple(map(int, version_string.split('.')))
            pass_test = version == acceptable_version
        except ValueError:
            pass_test = False
            version_string = stdout

        self.assertTrue(pass_test,
                        "Unsupported INFERNAL version. %s is required, but running %s."
                        % ('.'.join(map(str, acceptable_version)), version_string))
    def test_muscle_supported_version(self):
        """muscle is in path and version is supported """
        acceptable_version = (3, 8, 31)
        self.assertTrue(which('muscle'),
                        "muscle not found. This may or may not be a problem depending on " +
                        "which components of QIIME you plan to use.")
        # banner looks like "MUSCLE v3.8.31 ..."; strip the leading 'v'
        command = "muscle -version"
        proc = Popen(command, shell=True, universal_newlines=True,
                     stdout=PIPE, stderr=STDOUT)
        stdout = proc.stdout.read()
        version_string = stdout.strip().split(' ')[1].strip('v')
        try:
            version = tuple(map(int, version_string.split('.')))
            pass_test = version == acceptable_version
        except ValueError:
            pass_test = False
            version_string = stdout

        self.assertTrue(pass_test,
                        "Unsupported muscle version. %s is required, but running %s."
                        % ('.'.join(map(str, acceptable_version)), version_string))
    def test_mothur_supported_version(self):
        """mothur is in path and version is supported """
        acceptable_version = (1, 25, 0)
        self.assertTrue(which('mothur'),
                        "mothur not found. This may or may not be a problem depending on " +
                        "which components of QIIME you plan to use.")
        # mothur creates a log file in cwd, so redirect it to a tmp location
        # and clean it up afterwards
        log_file = join(get_qiime_temp_dir(), 'mothur.log')
        command = "mothur \"#set.logfile(name=%s)\" | grep '^mothur v'" % log_file
        stdout, stderr, exit_Status = qiime_system_call(command)

        # remove log file
        remove_files([log_file], error_on_missing=False)

        # banner looks like "mothur v.1.25.0"; strip the leading 'v.'
        version_string = stdout.strip().split(' ')[1].strip('v.')
        try:
            version = tuple(map(int, version_string.split('.')))
            pass_test = version == acceptable_version
        except ValueError:
            pass_test = False
            version_string = stdout

        self.assertTrue(pass_test,
                        "Unsupported mothur version. %s is required, but running %s."
                        % ('.'.join(map(str, acceptable_version)), version_string))
    def test_denoiser_supported_version(self):
        """denoiser aligner is ready to use """
        # check_flowgram_ali_exe raises when the aligner binary is missing
        # or broken; translate that into a test failure
        pass_test = True
        try:
            check_flowgram_ali_exe()
        except (ApplicationNotFoundError, ApplicationError):
            pass_test = False

        self.assertTrue(pass_test,
                        "Denoiser flowgram aligner not found or not "
                        "executable. This may or may not be a problem "
                        "depending on which components of QIIME you plan to "
                        "use.")
    def test_raxmlHPC_supported_version(self):
        """raxmlHPC is in path and version is supported """
        # NOTE(review): the list contains the same tuple twice, and the
        # '.'.join in the failure message below joins *tuples*, producing an
        # odd message; behavior is otherwise correct (membership test).
        acceptable_version = [(7, 3, 0), (7, 3, 0)]
        self.assertTrue(which('raxmlHPC'),
                        "raxmlHPC not found. This may or may not be a problem depending on " +
                        "which components of QIIME you plan to use.")
        # version is the fifth token of the "version" banner line
        command = "raxmlHPC -v | grep version"
        proc = Popen(command, shell=True, universal_newlines=True,
                     stdout=PIPE, stderr=STDOUT)
        stdout = proc.stdout.read()
        version_string = stdout.strip().split(' ')[4].strip()
        try:
            version = tuple(map(int, version_string.split('.')))
            pass_test = version in acceptable_version
        except ValueError:
            pass_test = False
            version_string = stdout

        self.assertTrue(pass_test,
                        "Unsupported raxmlHPC version. %s is required, but running %s."
                        % ('.'.join(map(str, acceptable_version)), version_string))
    def test_clearcut_supported_version(self):
        """clearcut is in path and version is supported """
        acceptable_version = (1, 0, 9)
        self.assertTrue(which('clearcut'),
                        "clearcut not found. This may or may not be a problem depending on " +
                        "which components of QIIME you plan to use.")
        # version is the third token of `clearcut -V`
        command = "clearcut -V"
        proc = Popen(command, shell=True, universal_newlines=True,
                     stdout=PIPE, stderr=STDOUT)
        stdout = proc.stdout.read()
        version_string = stdout.strip().split(' ')[2].strip()
        try:
            version = tuple(map(int, version_string.split('.')))
            pass_test = version == acceptable_version
        except ValueError:
            pass_test = False
            version_string = stdout

        self.assertTrue(pass_test,
                        "Unsupported clearcut version. %s is required, but running %s."
                        % ('.'.join(map(str, acceptable_version)), version_string))
def test_cdhit_supported_version(self):
    """cd-hit is in path and version is supported """
    # cd-hit exposes no version flag, so presence on PATH is all we can verify.
    found = which('cd-hit')
    self.assertTrue(found,
                    "cd-hit not found. This may or may not be a problem depending on " +
                    "which components of QIIME you plan to use.")
def test_rtax_supported_version(self):
    """rtax is in path and version is supported """
    acceptable_version = [(0, 984)]
    self.assertTrue(which('rtax'),
                    "rtax not found. This may or may not be a problem depending on " +
                    "which components of QIIME you plan to use.")
    # rtax writes its banner to stderr; discard stdout and grep the version.
    cmd = "rtax 2>&1 > %s | grep Version | awk '{print $2}'" % devnull
    pipe = Popen(cmd, shell=True, universal_newlines=True,
                 stdout=PIPE, stderr=STDOUT)
    raw_output = pipe.stdout.read()
    detected = raw_output.strip()
    try:
        pass_test = tuple(map(int, detected.split('.'))) in acceptable_version
    except ValueError:
        pass_test = False
        detected = raw_output
    self.assertTrue(pass_test,
                    "Unsupported rtax version. %s is required, but running %s."
                    % ('.'.join(map(str, acceptable_version)), detected))
def test_usearch_supported_version(self):
    """usearch is in path and version is supported """
    # Versions known to work with QIIME.  The original list contained the
    # same tuple twice; one entry is enough for the membership test.
    acceptable_version = [(5, 2, 236)]
    self.assertTrue(which('usearch'),
                    "usearch not found. This may or may not be a problem depending on " +
                    "which components of QIIME you plan to use.")
    command = "usearch --version"
    proc = Popen(command, shell=True, universal_newlines=True,
                 stdout=PIPE, stderr=STDOUT)
    stdout = proc.stdout.read()
    # Banner looks like "usearch v5.2.236"; keep everything after the 'v'.
    version_string = stdout.split('v')[1]
    try:
        version = tuple(map(int, version_string.split('.')))
        pass_test = version in acceptable_version
    except ValueError:
        # Unparseable banner: fail and report the raw output instead.
        pass_test = False
        version_string = stdout
    # BUG FIX: the old message joined a *list of tuples* with '.', producing
    # e.g. "(5, 2, 236).(5, 2, 236)"; format each acceptable version properly.
    self.assertTrue(pass_test,
                    "Unsupported usearch version. %s is required, but running %s."
                    % (' or '.join('.'.join(map(str, v))
                                   for v in acceptable_version),
                       version_string))
def test_R_supported_version(self):
    """R is in path and version is supported """
    minimum_version = (2, 12, 0)
    self.assertTrue(which('R'),
                    "R not found. This may or may not be a problem depending on " +
                    "which components of QIIME you plan to use.")
    command = "R --version | grep 'R version' | awk '{print $3}'"
    proc = Popen(command, shell=True, universal_newlines=True,
                 stdout=PIPE, stderr=STDOUT)
    stdout = proc.stdout.read()
    version_string = stdout.strip()
    try:
        version = tuple(map(int, version_string.split('.')))
        # Tuple comparison is lexicographic, which is exactly the
        # major/minor/patch ordering the old nested if-chain implemented.
        # Unlike the old code, it also cannot raise an (uncaught)
        # IndexError when the version string has fewer than 3 components.
        pass_test = version >= minimum_version
    except ValueError:
        # Unparseable output: fail and report the raw output instead.
        pass_test = False
        version_string = stdout
    self.assertTrue(pass_test,
                    "Unsupported R version. %s or greater is required, but running %s."
                    % ('.'.join(map(str, minimum_version)), version_string))
def test_gdata_install(self):
    """gdata is installed"""
    # Importing is the only available probe; gdata exposes no version
    # information we can query programmatically.
    try:
        import gdata
        installed = True
    except ImportError:
        installed = False
    self.assertTrue(installed, "gdata is not installed.")
def test_h5py(self):
    """h5py is installed"""
    failure_msg = (
        "h5py is not installed. You should install this for "
        "improved performance with large BIOM files or if "
        "working with BIOM format version 2.x files. For "
        "more information, see "
        "http://qiime.org/documentation/file_formats.html#biom-file-format-versions")
    # h5py_lib_version is resolved at module import time elsewhere in the file.
    self.assertTrue(h5py_lib_version != "Not installed.", failure_msg)
def test_qiime_config_variable(variable, qiime_config, test,
access_var=R_OK, fail_on_missing=False):
"""test if a variable is set and set to a readable path."""
fp = qiime_config[variable]
if not fp:
if fail_on_missing:
test.fail("%s not set." % variable)
else:
# non-essential file, so do not fail
return
# test if file exists
test.assertTrue(exists(fp), "%s set to an invalid file path: %s" %
(variable, fp))
modes = {R_OK: "readable",
W_OK: "writable",
X_OK: "executable"}
# test if file readable
test.assertTrue(access(fp, access_var),
"%s is not %s: %s" % (variable, modes[access_var], fp))
def main():
    """Print system, dependency and qiime_config information, then optionally
    run the base/full install test suites (Python 2 script)."""
    option_parser, opts, args = parse_command_line_parameters(**script_info)
    if opts.haiku:
        print "QIIME provides insight\nmicrobial in nature\nto ecology"
        exit(0)
    qiime_config = load_qiime_config()
    test = opts.test
    qiime_full_install = opts.qiime_full_install
    # RDP classifier and Java are optional dependencies; report
    # "Not installed." rather than failing when they are absent.
    rdp_jarpath = get_rdp_jarpath()
    if rdp_jarpath is None:
        rdp_version = "Not installed."
    else:
        rdp_version = split(rdp_jarpath)[1]
    java_version = get_java_version()
    if java_version is None:
        java_version = "Not installed."
    system_info = [
        ("Platform", platform),
        ("Python version", python_version.replace('\n', ' ')),
        ("Python executable", executable)]
    # Right-pad labels to the longest one so the values line up.
    max_len = max([len(e[0]) for e in system_info])
    print "\nSystem information"
    print "=================="
    for v in system_info:
        print "%*s:\t%s" % (max_len, v[0], v[1])
    print "\nQIIME default reference information"
    print "==================================="
    print "For details on what files are used as QIIME's default references, see here:"
    print " https://github.com/biocore/qiime-default-reference/releases/tag/%s" % qdr_lib_version
    # Library versions are resolved at module import time elsewhere in
    # this file (the *_lib_version globals).
    version_info = [
        ("QIIME library version", get_qiime_library_version()),
        ("QIIME script version", __version__),
        ("qiime-default-reference version", qdr_lib_version),
        ("NumPy version", numpy_lib_version),
        ("SciPy version", scipy_lib_version),
        ("pandas version", pandas_lib_version),
        ("matplotlib version", matplotlib_lib_version),
        ("biom-format version", biom_lib_version),
        ("h5py version", h5py_lib_version),
        ("qcli version", qcli_lib_version),
        ("pyqi version", pyqi_lib_version),
        ("scikit-bio version", skbio_lib_version),
        ("PyNAST version", pynast_lib_version),
        ("Emperor version", emperor_lib_version),
        ("burrito version", burrito_lib_version),
        ("burrito-fillings version", bfillings_lib_version),
        ("sortmerna version", sortmerna_lib_version),
        ("sumaclust version", sumaclust_lib_version),
        ("swarm version", swarm_lib_version),
        ("gdata", gdata_installed)
    ]
    if qiime_full_install:
        # Extra entries only relevant for the full (non-base) install.
        version_info += [
            ("RDP Classifier version (if installed)", rdp_version),
            ("Java version (if installed)", java_version)]
    max_len = max([len(e[0]) for e in version_info])
    print "\nDependency versions"
    print "==================="
    for v in version_info:
        print "%*s:\t%s" % (max_len, v[0], v[1])
    print "\nQIIME config values"
    print "==================="
    print "For definitions of these settings and to learn how to configure QIIME, see here:"
    print " http://qiime.org/install/qiime_config.html"
    print " http://qiime.org/tutorials/parallel_qiime.html\n"
    max_len = max([len(key) for key in qiime_config])
    for key, value in qiime_config.items():
        print "%*s:\t%s" % (max_len, key, value)
    if test:
        # Pick the matching unittest TestCase; the full suite is a strict
        # superset of the base suite.
        if qiime_full_install:
            print "\nQIIME full install test results"
            print "==============================="
            suite = TestLoader().loadTestsFromTestCase(QIIMEDependencyFull)
        else:
            print "\nQIIME base install test results"
            print "==============================="
            suite = TestLoader().loadTestsFromTestCase(QIIMEDependencyBase)
        if opts.verbose:
            verbosity = 2
        else:
            verbosity = 1
        TextTestRunner(stream=stdout, verbosity=verbosity).run(suite)


if __name__ == "__main__":
    main()
| gpl-2.0 |
anirudhjayaraman/scikit-learn | sklearn/ensemble/voting_classifier.py | 178 | 8006 | """
Soft Voting/Majority Rule classifier.
This module contains a Soft Voting/Majority Rule classifier for
classification estimators.
"""
# Authors: Sebastian Raschka <se.raschka@gmail.com>,
# Gilles Louppe <g.louppe@gmail.com>
#
# Licence: BSD 3 clause
import numpy as np
from ..base import BaseEstimator
from ..base import ClassifierMixin
from ..base import TransformerMixin
from ..base import clone
from ..preprocessing import LabelEncoder
from ..externals import six
class VotingClassifier(BaseEstimator, ClassifierMixin, TransformerMixin):
    """Soft Voting/Majority Rule classifier for unfitted estimators.

    Read more in the :ref:`User Guide <voting_classifier>`.

    Parameters
    ----------
    estimators : list of (string, estimator) tuples
        Invoking the `fit` method on the `VotingClassifier` will fit clones
        of those original estimators that will be stored in the class attribute
        `self.estimators_`.

    voting : str, {'hard', 'soft'} (default='hard')
        If 'hard', uses predicted class labels for majority rule voting.
        Else if 'soft', predicts the class label based on the argmax of
        the sums of the predicted probabilities, which is recommended for
        an ensemble of well-calibrated classifiers.

    weights : array-like, shape = [n_classifiers], optional (default=`None`)
        Sequence of weights (`float` or `int`) to weight the occurrences of
        predicted class labels (`hard` voting) or class probabilities
        before averaging (`soft` voting). Uses uniform weights if `None`.

    Attributes
    ----------
    classes_ : array-like, shape = [n_predictions]
        Class labels seen during ``fit`` (as produced by LabelEncoder).

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.naive_bayes import GaussianNB
    >>> from sklearn.ensemble import RandomForestClassifier
    >>> clf1 = LogisticRegression(random_state=1)
    >>> clf2 = RandomForestClassifier(random_state=1)
    >>> clf3 = GaussianNB()
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> eclf1 = VotingClassifier(estimators=[
    ...         ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
    >>> eclf1 = eclf1.fit(X, y)
    >>> print(eclf1.predict(X))
    [1 1 1 2 2 2]
    >>> eclf2 = VotingClassifier(estimators=[
    ...         ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
    ...         voting='soft')
    >>> eclf2 = eclf2.fit(X, y)
    >>> print(eclf2.predict(X))
    [1 1 1 2 2 2]
    >>> eclf3 = VotingClassifier(estimators=[
    ...        ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
    ...        voting='soft', weights=[2,1,1])
    >>> eclf3 = eclf3.fit(X, y)
    >>> print(eclf3.predict(X))
    [1 1 1 2 2 2]
    >>>
    """

    def __init__(self, estimators, voting='hard', weights=None):
        self.estimators = estimators
        self.named_estimators = dict(estimators)
        self.voting = voting
        self.weights = weights

    def fit(self, X, y):
        """ Fit the estimators.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples]
            Target values.

        Returns
        -------
        self : object

        Raises
        ------
        NotImplementedError
            If `y` is a 2-D array with more than one column (multilabel /
            multi-output targets).
        ValueError
            If `voting` is neither 'soft' nor 'hard', or the number of
            weights does not match the number of estimators.
        """
        if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
            raise NotImplementedError('Multilabel and multi-output'
                                      ' classification is not supported.')
        if self.voting not in ('soft', 'hard'):
            raise ValueError("Voting must be 'soft' or 'hard'; got (voting=%r)"
                             % self.voting)
        # NOTE(review): plain truthiness test — an empty weights list skips
        # this consistency check, and an ambiguous-truth numpy array would
        # raise here; confirm whether that is intended before changing.
        if self.weights and len(self.weights) != len(self.estimators):
            raise ValueError('Number of classifiers and weights must be equal'
                             '; got %d weights, %d estimators'
                             % (len(self.weights), len(self.estimators)))
        # Labels are encoded to 0..n_classes-1 so that np.bincount in
        # predict() can be used for hard voting.
        self.le_ = LabelEncoder()
        self.le_.fit(y)
        self.classes_ = self.le_.classes_
        self.estimators_ = []
        for name, clf in self.estimators:
            fitted_clf = clone(clf).fit(X, self.le_.transform(y))
            self.estimators_.append(fitted_clf)
        return self

    def predict(self, X):
        """ Predict class labels for X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        -------
        maj : array-like, shape = [n_samples]
            Predicted class labels.
        """
        if self.voting == 'soft':
            # argmax over the weighted-average class probabilities.
            maj = np.argmax(self.predict_proba(X), axis=1)
        else:  # 'hard' voting: weighted majority of per-estimator labels
            predictions = self._predict(X)
            maj = np.apply_along_axis(lambda x:
                                      np.argmax(np.bincount(x,
                                                weights=self.weights)),
                                      axis=1,
                                      arr=predictions)
        # Map the encoded labels back to the original class labels.
        maj = self.le_.inverse_transform(maj)
        return maj

    def _collect_probas(self, X):
        """Collect results from clf.predict_proba calls. """
        return np.asarray([clf.predict_proba(X) for clf in self.estimators_])

    def _predict_proba(self, X):
        """Predict class probabilities for X in 'soft' voting """
        avg = np.average(self._collect_probas(X), axis=0, weights=self.weights)
        return avg

    @property
    def predict_proba(self):
        """Compute probabilities of possible outcomes for samples in X.

        Exposed as a property so that accessing it under hard voting raises
        AttributeError (making hasattr() report correctly).

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        -------
        avg : array-like, shape = [n_samples, n_classes]
            Weighted average probability for each class per sample.
        """
        if self.voting == 'hard':
            raise AttributeError("predict_proba is not available when"
                                 " voting=%r" % self.voting)
        return self._predict_proba

    def transform(self, X):
        """Return class labels or probabilities for X for each estimator.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
            n_features is the number of features.

        Returns
        -------
        If `voting='soft'`:
          array-like = [n_classifiers, n_samples, n_classes]
            Class probabilities calculated by each classifier.
        If `voting='hard'`:
          array-like = [n_classifiers, n_samples]
            Class labels predicted by each classifier.
        """
        if self.voting == 'soft':
            return self._collect_probas(X)
        else:
            return self._predict(X)

    def get_params(self, deep=True):
        """Return estimator parameter names for GridSearch support"""
        if not deep:
            return super(VotingClassifier, self).get_params(deep=False)
        else:
            # Expose each sub-estimator and its parameters under the
            # '<name>__<param>' convention used by Pipeline/GridSearchCV.
            out = super(VotingClassifier, self).get_params(deep=False)
            out.update(self.named_estimators.copy())
            for name, step in six.iteritems(self.named_estimators):
                for key, value in six.iteritems(step.get_params(deep=True)):
                    out['%s__%s' % (name, key)] = value
            return out

    def _predict(self, X):
        """Collect results from clf.predict calls. """
        # Transposed so rows correspond to samples, columns to estimators.
        return np.asarray([clf.predict(X) for clf in self.estimators_]).T
| bsd-3-clause |
mblondel/scikit-learn | examples/cluster/plot_affinity_propagation.py | 349 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)

from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs

##############################################################################
# Generate sample data: three Gaussian blobs, seeded for reproducibility.
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
                            random_state=0)

##############################################################################
# Compute Affinity Propagation
# NOTE(review): preference=-50 appears tuned to recover 3 clusters on this
# data — confirm before reusing on other datasets.
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)

# Quality metrics: supervised scores use the known generating labels;
# the silhouette score is computed from the data alone.
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
      % metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
      % metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
      % metrics.silhouette_score(X, labels, metric='sqeuclidean'))

##############################################################################
# Plot result: one color per cluster, exemplar marked with a large dot and
# a line drawn from every member to its exemplar.
import matplotlib.pyplot as plt
from itertools import cycle

plt.close('all')
plt.figure(1)
plt.clf()

colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
    class_members = labels == k
    cluster_center = X[cluster_centers_indices[k]]
    plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
    plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
             markeredgecolor='k', markersize=14)
    for x in X[class_members]:
        plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)

plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
leogulus/pisco_pipeline | run_rgb_pisco.py | 1 | 6622 | import os
import subprocess
import shlex
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.cosmology import Planck15 as cosmo
from astropy.visualization import make_lupton_rgb
import aplpy
def list_file_name(dir, name, end=0):
"""
list_file_name: list all filename which started with 'name' and end with
'end' in 'dir' directory
INPUT:
- dir: directory to search in
- name: begining of the file name
- end: ending of the file name
OUTPUT:
- list of all filename in that directory
"""
names = []
for file in os.listdir(dir):
if file.startswith(name):
if end == 0:
names.append(os.path.join(dir, file))
else:
if file.endswith(end):
names.append(os.path.join(dir, file))
if len(names) == 0:
print 'Cannot find the files'
return names
def make_images_jpeg(name, redshift):
    """Build a Lupton RGB JPEG for one object from its i/r/g coadd FITS
    frames and save it under Chips_images/large_<name>_img_.jpeg.

    name     -- object identifier used to locate coadd_c<name>_{i,r,g}.fits
    redshift -- object redshift (only used by the disabled scale-bar
                annotation block below)
    Returns 0 on success.
    """
    # Reproject the three bands onto a common (north-up) grid in test.fits.
    aplpy.make_rgb_cube(['/Users/taweewat/Documents/pisco_code/final/coadd_c%s_i.fits' % name, '/Users/taweewat/Documents/pisco_code/final/coadd_c%s_r.fits' % name,
                         '/Users/taweewat/Documents/pisco_code/final/coadd_c%s_g.fits' % name], 'test.fits', north=True)
    # For each band: standardize using mean/std of pixels with |value| < 150
    # (NOTE(review): the 150 clipping threshold and the 1.3 boost on the i
    # band look hand-tuned for this data set — confirm before reuse).
    g = fits.open('test.fits')[0].data[2]
    g_nonan = g.ravel()[~np.isnan(g.ravel())]
    gmean = np.mean(g_nonan[np.abs(g_nonan) < 150])
    gstd = np.std(g_nonan[np.abs(g_nonan) < 150])
    g = (g - gmean) / gstd  # * 2 scaling disabled
    gmin = (gmean - gstd) / gstd
    r = fits.open('test.fits')[0].data[1]
    r_nonan = r.ravel()[~np.isnan(r.ravel())]
    rmean = np.mean(r_nonan[np.abs(r_nonan) < 150])
    rstd = np.std(r_nonan[np.abs(r_nonan) < 150])
    r = (r - rmean) / rstd  # * 2 scaling disabled
    rmin = (rmean - rstd) / rstd
    i = fits.open('test.fits')[0].data[0]
    i_nonan = i.ravel()[~np.isnan(i.ravel())]
    imean = np.mean(i_nonan[np.abs(i_nonan) < 150])
    istd = np.std(i_nonan[np.abs(i_nonan) < 150])
    i = (i - imean) / istd * 1.3
    imin = (imean - istd) / istd
    # The intermediate cube is no longer needed once the bands are in memory.
    cmd = "rm test.fits"; print cmd; sub = subprocess.check_call(shlex.split(cmd))
    # rgb_default = make_lupton_rgb(i, r, g, Q=7, stretch=4)
    rgb_default = make_lupton_rgb(i, r, g, Q=6, stretch=7, minimum=[imin, rmin, gmin])
    fig, ax = plt.subplots(1, figsize=(20, 30))
    ax.imshow(rgb_default, origin='lower')
    # Disabled: 30" scale bar plus redshift/kpc annotation.
    # ax.plot([200, 328.57], [820, 820], color='white', lw=3)
    # ax.annotate('30"', xy=(200, 800 - 28), xycoords='data',
    #             color='white', fontsize=15)
    #
    # if redshift == -1:
    #     ax.annotate('no z', xy=(200, 800 + 33),
    #                 xycoords='data', color='white', fontsize=14)
    # else:
    #     kpc = 30 * (1 / cosmo.arcsec_per_kpc_proper(redshift)).value
    #     ax.annotate('z=%.2f: %.0f kpc' % (redshift, kpc), xy=(
    #         200, 800 + 33), xycoords='data', color='white', fontsize=14);
    # ax.axis([100,1570,730,1700])
    ax.axis([0, 1600, 400, 2100])
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # Crosshair-style guide lines through the nominal target position.
    plt.axvline(800, c='lightgreen', ls='--')
    plt.axhline(1250, c='lightgreen', ls='--')
    plt.tight_layout()
    plt.savefig('/Users/taweewat/Documents/pisco_code/Chips_images/large_%s_img_.jpeg' %
                name, bbox_inches='tight')  # ,dpi='figure')
    return 0
# name='CHIPS0022-5140'
# Problems
# CHIPS1011-0505: the i band does not have a correct wcs, the g band is extremely faint
# CHIPS2357-1125
# fixed 35 CHIPS2357-1125: still don't know the problem
# fixed 36 CHIPS0140-1533: still don't know the problem #work!!
# fixed 59 CHIPS0152-5028: very bad alignment, run wcs again
# fixed 118 CHIPS0106-2358: stil bad alignment
# 43 CHIPS1011-0505: still don't know the problem (can't do the alignment properly), not working, just can't align it
# 102 CHIPS0132-1608: bad alignment, the star is way too bright to get any good measurement
# New: CHIPS0107-1310, CHIPS0050-5249, CHIPS0112-2919, CHIPS0116-1136, CHIPS0127-4016
# Not: CHIPS0132-1608,
if __name__ == "__main__":
    # 'chips' processes CHIPS cluster candidates; 'field' processes survey
    # fields — each mode selects matching coadd files and its catalog CSV.
    mode = 'chips'
    if mode == 'chips':
        allnames = list_file_name(
            '/Users/taweewat/Documents/pisco_code/final/', 'coadd_cCHIPS', end='i.fits')
        base = pd.read_csv(
            '/Users/taweewat/Documents/red_sequence/chips_all_obj.csv', index_col=0)
    elif mode == 'field':
        allnames = list_file_name(
            '/Users/taweewat/Documents/pisco_code/final/', 'coadd_cField', end='i.fits')
        base = pd.read_csv('/Users/taweewat/Dropbox/Documents/MIT/Observation/2017_1/all_objs.csv')
    # Strip the 'coadd_c' prefix and '_i.fits' suffix to recover object names;
    # set() removes duplicates (order is therefore unstable between runs).
    all_names = list(set([i.split('/')[-1][7:-7] for i in allnames]))
    print 'the total number of objects:', len(all_names)
    # chips='CHIPS0229-5232'
    # print chips, all_names[0]
    # print np.where(np.array(all_names) == chips)[0][0]; ind=np.where(np.array(all_names) == chips)[0][0]
    # field='Field060'
    # print field, all_names[0]
    # print np.where(np.array(all_names) == field)[0][0]; ind=np.where(np.array(all_names) == field)[0][0]
    #DONE: 'Field018','Field020','Field021','Field022','Field025','Field026','Field027']
    # all_names=['CHIPS0609-0247']
    # 'Field028','Field029','Field030','Field033','Field034','Field036','Field037','Field038','Field039','Field040','Field042','Field044','Field045']
    # Index 43 is skipped — per the notes in this file, that object
    # (CHIPS1011-0505) cannot be aligned properly.
    for i, name in enumerate(np.append(all_names[:43], all_names[44:])):
    # for i, name in enumerate(all_names[ind:ind+1]):
        # Look up redshift and coordinates for the object in the catalog.
        if mode == 'chips':
            redshift = base[base.chips == name].redshift.values[0]
            RA = base[base.chips == name].ra.values[0]
            DEC = base[base.chips == name].dec.values[0]
        elif mode == 'field':
            redshift = base[base.name == name].redshift.values[0]
            RA = base[base.name == name].ra.values[0]
            DEC = base[base.name == name].dec.values[0]
        print i, name
        ## make image with ds9 with the center is crosshaired
        # cmd = "ds9 -zscale -crosshair %f %f wcs fk5 -rgb -red final/coadd_c%s_i.fits -green final/coadd_c%s_r.fits -blue final/coadd_c%s_g.fits -zoom out -saveimage Chips_images/%s_ds9.eps -exit" % \
        #     (RA, DEC, name, name, name, name)
        # print cmd
        # sub = subprocess.check_call(shlex.split(cmd))
        ## make jpeg image from the python script with the scale size
        # Only (re)generate the JPEG if it does not exist yet.
        if not os.path.isfile(os.path.join('Chips_images', 'large_%s_img_.jpeg' % name)):
            print i, 'working on the the image large_%s_img_.jpeg' % name
            a = make_images_jpeg(name, redshift)
        # else:
        #     make_images_jpeg(name, redshift)
| mit |
manahl/arctic | tests/unit/serialization/test_incremental.py | 1 | 6189 | import itertools
import pytest
from arctic.exceptions import ArcticSerializationException
from arctic.serialization.incremental import IncrementalPandasToRecArraySerializer
from arctic.serialization.numpy_records import DataFrameSerializer
from tests.unit.serialization.serialization_test_data import _mixed_test_data, is_test_data_serializable
# Default serializer chunk size: 2 MiB minus 2 KiB of slack.
_CHUNK_SIZE = 2 * 1024 * 1024 - 2048
# Row count used to derive per-row chunk sizes in the chunk-size tests below.
NON_HOMOGENEOUS_DTYPE_PATCH_SIZE_ROWS = 50
# NOTE(review): appears unused in this module — possibly a leftover cache
# slot superseded by _mixed_test_data(); confirm before removing.
_TEST_DATA = None
# Shared serializer instance used by every test in this module.
df_serializer = DataFrameSerializer()
def test_incremental_bad_init():
    """The constructor must reject non-DataFrame input and bad sizing args."""
    # Non-DataFrame payloads are rejected outright.
    for bad_input in ('hello world', 1234):
        with pytest.raises(ArcticSerializationException):
            IncrementalPandasToRecArraySerializer(df_serializer, bad_input, chunk_size=_CHUNK_SIZE)
    # Valid payload but invalid sizing parameters.
    for bad_kwargs in ({'chunk_size': 0},
                       {'chunk_size': -1},
                       {'chunk_size': _CHUNK_SIZE, 'string_max_len': -1}):
        with pytest.raises(ArcticSerializationException):
            IncrementalPandasToRecArraySerializer(df_serializer, _mixed_test_data()['small'][0], **bad_kwargs)
def test_none_df():
    """Both serialization entry points must raise when the data is None."""
    for trigger in ('serialize', 'generator_bytes'):
        with pytest.raises(ArcticSerializationException):
            incr_ser = IncrementalPandasToRecArraySerializer(df_serializer, None, chunk_size=_CHUNK_SIZE)
            getattr(incr_ser, trigger)()
@pytest.mark.parametrize("input_df_descr", _mixed_test_data().keys())
def test_serialize_pandas_to_recarray(input_df_descr):
    """One-shot serialize() must match the precomputed expectation."""
    if not is_test_data_serializable(input_df_descr):
        return
    df = _mixed_test_data()[input_df_descr][0]
    expectation = _mixed_test_data()[input_df_descr][1]
    incr_ser = IncrementalPandasToRecArraySerializer(df_serializer, df, chunk_size=_CHUNK_SIZE)
    # An Exception class as expectation means the input must fail to serialize.
    if not isinstance(expectation, tuple) and issubclass(expectation, Exception):
        with pytest.raises(expectation):
            [chunk for chunk, _, _, _ in incr_ser.generator_bytes()]
        return
    incr_ser_data, incr_ser_dtype = incr_ser.serialize()
    assert expectation[0].tostring() == incr_ser_data.tostring()
    assert expectation[1] == incr_ser_dtype
@pytest.mark.parametrize("input_df_descr", _mixed_test_data().keys())
def test_serialize_incremental_pandas_to_recarray(input_df_descr):
    """Concatenated generator chunks must equal the one-shot expectation."""
    if not is_test_data_serializable(input_df_descr):
        return
    df = _mixed_test_data()[input_df_descr][0]
    expectation = _mixed_test_data()[input_df_descr][1]
    incr_ser = IncrementalPandasToRecArraySerializer(df_serializer, df, chunk_size=_CHUNK_SIZE)
    # An Exception class as expectation means the input must fail to serialize.
    if not isinstance(expectation, tuple) and issubclass(expectation, Exception):
        with pytest.raises(expectation):
            [chunk for chunk, _, _, _ in incr_ser.generator_bytes()]
        return
    produced = b''.join(chunk_b for chunk_b, _, _, _ in incr_ser.generator_bytes())
    assert expectation[0].tostring() == produced
    assert expectation[1] == incr_ser.dtype
@pytest.mark.parametrize("input_df_descr", _mixed_test_data().keys())
def test_serialize_incremental_chunk_size_pandas_to_recarray(input_df_descr):
    """Serialization must be chunk-size invariant: every tested chunk size
    must yield bytes identical to the one-shot expectation."""
    if not is_test_data_serializable(input_df_descr):
        return
    df = _mixed_test_data()[input_df_descr][0]
    expectation = _mixed_test_data()[input_df_descr][1]
    # Failure cases must fail regardless of the configured chunk size.
    if not isinstance(expectation, tuple) and issubclass(expectation, Exception):
        for div in (1, 4, 8):
            chunk_size = div * 8 * 1024 ** 2
            with pytest.raises(expectation):
                incr_ser = IncrementalPandasToRecArraySerializer(df_serializer, df, chunk_size=chunk_size)
                [chunk for chunk, _, _, _ in incr_ser.generator_bytes()]
        return
    for div in (1, 4, 8):
        chunk_size = div * 8 * 1024 ** 2
        # NOTE(review): `input_df_descr is not None` is always true for the
        # parametrized keys, so whenever an expectation array exists the
        # chunk size is overridden with a row-derived patch size (which is a
        # float under py3 true division) — confirm both are intended.
        if input_df_descr is not None and len(expectation) > 0:
            row_size = int(expectation[0].dtype.itemsize)
            chunk_size = NON_HOMOGENEOUS_DTYPE_PATCH_SIZE_ROWS * row_size / div
        incr_ser = IncrementalPandasToRecArraySerializer(df_serializer, df, chunk_size=chunk_size)
        chunk_bytes = [chunk for chunk, _, _, _ in incr_ser.generator_bytes()]
        matching = expectation[0].tostring() == b''.join(chunk_bytes)
        assert matching
        assert expectation[1] == incr_ser.dtype
@pytest.mark.parametrize("input_df_descr", _mixed_test_data().keys())
def test_shape(input_df_descr):
    """The serializer's shape must match the expected recarray's shape,
    or raise the expected exception for unserializable inputs."""
    if not is_test_data_serializable(input_df_descr):
        return
    df = _mixed_test_data()[input_df_descr][0]
    expectation = _mixed_test_data()[input_df_descr][1]
    incr_ser = IncrementalPandasToRecArraySerializer(df_serializer, df, chunk_size=_CHUNK_SIZE)
    if not isinstance(expectation, tuple) and issubclass(expectation, Exception):
        # BUG FIX: the old code iterated `incr_ser.shape` with a 4-way
        # unpacking copied from the generator tests; `shape` is a plain
        # property, so simply accessing it is what must raise.
        with pytest.raises(expectation):
            incr_ser.shape
    else:
        assert incr_ser.shape == expectation[0].shape
@pytest.mark.parametrize("from_idx, to_idx",
                         [(x, y) for (x, y) in itertools.product(range(-10, len(_mixed_test_data()['large'][0])+100, 500),
                                                                 range(-10, len(_mixed_test_data()['large'][0])+100, 500))
                          if x <= y]
                         )
def test_generator_bytes_range(from_idx, to_idx):
    """Serializing a row range (including negative/out-of-bounds indices)
    must match slicing the full expected recarray with the same indices."""
    df = _mixed_test_data()['large'][0]
    expectation = _mixed_test_data()['large'][1]
    incr_ser = IncrementalPandasToRecArraySerializer(df_serializer, df, chunk_size=_CHUNK_SIZE)
    produced = b''.join(chunk_b for chunk_b, _, _, _
                        in incr_ser.generator_bytes(from_idx=from_idx, to_idx=to_idx))
    assert expectation[0][from_idx:to_idx].tostring() == produced
    assert expectation[1] == incr_ser.dtype
| lgpl-2.1 |
masa-ito/ProtoToMET | src/test/benchmarkPlot.py | 1 | 1992 | import numpy as np
import matplotlib.pyplot as plt
# Create a figure of size 8x6 inches, 80 dots per inch
plt.figure(figsize=(8, 6), dpi=80)
# Create a new subplot from a grid of 1x1
plt.subplot(1, 1, 1)

# Thread counts used in the benchmark runs.
threadNums = [ 1, 2, 4,
               8, 12, 16,
               20, 24];
# elasped time (milisecond) for conjugate gradient
elapsedNonMeta = [ 3000.91, 1560.39, 832.975,
                   507.925, 430.125, 451.413,
                   392.125, 434.75]
elapsedMeta = [ 2929.45, 1544.96, 838.275,
                515.638, 432.825, 402.375,
                407.012, 459.425]
# the num. of doing conjugate gradient method in a second
speedNonMeta = [ 1000.0 / t for t in elapsedNonMeta ]
speedMeta = [ 1000.0 / t for t in elapsedMeta ]

# Ideal linear scaling, extrapolated from the averaged single-thread speed.
threadNumsFromZero = range(0, threadNums[-1]+1)
speed = [ ( speedNonMeta[0] + speedMeta[0] ) /2.0 * th
          for th in threadNumsFromZero ]

#plt.plot(X, C, color="blue", linewidth=1.0, linestyle="--")
plt.plot(threadNums, speedMeta,
         color="blue", linewidth=3.0, linestyle="-",
         marker="o", markersize=10.0,
         label="EDSL")
plt.plot(threadNums, speedNonMeta,
         color="red", linewidth=3.0, linestyle=":",
         marker="^", markersize=10.0,
         label="Non-OOP")
plt.plot( threadNumsFromZero, speed,
          color="green", linewidth=3.0, linestyle="--",
          label="Linear")

# Enlarge the tick labels on both axes.
ax = plt.gca()
for label in ax.get_xticklabels() + ax.get_yticklabels():
    label.set_fontsize(24)
    #label.set_bbox(dict(facecolor='white', edgecolor='None', alpha=0.65))

plt.legend(loc='upper left', fontsize=28)
#plt.legend(loc='lower right', fontsize=28)

# Set x limits
#plt.xlim(0.0, 24.0)
# Set x ticks
#plt.xticks([0, 4, 8, 16, 20, 24])
# Set y limits
#plt.ylim(0.000, 0.003)
# Set y ticks
plt.yticks([0.0, 2.5, 5.0, 7.5])

# Save figure using 200 dots per inch (both PNG and JPEG variants).
plt.savefig("benchmarkPlot20160529.png", dpi=200)
plt.savefig("benchmarkPlot20160529.jpg", dpi=200)

# Show result on screen
plt.show()
| lgpl-3.0 |
Averroes/statsmodels | statsmodels/tools/print_version.py | 23 | 7951 | #!/usr/bin/env python
from __future__ import print_function
from statsmodels.compat.python import reduce
import sys
from os.path import dirname
def safe_version(module, attr='__version__'):
    """Resolve a (possibly nested) version attribute on *module*.

    *attr* may be a single attribute name or a list of names that is
    followed step by step; a missing attribute anywhere along the chain
    yields the fallback string "Cannot detect version".
    """
    attrs = attr if isinstance(attr, list) else [attr]
    try:
        target = module
        for attr_name in attrs:
            target = getattr(target, attr_name)
        return target
    except AttributeError:
        return "Cannot detect version"
def _show_versions_only():
    """Print Python/OS info and dependency versions (without install paths).

    Each dependency is probed by importing it; an ImportError is reported
    as "Not installed" and never propagates.
    """
    print("\nINSTALLED VERSIONS")
    print("------------------")
    print("Python: %d.%d.%d.%s.%s" % sys.version_info[:])
    try:
        import os
        (sysname, nodename, release, version, machine) = os.uname()
        print("OS: %s %s %s %s" % (sysname, release, version, machine))
        print("byteorder: %s" % sys.byteorder)
        print("LC_ALL: %s" % os.environ.get('LC_ALL', "None"))
        print("LANG: %s" % os.environ.get('LANG', "None"))
    # NOTE(review): bare except — presumably for platforms without os.uname()
    # (e.g. Windows), but it silently swallows *all* errors in this section.
    except:
        pass
    try:
        from statsmodels import version
        has_sm = True
    except ImportError:
        has_sm = False
    print('\nStatsmodels\n===========\n')
    if has_sm:
        print('Installed: %s' % safe_version(version, 'full_version'))
    else:
        print('Not installed')
    print("\nRequired Dependencies\n=====================\n")
    try:
        import Cython
        print("cython: %s" % safe_version(Cython))
    except ImportError:
        print("cython: Not installed")
    try:
        import numpy
        print("numpy: %s" % safe_version(numpy, ['version', 'version']))
    except ImportError:
        print("numpy: Not installed")
    try:
        import scipy
        print("scipy: %s" % safe_version(scipy, ['version', 'version']))
    except ImportError:
        print("scipy: Not installed")
    try:
        import pandas
        print("pandas: %s" % safe_version(pandas, ['version', 'version']))
    except ImportError:
        print("pandas: Not installed")
    try:
        import dateutil
        print("    dateutil: %s" % safe_version(dateutil))
    except ImportError:
        print("    dateutil: not installed")
    try:
        import patsy
        print("patsy: %s" % safe_version(patsy))
    except ImportError:
        print("patsy: Not installed")
    print("\nOptional Dependencies\n=====================\n")
    try:
        import matplotlib as mpl
        print("matplotlib: %s" % safe_version(mpl))
    except ImportError:
        print("matplotlib: Not installed")
    try:
        from cvxopt import info
        print("cvxopt: %s" % safe_version(info, 'version'))
    except ImportError:
        print("cvxopt: Not installed")
    print("\nDeveloper Tools\n================\n")
    try:
        import IPython
        print("IPython: %s" % safe_version(IPython))
    except ImportError:
        print("IPython: Not installed")
    try:
        import jinja2
        print("    jinja2: %s" % safe_version(jinja2))
    except ImportError:
        print("    jinja2: Not installed")
    try:
        import sphinx
        print("sphinx: %s" % safe_version(sphinx))
    except ImportError:
        print("sphinx: Not installed")
    try:
        import pygments
        print("    pygments: %s" % safe_version(pygments))
    except ImportError:
        print("    pygments: Not installed")
    try:
        import nose
        print("nose: %s" % safe_version(nose))
    except ImportError:
        print("nose: Not installed")
    try:
        import virtualenv
        print("virtualenv: %s" % safe_version(virtualenv))
    except ImportError:
        print("virtualenv: Not installed")
    print("\n")
def show_versions(show_dirs=True):
    """Print the versions of statsmodels and its dependencies to stdout.

    Parameters
    ----------
    show_dirs : bool
        If True (default), also print the install location of each package.
        If False, delegate to ``_show_versions_only`` and print versions only.
    """
    if not show_dirs:
        _show_versions_only()
        # Bug fix: without this return the full with-paths report below was
        # printed as well, duplicating every line of output.
        return
    print("\nINSTALLED VERSIONS")
    print("------------------")
    print("Python: %d.%d.%d.%s.%s" % sys.version_info[:])
    try:
        import os
        (sysname, nodename, release, version, machine) = os.uname()
        print("OS: %s %s %s %s" % (sysname, release, version, machine))
        print("byteorder: %s" % sys.byteorder)
        print("LC_ALL: %s" % os.environ.get('LC_ALL', "None"))
        print("LANG: %s" % os.environ.get('LANG', "None"))
    except Exception:
        # os.uname() does not exist on all platforms (e.g. Windows); the
        # platform info is best-effort only, so swallow failures here --
        # but no longer with a bare ``except:`` that would also hide
        # KeyboardInterrupt/SystemExit.
        pass
    try:
        import statsmodels
        from statsmodels import version
        has_sm = True
    except ImportError:
        has_sm = False
    print('\nStatsmodels\n===========\n')
    if has_sm:
        print('Installed: %s (%s)' % (safe_version(version, 'full_version'),
                                      dirname(statsmodels.__file__)))
    else:
        print('Not installed')
    print("\nRequired Dependencies\n=====================\n")
    try:
        import Cython
        print("cython: %s (%s)" % (safe_version(Cython),
                                   dirname(Cython.__file__)))
    except ImportError:
        print("cython: Not installed")
    try:
        import numpy
        print("numpy: %s (%s)" % (safe_version(numpy, ['version', 'version']),
                                  dirname(numpy.__file__)))
    except ImportError:
        print("numpy: Not installed")
    try:
        import scipy
        print("scipy: %s (%s)" % (safe_version(scipy, ['version', 'version']),
                                  dirname(scipy.__file__)))
    except ImportError:
        print("scipy: Not installed")
    try:
        import pandas
        print("pandas: %s (%s)" % (safe_version(pandas, ['version',
                                                         'version']),
                                   dirname(pandas.__file__)))
    except ImportError:
        print("pandas: Not installed")
    try:
        import dateutil
        print("    dateutil: %s (%s)" % (safe_version(dateutil),
                                         dirname(dateutil.__file__)))
    except ImportError:
        print("    dateutil: not installed")
    try:
        import patsy
        print("patsy: %s (%s)" % (safe_version(patsy),
                                  dirname(patsy.__file__)))
    except ImportError:
        print("patsy: Not installed")
    print("\nOptional Dependencies\n=====================\n")
    try:
        import matplotlib as mpl
        print("matplotlib: %s (%s)" % (safe_version(mpl),
                                       dirname(mpl.__file__)))
    except ImportError:
        print("matplotlib: Not installed")
    try:
        from cvxopt import info
        print("cvxopt: %s (%s)" % (safe_version(info, 'version'),
                                   dirname(info.__file__)))
    except ImportError:
        print("cvxopt: Not installed")
    print("\nDeveloper Tools\n================\n")
    try:
        import IPython
        print("IPython: %s (%s)" % (safe_version(IPython),
                                    dirname(IPython.__file__)))
    except ImportError:
        print("IPython: Not installed")
    try:
        import jinja2
        print("    jinja2: %s (%s)" % (safe_version(jinja2),
                                       dirname(jinja2.__file__)))
    except ImportError:
        print("    jinja2: Not installed")
    try:
        import sphinx
        print("sphinx: %s (%s)" % (safe_version(sphinx),
                                   dirname(sphinx.__file__)))
    except ImportError:
        print("sphinx: Not installed")
    try:
        import pygments
        print("    pygments: %s (%s)" % (safe_version(pygments),
                                         dirname(pygments.__file__)))
    except ImportError:
        print("    pygments: Not installed")
    try:
        import nose
        print("nose: %s (%s)" % (safe_version(nose), dirname(nose.__file__)))
    except ImportError:
        print("nose: Not installed")
    try:
        import virtualenv
        print("virtualenv: %s (%s)" % (safe_version(virtualenv),
                                       dirname(virtualenv.__file__)))
    except ImportError:
        print("virtualenv: Not installed")
    print("\n")
# Allow running this module directly to dump the full version report.
if __name__ == "__main__":
    show_versions()
| bsd-3-clause |
woozzu/tf_tutorials | 01_linear_regression_starter.py | 1 | 1652 | """
Simple linear regression example in TensorFlow
This program tries to predict the number of thefts from
the number of fire in the city of Chicago
"""
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import csv
# CSV file with one header row; per the module docstring each record pairs
# fire incidents with theft counts for a Chicago district.
DATA_FILE = 'data/fire_theft.csv'
# Step 1: read data
with open(DATA_FILE, 'r') as f:
    data = []
    reader = csv.reader(f, delimiter=',')
    for i, row in enumerate(reader):
        if i == 0:
            # Skip the header row.
            continue
        data.append(row)
    n_samples = len(data)
# Convert the string fields from the CSV into a float32 array.
data = np.asarray(data, dtype='float32')
# Step 2: create placeholders for input X (number of fire) and label Y (number of theft)
# YOUR CODE HERE
# Step 3: create weight and bias, initialized to 0
# YOUR CODE HERE
# Step 4: build model to predict Y
# YOUR CODE HERE
# Step 5: use the square error as the loss function
# YOUR CODE HERE
# Step 6: using gradient descent with learning rate of 0.001 to minimize loss
# YOUR CODE HERE
with tf.Session() as sess:
    # Step 7: initialize the necessary variables, in this case, w and b
    # YOUR CODE HERE
    # Step 8: train the model
    for i in range(100): # train the model 100 times
        total_loss = 0
        for x, y in data:
            # Session runs train_op and fetch values of loss
            # YOUR CODE HERE
            # NOTE(review): `l` (the per-sample loss) must be bound by the
            # Step 8 code above, otherwise this line raises NameError.
            total_loss += l
        print('Epoch {0}: {1}'.format(i, total_loss / n_samples))
    # Step 9: output the values of w and b
    # NOTE(review): `w` and `b` come from the Step 3 code to be filled in.
    w_value, b_value = sess.run([w, b])
# plot the results
X, Y = data.T[0], data.T[1]
plt.plot(X, Y, 'bo', label='Real data')
plt.plot(X, X * w_value + b_value, 'r', label='Predicted data')
plt.legend()
plt.show()
| mit |
michigraber/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample: two well-separated clusters around (-1, -1) and (1, 1)
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X)  # same toy data as a sparse (CSR) matrix
y = [-1, -1, -1, 1, 1, 1]
# three query points and the labels a correct classifier must assign them
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it (fixed seed keeps the tests deterministic)
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
    """Toy-data sanity check for every dense/sparse fit/predict combination."""
    # All four dense/CSR combinations must give the expected labels.
    combos = [(X, T), (X_csr, T_csr), (X_csr, T), (X, T_csr)]
    for train_data, query_data in combos:
        model = NearestCentroid()
        model.fit(train_data, y)
        assert_array_equal(model.predict(query_data), true_result)
    # Non-CSR sparse formats (COO to fit, LIL to predict) must work too.
    model = NearestCentroid()
    model.fit(X_csr.tocoo(), y)
    assert_array_equal(model.predict(T_csr.tolil()), true_result)
def test_precomputed():
    """With metric='precomputed', predict() accepts a distance matrix."""
    model = NearestCentroid(metric="precomputed")
    model.fit(X, y)
    dist_to_centroids = pairwise_distances(T, model.centroids_)
    assert_array_equal(model.predict(dist_to_centroids), true_result)
def test_iris():
    """Training-set accuracy on iris must exceed 0.9 for both metrics."""
    for metric in ('euclidean', 'cosine'):
        model = NearestCentroid(metric=metric).fit(iris.data, iris.target)
        accuracy = np.mean(model.predict(iris.data) == iris.target)
        assert accuracy > 0.9, "Failed with score = " + str(accuracy)
def test_iris_shrinkage():
    """Shrinkage thresholds must keep iris accuracy above 0.8."""
    for metric in ('euclidean', 'cosine'):
        for threshold in (None, 0.1, 0.5):
            model = NearestCentroid(metric=metric,
                                    shrink_threshold=threshold)
            model = model.fit(iris.data, iris.target)
            accuracy = np.mean(model.predict(iris.data) == iris.target)
            assert accuracy > 0.8, "Failed with score = " + str(accuracy)
def test_pickle():
    """A pickled and restored classifier must score exactly like the original."""
    import pickle
    # classification
    model = NearestCentroid()
    model.fit(iris.data, iris.target)
    original_score = model.score(iris.data, iris.target)
    clone = pickle.loads(pickle.dumps(model))
    assert_equal(type(clone), model.__class__)
    restored_score = clone.score(iris.data, iris.target)
    assert_array_equal(original_score, restored_score,
                       "Failed to generate same score"
                       " after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
    """Shrunken centroids must not depend on how the labels are encoded."""
    model = NearestCentroid(shrink_threshold=0.01)
    # Re-encode the -1 class as 0 and fit.
    relabeled = np.asarray(y)
    relabeled[relabeled == -1] = 0
    model.fit(X, relabeled)
    centroids_relabeled = model.centroids_
    # Fitting on the original encoding must give identical centroids.
    model.fit(X, y)
    assert_array_equal(centroids_relabeled, model.centroids_)
def test_predict_translated_data():
    """Predictions must be invariant under a constant translation of X."""
    rng = np.random.RandomState(0)
    samples = rng.rand(50, 50)
    labels = rng.randint(0, 3, 50)
    offset = rng.rand(50)
    model = NearestCentroid(shrink_threshold=0.1)
    model.fit(samples, labels)
    baseline_pred = model.predict(samples)
    # Refit on the shifted data and compare predictions.
    model = NearestCentroid(shrink_threshold=0.1)
    shifted = samples + offset
    model.fit(shifted, labels)
    shifted_pred = model.predict(shifted)
    assert_array_equal(baseline_pred, shifted_pred)
def test_manhattan_metric():
    """Manhattan centroids must agree between dense and sparse input."""
    model = NearestCentroid(metric='manhattan')
    model.fit(X, y)
    centroids_dense = model.centroids_
    model.fit(X_csr, y)
    assert_array_equal(model.centroids_, centroids_dense)
    # For this toy data the (component-wise median) centroids are known.
    assert_array_equal(centroids_dense, [[-1, -1], [1, 1]])
| bsd-3-clause |
kiyoto/statsmodels | tools/code_maintenance.py | 37 | 2307 | """
Code maintenance script modified from PyMC
"""
#!/usr/bin/env python
import sys
import os
# This is a function, not a test case, because it has to be run from inside
# the source tree to work well.
# Optional-dependency module names scanned for by the (currently disabled)
# dependency check at the bottom of this file.
mod_strs = ['IPython', 'pylab', 'matplotlib', 'scipy','Pdb']
# Maps each optional module name to the list of files that reference it.
dep_files = {}
for mod_str in mod_strs:
    dep_files[mod_str] = []
def remove_whitespace(fname):
    """Strip trailing whitespace from every line of ``fname``, in place.

    The file is rewritten with ``'\\n'`` line endings regardless of the
    original line endings (universal-newline reading).
    """
    # Read everything first, then rewrite.  The original implementation
    # used open(mode='U'), which was removed in Python 3.11, left the
    # file handles unclosed on error, and called a pointless seek(0).
    with open(fname) as fd:
        stripped = [line.rstrip() for line in fd]
    with open(fname, mode='w') as fd:
        for line in stripped:
            fd.write(line + '\n')
# print 'Removed whitespace from %s'%fname
def find_whitespace(fname):
    """Print ``fname`` (once) if any of its lines ends in a space.

    Only a blank immediately before the newline (``' \\n'``) is detected;
    a trailing tab is not reported (this matches the original check).
    """
    with open(fname) as fd:
        for line in fd:
            if ' \n' in line:
                # Fix: this was a Python 2 ``print fname`` statement,
                # a SyntaxError under Python 3.
                print(fname)
                break
# print
# When True only report files with trailing whitespace; when False fix them.
print_only = True
# ====================
# = Strip whitespace =
# ====================
for dirname, dirs, files in os.walk('.'):
    # Skip any path containing a '.' beyond the leading one
    # (hidden directories, build artefacts, etc.).
    if dirname[1:].find('.') == -1:
        for fname in files:
            # Process C/Fortran sources, Python/Cython files, common text
            # files, and extensionless files.
            # Bug fix: ``fname[-2:]`` is a two-character slice, so it must
            # be compared against '.c'/'.f' -- the old ``in ['c', 'f']``
            # test could never match a *.c or *.f file.
            if (fname[-2:] in ['.c', '.f'] or fname[-3:] == '.py'
                    or fname[-4:] in ['.pyx', '.txt', '.tex', '.sty', '.cls']
                    or fname.find('.') == -1):
                if print_only:
                    find_whitespace(dirname + '/' + fname)
                else:
                    remove_whitespace(dirname + '/' + fname)
"""
# ==========================
# = Check for dependencies =
# ==========================
for dirname, dirs, files in os.walk('pymc'):
for fname in files:
if fname[-3:]=='.py' or fname[-4:]=='.pyx':
if dirname.find('sandbox')==-1 and fname != 'test_dependencies.py'\
and dirname.find('examples')==-1:
for mod_str in mod_strs:
if file(dirname+'/'+fname).read().find(mod_str)>=0:
dep_files[mod_str].append(dirname+'/'+fname)
print 'Instances of optional dependencies found are:'
for mod_str in mod_strs:
print '\t'+mod_str+':'
for fname in dep_files[mod_str]:
print '\t\t'+fname
if len(dep_files['Pdb'])>0:
raise ValueError, 'Looks like Pdb was not commented out in '+', '.join(dep_files[mod_str])
"""
| bsd-3-clause |
etkirsch/scikit-learn | examples/applications/plot_prediction_latency.py | 234 | 11277 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
def _not_in_sphinx():
    """Return True when running as a normal script (not under sphinx)."""
    # The sphinx builder executes examples without a __file__ global.
    has_file_attr = '__file__' in globals()
    return has_file_attr
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
    """Measure the prediction runtime of each test instance separately.

    Parameters
    ----------
    estimator : fitted estimator exposing ``predict``
    X_test : ndarray of shape (n_instances, n_features)
    verbose : bool, print min/median/max runtimes when True

    Returns
    -------
    runtimes : ndarray of shape (n_instances,)
        Wall-clock prediction time of each instance, in seconds.
    """
    n_instances = X_test.shape[0]
    # Fix: ``np.float`` was removed in NumPy 1.24; the builtin ``float``
    # is the documented replacement (same float64 dtype).
    runtimes = np.zeros(n_instances, dtype=float)
    for i in range(n_instances):
        instance = X_test[i, :]
        start = time.time()
        estimator.predict(instance)
        runtimes[i] = time.time() - start
    if verbose:
        print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
            runtimes, 50), max(runtimes))
    return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
    """Measure the per-instance runtime of whole-batch prediction.

    Parameters
    ----------
    estimator : fitted estimator exposing ``predict``
    X_test : ndarray of shape (n_instances, n_features)
    n_bulk_repeats : int, number of times to repeat the bulk prediction
    verbose : bool, print min/median/max runtimes when True

    Returns
    -------
    runtimes : ndarray of shape (n_bulk_repeats,)
        Wall-clock seconds per instance for each bulk repeat.
    """
    n_instances = X_test.shape[0]
    # Fix: ``np.float`` was removed in NumPy 1.24; use builtin ``float``.
    runtimes = np.zeros(n_bulk_repeats, dtype=float)
    for i in range(n_bulk_repeats):
        start = time.time()
        estimator.predict(X_test)
        runtimes[i] = time.time() - start
    # Vectorized division replaces the old map/lambda/list round-trip.
    runtimes = runtimes / float(n_instances)
    if verbose:
        print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
            runtimes, 50), max(runtimes))
    return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
    """Time predictions of ``estimator`` in both atomic and bulk mode.

    Parameters
    ----------
    estimator : already trained estimator supporting `predict()`
    X_test : test input
    n_bulk_repeats : how many times to repeat when evaluating bulk mode

    Returns
    -------
    atomic_runtimes, bulk_runtimes : pair of `np.array` of runtimes
        in seconds.
    """
    atomic = atomic_benchmark_estimator(estimator, X_test, verbose)
    bulk = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
                                    verbose)
    return atomic, bulk
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
    """Generate and standardize a random regression problem.

    Returns (X_train, y_train, X_test, y_test); the test split is scaled
    with the training-set statistics.
    """
    if verbose:
        print("generating dataset...")
    X, y, coef = make_regression(n_samples=n_train + n_test,
                                 n_features=n_features, noise=noise,
                                 coef=True)
    X_train, X_test = X[:n_train], X[n_train:]
    y_train, y_test = y[:n_train], y[n_train:]
    # Shuffle the training split with a fixed seed for reproducibility.
    np.random.seed(13)
    idx = np.arange(n_train)
    np.random.shuffle(idx)
    X_train = X_train[idx]
    y_train = y_train[idx]
    # Standardize features using training statistics only.
    std = X_train.std(axis=0)
    mean = X_train.mean(axis=0)
    X_train = (X_train - mean) / std
    X_test = (X_test - mean) / std
    # Standardize the targets in the same way.
    std = y_train.std(axis=0)
    mean = y_train.mean(axis=0)
    y_train = (y_train - mean) / std
    y_test = (y_test - mean) / std
    gc.collect()
    if verbose:
        print("ok")
    return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
    """
    Plot a new `Figure` with boxplots of prediction runtimes.

    Parameters
    ----------
    runtimes : list of `np.array` of latencies in micro-seconds,
        one array per estimator in ``configuration['estimators']``
    pred_type : 'bulk' or 'atomic' (used in the figure title)
    configuration : dict with 'estimators' (list of estimator descriptors)
        and 'n_features'
    """
    fig, ax1 = plt.subplots(figsize=(10, 6))
    bp = plt.boxplot(runtimes, )
    # One "<name>\n(<complexity> <label>)" tick label per estimator.
    cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
                                  estimator_conf['complexity_computer'](
                                      estimator_conf['instance']),
                                  estimator_conf['complexity_label']) for
                 estimator_conf in configuration['estimators']]
    plt.setp(ax1, xticklabels=cls_infos)
    plt.setp(bp['boxes'], color='black')
    plt.setp(bp['whiskers'], color='black')
    plt.setp(bp['fliers'], color='red', marker='+')
    # Draw the grid behind the boxes.
    ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
                   alpha=0.5)
    ax1.set_axisbelow(True)
    ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
        pred_type.capitalize(),
        configuration['n_features']))
    ax1.set_ylabel('Prediction Time (us)')
    plt.show()
def benchmark(configuration):
    """Run the full benchmark and plot atomic and bulk latency boxplots."""
    X_train, y_train, X_test, y_test = generate_dataset(
        configuration['n_train'], configuration['n_test'],
        configuration['n_features'])
    stats = {}
    for conf in configuration['estimators']:
        print("Benchmarking", conf['instance'])
        conf['instance'].fit(X_train, y_train)
        gc.collect()
        atomic, bulk = benchmark_estimator(conf['instance'], X_test)
        stats[conf['name']] = {'atomic': atomic, 'bulk': bulk}
    names = [conf['name'] for conf in configuration['estimators']]
    # Convert seconds to microseconds before plotting.
    boxplot_runtimes([1e6 * stats[name]['atomic'] for name in names],
                     'atomic', configuration)
    boxplot_runtimes([1e6 * stats[name]['bulk'] for name in names],
                     'bulk (%d)' % configuration['n_test'],
                     configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
    """Estimate how the feature count affects bulk prediction latency.

    Parameters
    ----------
    estimators : dict of (name (str), estimator) to benchmark
    n_train : number of training instances (int)
    n_test : number of testing instances (int)
    n_features : list of feature-space dimensionalities to test (int)
    percentile : percentile at which to measure the speed (int [0-100])

    Returns
    -------
    percentiles : dict(estimator_name,
                       dict(n_features, percentile_perf_in_us))
    """
    percentiles = defaultdict(defaultdict)
    for n in n_features:
        print("benchmarking with %d features" % n)
        X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
        for name, estimator in estimators.items():
            estimator.fit(X_train, y_train)
            gc.collect()
            runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
            # Record the requested runtime percentile, in microseconds.
            percentiles[name][n] = 1e6 * scoreatpercentile(runtimes,
                                                           percentile)
    return percentiles
def plot_n_features_influence(percentiles, percentile):
    """Plot prediction latency (at the given percentile) vs. feature count.

    ``percentiles`` is the mapping produced by ``n_feature_influence``:
    estimator name -> {n_features: latency in microseconds}.
    """
    fig, ax1 = plt.subplots(figsize=(10, 6))
    colors = ['r', 'g', 'b']
    for i, cls_name in enumerate(percentiles.keys()):
        # Sort by feature count so the line is drawn left to right.
        x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
        y = np.array([percentiles[cls_name][n] for n in x])
        plt.plot(x, y, color=colors[i], )
    ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
                   alpha=0.5)
    ax1.set_axisbelow(True)
    ax1.set_title('Evolution of Prediction Time with #Features')
    ax1.set_xlabel('#Features')
    ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
    plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
    """Measure atomic prediction throughput (predictions/s) per estimator."""
    X_train, y_train, X_test, y_test = generate_dataset(
        configuration['n_train'], configuration['n_test'],
        configuration['n_features'])
    throughputs = dict()
    for conf in configuration['estimators']:
        conf['instance'].fit(X_train, y_train)
        # Predict single instances for duration_secs and count them.
        start_time = time.time()
        n_predictions = 0
        while (time.time() - start_time) < duration_secs:
            conf['instance'].predict(X_test[0])
            n_predictions += 1
        throughputs[conf['name']] = n_predictions / duration_secs
    return throughputs
def plot_benchmark_throughput(throughputs, configuration):
    """Bar-plot the measured throughput (predictions/s) of each estimator."""
    fig, ax = plt.subplots(figsize=(10, 6))
    colors = ['r', 'g', 'b']
    # One "<name>\n(<complexity> <label>)" tick label per estimator.
    cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
                                  estimator_conf['complexity_computer'](
                                      estimator_conf['instance']),
                                  estimator_conf['complexity_label']) for
                 estimator_conf in configuration['estimators']]
    cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
                  configuration['estimators']]
    plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
    ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
    ax.set_xticklabels(cls_infos, fontsize=10)
    # Leave 20% headroom above the tallest bar.
    ymax = max(cls_values) * 1.2
    ax.set_ylim((0, ymax))
    ax.set_ylabel('Throughput (predictions/sec)')
    ax.set_title('Prediction Throughput for different estimators (%d '
                 'features)' % configuration['n_features'])
    plt.show()
###############################################################################
# main code
# Time the whole example end to end.
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
# Each estimator descriptor carries a "complexity" accessor used for the
# plot tick labels (non-zero coefs / trees / support vectors).
configuration = {
    'n_train': int(1e3),
    'n_test': int(1e2),
    'n_features': int(1e2),
    'estimators': [
        {'name': 'Linear Model',
         'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
                                  l1_ratio=0.25, fit_intercept=True),
         'complexity_label': 'non-zero coefficients',
         'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
        {'name': 'RandomForest',
         'instance': RandomForestRegressor(),
         'complexity_label': 'estimators',
         'complexity_computer': lambda clf: clf.n_estimators},
        {'name': 'SVR',
         'instance': SVR(kernel='rbf'),
         'complexity_label': 'support vectors',
         'complexity_computer': lambda clf: len(clf.support_vectors_)},
    ]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
                                  configuration['n_train'],
                                  configuration['n_test'],
                                  [100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
| bsd-3-clause |
xubenben/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2]  # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
               'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
               'Linear SVC': SVC(kernel='linear', C=C, probability=True,
                                 random_state=0),
               'L2 logistic (Multinomial)': LogisticRegression(
                C=C, solver='lbfgs', multi_class='multinomial'
                )}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
# Dense grid over the (sepal length, sepal width) plane for the probability
# maps; bounds match the `extent` used by imshow below.
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
    classifier.fit(X, y)
    y_pred = classifier.predict(X)
    classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
    print("classif_rate for %s : %f " % (name, classif_rate))
    # View the per-class probabilities on the grid.
    probas = classifier.predict_proba(Xfull)
    n_classes = np.unique(y_pred).size
    for k in range(n_classes):
        plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
        plt.title("Class %d" % k)
        if k == 0:
            plt.ylabel(name)
        imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
                                   extent=(3, 9, 1, 5), origin='lower')
        plt.xticks(())
        plt.yticks(())
        # Overlay the training points predicted as class k.
        idx = (y_pred == k)
        if idx.any():
            plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
# Shared horizontal colorbar for all probability maps.
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
wangwei7175878/tutorials | matplotlibTUT/plt19_animation.py | 3 | 1573 | # View more python tutorials on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
# 19 - animation
"""
Please note, this script is for python3+.
If you are using python2+, please modify it accordingly.
Tutorial reference:
http://matplotlib.org/examples/animation/simple_anim.html
More animation example code:
http://matplotlib.org/examples/animation/
"""
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
fig, ax = plt.subplots()
# One full sine period sampled every 0.01 rad.
x = np.arange(0, 2*np.pi, 0.01)
# Keep the line artist so animate()/init() can update its y-data in place.
line, = ax.plot(x, np.sin(x))
def animate(i):
    """Shift the sine wave by i/10 radians and return the updated artist."""
    shifted = np.sin(x + i/10.0)
    line.set_ydata(shifted)  # update the data
    return line,
# Init only required for blitting to give a clean slate.
def init():
    """Reset the artist to the un-shifted sine wave (clean slate for blitting)."""
    baseline = np.sin(x)
    line.set_ydata(baseline)
    return line,
# call the animator. blit=True means only re-draw the parts that have changed.
# blit=True does not work on Mac, so set blit=False there.
# interval = delay between frames in milliseconds (update frequency)
ani = animation.FuncAnimation(fig=fig, func=animate, frames=100, init_func=init,
                              interval=20, blit=False)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
# anim.save('basic_animation.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
plt.show() | mit |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/sklearn/semi_supervised/label_propagation.py | 12 | 18811 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset given
label assignments over an initial subset. In one variant, the algorithm does
not allow for any errors in the initial assignment (hard-clamping) while
in another variant, the algorithm allows for some wiggle room for the initial
assignments, allowing them to change by a fraction alpha in each iteration
(soft-clamping).
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> rng = np.random.RandomState(42)
>>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Utkarsh Upadhyay <mail@musicallyut.in>
# License: BSD
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClassifierMixin
from ..externals import six
from ..metrics.pairwise import rbf_kernel
from ..neighbors.unsupervised import NearestNeighbors
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_X_y, check_is_fitted, check_array
from ..exceptions import ConvergenceWarning
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : integer
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3, n_jobs=1):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
self.n_jobs = n_jobs
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors,
n_jobs=self.n_jobs).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
elif callable(self.kernel):
if y is None:
return self.kernel(X, X)
else:
return self.kernel(X, y)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" or an explicit function "
" are supported at this time." % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
    def fit(self, X, y):
        """Fit a semi-supervised label propagation model based
        All the input data is provided matrix X (labeled and unlabeled)
        and corresponding label matrix y with a dedicated marker value for
        unlabeled samples.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            A {n_samples by n_samples} size matrix will be created from this
        y : array_like, shape = [n_samples]
            n_labeled_samples (unlabeled points are marked as -1)
            All unlabeled samples will be transductively assigned labels
        Returns
        -------
        self : returns an instance of self.
        """
        X, y = check_X_y(X, y)
        self.X_ = X
        check_classification_targets(y)
        # actual graph construction (implementations should override this)
        graph_matrix = self._build_graph()
        # label construction
        # construct a categorical distribution for classification only
        classes = np.unique(y)
        classes = (classes[classes != -1])  # -1 is the "unlabeled" marker
        self.classes_ = classes
        n_samples, n_classes = len(y), len(classes)
        alpha = self.alpha
        # soft clamping (LabelSpreading) is only defined for alpha in (0, 1)
        if self._variant == 'spreading' and \
                (alpha is None or alpha <= 0.0 or alpha >= 1.0):
            raise ValueError('alpha=%s is invalid: it must be inside '
                             'the open interval (0, 1)' % alpha)
        y = np.asarray(y)
        unlabeled = y == -1
        # initialize distributions: one-hot rows for labeled samples,
        # all-zero rows for unlabeled ones
        self.label_distributions_ = np.zeros((n_samples, n_classes))
        for label in classes:
            self.label_distributions_[y == label, classes == label] = 1
        y_static = np.copy(self.label_distributions_)
        if self._variant == 'propagation':
            # LabelPropagation
            y_static[unlabeled] = 0
        else:
            # LabelSpreading
            y_static *= 1 - alpha
        l_previous = np.zeros((self.X_.shape[0], n_classes))
        unlabeled = unlabeled[:, np.newaxis]
        if sparse.isspmatrix(graph_matrix):
            graph_matrix = graph_matrix.tocsr()
        for self.n_iter_ in range(self.max_iter):
            # converged once the distributions moved less than tol (L1 norm)
            if np.abs(self.label_distributions_ - l_previous).sum() < self.tol:
                break
            l_previous = self.label_distributions_
            # one diffusion step along the graph
            self.label_distributions_ = safe_sparse_dot(
                graph_matrix, self.label_distributions_)
            if self._variant == 'propagation':
                normalizer = np.sum(
                    self.label_distributions_, axis=1)[:, np.newaxis]
                self.label_distributions_ /= normalizer
                # hard clamp: labeled points keep their initial distribution
                self.label_distributions_ = np.where(unlabeled,
                                                     self.label_distributions_,
                                                     y_static)
            else:
                # clamp
                self.label_distributions_ = np.multiply(
                    alpha, self.label_distributions_) + y_static
        else:
            # for/else: runs only when the loop exhausted without `break`,
            # i.e. the tolerance was never reached
            warnings.warn(
                'max_iter=%d was reached without convergence.' % self.max_iter,
                category=ConvergenceWarning
            )
            self.n_iter_ += 1
        normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
        self.label_distributions_ /= normalizer
        # set the transduction item
        transduction = self.classes_[np.argmax(self.label_distributions_,
                                               axis=1)]
        self.transduction_ = transduction.ravel()
        return self
class LabelPropagation(BaseLabelPropagation):
    """Label Propagation classifier
    Read more in the :ref:`User Guide <label_propagation>`.
    Parameters
    ----------
    kernel : {'knn', 'rbf', callable}
        String identifier for kernel function to use or the kernel function
        itself. Only 'rbf' and 'knn' strings are valid inputs. The function
        passed should take two inputs, each of shape [n_samples, n_features],
        and return a [n_samples, n_samples] shaped weight matrix.
    gamma : float
        Parameter for rbf kernel
    n_neighbors : integer > 0
        Parameter for knn kernel
    alpha : float
        Clamping factor.
        .. deprecated:: 0.19
            This parameter will be removed in 0.21.
            'alpha' is fixed to zero in 'LabelPropagation'.
    max_iter : integer
        Change maximum number of iterations allowed
    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.
    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.
    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.
    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.
    n_iter_ : int
        Number of iterations run.
    Examples
    --------
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelPropagation
    >>> label_prop_model = LabelPropagation()
    >>> iris = datasets.load_iris()
    >>> rng = np.random.RandomState(42)
    >>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelPropagation(...)
    References
    ----------
    Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
    with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
    University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
    See Also
    --------
    LabelSpreading : Alternate label propagation strategy more robust to noise
    """

    # tells the shared BaseLabelPropagation.fit to use hard clamping
    _variant = 'propagation'

    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
                 alpha=None, max_iter=1000, tol=1e-3, n_jobs=1):
        super(LabelPropagation, self).__init__(
            kernel=kernel, gamma=gamma, n_neighbors=n_neighbors, alpha=alpha,
            max_iter=max_iter, tol=tol, n_jobs=n_jobs)

    def _build_graph(self):
        """Matrix representing a fully connected graph between each sample

        This basic implementation creates a non-stochastic affinity matrix, so
        class distributions will exceed 1 (normalization may be desired).
        """
        if self.kernel == 'knn':
            # force _get_kernel to (re)fit the nearest-neighbors index
            self.nn_fit = None
        affinity_matrix = self._get_kernel(self.X_)
        normalizer = affinity_matrix.sum(axis=0)
        if sparse.isspmatrix(affinity_matrix):
            # BUG FIX: the previous code divided `affinity_matrix.data` by
            # np.diag(np.array(normalizer)).  For a sparse matrix,
            # ``sum(axis=0)`` is a (1, n) matrix, so np.diag collapsed it to
            # a single element and every entry was divided by the *same*
            # scalar instead of its own row's normalizer.  Scale CSR row i
            # by normalizer[i], mirroring the dense branch below.
            affinity_matrix = affinity_matrix.tocsr()
            scale = np.asarray(normalizer).ravel()
            affinity_matrix.data /= np.repeat(
                scale, np.diff(affinity_matrix.indptr))
        else:
            affinity_matrix /= normalizer[:, np.newaxis]
        return affinity_matrix

    def fit(self, X, y):
        """Fit the model to X, y after handling the deprecated ``alpha``.

        ``alpha`` is deprecated for LabelPropagation (hard clamping is always
        used); a non-None value only triggers a DeprecationWarning.
        """
        if self.alpha is not None:
            warnings.warn(
                "alpha is deprecated since 0.19 and will be removed in 0.21.",
                DeprecationWarning
            )
        # upstream deprecation behavior: force hard clamping in the base fit
        self.alpha = None
        return super(LabelPropagation, self).fit(X, y)
class LabelSpreading(BaseLabelPropagation):
    """LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
    but uses affinity matrix based on the normalized graph Laplacian
    and soft clamping across the labels.
    Read more in the :ref:`User Guide <label_propagation>`.
    Parameters
    ----------
    kernel : {'knn', 'rbf', callable}
        String identifier for kernel function to use or the kernel function
        itself. Only 'rbf' and 'knn' strings are valid inputs. The function
        passed should take two inputs, each of shape [n_samples, n_features],
        and return a [n_samples, n_samples] shaped weight matrix
    gamma : float
        parameter for rbf kernel
    n_neighbors : integer > 0
        parameter for knn kernel
    alpha : float
        Clamping factor. A value in [0, 1] that specifies the relative amount
        that an instance should adopt the information from its neighbors as
        opposed to its initial label.
        alpha=0 means keeping the initial label information; alpha=1 means
        replacing all initial information.
    max_iter : integer
        maximum number of iterations allowed
    tol : float
        Convergence tolerance: threshold to consider the system at steady
        state
    n_jobs : int, optional (default = 1)
        The number of parallel jobs to run.
        If ``-1``, then the number of jobs is set to the number of CPU cores.
    Attributes
    ----------
    X_ : array, shape = [n_samples, n_features]
        Input array.
    classes_ : array, shape = [n_classes]
        The distinct labels used in classifying instances.
    label_distributions_ : array, shape = [n_samples, n_classes]
        Categorical distribution for each item.
    transduction_ : array, shape = [n_samples]
        Label assigned to each item via the transduction.
    n_iter_ : int
        Number of iterations run.
    Examples
    --------
    >>> from sklearn import datasets
    >>> from sklearn.semi_supervised import LabelSpreading
    >>> label_prop_model = LabelSpreading()
    >>> iris = datasets.load_iris()
    >>> rng = np.random.RandomState(42)
    >>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3
    >>> labels = np.copy(iris.target)
    >>> labels[random_unlabeled_points] = -1
    >>> label_prop_model.fit(iris.data, labels)
    ... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    LabelSpreading(...)
    References
    ----------
    Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
    Bernhard Schoelkopf. Learning with local and global consistency (2004)
    http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
    See Also
    --------
    LabelPropagation : Unregularized graph based semi-supervised learning
    """
    # tells the shared BaseLabelPropagation.fit to use soft clamping
    _variant = 'spreading'
    def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
                 max_iter=30, tol=1e-3, n_jobs=1):
        # this one has different base parameters
        super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
                                             n_neighbors=n_neighbors,
                                             alpha=alpha, max_iter=max_iter,
                                             tol=tol,
                                             n_jobs=n_jobs)
    def _build_graph(self):
        """Graph matrix for Label Spreading computes the graph laplacian"""
        # compute affinity matrix (or gram matrix)
        if self.kernel == 'knn':
            # force _get_kernel to (re)fit the nearest-neighbors index
            self.nn_fit = None
        n_samples = self.X_.shape[0]
        affinity_matrix = self._get_kernel(self.X_)
        laplacian = sparse.csgraph.laplacian(affinity_matrix, normed=True)
        # negate: propagation uses D^-1/2 W D^-1/2 = I - L_norm (off-diagonal)
        laplacian = -laplacian
        if sparse.isspmatrix(laplacian):
            # NOTE(review): indexing .row/.col assumes csgraph.laplacian
            # returned a COO matrix here -- confirm with the pinned scipy.
            diag_mask = (laplacian.row == laplacian.col)
            laplacian.data[diag_mask] = 0.0
        else:
            laplacian.flat[::n_samples + 1] = 0.0  # set diag to 0.0
        return laplacian
| mit |
vermouthmjl/scikit-learn | examples/plot_isotonic_regression.py | 303 | 1767 | """
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data. The
isotonic regression finds a non-decreasing approximation of a function
while minimizing the mean squared error on the training data. The benefit
of such a model is that it does not assume any form for the target
function such as linearity. For comparison a linear regression is also
presented.
"""
print(__doc__)
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
#         Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
# Toy data: noisy but roughly increasing target over n evenly spaced points.
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
###############################################################################
# Fit IsotonicRegression and LinearRegression models
ir = IsotonicRegression()
y_ = ir.fit_transform(x, y)  # non-decreasing approximation evaluated at x
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y)  # x needs to be 2d for LinearRegression
###############################################################################
# plot result
# one vertical segment joins each observation to its isotonic fit
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(0.5 * np.ones(n))
fig = plt.figure()
plt.plot(x, y, 'r.', markersize=12)
plt.plot(x, y_, 'g.-', markersize=12)
plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-')
plt.gca().add_collection(lc)
plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right')
plt.title('Isotonic regression')
plt.show()
| bsd-3-clause |
jseabold/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
# Shared fixtures: one dense regression problem, sparse copies of it, and a
# stacked two-column target for the multi-output test.
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
    """A linear-kernel KernelRidge must match an intercept-free Ridge."""
    ridge_pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
    kernel_pred = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
    assert_array_almost_equal(ridge_pred, kernel_pred)
def test_kernel_ridge_csr():
    """Linear KernelRidge and cholesky Ridge agree on CSR input."""
    ridge = Ridge(alpha=1, fit_intercept=False, solver="cholesky")
    ridge_pred = ridge.fit(Xcsr, y).predict(Xcsr)
    kernel_pred = KernelRidge(kernel="linear",
                              alpha=1).fit(Xcsr, y).predict(Xcsr)
    assert_array_almost_equal(ridge_pred, kernel_pred)
def test_kernel_ridge_csc():
    """Linear KernelRidge and cholesky Ridge agree on CSC input."""
    ridge = Ridge(alpha=1, fit_intercept=False, solver="cholesky")
    ridge_pred = ridge.fit(Xcsc, y).predict(Xcsc)
    kernel_pred = KernelRidge(kernel="linear",
                              alpha=1).fit(Xcsc, y).predict(Xcsc)
    assert_array_almost_equal(ridge_pred, kernel_pred)
def test_kernel_ridge_singular_kernel():
    """alpha=0 makes the dual solve singular, triggering the lstsq fallback.

    The fallback result must still match an unregularized Ridge.
    """
    ridge_pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
    kr = KernelRidge(kernel="linear", alpha=0)
    ignore_warnings(kr.fit)(X, y)
    assert_array_almost_equal(ridge_pred, kr.predict(X))
def test_kernel_ridge_precomputed():
    """Fitting on a precomputed Gram matrix equals fitting on raw X."""
    for metric in ["linear", "rbf", "poly", "cosine"]:
        gram = pairwise_kernels(X, X, metric=metric)
        direct = KernelRidge(kernel=metric).fit(X, y).predict(X)
        via_gram = KernelRidge(kernel="precomputed").fit(gram, y).predict(gram)
        assert_array_almost_equal(direct, via_gram)
def test_kernel_ridge_precomputed_kernel_unchanged():
    """fit() must not modify a user-supplied precomputed kernel in place."""
    gram = np.dot(X, X.T)
    gram_copy = gram.copy()
    KernelRidge(kernel="precomputed").fit(gram, y)
    assert_array_almost_equal(gram, gram_copy)
def test_kernel_ridge_sample_weights():
    """Sample weights give identical fits for Ridge, linear and precomputed
    KernelRidge."""
    gram = np.dot(X, X.T)  # precomputed linear kernel
    weights = np.random.RandomState(0).rand(X.shape[0])
    ridge_pred = Ridge(alpha=1, fit_intercept=False).fit(
        X, y, sample_weight=weights).predict(X)
    linear_pred = KernelRidge(kernel="linear", alpha=1).fit(
        X, y, sample_weight=weights).predict(X)
    gram_pred = KernelRidge(kernel="precomputed", alpha=1).fit(
        gram, y, sample_weight=weights).predict(gram)
    assert_array_almost_equal(ridge_pred, linear_pred)
    assert_array_almost_equal(ridge_pred, gram_pred)
def test_kernel_ridge_multi_output():
    """Two-column targets match Ridge and stacking two single-output fits."""
    ridge_pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
    multi_pred = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
    assert_array_almost_equal(ridge_pred, multi_pred)
    single_pred = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
    stacked = np.array([single_pred, single_pred]).T
    assert_array_almost_equal(multi_pred, stacked)
| bsd-3-clause |
bhargav/scikit-learn | examples/linear_model/plot_huber_vs_ridge.py | 127 | 2206 | """
=======================================================
HuberRegressor vs Ridge on dataset with strong outliers
=======================================================
Fit Ridge and HuberRegressor on a dataset with outliers.
The example shows that the predictions in ridge are strongly influenced
by the outliers present in the dataset. The Huber regressor is less
influenced by the outliers since the model uses the linear loss for these.
As the parameter epsilon is increased for the Huber regressor, the decision
function approaches that of the ridge.
"""
# Authors: Manoj Kumar mks542@nyu.edu
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_regression
from sklearn.linear_model import HuberRegressor, Ridge
# Generate toy data.
rng = np.random.RandomState(0)
X, y = make_regression(n_samples=20, n_features=1, random_state=0, noise=4.0,
                       bias=100.0)
# Add four strong outliers to the dataset: two placed far right with low y,
# two far left with high y, so they pull a squared-loss fit strongly.
X_outliers = rng.normal(0, 0.5, size=(4, 1))
y_outliers = rng.normal(0, 2.0, size=4)
X_outliers[:2, :] += X.max() + X.mean() / 4.
X_outliers[2:, :] += X.min() - X.mean() / 4.
y_outliers[:2] += y.min() - y.mean() / 4.
y_outliers[2:] += y.max() + y.mean() / 4.
X = np.vstack((X, X_outliers))
y = np.concatenate((y, y_outliers))
plt.plot(X, y, 'b.')
# Fit the huber regressor over a series of epsilon values.
colors = ['r-', 'b-', 'y-', 'm-']
x = np.linspace(X.min(), X.max(), 7)
epsilon_values = [1.35, 1.5, 1.75, 1.9]
for k, epsilon in enumerate(epsilon_values):
    huber = HuberRegressor(fit_intercept=True, alpha=0.0, max_iter=100,
                           epsilon=epsilon)
    huber.fit(X, y)
    # evaluate the fitted line on the display grid x
    coef_ = huber.coef_ * x + huber.intercept_
    plt.plot(x, coef_, colors[k], label="huber loss, %s" % epsilon)
# Fit a ridge regressor to compare it to huber regressor.
ridge = Ridge(fit_intercept=True, alpha=0.0, random_state=0, normalize=True)
ridge.fit(X, y)
coef_ridge = ridge.coef_  # NOTE(review): unused variable, kept as-is
coef_ = ridge.coef_ * x + ridge.intercept_
plt.plot(x, coef_, 'g-', label="ridge regression")
plt.title("Comparison of HuberRegressor vs Ridge")
plt.xlabel("X")
plt.ylabel("y")
plt.legend(loc=0)
plt.show()
| bsd-3-clause |
jblackburne/scikit-learn | examples/manifold/plot_mds.py | 88 | 2731 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# License: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
# BUG FIX: the deprecated alias ``np.float`` was removed in NumPy 1.24 and
# raises AttributeError; the builtin ``float`` is the documented replacement.
X_true = seed.randint(0, 20, 2 * n_samples).astype(float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add symmetric noise to the similarities (diagonal kept at zero)
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
                   dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
                    dissimilarity="precomputed", random_state=seed, n_jobs=1,
                    n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the embeddings to the same total norm as the true positions
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data into a common PCA frame for visual comparison
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color='navy', s=s, lw=0,
            label='True Position')
plt.scatter(pos[:, 0], pos[:, 1], color='turquoise', s=s, lw=0, label='MDS')
plt.scatter(npos[:, 0], npos[:, 1], color='darkorange', s=s, lw=0, label='NMDS')
plt.legend(scatterpoints=1, loc='best', shadow=False)
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
# a sequence of (*line0*, *line1*, *line2*), where::
#     linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
            for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
                    zorder=0, cmap=plt.cm.Blues,
                    norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
ChanderG/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <hagberg@lanl.gov>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
    """Return the shortest path length from source to all reachable nodes.
    Returns a dictionary of shortest path lengths keyed by target.
    Parameters
    ----------
    graph: sparse matrix or 2D array (preferably LIL matrix)
        Adjacency matrix of the graph
    source : node label
        Starting node for path
    cutoff : integer, optional
        Depth to stop the search - only
        paths of length <= cutoff are returned.
    Examples
    --------
    >>> from sklearn.utils.graph import single_source_shortest_path_length
    >>> import numpy as np
    >>> graph = np.array([[ 0, 1, 0, 0],
    ...                   [ 1, 0, 1, 0],
    ...                   [ 0, 1, 0, 1],
    ...                   [ 0, 0, 1, 0]])
    >>> single_source_shortest_path_length(graph, 0)
    {0: 0, 1: 1, 2: 2, 3: 3}
    >>> single_source_shortest_path_length(np.ones((6, 6)), 2)
    {0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
    """
    if sparse.isspmatrix(graph):
        graph = graph.tolil()
    else:
        graph = sparse.lil_matrix(graph)
    seen = {}                  # level (number of hops) when seen in BFS
    level = 0                  # the current level
    next_level = [source]      # nodes to check at the next level
    while next_level:
        this_level = next_level     # advance to next level
        next_level = set()          # and start a new fringe
        for v in this_level:
            if v not in seen:
                seen[v] = level     # set the level of vertex v
                next_level.update(graph.rows[v])
        # BUG FIX: this cutoff test used to sit *inside* the `for` loop
        # above, so once `level` reached `cutoff` only the first node of
        # each fringe was recorded, at ever-increasing (wrong) levels.
        # Checking after the whole fringe is processed records every node
        # at distance <= cutoff exactly once (networkx BFS semantics).
        if cutoff is not None and cutoff <= level:
            break
        level += 1
    return seen  # return all path lengths as dictionary
# scipy >= 0.11 ships ``connected_components`` under scipy.sparse; older
# versions fall back to the implementation bundled in sklearn's sparsetools.
if hasattr(sparse, 'connected_components'):
    connected_components = sparse.connected_components
else:
    from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
    """ Return the Laplacian matrix of a directed graph.
    For non-symmetric graphs the out-degree is used in the computation.
    Parameters
    ----------
    csgraph : array_like or sparse matrix, 2 dimensions
        compressed-sparse graph, with shape (N, N).
    normed : bool, optional
        If True, then compute normalized Laplacian.
    return_diag : bool, optional
        If True, then return diagonal as well as laplacian.
    Returns
    -------
    lap : ndarray
        The N x N laplacian matrix of graph.
    diag : ndarray
        The length-N diagonal of the laplacian matrix.
        diag is returned only if return_diag is True.
    Notes
    -----
    The Laplacian matrix of a graph is sometimes referred to as the
    "Kirchoff matrix" or the "admittance matrix", and is useful in many
    parts of spectral graph theory. In particular, the eigen-decomposition
    of the laplacian matrix can give insight into many properties of the graph.
    For non-symmetric directed graphs, the laplacian is computed using the
    out-degree of each node.
    """
    if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
        raise ValueError('csgraph must be a square matrix or array')
    # The normalized Laplacian needs float arithmetic; promote integer input.
    # BUG FIX: np.issubdtype(dtype, np.int)/np.uint relied on aliases that
    # were removed from NumPy (1.24); np.integer covers both signed and
    # unsigned integer kinds.
    if normed and np.issubdtype(csgraph.dtype, np.integer):
        csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
    # dispatch on sparsity; both helpers honor normed/return_diag
    if sparse.isspmatrix(csgraph):
        return _laplacian_sparse(csgraph, normed=normed,
                                 return_diag=return_diag)
    else:
        return _laplacian_dense(csgraph, normed=normed,
                                return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
    """Sparse graph Laplacian.

    Works in COO format so the diagonal entries can be located with a
    (row == col) mask; missing diagonal entries are materialized first.
    """
    n_nodes = graph.shape[0]
    if not graph.format == 'coo':
        lap = (-graph).tocoo()
    else:
        lap = -graph.copy()
    diag_mask = (lap.row == lap.col)
    if not diag_mask.sum() == n_nodes:
        # The sparsity pattern of the matrix has holes on the diagonal,
        # we need to fix that
        diag_idx = lap.row[diag_mask]
        diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
        # append explicit (placeholder) entries for the missing diagonal
        new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
        new_row = np.concatenate([lap.row, diagonal_holes])
        new_col = np.concatenate([lap.col, diagonal_holes])
        lap = sparse.coo_matrix((new_data, (new_row, new_col)),
                                shape=lap.shape)
        diag_mask = (lap.row == lap.col)
    lap.data[diag_mask] = 0
    # out-degrees: row sums of -lap (i.e. of the original graph)
    w = -np.asarray(lap.sum(axis=1)).squeeze()
    if normed:
        w = np.sqrt(w)
        w_zeros = (w == 0)
        w[w_zeros] = 1  # avoid division by zero for isolated nodes
        lap.data /= w[lap.row]
        lap.data /= w[lap.col]
        # diagonal is 1 except for isolated nodes, which get 0
        lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
            lap.data.dtype)
    else:
        lap.data[diag_mask] = w[lap.row[diag_mask]]
    if return_diag:
        return lap, w
    return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
| bsd-3-clause |
franzpl/sweep | log_sweep_kaiser_window_script2/log_sweep_kaiser_window_script2.py | 2 | 2113 | #!/usr/bin/env python3
"""The influence of windowing of log. sweep signals when using a
Kaiser Window by fixing beta (=7) and fade_in (=0).
fstart = 1 Hz
fstop = 22050 Hz
"""
import sys
sys.path.append('..')  # make the shared measurement modules importable
import measurement_chain
import plotting
import calculation
import generation
import matplotlib.pyplot as plt
import windows
from scipy.signal import lfilter, fftconvolve
import numpy as np
# Parameters of the measuring system
fs = 44100      # sampling rate in Hz
fstart = 1      # sweep start frequency in Hz
fstop = 22050   # sweep stop frequency in Hz
duration = 1    # sweep length in seconds
pad = 4         # zero padding appended to the sweep, in seconds
# Generate excitation signal
excitation = generation.log_sweep(fstart, fstop, duration, fs)
N = len(excitation)
# Noise in measurement chain
noise_level_db = -30.
noise = measurement_chain.additive_noise(noise_level_db)
# FIR-Filter-System
dirac_system = measurement_chain.convolution([1.0])
# Combine system elements: ideal (dirac) system followed by additive noise
system = measurement_chain.chained(dirac_system, noise)
# Kaiser-window study parameters: beta and fade_in fixed, fade_out swept
beta = 7
fade_in = 0
fade_out_list = np.arange(0, 1001, 1)  # presumably in ms -- TODO confirm
# Spectrum of dirac for reference
dirac = np.zeros(pad * fs)
dirac[0] = 1
dirac_f = np.fft.rfft(dirac)
def get_results(fade_out):
    """Measure one impulse response for the given fade-out length.

    Windows the excitation sweep with a Kaiser window (module-level beta
    and fade_in fixed, fade_out varies), zero-pads it, drives it through
    the simulated measurement chain and deconvolves the impulse response.
    """
    kaiser = windows.window_kaiser(N, fade_in, fade_out, fs, beta)
    windowed_sweep = excitation * kaiser
    padded_sweep = generation.zero_padding(windowed_sweep, pad, fs)
    response = system(padded_sweep)
    return calculation.deconv_process(padded_sweep, response, fs)
# Sweep fade_out and record, for each value, the peak-to-noise ratio of the
# impulse response and its spectral distance to the ideal dirac.
with open("log_sweep_kaiser_window_script2.txt", "w") as f:
    for fade_out in fade_out_list:
        ir = get_results(fade_out)
        # pnr: first sample (the peak) against the tail up to 4 s
        pnr = calculation.pnr_db(ir[0], ir[1:4 * fs])
        spectrum_distance = calculation.vector_distance(
            dirac_f, np.fft.rfft(ir[:pad * fs]))
        f.write(
            str(fade_out) + " " + str(pnr) +
            " " + str(spectrum_distance) + " \n")
| mit |
cmdunkers/DeeperMind | PythonEnv/lib/python2.7/site-packages/scipy/stats/_distn_infrastructure.py | 3 | 112844 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy._lib.six import string_types, exec_
from scipy._lib._util import getargspec_no_self as _getargspec
import sys
import keyword
import re
import types
import warnings
from scipy.misc import doccer
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state
from scipy.special import (comb, chndtr, gammaln, hyp0f1,
entr, kl_div)
# for root finding for discrete distribution ppf, and max likelihood estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
from numpy import (arange, putmask, ravel, take, ones, sum, shape,
product, reshape, zeros, floor, logical_and, log, sqrt, exp,
ndarray)
from numpy import (place, any, argsort, argmax, vectorize,
asarray, nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _EPS, _XMAX
try:
    # Python 2: bind functions to instances via the `new` module
    from new import instancemethod
except ImportError:
    # Python 3
    def instancemethod(func, obj, cls):
        # `cls` is accepted for API compatibility but unused: on Python 3
        # types.MethodType only needs the function and the instance.
        return types.MethodType(func, obj)
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
``rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)``
Random variates.
"""
_doc_pdf = """\
``pdf(x, %(shapes)s, loc=0, scale=1)``
Probability density function.
"""
_doc_logpdf = """\
``logpdf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability density function.
"""
_doc_pmf = """\
``pmf(x, %(shapes)s, loc=0, scale=1)``
Probability mass function.
"""
_doc_logpmf = """\
``logpmf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability mass function.
"""
_doc_cdf = """\
``cdf(x, %(shapes)s, loc=0, scale=1)``
Cumulative density function.
"""
_doc_logcdf = """\
``logcdf(x, %(shapes)s, loc=0, scale=1)``
Log of the cumulative density function.
"""
_doc_sf = """\
``sf(x, %(shapes)s, loc=0, scale=1)``
Survival function (``1 - cdf`` --- sometimes more accurate).
"""
_doc_logsf = """\
``logsf(x, %(shapes)s, loc=0, scale=1)``
Log of the survival function.
"""
_doc_ppf = """\
``ppf(q, %(shapes)s, loc=0, scale=1)``
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
``isf(q, %(shapes)s, loc=0, scale=1)``
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
``moment(n, %(shapes)s, loc=0, scale=1)``
Non-central moment of order n
"""
_doc_stats = """\
``stats(%(shapes)s, loc=0, scale=1, moments='mv')``
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
``entropy(%(shapes)s, loc=0, scale=1)``
(Differential) entropy of the RV.
"""
_doc_fit = """\
``fit(data, %(shapes)s, loc=0, scale=1)``
Parameter estimates for generic data.
"""
_doc_expect = """\
``expect(func, %(shapes)s, loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
``expect(func, %(shapes)s, loc=0, lb=None, ub=None, conditional=False)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
``median(%(shapes)s, loc=0, scale=1)``
Median of the distribution.
"""
_doc_mean = """\
``mean(%(shapes)s, loc=0, scale=1)``
Mean of the distribution.
"""
_doc_var = """\
``var(%(shapes)s, loc=0, scale=1)``
Variance of the distribution.
"""
_doc_std = """\
``std(%(shapes)s, loc=0, scale=1)``
Standard deviation of the distribution.
"""
_doc_interval = """\
``interval(alpha, %(shapes)s, loc=0, scale=1)``
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, `%(name)s` object inherits from it
a collection of generic methods (see below for the full list),
and completes them with details specific for this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes,
'after_notes': _doc_default_locscale
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()

docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
                     'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
                     'mean', 'var', 'std', 'interval']
# Discrete distributions take no scale parameter: drop ", scale=1" from the
# signatures shown in the per-method docs, and remove the pdf entries.
for obj in _doc_disc_methods:
    docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')

_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods

docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
    'rv_continuous', 'rv_discrete')

_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:

rv = %(name)s(%(shapes)s, loc=0)
    - Frozen RV object with the same methods but holding the given shape and
      location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note

_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)

Calculate a few first moments:

%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')

Display the probability mass function (``pmf``):

>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
...               %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)

Alternatively, the distribution object can be called (as a function)
to fix the shape and location. This returns a "frozen" RV object holding
the given parameters fixed.

Freeze the distribution and display the frozen ``pmf``:

>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
...           label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()

Check accuracy of ``cdf`` and ``ppf``:

>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True

Generate random numbers:

>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
_doc_default_discrete_locscale = """\
The probability mass function above is defined in the "standardized" form.
To shift distribution use the ``loc`` parameter.
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
"""
docdict_discrete['example'] = _doc_default_discrete_example
docdict_discrete['after_notes'] = _doc_default_discrete_locscale

_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
                                     docdict_discrete['allmethods']])
docdict_discrete['before_notes'] = _doc_default_before_notes

_doc_default_disc = ''.join([docdict_discrete['longsummary'],
                             docdict_discrete['allmethods'],
                             docdict_discrete['frozennote'],
                             docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc

# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
    exec('del ' + obj)
del obj
try:
    del s
except NameError:
    # in Python 3, loop variables are not visible after the loop
    pass
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""
kurtosis is fourth central moment / variance**2 - 3
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
# Frozen RV class
class rv_frozen(object):
    """Distribution with shape, location and scale parameters fixed.

    Created by calling a distribution instance (or its ``freeze`` method).
    Every public method delegates to the wrapped distribution, re-supplying
    the stored positional/keyword arguments.
    """

    def __init__(self, dist, *args, **kwds):
        # positional/keyword parameters frozen into every delegated call
        self.args = args
        self.kwds = kwds

        # create a new instance
        self.dist = dist.__class__(**dist._ctor_param)

        # a, b may be set in _argcheck, depending on *args, **kwds. Ouch.
        shapes, _, _ = self.dist._parse_args(*args, **kwds)
        self.dist._argcheck(*shapes)
        self.a, self.b = self.dist.a, self.dist.b

    @property
    def random_state(self):
        # proxy for the wrapped distribution's RandomState
        return self.dist._random_state

    @random_state.setter
    def random_state(self, seed):
        self.dist._random_state = check_random_state(seed)

    def pdf(self, x):   # raises AttributeError in frozen discrete distribution
        return self.dist.pdf(x, *self.args, **self.kwds)

    def logpdf(self, x):
        return self.dist.logpdf(x, *self.args, **self.kwds)

    def cdf(self, x):
        return self.dist.cdf(x, *self.args, **self.kwds)

    def logcdf(self, x):
        return self.dist.logcdf(x, *self.args, **self.kwds)

    def ppf(self, q):
        return self.dist.ppf(q, *self.args, **self.kwds)

    def isf(self, q):
        return self.dist.isf(q, *self.args, **self.kwds)

    def rvs(self, size=None, random_state=None):
        # size/random_state are per-call options, not frozen parameters
        kwds = self.kwds.copy()
        kwds.update({'size': size, 'random_state': random_state})
        return self.dist.rvs(*self.args, **kwds)

    def sf(self, x):
        return self.dist.sf(x, *self.args, **self.kwds)

    def logsf(self, x):
        return self.dist.logsf(x, *self.args, **self.kwds)

    def stats(self, moments='mv'):
        kwds = self.kwds.copy()
        kwds.update({'moments': moments})
        return self.dist.stats(*self.args, **kwds)

    def median(self):
        return self.dist.median(*self.args, **self.kwds)

    def mean(self):
        return self.dist.mean(*self.args, **self.kwds)

    def var(self):
        return self.dist.var(*self.args, **self.kwds)

    def std(self):
        return self.dist.std(*self.args, **self.kwds)

    def moment(self, n):
        return self.dist.moment(n, *self.args, **self.kwds)

    def entropy(self):
        return self.dist.entropy(*self.args, **self.kwds)

    def pmf(self, k):   # raises AttributeError in frozen continuous distribution
        return self.dist.pmf(k, *self.args, **self.kwds)

    def logpmf(self, k):
        return self.dist.logpmf(k, *self.args, **self.kwds)

    def interval(self, alpha):
        return self.dist.interval(alpha, *self.args, **self.kwds)

    def expect(self, func=None, lb=None, ub=None,
               conditional=False, **kwds):
        # expect method only accepts shape parameters as positional args
        # hence convert self.args, self.kwds, also loc/scale
        # See the .expect method docstrings for the meaning of
        # other parameters.
        a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
        if isinstance(self.dist, rv_discrete):
            # discrete expect has no scale argument and takes no extra kwds
            if kwds:
                raise ValueError("Discrete expect does not accept **kwds.")
            return self.dist.expect(func, a, loc, lb, ub, conditional)
        else:
            return self.dist.expect(func, a, loc, scale, lb, ub,
                                    conditional, **kwds)
def valarray(shape, value=nan, typecode=None):
    """Return an ndarray of the given shape filled with `value`.

    The dtype follows NumPy's promotion of a boolean array with `value`;
    an explicit `typecode` forces a final cast.
    """
    filled = ones(shape, dtype=bool) * value
    if typecode is not None:
        filled = filled.astype(typecode)
    if isinstance(filled, ndarray):
        return filled
    return asarray(filled)
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
"""
np.where(cond, x, fillvalue) always evaluates x even where cond is False.
This one only evaluates f(arr1[cond], arr2[cond], ...).
For example,
>>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
>>> def f(a, b):
return a*b
>>> _lazywhere(a > 2, (a, b), f, np.nan)
array([ nan, nan, 21., 32.])
Notice it assumes that all `arrays` are of the same shape, or can be
broadcasted together.
"""
if fillvalue is None:
if f2 is None:
raise ValueError("One of (fillvalue, f2) must be given.")
else:
fillvalue = np.nan
else:
if f2 is not None:
raise ValueError("Only one of (fillvalue, f2) can be given.")
arrays = np.broadcast_arrays(*arrays)
temp = tuple(np.extract(cond, arr) for arr in arrays)
out = valarray(shape(arrays[0]), value=fillvalue)
np.place(out, cond, f(*temp))
if f2 is not None:
temp = tuple(np.extract(~cond, arr) for arr in arrays)
np.place(out, ~cond, f2(*temp))
return out
# This should be rewritten
def argsreduce(cond, *args):
    """Return each argument, broadcast against `cond`, flattened and
    restricted to the positions where ``ravel(cond)`` is True.

    Examples
    --------
    >>> import numpy as np
    >>> rand = np.random.random_sample
    >>> A = rand((4, 5))
    >>> B = 2
    >>> C = rand((1, 5))
    >>> cond = np.ones(A.shape)
    >>> [A1, B1, C1] = argsreduce(cond, A, B, C)
    >>> B1.shape
    (20,)
    >>> cond[2,:] = 0
    >>> [A2, B2, C2] = argsreduce(cond, A, B, C)
    >>> B2.shape
    (15,)
    """
    arrays = np.atleast_1d(*args)
    # a single argument comes back bare, not in a list
    if not isinstance(arrays, list):
        arrays = [arrays]
    # (cond == cond) is an all-True mask with cond's shape; multiplying by it
    # broadcasts scalar/smaller arguments up to that shape before extraction
    full_mask = (cond == cond)
    return [np.extract(cond, arr * full_mask) for arr in arrays]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return (%(shape_arg_str)s), %(locscale_out)s, size
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# I think the function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
a = asarray(df/2.0)
fac = -nc/2.0 - x/2.0 + (a-1)*log(x) - a*log(2) - gammaln(a)
return fac + np.nan_to_num(log(hyp0f1(a, nc * x/4.0)))
def _ncx2_pdf(x, df, nc):
    """Noncentral chi-squared pdf: exponentiates the log-pdf for stability."""
    return np.exp(_ncx2_log_pdf(x, df, nc))
def _ncx2_cdf(x, df, nc):
    """Noncentral chi-squared cdf via the special-function wrapper `chndtr`."""
    return chndtr(x, df, nc)
class rv_generic(object):
    """Class which encapsulates common functionality between rv_discrete
    and rv_continuous.

    """
    def __init__(self, seed=None):
        super(rv_generic, self).__init__()

        # figure out if _stats signature has 'moments' keyword
        sign = _getargspec(self._stats)
        self._stats_has_moments = ((sign[2] is not None) or
                                   ('moments' in sign[0]))
        self._random_state = check_random_state(seed)

    @property
    def random_state(self):
        """ Get or set the RandomState object for generating random variates.

        This can be either None or an existing RandomState object.

        If None (or np.random), use the RandomState singleton used by np.random.
        If already a RandomState instance, use it.
        If an int, use a new RandomState instance seeded with seed.

        """
        return self._random_state

    @random_state.setter
    def random_state(self, seed):
        self._random_state = check_random_state(seed)

    def _construct_argparser(
            self, meths_to_inspect, locscale_in, locscale_out):
        """Construct the parser for the shape arguments.

        Generates the argument-parsing functions dynamically and attaches
        them to the instance.

        Is supposed to be called in __init__ of a class for each distribution.

        If self.shapes is a non-empty string, interprets it as a
        comma-separated list of shape parameters.

        Otherwise inspects the call signatures of `meths_to_inspect`
        and constructs the argument-parsing functions from these.
        In this case also sets `shapes` and `numargs`.
        """
        if self.shapes:
            # sanitize the user-supplied shapes
            if not isinstance(self.shapes, string_types):
                raise TypeError('shapes must be a string.')

            shapes = self.shapes.replace(',', ' ').split()

            for field in shapes:
                if keyword.iskeyword(field):
                    raise SyntaxError('keywords cannot be used as shapes.')
                if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
                    raise SyntaxError(
                        'shapes must be valid python identifiers')
        else:
            # find out the call signatures (_pdf, _cdf etc), deduce shape
            # arguments. Generic methods only have 'self, x', any further args
            # are shapes.
            shapes_list = []
            for meth in meths_to_inspect:
                shapes_args = _getargspec(meth)   # NB: does not contain self
                args = shapes_args.args[1:]       # peel off 'x', too

                if args:
                    shapes_list.append(args)

                    # *args or **kwargs are not allowed w/automatic shapes
                    if shapes_args.varargs is not None:
                        raise TypeError(
                            '*args are not allowed w/out explicit shapes')
                    if shapes_args.keywords is not None:
                        raise TypeError(
                            '**kwds are not allowed w/out explicit shapes')
                    if shapes_args.defaults is not None:
                        raise TypeError('defaults are not allowed for shapes')

            if shapes_list:
                shapes = shapes_list[0]
                # make sure the signatures are consistent
                for item in shapes_list:
                    if item != shapes:
                        raise TypeError('Shape arguments are inconsistent.')
            else:
                shapes = []

        # have the arguments, construct the method from template
        shapes_str = ', '.join(shapes) + ', ' if shapes else ''  # NB: not None
        dct = dict(shape_arg_str=shapes_str,
                   locscale_in=locscale_in,
                   locscale_out=locscale_out,
                   )
        ns = {}
        exec_(parse_arg_template % dct, ns)
        # NB: attach to the instance, not class
        for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
            setattr(self, name,
                    instancemethod(ns[name], self, self.__class__)
                    )

        self.shapes = ', '.join(shapes) if shapes else None
        if not hasattr(self, 'numargs'):
            # allows more general subclassing with *args
            self.numargs = len(shapes)

    def _construct_doc(self, docdict, shapes_vals=None):
        """Construct the instance docstring with string substitutions."""
        tempdict = docdict.copy()
        tempdict['name'] = self.name or 'distname'
        tempdict['shapes'] = self.shapes or ''

        if shapes_vals is None:
            shapes_vals = ()
        vals = ', '.join('%.3g' % val for val in shapes_vals)
        tempdict['vals'] = vals

        if self.shapes:
            tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
        else:
            tempdict['set_vals_stmt'] = ''

        if self.shapes is None:
            # remove shapes from call parameters if there are none
            for item in ['default', 'before_notes']:
                tempdict[item] = tempdict[item].replace(
                    "\n%(shapes)s : array_like\n    shape parameters", "")
        for i in range(2):
            if self.shapes is None:
                # necessary because we use %(shapes)s in two forms (w w/o ", ")
                self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
            self.__doc__ = doccer.docformat(self.__doc__, tempdict)

        # correct for empty shapes
        self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')

    def _construct_default_doc(self, longname=None, extradoc=None,
                               docdict=None, discrete='continuous'):
        """Construct instance docstring from the default template."""
        if longname is None:
            longname = 'A'
        if extradoc is None:
            extradoc = ''
        if extradoc.startswith('\n\n'):
            extradoc = extradoc[2:]
        self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
                                '\n\n%(before_notes)s\n', docheaders['notes'],
                                extradoc, '\n%(example)s'])
        self._construct_doc(docdict)

    def freeze(self, *args, **kwds):
        """Freeze the distribution for the given arguments.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution.  Should include all
            the non-optional arguments, may include ``loc`` and ``scale``.

        Returns
        -------
        rv_frozen : rv_frozen instance
            The frozen distribution.

        """
        return rv_frozen(self, *args, **kwds)

    def __call__(self, *args, **kwds):
        return self.freeze(*args, **kwds)
    __call__.__doc__ = freeze.__doc__

    # The actual calculation functions (no basic checking need be done)
    # If these are defined, the others won't be looked at.
    # Otherwise, the other set can be defined.
    def _stats(self, *args, **kwds):
        # default: no analytic stats known; stats() falls back to _munp
        return None, None, None, None

    # Central moments
    def _munp(self, n, *args):
        # Silence floating point warnings from integration.
        olderr = np.seterr(all='ignore')
        vals = self.generic_moment(n, *args)
        np.seterr(**olderr)
        return vals

    ## These are the methods you must define (standard form functions)
    ## NB: generic _pdf, _logpdf, _cdf are different for
    ## rv_continuous and rv_discrete hence are defined in there
    def _argcheck(self, *args):
        """Default check for correct values on args and keywords.

        Returns condition array of 1's where arguments are correct and
         0's where they are not.

        """
        cond = 1
        for arg in args:
            cond = logical_and(cond, (asarray(arg) > 0))
        return cond

    ##(return 1-d using self._size to get number)
    def _rvs(self, *args):
        ## Use basic inverse cdf algorithm for RV generation as default.
        U = self._random_state.random_sample(self._size)
        Y = self._ppf(U, *args)
        return Y

    def _logcdf(self, x, *args):
        return log(self._cdf(x, *args))

    def _sf(self, x, *args):
        return 1.0-self._cdf(x, *args)

    def _logsf(self, x, *args):
        return log(self._sf(x, *args))

    def _ppf(self, q, *args):
        return self._ppfvec(q, *args)

    def _isf(self, q, *args):
        return self._ppf(1.0-q, *args)  # use correct _ppf for subclasses

    # These are actually called, and should not be overwritten if you
    # want to keep error checking.
    def rvs(self, *args, **kwds):
        """
        Random variates of given type.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        scale : array_like, optional
            Scale parameter (default=1).
        size : int or tuple of ints, optional
            Defining number of random variates (default is 1).
        random_state : None or int or ``np.random.RandomState`` instance, optional
            If int or RandomState, use it for drawing the random variates.
            If None, rely on ``self.random_state``.
            Default is None.

        Returns
        -------
        rvs : ndarray or scalar
            Random variates of given `size`.

        """
        discrete = kwds.pop('discrete', None)
        rndm = kwds.pop('random_state', None)
        args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
        cond = logical_and(self._argcheck(*args), (scale >= 0))
        if not np.all(cond):
            raise ValueError("Domain error in arguments.")

        # self._size is total size of all output values
        self._size = product(size, axis=0)
        if self._size is not None and self._size > 1:
            size = np.array(size, ndmin=1)

        if np.all(scale == 0):
            # degenerate distribution: all mass at loc
            return loc*ones(size, 'd')

        # extra gymnastics needed for a custom random_state
        if rndm is not None:
            random_state_saved = self._random_state
            self._random_state = check_random_state(rndm)

        vals = self._rvs(*args)
        if self._size is not None:
            vals = reshape(vals, size)

        vals = vals * scale + loc

        # do not forget to restore the _random_state
        if rndm is not None:
            self._random_state = random_state_saved

        # Cast to int if discrete
        if discrete:
            if np.isscalar(vals):
                vals = int(vals)
            else:
                vals = vals.astype(int)

        return vals

    def stats(self, *args, **kwds):
        """
        Some statistics of the given RV.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional (continuous RVs only)
            scale parameter (default=1)
        moments : str, optional
            composed of letters ['mvsk'] defining which moments to compute:
            'm' = mean,
            'v' = variance,
            's' = (Fisher's) skew,
            'k' = (Fisher's) kurtosis.
            (default is 'mv')

        Returns
        -------
        stats : sequence
            of requested moments.

        """
        args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
        # scale = 1 by construction for discrete RVs
        loc, scale = map(asarray, (loc, scale))
        args = tuple(map(asarray, args))
        # (loc == loc) filters out nan locations
        cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
        output = []
        default = valarray(shape(cond), self.badvalue)

        # Use only entries that are valid in calculation
        if any(cond):
            goodargs = argsreduce(cond, *(args+(scale, loc)))
            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]

            if self._stats_has_moments:
                mu, mu2, g1, g2 = self._stats(*goodargs,
                                              **{'moments': moments})
            else:
                mu, mu2, g1, g2 = self._stats(*goodargs)
            if g1 is None:
                mu3 = None
            else:
                if mu2 is None:
                    mu2 = self._munp(2, *goodargs)
                # (mu2**1.5) breaks down for nan and inf
                mu3 = g1 * np.power(mu2, 1.5)

            if 'm' in moments:
                if mu is None:
                    mu = self._munp(1, *goodargs)
                out0 = default.copy()
                place(out0, cond, mu * scale + loc)
                output.append(out0)

            if 'v' in moments:
                if mu2 is None:
                    mu2p = self._munp(2, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    mu2 = mu2p - mu * mu
                    if np.isinf(mu):
                        # if mean is inf then var is also inf
                        mu2 = np.inf
                out0 = default.copy()
                place(out0, cond, mu2 * scale * scale)
                output.append(out0)

            if 's' in moments:
                if g1 is None:
                    mu3p = self._munp(3, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    if mu2 is None:
                        mu2p = self._munp(2, *goodargs)
                        mu2 = mu2p - mu * mu
                    mu3 = mu3p - 3 * mu * mu2 - mu**3
                    g1 = mu3 / np.power(mu2, 1.5)
                out0 = default.copy()
                place(out0, cond, g1)
                output.append(out0)

            if 'k' in moments:
                if g2 is None:
                    mu4p = self._munp(4, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    if mu2 is None:
                        mu2p = self._munp(2, *goodargs)
                        mu2 = mu2p - mu * mu
                    if mu3 is None:
                        mu3p = self._munp(3, *goodargs)
                        mu3 = mu3p - 3 * mu * mu2 - mu**3
                    mu4 = mu4p - 4 * mu * mu3 - 6 * mu * mu * mu2 - mu**4
                    g2 = mu4 / mu2**2.0 - 3.0
                out0 = default.copy()
                place(out0, cond, g2)
                output.append(out0)
        else:  # no valid args
            output = []
            for _ in moments:
                out0 = default.copy()
                output.append(out0)

        if len(output) == 1:
            return output[0]
        else:
            return tuple(output)

    def entropy(self, *args, **kwds):
        """
        Differential entropy of the RV.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        scale : array_like, optional  (continuous distributions only).
            Scale parameter (default=1).

        Notes
        -----
        Entropy is defined base `e`:

        >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
        >>> np.allclose(drv.entropy(), np.log(2.0))
        True

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        # NB: for discrete distributions scale=1 by construction in _parse_args
        args = tuple(map(asarray, args))
        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
        output = zeros(shape(cond0), 'd')
        place(output, (1-cond0), self.badvalue)
        goodargs = argsreduce(cond0, *args)
        # np.vectorize doesn't work when numargs == 0 in numpy 1.6.2.  Once the
        # lowest supported numpy version is >= 1.7.0, this special case can be
        # removed (see gh-4314).
        if self.numargs == 0:
            place(output, cond0, self._entropy() + log(scale))
        else:
            place(output, cond0, self.vecentropy(*goodargs) + log(scale))
        return output

    def moment(self, n, *args, **kwds):
        """
        n-th order non-central moment of distribution.

        Parameters
        ----------
        n : int, n >= 1
            Order of moment.
        arg1, arg2, arg3,... : float
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        if not (self._argcheck(*args) and (scale > 0)):
            return nan
        if (floor(n) != n):
            raise ValueError("Moment must be an integer.")
        if (n < 0):
            raise ValueError("Moment must be positive.")
        mu, mu2, g1, g2 = None, None, None, None
        if (n > 0) and (n < 5):
            # ask _stats for just enough statistics to build moment n
            if self._stats_has_moments:
                mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
            else:
                mdict = {}
            mu, mu2, g1, g2 = self._stats(*args, **mdict)
        val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)

        # Convert to transformed  X = L + S*Y
        # E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
        if loc == 0:
            return scale**n * val
        else:
            result = 0
            fac = float(scale) / float(loc)
            for k in range(n):
                valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
                result += comb(n, k, exact=True)*(fac**k) * valk
            result += fac**n * val
            return result * loc**n

    def median(self, *args, **kwds):
        """
        Median of the distribution.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            Location parameter, Default is 0.
        scale : array_like, optional
            Scale parameter, Default is 1.

        Returns
        -------
        median : float
            The median of the distribution.

        See Also
        --------
        stats.distributions.rv_discrete.ppf
            Inverse of the CDF

        """
        return self.ppf(0.5, *args, **kwds)

    def mean(self, *args, **kwds):
        """
        Mean of the distribution.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        mean : float
            the mean of the distribution

        """
        kwds['moments'] = 'm'
        res = self.stats(*args, **kwds)
        if isinstance(res, ndarray) and res.ndim == 0:
            # unwrap 0-d arrays to plain scalars
            return res[()]
        return res

    def var(self, *args, **kwds):
        """
        Variance of the distribution.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        var : float
            the variance of the distribution

        """
        kwds['moments'] = 'v'
        res = self.stats(*args, **kwds)
        if isinstance(res, ndarray) and res.ndim == 0:
            # unwrap 0-d arrays to plain scalars
            return res[()]
        return res

    def std(self, *args, **kwds):
        """
        Standard deviation of the distribution.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        std : float
            standard deviation of the distribution

        """
        kwds['moments'] = 'v'
        res = sqrt(self.stats(*args, **kwds))
        return res

    def interval(self, alpha, *args, **kwds):
        """
        Confidence interval with equal areas around the median.

        Parameters
        ----------
        alpha : array_like of float
            Probability that an rv will be drawn from the returned range.
            Each value should be in the range [0, 1].
        arg1, arg2, ... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            location parameter, Default is 0.
        scale : array_like, optional
            scale parameter, Default is 1.

        Returns
        -------
        a, b : ndarray of float
            end-points of range that contain ``100 * alpha %`` of the rv's
            possible values.

        """
        alpha = asarray(alpha)
        if any((alpha > 1) | (alpha < 0)):
            raise ValueError("alpha must be between 0 and 1 inclusive")
        q1 = (1.0-alpha)/2
        q2 = (1.0+alpha)/2
        a = self.ppf(q1, *args, **kwds)
        b = self.ppf(q2, *args, **kwds)
        return a, b
## continuous random variables: implement maybe later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : float, optional
The value in a result arrays that indicates a value that for which
some argument restriction is violated, default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods. If not provided, shape parameters will be inferred from
the signature of the private methods, ``_pdf`` and ``_cdf`` of the
instance.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pdf
logpdf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
fit
fit_loc_scale
nnlf
Notes
-----
Public methods of an instance of a distribution class (e.g., ``pdf``,
``cdf``) check their arguments and pass valid arguments to private,
computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
if it is within the support of a distribution, ``self.a <= x <= self.b``.
Whether a shape parameter is valid is decided by an ``_argcheck`` method
(which defaults to checking that its arguments are strictly positive.)
**Subclassing**
New random variables can be defined by subclassing the `rv_continuous` class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1).
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could.
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
be useful for cross-checking and for debugging, but might work in all
cases when directly called.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, `shapes` will be automatically deduced from the signatures of the
overridden methods (`pdf`, `cdf` etc).
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
**Frozen Distributions**
Normally, you must provide shape parameters (and, optionally, location and
scale parameters to each call of a method of a distribution.
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
**Statistics**
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments``, which is a
string composed of "m", "v", "s", and/or "k".
Only the components appearing in string should be computed and
returned in the order "m", "v", "s", or "k" with missing values
returned as None.
Alternatively, you can override ``_munp``, which takes ``n`` and shape
parameters and returns the n-th non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following:
>>> from scipy.stats import rv_continuous
>>> class gaussian_gen(rv_continuous):
... "Gaussian distribution"
... def _pdf(self, x):
... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
>>> gaussian = gaussian_gen(name='gaussian')
``scipy.stats`` distributions are *instances*, so here we subclass
`rv_continuous` and create an instance. With this, we now have
a fully functional distribution with all relevant methods automagically
generated by the framework.
Note that above we defined a standard normal distribution, with zero mean
and unit variance. Shifting and scaling of the distribution can be done
by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
essentially computes ``y = (x - loc) / scale`` and
``gaussian._pdf(y) / scale``.
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_continuous, self).__init__(seed)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self._size = 1
self.moment_type = momtype
self.shapes = shapes
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
# backwards compat. these were removed in 0.14.0, put back but
# deprecated in 0.14.1:
self.vecfunc = np.deprecate(self._ppfvec, "vecfunc")
self.veccdf = np.deprecate(self._cdfvec, "veccdf")
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
if longname is None:
if name[0] in ['aeiouAEIOU']:
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict,
discrete='continuous')
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
left = right = None
if self.a > -np.inf:
left = self.a
if self.b < np.inf:
right = self.b
factor = 10.
if not left: # i.e. self.a = -inf
left = -1.*factor
while self._ppf_to_solve(left, q, *args) > 0.:
right = left
left *= factor
# left is now such that cdf(left) < q
if not right: # i.e. self.b = inf
right = factor
while self._ppf_to_solve(right, q, *args) < 0.:
left = right
right *= factor
# right is now such that cdf(right) > q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
return integrate.quad(self._mom_integ0, self.a, self.b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
    def _pdf(self, x, *args):
        # Generic fallback pdf: 5-point numerical derivative of the cdf.
        # Subclasses normally override this with an analytic expression.
        return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
def _cdf_single(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
    def _cdf(self, x, *args):
        # Generic fallback: vectorized wrapper around _cdf_single
        # (numerical integration of the pdf); built in __init__.
        return self._cdfvec(x, *args)
## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
## in rv_generic
    def pdf(self, x, *args, **kwds):
        """
        Probability density function at x of the given RV.

        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        pdf : ndarray
            Probability density function evaluated at x

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        # Standardize: evaluate _pdf on y = (x - loc) / scale.
        x = asarray((x-loc)*1.0/scale)
        # cond0: valid shape params & positive scale; cond1: x inside support.
        cond0 = self._argcheck(*args) & (scale > 0)
        cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        # Invalid parameters or NaN inputs map to self.badvalue.
        putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
        if any(cond):
            # Evaluate only the valid entries; divide by scale (chain rule).
            goodargs = argsreduce(cond, *((x,)+args+(scale,)))
            scale, goodargs = goodargs[-1], goodargs[:-1]
            place(output, cond, self._pdf(*goodargs) / scale)
        if output.ndim == 0:
            # 0-d input: return a plain scalar rather than a 0-d array.
            return output[()]
        return output
    def logpdf(self, x, *args, **kwds):
        """
        Log of the probability density function at x of the given RV.

        This uses a more numerically accurate calculation if available.

        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        logpdf : array_like
            Log of the probability density function evaluated at x

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        # Standardize: evaluate _logpdf on y = (x - loc) / scale.
        x = asarray((x-loc)*1.0/scale)
        cond0 = self._argcheck(*args) & (scale > 0)
        cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
        cond = cond0 & cond1
        # Outside the support log-density is -inf, so pre-fill with NINF.
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
        if any(cond):
            goodargs = argsreduce(cond, *((x,)+args+(scale,)))
            scale, goodargs = goodargs[-1], goodargs[:-1]
            # log(pdf / scale) == logpdf - log(scale)
            place(output, cond, self._logpdf(*goodargs) - log(scale))
        if output.ndim == 0:
            return output[()]
        return output
    def cdf(self, x, *args, **kwds):
        """
        Cumulative distribution function of the given RV.

        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        cdf : ndarray
            Cumulative distribution function evaluated at `x`

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        x = (x-loc)*1.0/scale
        cond0 = self._argcheck(*args) & (scale > 0)
        # cond1: strictly inside the support; cond2: at/above upper bound.
        cond1 = (scale > 0) & (x > self.a) & (x < self.b)
        cond2 = (x >= self.b) & cond0
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        place(output, (1-cond0)+np.isnan(x), self.badvalue)
        # cdf is exactly 1 at or beyond the upper support bound.
        place(output, cond2, 1.0)
        if any(cond):  # call only if at least 1 entry
            goodargs = argsreduce(cond, *((x,)+args))
            place(output, cond, self._cdf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def logcdf(self, x, *args, **kwds):
        """
        Log of the cumulative distribution function at x of the given RV.

        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        logcdf : array_like
            Log of the cumulative distribution function evaluated at x

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        x = (x-loc)*1.0/scale
        cond0 = self._argcheck(*args) & (scale > 0)
        cond1 = (scale > 0) & (x > self.a) & (x < self.b)
        cond2 = (x >= self.b) & cond0
        cond = cond0 & cond1
        # Below the support cdf is 0, so log-cdf defaults to -inf.
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
        # log(1) == 0 at or beyond the upper support bound.
        place(output, cond2, 0.0)
        if any(cond):  # call only if at least 1 entry
            goodargs = argsreduce(cond, *((x,)+args))
            place(output, cond, self._logcdf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def sf(self, x, *args, **kwds):
        """
        Survival function (1 - `cdf`) at x of the given RV.

        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        sf : array_like
            Survival function evaluated at x

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        x = (x-loc)*1.0/scale
        cond0 = self._argcheck(*args) & (scale > 0)
        cond1 = (scale > 0) & (x > self.a) & (x < self.b)
        # Below the lower support bound the survival function is exactly 1.
        cond2 = cond0 & (x <= self.a)
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        place(output, (1-cond0)+np.isnan(x), self.badvalue)
        place(output, cond2, 1.0)
        if any(cond):
            goodargs = argsreduce(cond, *((x,)+args))
            place(output, cond, self._sf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def logsf(self, x, *args, **kwds):
        """
        Log of the survival function of the given RV.

        Returns the log of the "survival function," defined as (1 - `cdf`),
        evaluated at `x`.

        Parameters
        ----------
        x : array_like
            quantiles
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        logsf : ndarray
            Log of the survival function evaluated at `x`.

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        x, loc, scale = map(asarray, (x, loc, scale))
        args = tuple(map(asarray, args))
        x = (x-loc)*1.0/scale
        cond0 = self._argcheck(*args) & (scale > 0)
        cond1 = (scale > 0) & (x > self.a) & (x < self.b)
        cond2 = cond0 & (x <= self.a)
        cond = cond0 & cond1
        # Above the support sf is 0, so log-sf defaults to -inf.
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        place(output, (1-cond0)+np.isnan(x), self.badvalue)
        # log(1) == 0 below the lower support bound.
        place(output, cond2, 0.0)
        if any(cond):
            goodargs = argsreduce(cond, *((x,)+args))
            place(output, cond, self._logsf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output
    def ppf(self, q, *args, **kwds):
        """
        Percent point function (inverse of `cdf`) at q of the given RV.

        Parameters
        ----------
        q : array_like
            lower tail probability
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        x : array_like
            quantile corresponding to the lower tail probability q.

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        q, loc, scale = map(asarray, (q, loc, scale))
        args = tuple(map(asarray, args))
        # (loc == loc) filters out NaN locations.
        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
        cond1 = (0 < q) & (q < 1)
        # q == 0 / q == 1 map directly to the support endpoints.
        cond2 = cond0 & (q == 0)
        cond3 = cond0 & (q == 1)
        cond = cond0 & cond1
        output = valarray(shape(cond), value=self.badvalue)

        lower_bound = self.a * scale + loc
        upper_bound = self.b * scale + loc
        place(output, cond2, argsreduce(cond2, lower_bound)[0])
        place(output, cond3, argsreduce(cond3, upper_bound)[0])

        if any(cond):  # call only if at least 1 entry
            goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
            # Un-standardize the quantile computed on the standard distribution.
            place(output, cond, self._ppf(*goodargs) * scale + loc)
        if output.ndim == 0:
            return output[()]
        return output
    def isf(self, q, *args, **kwds):
        """
        Inverse survival function (inverse of `sf`) at q of the given RV.

        Parameters
        ----------
        q : array_like
            upper tail probability
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional
            scale parameter (default=1)

        Returns
        -------
        x : ndarray or scalar
            Quantile corresponding to the upper tail probability q.

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        q, loc, scale = map(asarray, (q, loc, scale))
        args = tuple(map(asarray, args))
        # (loc == loc) filters out NaN locations.
        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
        cond1 = (0 < q) & (q < 1)
        # Note the endpoints are swapped relative to ppf: sf == 1 at the
        # lower bound and sf == 0 at the upper bound.
        cond2 = cond0 & (q == 1)
        cond3 = cond0 & (q == 0)
        cond = cond0 & cond1
        output = valarray(shape(cond), value=self.badvalue)

        lower_bound = self.a * scale + loc
        upper_bound = self.b * scale + loc
        place(output, cond2, argsreduce(cond2, lower_bound)[0])
        place(output, cond3, argsreduce(cond3, upper_bound)[0])

        if any(cond):
            goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
            place(output, cond, self._isf(*goodargs) * scale + loc)
        if output.ndim == 0:
            return output[()]
        return output
def _nnlf(self, x, *args):
return -sum(self._logpdf(x, *args), axis=0)
def nnlf(self, theta, x):
'''Return negative loglikelihood function.
Notes
-----
This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
parameters (including loc and scale).
'''
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
cond0 = (x <= self.a) | (self.b <= x)
if (any(cond0)):
return inf
else:
N = len(x)
return self._nnlf(x, *args) + N * log(scale)
    def _penalized_nnlf(self, theta, x):
        ''' Return negative loglikelihood function,
        i.e., - sum (log pdf(x, theta), axis=0)
        where theta are the parameters (including loc and scale)
        '''
        try:
            loc = theta[-2]
            scale = theta[-1]
            args = tuple(theta[:-2])
        except IndexError:
            raise ValueError("Not enough input arguments.")
        if not self._argcheck(*args) or scale <= 0:
            return inf
        x = asarray((x-loc) / scale)

        # Unlike nnlf, out-of-support samples incur a large *finite* penalty
        # (100 * log(_XMAX) each) instead of inf, so optimizers can recover.
        loginf = log(_XMAX)

        if np.isneginf(self.a).all() and np.isinf(self.b).all():
            # Unbounded support: nothing can fall outside it.
            Nbad = 0
        else:
            cond0 = (x <= self.a) | (self.b <= x)
            Nbad = sum(cond0)
            if Nbad > 0:
                # Drop the out-of-support samples before evaluating _nnlf.
                x = argsreduce(~cond0, x)[0]

        N = len(x)
        return self._nnlf(x, *args) + N*log(scale) + Nbad * 100.0 * loginf
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
loc, scale = self._fit_loc_scale_support(data, *args)
return args + (loc, scale)
    # Return the (possibly reduced) function to optimize in order to find MLE
    # estimates for the .fit method
    def _reduce_func(self, args, kwds):
        # First of all, convert fshapes params to fnum: eg for stats.beta,
        # shapes='a, b'. To fix `a`, can specify either `f1` or `fa`.
        # Convert the latter into the former.
        if self.shapes:
            fshapes = ['f%s' % s for s in self.shapes.replace(',', ' ').split()]
            for j, fs in enumerate(fshapes):
                if fs in kwds:
                    key = 'f%d' % j
                    if key in kwds:
                        # Both the named (fa) and positional (f0) spellings
                        # were given for the same parameter -- ambiguous.
                        raise ValueError("Cannot specify both %s and %s" %
                                         (fs, key))
                    else:
                        kwds.update({key: kwds[fs]})

        args = list(args)
        Nargs = len(args)
        # fixedn: indices into args that are held fixed during optimization.
        fixedn = []
        names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
        # x0: starting values for the *free* parameters only.
        x0 = []
        for n, key in enumerate(names):
            if key in kwds:
                fixedn.append(n)
                args[n] = kwds[key]
            else:
                x0.append(args[n])

        if len(fixedn) == 0:
            # Nothing fixed: optimize the full penalized nnlf directly.
            func = self._penalized_nnlf
            restore = None
        else:
            if len(fixedn) == Nargs:
                raise ValueError(
                    "All parameters fixed. There is nothing to optimize.")

            def restore(args, theta):
                # Replace with theta for all numbers not in fixedn
                # This allows the non-fixed values to vary, but
                # we still call self.nnlf with all parameters.
                i = 0
                for n in range(Nargs):
                    if n not in fixedn:
                        args[n] = theta[i]
                        i += 1
                return args

            def func(theta, x):
                # Rebuild the full parameter vector before evaluating.
                newtheta = restore(args[:], theta)
                return self._penalized_nnlf(newtheta, x)

        return x0, func, restore, args
    def fit(self, data, *args, **kwds):
        """
        Return MLEs for shape, location, and scale parameters from data.

        MLE stands for Maximum Likelihood Estimate.  Starting estimates for
        the fit are given by input arguments; for any arguments not provided
        with starting estimates, ``self._fitstart(data)`` is called to generate
        such.

        One can hold some parameters fixed to specific values by passing in
        keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
        and ``floc`` and ``fscale`` (for location and scale parameters,
        respectively).

        Parameters
        ----------
        data : array_like
            Data to use in calculating the MLEs.
        args : floats, optional
            Starting value(s) for any shape-characterizing arguments (those not
            provided will be determined by a call to ``_fitstart(data)``).
            No default value.
        kwds : floats, optional
            Starting values for the location and scale parameters; no default.
            Special keyword arguments are recognized as holding certain
            parameters fixed:

            - f0...fn : hold respective shape parameters fixed.
              Alternatively, shape parameters to fix can be specified by name.
              For example, if ``self.shapes == "a, b"``, ``fa`` is equivalent to
              ``f0`` and ``fb`` is equivalent to ``f1``.

            - floc : hold location parameter fixed to specified value.

            - fscale : hold scale parameter fixed to specified value.

            - optimizer : The optimizer to use.  The optimizer must take ``func``,
              and starting position as the first two arguments,
              plus ``args`` (for extra arguments to pass to the
              function to be optimized) and ``disp=0`` to suppress
              output as keyword arguments.

        Returns
        -------
        shape, loc, scale : tuple of floats
            MLEs for any shape statistics, followed by those for location and
            scale.

        Notes
        -----
        This fit is computed by maximizing a log-likelihood function, with
        penalty applied for samples outside of range of the distribution. The
        returned answer is not guaranteed to be the globally optimal MLE, it
        may only be locally optimal, or the optimization may fail altogether.

        Examples
        --------
        Generate some data to fit: draw random variates from the `beta`
        distribution

        >>> from scipy.stats import beta
        >>> a, b = 1., 2.
        >>> x = beta.rvs(a, b, size=1000)

        Now we can fit all four parameters (``a``, ``b``, ``loc`` and ``scale``):

        >>> a1, b1, loc1, scale1 = beta.fit(x)

        We can also use some prior knowledge about the dataset: let's keep
        ``loc`` and ``scale`` fixed:

        >>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
        >>> loc1, scale1
        (0, 1)

        We can also keep shape parameters fixed by using ``f``-keywords. To
        keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,
        equivalently, ``fa=1``:

        >>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
        >>> a1
        1

        """
        Narg = len(args)
        if Narg > self.numargs:
            raise TypeError("Too many input arguments.")

        start = [None]*2
        if (Narg < self.numargs) or not ('loc' in kwds and
                                         'scale' in kwds):
            # get distribution specific starting locations
            start = self._fitstart(data)
            args += start[Narg:-2]
        loc = kwds.get('loc', start[-2])
        scale = kwds.get('scale', start[-1])
        args += (loc, scale)
        # Build the (possibly reduced) objective honoring f0.../floc/fscale.
        x0, func, restore, args = self._reduce_func(args, kwds)

        optimizer = kwds.get('optimizer', optimize.fmin)
        # convert string to function in scipy.optimize
        if not callable(optimizer) and isinstance(optimizer, string_types):
            if not optimizer.startswith('fmin_'):
                optimizer = "fmin_"+optimizer
            if optimizer == 'fmin_':
                optimizer = 'fmin'
            try:
                optimizer = getattr(optimize, optimizer)
            except AttributeError:
                raise ValueError("%s is not a valid optimizer" % optimizer)
        vals = optimizer(func, x0, args=(ravel(data),), disp=0)
        if restore is not None:
            # Re-insert the fixed parameters into the optimized vector.
            vals = restore(args, vals)
        vals = tuple(vals)
        return vals
def _fit_loc_scale_support(self, data, *args):
"""
Estimate loc and scale parameters from data accounting for support.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
data = np.asarray(data)
# Estimate location and scale according to the method of moments.
loc_hat, scale_hat = self.fit_loc_scale(data, *args)
# Compute the support according to the shape parameters.
self._argcheck(*args)
a, b = self.a, self.b
support_width = b - a
# If the support is empty then return the moment-based estimates.
if support_width <= 0:
return loc_hat, scale_hat
# Compute the proposed support according to the loc and scale estimates.
a_hat = loc_hat + a * scale_hat
b_hat = loc_hat + b * scale_hat
# Use the moment-based estimates if they are compatible with the data.
data_a = np.min(data)
data_b = np.max(data)
if a_hat < data_a and data_b < b_hat:
return loc_hat, scale_hat
# Otherwise find other estimates that are compatible with the data.
data_width = data_b - data_a
rel_margin = 0.1
margin = data_width * rel_margin
# For a finite interval, both the location and scale
# should have interesting values.
if support_width < np.inf:
loc_hat = (data_a - a) - margin
scale_hat = (data_width + 2 * margin) / support_width
return loc_hat, scale_hat
# For a one-sided interval, use only an interesting location parameter.
if a > -np.inf:
return (data_a - a) - margin, 1
elif b < np.inf:
return (data_b - b) + margin, 1
else:
raise RuntimeError
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
mu, mu2 = self.stats(*args, **{'moments': 'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
    @np.deprecate
    def est_loc_scale(self, data, *args):
        """This function is deprecated, use self.fit_loc_scale(data) instead.
        """
        # Thin deprecated alias kept for backwards compatibility; emits a
        # DeprecationWarning via np.deprecate.
        return self.fit_loc_scale(data, *args)
    def _entropy(self, *args):
        # Generic differential entropy: integrate entr(pdf(x)) over the support.
        def integ(x):
            val = self._pdf(x, *args)
            return entr(val)

        # upper limit is often inf, so suppress warnings when integrating
        olderr = np.seterr(over='ignore')
        h = integrate.quad(integ, self.a, self.b)[0]
        np.seterr(**olderr)

        if not np.isnan(h):
            return h
        else:
            # try with different limits if integration problems
            # (truncate infinite bounds at the 1e-10 / 1 - 1e-10 quantiles)
            low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
            if np.isinf(self.b):
                upper = upp
            else:
                upper = self.b
            if np.isinf(self.a):
                lower = low
            else:
                lower = self.a
            return integrate.quad(integ, lower, upper)[0]
    def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
               conditional=False, **kwds):
        """Calculate expected value of a function with respect to the
        distribution.

        The expected value of a function ``f(x)`` with respect to a
        distribution ``dist`` is defined as::

                    ubound
            E[x] = Integral(f(x) * dist.pdf(x))
                    lbound

        Parameters
        ----------
        func : callable, optional
            Function for which integral is calculated. Takes only one argument.
            The default is the identity mapping f(x) = x.
        args : tuple, optional
            Shape parameters of the distribution.
        loc : float, optional
            Location parameter (default=0).
        scale : float, optional
            Scale parameter (default=1).
        lb, ub : scalar, optional
            Lower and upper bound for integration. Default is set to the
            support of the distribution.
        conditional : bool, optional
            If True, the integral is corrected by the conditional probability
            of the integration interval.  The return value is the expectation
            of the function, conditional on being in the given interval.
            Default is False.

        Additional keyword arguments are passed to the integration routine.

        Returns
        -------
        expect : float
            The calculated expected value.

        Notes
        -----
        The integration behavior of this function is inherited from
        `integrate.quad`.

        """
        lockwds = {'loc': loc,
                   'scale': scale}
        self._argcheck(*args)
        # Integrand: f(x) * pdf(x); identity f when no func is given.
        if func is None:
            def fun(x, *args):
                return x * self.pdf(x, *args, **lockwds)
        else:
            def fun(x, *args):
                return func(x) * self.pdf(x, *args, **lockwds)
        # Default integration bounds: the (shifted/scaled) support.
        if lb is None:
            lb = loc + self.a * scale
        if ub is None:
            ub = loc + self.b * scale
        if conditional:
            # Probability mass of [lb, ub], used to renormalize.
            invfac = (self.sf(lb, *args, **lockwds)
                      - self.sf(ub, *args, **lockwds))
        else:
            invfac = 1.0
        kwds['args'] = args
        # Silence floating point warnings from integration.
        olderr = np.seterr(all='ignore')
        vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
        np.seterr(**olderr)
        return vals
## Handlers for generic case where xk and pk are given
## The _drv prefix probably means discrete random variable.
def _drv_pmf(self, xk, *args):
try:
return self.P[xk]
except KeyError:
return 0.0
def _drv_cdf(self, xk, *args):
indx = argmax((self.xk > xk), axis=-1)-1
return self.F[self.xk[indx]]
def _drv_ppf(self, q, *args):
indx = argmax((self.qvals >= q), axis=-1)
return self.Finv[self.qvals[indx]]
def _drv_nonzero(self, k, *args):
return 1
def _drv_moment(self, n, *args):
n = asarray(n)
return sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
def _drv_moment_gen(self, t, *args):
t = asarray(t)
return sum(exp(self.xk * t[np.newaxis, ...]) * self.pk, axis=0)
def _drv2_moment(self, n, *args):
    """Non-central moment of discrete distribution."""
    # many changes, originally not even a return
    tot = 0.0
    diff = 1e100
    # pos = self.a
    pos = max(0.0, 1.0*self.a)
    count = 0
    # handle cases with infinite support
    # ulimit/llimit cap the summation when moment_tol is never reached.
    ulimit = max(1000, (min(self.b, 1000) + max(self.a, -1000))/2.0)
    llimit = min(-1000, (min(self.b, 1000) + max(self.a, -1000))/2.0)

    # Sum upward from the (non-negative part of the) support start until the
    # terms drop below moment_tol or the support/limit is exhausted.
    while (pos <= self.b) and ((pos <= ulimit) or
                               (diff > self.moment_tol)):
        diff = np.power(pos, n) * self.pmf(pos, *args)
        # use pmf because _pmf does not check support in randint and there
        # might be problems ? with correct self.a, self.b at this stage
        tot += diff
        pos += self.inc
        count += 1

    if self.a < 0:  # handle case when self.a = -inf
        # Sum downward over the negative part of the support symmetrically.
        diff = 1e100
        pos = -self.inc
        while (pos >= self.a) and ((pos >= llimit) or
                                   (diff > self.moment_tol)):
            diff = np.power(pos, n) * self.pmf(pos, *args)
            # using pmf instead of _pmf, see above
            tot += diff
            pos -= self.inc
            count += 1
    return tot
def _drv2_ppfsingle(self, q, *args):  # Use basic bisection algorithm
    # Invert the discrete cdf for a single probability q by bisection on
    # integer candidates between (finite proxies for) the support bounds.
    b = self.b
    a = self.a
    if isinf(b):            # Be sure ending point is > q
        # Grow b in steps of 10 until cdf(b) >= q (or the true bound is hit).
        b = int(max(100*q, 10))
        while 1:
            if b >= self.b:
                qb = 1.0
                break
            qb = self._cdf(b, *args)
            if (qb < q):
                b += 10
            else:
                break
    else:
        qb = 1.0
    if isinf(a):    # be sure starting point < q
        # Shrink a in steps of 10 until cdf(a) <= q (or the true bound is hit).
        a = int(min(-100*q, -10))
        while 1:
            if a <= self.a:
                qb = 0.0
                break
            qa = self._cdf(a, *args)
            if (qa > q):
                a -= 10
            else:
                break
    else:
        qa = self._cdf(a, *args)

    while 1:
        if (qa == q):
            return a
        if (qb == q):
            return b
        if b <= a+1:
            # testcase: return wrong number at lower index
            # python -c "from scipy.stats import zipf;print zipf.ppf(0.01, 2)" wrong
            # python -c "from scipy.stats import zipf;print zipf.ppf([0.01, 0.61, 0.77, 0.83], 2)"
            # python -c "from scipy.stats import logser;print logser.ppf([0.1, 0.66, 0.86, 0.93], 0.6)"
            if qa > q:
                return a
            else:
                return b
        c = int((a+b)/2.0)
        qc = self._cdf(c, *args)
        if (qc < q):
            if a != c:
                a = c
            else:
                # Midpoint equals an endpoint: the bracket can no longer shrink.
                raise RuntimeError('updating stopped, endless loop')
            qa = qc
        elif (qc > q):
            if b != c:
                b = c
            else:
                raise RuntimeError('updating stopped, endless loop')
            qb = qc
        else:
            return c
def entropy(pk, qk=None, base=None):
    """Calculate the entropy of a distribution for given probability values.

    If only probabilities `pk` are given, the entropy is calculated as
    ``S = -sum(pk * log(pk), axis=0)``.

    If `qk` is not None, then compute the Kullback-Leibler divergence
    ``S = sum(pk * log(pk / qk), axis=0)``.

    This routine will normalize `pk` and `qk` if they don't sum to 1.

    Parameters
    ----------
    pk : sequence
        Defines the (discrete) distribution. ``pk[i]`` is the (possibly
        unnormalized) probability of event ``i``.
    qk : sequence, optional
        Sequence against which the relative entropy is computed. Should be in
        the same format as `pk`.
    base : float, optional
        The logarithmic base to use, defaults to ``e`` (natural logarithm).

    Returns
    -------
    S : float
        The calculated entropy.

    """
    probs = asarray(pk)
    # Normalize so the probabilities sum to one along axis 0.
    probs = 1.0*probs / sum(probs, axis=0)
    if qk is None:
        # Plain Shannon entropy: sum of entr(p) = -p*log(p) terms.
        contributions = entr(probs)
    else:
        ref = asarray(qk)
        if len(ref) != len(probs):
            raise ValueError("qk and pk must have same length.")
        ref = 1.0*ref / sum(ref, axis=0)
        # Relative entropy: sum of kl_div(p, q) terms.
        contributions = kl_div(probs, ref)
    result = sum(contributions, axis=0)
    if base is not None:
        # Convert from nats to the requested logarithmic base.
        result /= log(base)
    return result
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances for discrete random variables. It can also be used
to construct an arbitrary distribution defined by a list of support
points and corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments.
values : tuple of two array_like, optional
``(xk, pk)`` where ``xk`` are integers with non-zero
probabilities ``pk`` with ``sum(pk) = 1``.
inc : integer, optional
Increment for the support of the distribution.
Default is 1. (other values have not been tested)
badvalue : float, optional
The value in a result arrays that indicates a value that for which
some argument restriction is violated, default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example "m, n" for a distribution
that takes two integers as the two shape arguments for all its methods
If not provided, shape parameters will be inferred from
the signatures of the private methods, ``_pmf`` and ``_cdf`` of
the instance.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None, the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pmf
logpmf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
Notes
-----
This class is similar to `rv_continuous`, the main differences being:
- the support of the distribution is a set of integers
- instead of the probability density function, ``pdf`` (and the
corresponding private ``_pdf``), this class defines the
*probability mass function*, `pmf` (and the corresponding
private ``_pmf``.)
- scale parameter is not defined.
To create a new discrete distribution, we would do the following:
>>> from scipy.stats import rv_discrete
>>> class poisson_gen(rv_discrete):
... "Poisson distribution"
... def _pmf(self, k, mu):
... return exp(-mu) * mu**k / factorial(k)
and create an instance::
>>> poisson = poisson_gen(name="poisson")
Note that above we defined the Poisson distribution in the standard form.
Shifting the distribution can be done by providing the ``loc`` parameter
to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
delegates the work to ``poisson._pmf(x-loc, mu)``.
**Discrete distributions from a list of probabilities**
Alternatively, you can construct an arbitrary discrete rv defined
on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
``values`` keyword argument to the `rv_discrete` constructor.
Examples
--------
Custom made discrete distribution:
>>> from scipy import stats
>>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>>
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
>>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
>>> plt.show()
Random number generation:
>>> R = custm.rvs(size=100)
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
             moment_tol=1e-8, values=None, inc=1, longname=None,
             shapes=None, extradoc=None, seed=None):
    """Initialize the discrete distribution.

    Parameters mirror the class docstring: ``a``/``b`` give the support,
    ``values=(xk, pk)`` defines a finite distribution explicitly, and the
    remaining arguments control naming, tolerances and docstring
    generation.  Raises nothing itself; bad shape/values surface later.
    """
    super(rv_discrete, self).__init__(seed)

    # Keep the constructor arguments so the instance can be re-created
    # with identical parameters (cf generic freeze).
    self._ctor_param = dict(
        a=a, b=b, name=name, badvalue=badvalue,
        moment_tol=moment_tol, values=values, inc=inc,
        longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)

    if badvalue is None:
        badvalue = nan
    if name is None:
        name = 'Distribution'
    self.badvalue = badvalue
    self.a = a
    self.b = b
    self.name = name
    self.moment_tol = moment_tol
    self.inc = inc
    # Vectorized single-point cdf used by the generic _cdf.
    self._cdfvec = vectorize(self._cdf_single, otypes='d')
    self.return_integers = 1
    self.vecentropy = vectorize(self._entropy)
    self.shapes = shapes
    self.extradoc = extradoc

    if values is not None:
        # Explicit finite distribution: sort the (xk, pk) pairs, derive
        # the support bounds, and build forward/inverse cdf lookup maps.
        self.xk, self.pk = values
        self.return_integers = 0
        indx = argsort(ravel(self.xk))
        self.xk = take(ravel(self.xk), indx, 0)
        self.pk = take(ravel(self.pk), indx, 0)
        self.a = self.xk[0]
        self.b = self.xk[-1]
        self.P = dict(zip(self.xk, self.pk))
        self.qvals = np.cumsum(self.pk, axis=0)
        self.F = dict(zip(self.xk, self.qvals))
        decreasing_keys = sorted(self.F.keys(), reverse=True)
        self.Finv = dict((self.F[k], k) for k in decreasing_keys)
        # Bind the table-driven implementations as instance methods.
        self._ppf = instancemethod(vectorize(_drv_ppf, otypes='d'),
                                   self, rv_discrete)
        self._pmf = instancemethod(vectorize(_drv_pmf, otypes='d'),
                                   self, rv_discrete)
        self._cdf = instancemethod(vectorize(_drv_cdf, otypes='d'),
                                   self, rv_discrete)
        self._nonzero = instancemethod(_drv_nonzero, self, rv_discrete)
        self.generic_moment = instancemethod(_drv_moment,
                                             self, rv_discrete)
        self.moment_gen = instancemethod(_drv_moment_gen,
                                         self, rv_discrete)
        self.shapes = ' '   # bypass inspection
        self._construct_argparser(meths_to_inspect=[self._pmf],
                                  locscale_in='loc=0',
                                  # scale=1 for discrete RVs
                                  locscale_out='loc, 1')
    else:
        self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
                                  locscale_in='loc=0',
                                  # scale=1 for discrete RVs
                                  locscale_out='loc, 1')

        # nin correction needs to be after we know numargs
        # correct nin for generic moment vectorization
        _vec_generic_moment = vectorize(_drv2_moment, otypes='d')
        _vec_generic_moment.nin = self.numargs + 2
        self.generic_moment = instancemethod(_vec_generic_moment,
                                             self, rv_discrete)
        # backwards compat. was removed in 0.14.0, put back but
        # deprecated in 0.14.1:
        self.vec_generic_moment = np.deprecate(_vec_generic_moment,
                                               "vec_generic_moment",
                                               "generic_moment")

        # correct nin for ppf vectorization
        _vppf = vectorize(_drv2_ppfsingle, otypes='d')
        _vppf.nin = self.numargs + 2    # +1 is for self
        self._ppfvec = instancemethod(_vppf,
                                      self, rv_discrete)

    # now that self.numargs is defined, we can adjust nin
    self._cdfvec.nin = self.numargs + 1

    # generate docstring for subclass instances
    if longname is None:
        # BUG FIX: this used to read ``name[0] in ['aeiouAEIOU']``, which
        # compares the single character against the whole string inside a
        # one-element list and can never match, so "An " was unreachable.
        if name[0] in 'aeiouAEIOU':
            hstr = "An "
        else:
            hstr = "A "
        longname = hstr + name

    if sys.flags.optimize < 2:
        # Skip adding docstrings if interpreter is run with -OO
        if self.__doc__ is None:
            self._construct_default_doc(longname=longname,
                                        extradoc=extradoc,
                                        docdict=docdict_discrete,
                                        discrete='discrete')
        else:
            dct = dict(distdiscrete)
            self._construct_doc(docdict_discrete, dct.get(self.name))

        # discrete RV do not have the scale parameter, remove it
        self.__doc__ = self.__doc__.replace(
            '\n    scale : array_like, '
            'optional\n        scale parameter (default=1)', '')
def _nonzero(self, k, *args):
return floor(k) == k
def _pmf(self, k, *args):
return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdf_single(self, k, *args):
m = arange(int(self.a), k+1)
return sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
def rvs(self, *args, **kwargs):
    """Draw random variates of the given type.

    Parameters
    ----------
    arg1, arg2, arg3,... : array_like
        Shape parameter(s) of the distribution (see the instance
        docstring for details).
    loc : array_like, optional
        Location parameter (default=0).
    size : int or tuple of ints, optional
        Number of variates to draw (default 1).  Must be passed as a
        keyword argument, not positionally.
    random_state : None or int or ``np.random.RandomState`` instance, optional
        If int or RandomState, use it for drawing the random variates.
        If None, rely on ``self.random_state``.  Default is None.

    Returns
    -------
    rvs : ndarray or scalar
        Random variates of the requested `size`.
    """
    # Tag the call as discrete so the shared rv_generic machinery
    # returns integers instead of floats.
    kwargs['discrete'] = True
    return super(rv_discrete, self).rvs(*args, **kwargs)
def pmf(self, k, *args, **kwds):
    """Probability mass function at ``k`` of the given RV.

    Parameters
    ----------
    k : array_like
        Quantiles.
    arg1, arg2, arg3,... : array_like
        Shape parameter(s) of the distribution (see the instance
        docstring for details).
    loc : array_like, optional
        Location parameter (default=0).

    Returns
    -------
    pmf : array_like
        Probability mass function evaluated at ``k``.
    """
    args, loc, _ = self._parse_args(*args, **kwds)
    k, loc = map(asarray, (k, loc))
    args = tuple(map(asarray, args))
    # Shift to the standardized distribution.
    k = asarray(k - loc)
    args_ok = self._argcheck(*args)
    in_support = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
    valid = args_ok & in_support
    out = zeros(shape(valid), 'd')
    # Invalid shape params or NaN quantiles map to the bad value;
    # everything outside the support stays 0.
    place(out, (1-args_ok) + np.isnan(k), self.badvalue)
    if any(valid):
        good = argsreduce(valid, *((k,)+args))
        place(out, valid, np.clip(self._pmf(*good), 0, 1))
    if out.ndim == 0:
        return out[()]
    return out
def logpmf(self, k, *args, **kwds):
    """Log of the probability mass function at ``k`` of the given RV.

    Parameters
    ----------
    k : array_like
        Quantiles.
    arg1, arg2, arg3,... : array_like
        Shape parameter(s) of the distribution (see the instance
        docstring for details).
    loc : array_like, optional
        Location parameter.  Default is 0.

    Returns
    -------
    logpmf : array_like
        Log of the probability mass function evaluated at ``k``.
    """
    args, loc, _ = self._parse_args(*args, **kwds)
    k, loc = map(asarray, (k, loc))
    args = tuple(map(asarray, args))
    # Shift to the standardized distribution.
    k = asarray(k - loc)
    args_ok = self._argcheck(*args)
    in_support = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
    valid = args_ok & in_support
    out = empty(shape(valid), 'd')
    out.fill(NINF)          # log(0) everywhere outside the support
    place(out, (1-args_ok) + np.isnan(k), self.badvalue)
    if any(valid):
        good = argsreduce(valid, *((k,)+args))
        place(out, valid, self._logpmf(*good))
    if out.ndim == 0:
        return out[()]
    return out
def cdf(self, k, *args, **kwds):
    """Cumulative distribution function of the given RV.

    Parameters
    ----------
    k : array_like, int
        Quantiles.
    arg1, arg2, arg3,... : array_like
        Shape parameter(s) of the distribution (see the instance
        docstring for details).
    loc : array_like, optional
        Location parameter (default=0).

    Returns
    -------
    cdf : ndarray
        Cumulative distribution function evaluated at ``k``.
    """
    args, loc, _ = self._parse_args(*args, **kwds)
    k, loc = map(asarray, (k, loc))
    args = tuple(map(asarray, args))
    # Shift to the standardized distribution.
    k = asarray(k - loc)
    args_ok = self._argcheck(*args)
    interior = (k >= self.a) & (k < self.b)
    at_or_above_top = (k >= self.b)
    valid = args_ok & interior
    out = zeros(shape(valid), 'd')
    place(out, (1-args_ok) + np.isnan(k), self.badvalue)
    # cdf is identically 1 at and above the upper support bound.
    place(out, at_or_above_top*(args_ok == args_ok), 1.0)
    if any(valid):
        good = argsreduce(valid, *((k,)+args))
        place(out, valid, np.clip(self._cdf(*good), 0, 1))
    if out.ndim == 0:
        return out[()]
    return out
def logcdf(self, k, *args, **kwds):
    """Log of the cumulative distribution function at ``k`` of the given RV.

    Parameters
    ----------
    k : array_like, int
        Quantiles.
    arg1, arg2, arg3,... : array_like
        Shape parameter(s) of the distribution (see the instance
        docstring for details).
    loc : array_like, optional
        Location parameter (default=0).

    Returns
    -------
    logcdf : array_like
        Log of the cumulative distribution function evaluated at ``k``.
    """
    args, loc, _ = self._parse_args(*args, **kwds)
    k, loc = map(asarray, (k, loc))
    args = tuple(map(asarray, args))
    # Shift to the standardized distribution.
    k = asarray(k - loc)
    args_ok = self._argcheck(*args)
    interior = (k >= self.a) & (k < self.b)
    at_or_above_top = (k >= self.b)
    valid = args_ok & interior
    out = empty(shape(valid), 'd')
    out.fill(NINF)          # log(0) below the support
    place(out, (1-args_ok) + np.isnan(k), self.badvalue)
    # log(1) == 0 at and above the upper support bound.
    place(out, at_or_above_top*(args_ok == args_ok), 0.0)
    if any(valid):
        good = argsreduce(valid, *((k,)+args))
        place(out, valid, self._logcdf(*good))
    if out.ndim == 0:
        return out[()]
    return out
def sf(self, k, *args, **kwds):
    """Survival function (1 - `cdf`) at ``k`` of the given RV.

    Parameters
    ----------
    k : array_like
        Quantiles.
    arg1, arg2, arg3,... : array_like
        Shape parameter(s) of the distribution (see the instance
        docstring for details).
    loc : array_like, optional
        Location parameter (default=0).

    Returns
    -------
    sf : array_like
        Survival function evaluated at ``k``.
    """
    args, loc, _ = self._parse_args(*args, **kwds)
    k, loc = map(asarray, (k, loc))
    args = tuple(map(asarray, args))
    # Shift to the standardized distribution.
    k = asarray(k-loc)
    args_ok = self._argcheck(*args)
    in_support = (k >= self.a) & (k <= self.b)
    below_support = (k < self.a) & args_ok
    valid = args_ok & in_support
    out = zeros(shape(valid), 'd')
    place(out, (1-args_ok) + np.isnan(k), self.badvalue)
    # sf is identically 1 strictly below the support.
    place(out, below_support, 1.0)
    if any(valid):
        good = argsreduce(valid, *((k,)+args))
        place(out, valid, np.clip(self._sf(*good), 0, 1))
    if out.ndim == 0:
        return out[()]
    return out
def logsf(self, k, *args, **kwds):
    """Log of the survival function of the given RV.

    Returns the log of the "survival function", defined as 1 - `cdf`,
    evaluated at ``k``.

    Parameters
    ----------
    k : array_like
        Quantiles.
    arg1, arg2, arg3,... : array_like
        Shape parameter(s) of the distribution (see the instance
        docstring for details).
    loc : array_like, optional
        Location parameter (default=0).

    Returns
    -------
    logsf : ndarray
        Log of the survival function evaluated at ``k``.
    """
    args, loc, _ = self._parse_args(*args, **kwds)
    k, loc = map(asarray, (k, loc))
    args = tuple(map(asarray, args))
    # Shift to the standardized distribution.
    k = asarray(k-loc)
    args_ok = self._argcheck(*args)
    in_support = (k >= self.a) & (k <= self.b)
    below_support = (k < self.a) & args_ok
    valid = args_ok & in_support
    out = empty(shape(valid), 'd')
    out.fill(NINF)          # log(0) above the support
    place(out, (1-args_ok) + np.isnan(k), self.badvalue)
    # log(1) == 0 strictly below the support.
    place(out, below_support, 0.0)
    if any(valid):
        good = argsreduce(valid, *((k,)+args))
        place(out, valid, self._logsf(*good))
    if out.ndim == 0:
        return out[()]
    return out
def ppf(self, q, *args, **kwds):
    """Percent point function (inverse of `cdf`) at ``q`` of the given RV.

    Parameters
    ----------
    q : array_like
        Lower tail probability.
    arg1, arg2, arg3,... : array_like
        Shape parameter(s) of the distribution (see the instance
        docstring for details).
    loc : array_like, optional
        Location parameter (default=0).

    Returns
    -------
    k : array_like
        Quantile corresponding to the lower tail probability ``q``.
    """
    args, loc, _ = self._parse_args(*args, **kwds)
    q, loc = map(asarray, (q, loc))
    args = tuple(map(asarray, args))
    # (loc == loc) screens out NaN locations.
    args_ok = self._argcheck(*args) & (loc == loc)
    interior = (q > 0) & (q < 1)
    at_one = (q == 1) & args_ok
    valid = args_ok & interior
    # dtype 'd' so the output can represent nan and inf.
    out = valarray(shape(valid), value=self.badvalue, typecode='d')
    # q == 0 maps to one step below the support; q == 1 to its top.
    place(out, (q == 0)*(valid == valid), self.a-1)
    place(out, at_one, self.b)
    if any(valid):
        good = argsreduce(valid, *((q,)+args+(loc,)))
        loc, good = good[-1], good[:-1]
        place(out, valid, self._ppf(*good) + loc)
    if out.ndim == 0:
        return out[()]
    return out
def isf(self, q, *args, **kwds):
    """
    Inverse survival function (inverse of `sf`) at q of the given RV.

    Parameters
    ----------
    q : array_like
        Upper tail probability.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    loc : array_like, optional
        Location parameter (default=0).

    Returns
    -------
    k : ndarray or scalar
        Quantile corresponding to the upper tail probability, q.
    """
    args, loc, _ = self._parse_args(*args, **kwds)
    q, loc = map(asarray, (q, loc))
    args = tuple(map(asarray, args))
    # cond0: valid shape params and non-NaN loc; cond1: q strictly inside
    # (0, 1); cond2: the q == 1 boundary (maps to one below the support).
    cond0 = self._argcheck(*args) & (loc == loc)
    cond1 = (q > 0) & (q < 1)
    cond2 = (q == 1) & cond0
    cond = cond0 & cond1

    # same problem as with ppf; copied from ppf and changed
    output = valarray(shape(cond), value=self.badvalue, typecode='d')
    # output type 'd' to handle nin and inf
    # Boundary cases are mirrored relative to ppf: q == 0 -> upper bound,
    # q == 1 -> just below the lower bound.
    place(output, (q == 0)*(cond == cond), self.b)
    place(output, cond2, self.a-1)

    # call place only if at least 1 valid argument
    if any(cond):
        goodargs = argsreduce(cond, *((q,)+args+(loc,)))
        # loc is rebound to its reduced version so it lines up with the
        # reduced _isf output below.
        loc, goodargs = goodargs[-1], goodargs[:-1]
        # PB same as ticket 766
        place(output, cond, self._isf(*goodargs) + loc)

    if output.ndim == 0:
        return output[()]
    return output
def _entropy(self, *args):
    """Generic entropy: table lookup if available, else an outward sum."""
    # Distributions built from explicit (xk, pk) tables carry ``pk``.
    if hasattr(self, 'pk'):
        return entropy(self.pk)
    else:
        # Accumulate entr(pmf) outward from the integer-rounded mean
        # until a full step contributes less than machine precision.
        mu = int(self.stats(*args, **{'moments': 'm'}))
        total = entr(self.pmf(mu, *args))
        step = 1
        contrib = 1.0
        while (abs(contrib) > _EPS):
            contrib = entr(self.pmf(mu+step, *args))
            contrib += entr(self.pmf(mu-step, *args))
            step += 1
            total += contrib
        return total
def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
           conditional=False):
    """
    Calculate expected value of a function with respect to the distribution
    for discrete distribution.

    Parameters
    ----------
    func : callable, optional
        Function for which the expectation value is calculated.
        Takes only one argument.
        The default is the identity mapping f(k) = k.
    args : tuple, optional
        Shape parameters of the distribution.
    loc : float, optional
        Location parameter.
        Default is 0.
    lb, ub : int, optional
        Lower and upper bound for integration, default is set to the
        support of the distribution, inclusive (``ul <= k <= ub``).
    conditional : bool, optional
        If true then the expectation is corrected by the conditional
        probability of the summation interval. The return value is the
        expectation of the function, `func`, conditional on being in
        the given interval (k such that ``ul <= k <= ub``).
        Default is False.

    Returns
    -------
    expect : float
        Expected value.

    Notes
    -----
    * function is not vectorized
    * accuracy: uses self.moment_tol as stopping criterium
      for heavy tailed distribution e.g. zipf(4), accuracy for
      mean, variance in example is only 1e-5,
      increasing precision (moment_tol) makes zipf very slow
    * suppnmin=100 internal parameter for minimum number of points to
      evaluate could be added as keyword parameter, to evaluate functions
      with non-monotonic shapes, points include integers in (-suppnmin,
      suppnmin)
    * uses maxcount=1000 limits the number of points that are evaluated
      to break loop for infinite sums
      (a maximum of suppnmin+1000 positive plus suppnmin+1000 negative
      integers are evaluated)
    """
    # moment_tol = 1e-12 # increase compared to self.moment_tol,
    # too slow for only small gain in precision for zipf

    # avoid endless loop with unbound integral, eg. var of zipf(2)
    maxcount = 1000
    suppnmin = 100  # minimum number of points to evaluate (+ and -)

    # Build the summand in the *standardized* variable x = k - loc.
    if func is None:
        def fun(x):
            # loc and args from outer scope
            return (x+loc)*self._pmf(x, *args)
    else:
        def fun(x):
            # loc and args from outer scope
            return func(x+loc)*self._pmf(x, *args)

    # used pmf because _pmf does not check support in randint and there
    # might be problems(?) with correct self.a, self.b at this stage maybe
    # not anymore, seems to work now with _pmf
    self._argcheck(*args)  # (re)generate scalar self.a and self.b
    if lb is None:
        lb = (self.a)
    else:
        lb = lb - loc   # convert bound for standardized distribution
    if ub is None:
        ub = (self.b)
    else:
        ub = ub - loc   # convert bound for standardized distribution
    if conditional:
        if np.isposinf(ub)[()]:
            # work around bug: stats.poisson.sf(stats.poisson.b, 2) is nan
            invfac = 1 - self.cdf(lb-1, *args)
        else:
            invfac = 1 - self.cdf(lb-1, *args) - self.sf(ub, *args)
    else:
        invfac = 1.0

    tot = 0.0
    # Start from the central 0.1%..99.9% quantile range (at least
    # +/- suppnmin points), sum that chunk vectorized, then extend
    # pointwise toward the bounds until terms fall below moment_tol.
    low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args)
    low = max(min(-suppnmin, low), lb)
    upp = min(max(suppnmin, upp), ub)
    supp = np.arange(low, upp+1, self.inc)  # check limits
    tot = np.sum(fun(supp))
    diff = 1e100
    pos = upp + self.inc
    count = 0

    # handle cases with infinite support
    while (pos <= ub) and (diff > self.moment_tol) and count <= maxcount:
        diff = fun(pos)
        tot += diff
        pos += self.inc
        count += 1

    if self.a < 0:  # handle case when self.a = -inf
        diff = 1e100
        pos = low - self.inc
        while ((pos >= lb) and (diff > self.moment_tol) and
               count <= maxcount):
            diff = fun(pos)
            tot += diff
            pos -= self.inc
            count += 1

    if count > maxcount:
        warnings.warn('expect(): sum did not converge', RuntimeWarning)
    return tot/invfac
def get_distribution_names(namespace_pairs, rv_base_class):
    """Collect names of statistical distributions and their generators.

    Parameters
    ----------
    namespace_pairs : sequence
        A snapshot of (name, value) pairs in the namespace of a module.
    rv_base_class : class
        The base class of random variable generator classes in a module.

    Returns
    -------
    distn_names : list of strings
        Names of the statistical distribution instances.
    distn_gen_names : list of strings
        Names of the generator classes; note these are not necessarily the
        instance names with a ``_gen`` suffix appended.
    """
    # Ignore private names, then classify the rest: generator classes end
    # in '_gen' and subclass the base; instances are direct members of it.
    public = [(name, value) for name, value in namespace_pairs
              if not name.startswith('_')]
    distn_gen_names = [name for name, value in public
                       if name.endswith('_gen')
                       and issubclass(value, rv_base_class)]
    distn_names = [name for name, value in public
                   if isinstance(value, rv_base_class)]
    return distn_names, distn_gen_names
| bsd-3-clause |
wkfwkf/statsmodels | statsmodels/regression/tests/test_regression.py | 6 | 37622 | """
Test functions for models.regression
"""
# TODO: Test for LM
from statsmodels.compat.python import long, lrange
import warnings
import pandas
import numpy as np
from numpy.testing import (assert_almost_equal, assert_approx_equal,
assert_raises, assert_equal, assert_allclose)
from scipy.linalg import toeplitz
from statsmodels.tools.tools import add_constant, categorical
from statsmodels.compat.numpy import np_matrix_rank
from statsmodels.regression.linear_model import OLS, WLS, GLS, yule_walker
from statsmodels.datasets import longley
from scipy.stats import t as student_t
DECIMAL_4 = 4
DECIMAL_3 = 3
DECIMAL_2 = 2
DECIMAL_1 = 1
DECIMAL_7 = 7
DECIMAL_0 = 0
class CheckRegressionResults(object):
    """
    Mixin that compares a fitted result ``res1`` against reference values
    ``res2``.

    res2 contains results from Rmodelwrap or were obtained from a statistical
    packages such as R, Stata, or SAS and were written to model_results.
    Subclasses populate ``cls.res1``/``cls.res2`` in ``setupClass`` and may
    loosen individual ``decimal_*`` tolerances.
    """
    decimal_params = DECIMAL_4

    def test_params(self):
        assert_almost_equal(self.res1.params, self.res2.params,
                            self.decimal_params)

    decimal_standarderrors = DECIMAL_4

    def test_standarderrors(self):
        assert_almost_equal(self.res1.bse, self.res2.bse,
                            self.decimal_standarderrors)

    decimal_confidenceintervals = DECIMAL_4

    def test_confidenceintervals(self):
        # NOTE: stata rounds residuals (at least) to sig digits so approx_equal
        conf1 = self.res1.conf_int()
        conf2 = self.res2.conf_int()
        for i in range(len(conf1)):
            assert_approx_equal(conf1[i][0], conf2[i][0],
                                self.decimal_confidenceintervals)
            assert_approx_equal(conf1[i][1], conf2[i][1],
                                self.decimal_confidenceintervals)

    decimal_conf_int_subset = DECIMAL_4

    def test_conf_int_subset(self):
        # conf_int(cols=...) must agree with slicing the full interval.
        if len(self.res1.params) > 1:
            ci1 = self.res1.conf_int(cols=(1, 2))
            ci2 = self.res1.conf_int()[1:3]
            assert_almost_equal(ci1, ci2, self.decimal_conf_int_subset)
        else:
            pass

    decimal_scale = DECIMAL_4

    def test_scale(self):
        assert_almost_equal(self.res1.scale, self.res2.scale,
                            self.decimal_scale)

    decimal_rsquared = DECIMAL_4

    def test_rsquared(self):
        assert_almost_equal(self.res1.rsquared, self.res2.rsquared,
                            self.decimal_rsquared)

    decimal_rsquared_adj = DECIMAL_4

    def test_rsquared_adj(self):
        assert_almost_equal(self.res1.rsquared_adj, self.res2.rsquared_adj,
                            self.decimal_rsquared_adj)

    def test_degrees(self):
        assert_equal(self.res1.model.df_model, self.res2.df_model)
        assert_equal(self.res1.model.df_resid, self.res2.df_resid)

    decimal_ess = DECIMAL_4

    def test_ess(self):
        # Explained Sum of Squares
        assert_almost_equal(self.res1.ess, self.res2.ess,
                            self.decimal_ess)

    decimal_ssr = DECIMAL_4

    def test_sumof_squaredresids(self):
        assert_almost_equal(self.res1.ssr, self.res2.ssr, self.decimal_ssr)

    # BUG FIX: test_mse_resid previously asserted ``mse_model`` and
    # test_mse_model asserted ``mse_resid`` — the two bodies were swapped
    # relative to their names and tolerance attributes.  Both quantities
    # are still covered; each test now checks the attribute it names.
    decimal_mse_resid = DECIMAL_4

    def test_mse_resid(self):
        # Mean squared error of residuals
        assert_almost_equal(self.res1.mse_resid, self.res2.mse_resid,
                            self.decimal_mse_resid)

    decimal_mse_model = DECIMAL_4

    def test_mse_model(self):
        assert_almost_equal(self.res1.mse_model, self.res2.mse_model,
                            self.decimal_mse_model)

    decimal_mse_total = DECIMAL_4

    def test_mse_total(self):
        assert_almost_equal(self.res1.mse_total, self.res2.mse_total,
                            self.decimal_mse_total, err_msg="Test class %s" % self)

    decimal_fvalue = DECIMAL_4

    def test_fvalue(self):
        # didn't change this, not sure it should complain -inf not equal -inf
        # if not (np.isinf(self.res1.fvalue) and np.isinf(self.res2.fvalue)):
        assert_almost_equal(self.res1.fvalue, self.res2.fvalue,
                            self.decimal_fvalue)

    decimal_loglike = DECIMAL_4

    def test_loglike(self):
        assert_almost_equal(self.res1.llf, self.res2.llf, self.decimal_loglike)

    decimal_aic = DECIMAL_4

    def test_aic(self):
        assert_almost_equal(self.res1.aic, self.res2.aic, self.decimal_aic)

    decimal_bic = DECIMAL_4

    def test_bic(self):
        assert_almost_equal(self.res1.bic, self.res2.bic, self.decimal_bic)

    decimal_pvalues = DECIMAL_4

    def test_pvalues(self):
        assert_almost_equal(self.res1.pvalues, self.res2.pvalues,
                            self.decimal_pvalues)

    decimal_wresid = DECIMAL_4

    def test_wresid(self):
        assert_almost_equal(self.res1.wresid, self.res2.wresid,
                            self.decimal_wresid)

    decimal_resids = DECIMAL_4

    def test_resids(self):
        assert_almost_equal(self.res1.resid, self.res2.resid,
                            self.decimal_resids)

    decimal_norm_resids = DECIMAL_4

    def test_norm_resids(self):
        assert_almost_equal(self.res1.resid_pearson, self.res2.resid_pearson,
                            self.decimal_norm_resids)

# TODO: test fittedvalues and what else?
class TestOLS(CheckRegressionResults):
    # OLS on the Longley data, compared against reference results and
    # against the QR-decomposition fitting path.
    @classmethod
    def setupClass(cls):
        from .results.results_regression import Longley
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        res1 = OLS(data.endog, data.exog).fit()
        res2 = Longley()
        res2.wresid = res1.wresid # workaround hack
        cls.res1 = res1
        cls.res2 = res2

        # Fit once through the public method="qr" path and once with the
        # Q/R factors attached manually, so the two can be cross-checked.
        res_qr = OLS(data.endog, data.exog).fit(method="qr")

        model_qr = OLS(data.endog, data.exog)

        Q, R = np.linalg.qr(data.exog)
        model_qr.exog_Q, model_qr.exog_R = Q, R
        model_qr.normalized_cov_params = np.linalg.inv(np.dot(R.T, R))
        model_qr.rank = np_matrix_rank(R)
        res_qr2 = model_qr.fit(method="qr")

        cls.res_qr = res_qr
        cls.res_qr_manual = res_qr2

    def test_eigenvalues(self):
        # Relative difference of the eigenvalues between the two QR fits
        # should be numerically zero.
        eigenval_perc_diff = (self.res_qr.eigenvals - self.res_qr_manual.eigenvals)
        eigenval_perc_diff /= self.res_qr.eigenvals
        zeros = np.zeros_like(eigenval_perc_diff)
        assert_almost_equal(eigenval_perc_diff, zeros, DECIMAL_7)

    # Robust error tests.  Compare values computed with SAS
    def test_HC0_errors(self):
        # They are split up because the copied results do not have any
        # DECIMAL_4 places for the last place.
        assert_almost_equal(self.res1.HC0_se[:-1],
                            self.res2.HC0_se[:-1], DECIMAL_4)
        assert_approx_equal(np.round(self.res1.HC0_se[-1]), self.res2.HC0_se[-1])

    def test_HC1_errors(self):
        assert_almost_equal(self.res1.HC1_se[:-1],
                            self.res2.HC1_se[:-1], DECIMAL_4)
        assert_approx_equal(self.res1.HC1_se[-1], self.res2.HC1_se[-1])

    def test_HC2_errors(self):
        assert_almost_equal(self.res1.HC2_se[:-1],
                            self.res2.HC2_se[:-1], DECIMAL_4)
        assert_approx_equal(self.res1.HC2_se[-1], self.res2.HC2_se[-1])

    def test_HC3_errors(self):
        assert_almost_equal(self.res1.HC3_se[:-1],
                            self.res2.HC3_se[:-1], DECIMAL_4)
        assert_approx_equal(self.res1.HC3_se[-1], self.res2.HC3_se[-1])

    def test_qr_params(self):
        assert_almost_equal(self.res1.params,
                            self.res_qr.params, 6)

    def test_qr_normalized_cov_params(self):
        # todo: need assert_close
        assert_almost_equal(np.ones_like(self.res1.normalized_cov_params),
                            self.res1.normalized_cov_params /
                            self.res_qr.normalized_cov_params, 5)

    def test_missing(self):
        # missing='drop' should remove the three NaN observations.
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        data.endog[[3, 7, 14]] = np.nan
        mod = OLS(data.endog, data.exog, missing='drop')
        assert_equal(mod.endog.shape[0], 13)
        assert_equal(mod.exog.shape[0], 13)

    def test_rsquared_adj_overfit(self):
        # Test that if df_resid = 0, rsquared_adj = 0.
        # This is a regression test for user issue:
        # https://github.com/statsmodels/statsmodels/issues/868
        with warnings.catch_warnings(record=True):
            x = np.random.randn(5)
            y = np.random.randn(5, 6)
            results = OLS(x, y).fit()
            rsquared_adj = results.rsquared_adj
            assert_equal(rsquared_adj, np.nan)

    def test_qr_alternatives(self):
        assert_allclose(self.res_qr.params, self.res_qr_manual.params,
                        rtol=5e-12)

    def test_norm_resid(self):
        # Pearson residuals must equal residuals scaled by sqrt(ssr/df_resid).
        resid = self.res1.wresid
        norm_resid = resid / np.sqrt(np.sum(resid**2.0) / self.res1.df_resid)
        model_norm_resid = self.res1.resid_pearson
        assert_almost_equal(model_norm_resid, norm_resid, DECIMAL_7)

    def test_norm_resid_zero_variance(self):
        # Degenerate fit (y on itself) has zero scale; the Pearson
        # residuals should still match the weighted residuals.
        with warnings.catch_warnings(record=True):
            y = self.res1.model.endog
            res = OLS(y,y).fit()
            assert_allclose(res.scale, 0, atol=1e-20)
            assert_allclose(res.wresid, res.resid_pearson, atol=5e-11)
class TestRTO(CheckRegressionResults):
    # Regression-through-the-origin: Longley data without a constant term.
    @classmethod
    def setupClass(cls):
        from .results.results_regression import LongleyRTO
        data = longley.load()
        res1 = OLS(data.endog, data.exog).fit()
        res2 = LongleyRTO()
        res2.wresid = res1.wresid # workaround hack
        cls.res1 = res1
        cls.res2 = res2

        res_qr = OLS(data.endog, data.exog).fit(method="qr")
        cls.res_qr = res_qr
class TestFtest(object):
    """
    Tests f_test vs. RegressionResults
    """
    @classmethod
    def setupClass(cls):
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        cls.res1 = OLS(data.endog, data.exog).fit()
        # Restriction matrix: all slope coefficients zero (constant free),
        # which reproduces the overall regression F statistic.
        R = np.identity(7)[:-1,:]
        cls.Ftest = cls.res1.f_test(R)

    def test_F(self):
        assert_almost_equal(self.Ftest.fvalue, self.res1.fvalue, DECIMAL_4)

    def test_p(self):
        assert_almost_equal(self.Ftest.pvalue, self.res1.f_pvalue, DECIMAL_4)

    def test_Df_denom(self):
        assert_equal(self.Ftest.df_denom, self.res1.model.df_resid)

    def test_Df_num(self):
        assert_equal(self.Ftest.df_num, 6)
class TestFTest2(object):
    """
    A joint test that the coefficient on
    GNP = the coefficient on UNEMP  and that the coefficient on
    POP = the coefficient on YEAR for the Longley dataset.

    Ftest1 is from statsmodels.  Results are from Rpy using R's car library.
    """
    @classmethod
    def setupClass(cls):
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        res1 = OLS(data.endog, data.exog).fit()
        # Same restrictions expressed as a matrix and as a formula string;
        # the two f_test entry points must agree.
        R2 = [[0,1,-1,0,0,0,0],[0, 0, 0, 0, 1, -1, 0]]
        cls.Ftest1 = res1.f_test(R2)
        hyp = 'x2 = x3, x5 = x6'
        cls.NewFtest1 = res1.f_test(hyp)

    def test_new_ftest(self):
        assert_equal(self.NewFtest1.fvalue, self.Ftest1.fvalue)

    def test_fvalue(self):
        assert_almost_equal(self.Ftest1.fvalue, 9.7404618732968196, DECIMAL_4)

    def test_pvalue(self):
        assert_almost_equal(self.Ftest1.pvalue, 0.0056052885317493459,
                            DECIMAL_4)

    def test_df_denom(self):
        assert_equal(self.Ftest1.df_denom, 9)

    def test_df_num(self):
        assert_equal(self.Ftest1.df_num, 2)
class TestFtestQ(object):
    """
    A joint hypothesis test that Rb = q.  Coefficient tests are essentially
    made up.  Test values taken from Stata.
    """
    @classmethod
    def setupClass(cls):
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        res1 = OLS(data.endog, data.exog).fit()
        # Five linear restrictions with a non-zero right-hand side q.
        R = np.array([[0,1,1,0,0,0,0],
                      [0,1,0,1,0,0,0],
                      [0,1,0,0,0,0,0],
                      [0,0,0,0,1,0,0],
                      [0,0,0,0,0,1,0]])
        q = np.array([0,0,0,1,0])
        cls.Ftest1 = res1.f_test((R,q))

    def test_fvalue(self):
        assert_almost_equal(self.Ftest1.fvalue, 70.115557, 5)

    def test_pvalue(self):
        assert_almost_equal(self.Ftest1.pvalue, 6.229e-07, 10)

    def test_df_denom(self):
        assert_equal(self.Ftest1.df_denom, 9)

    def test_df_num(self):
        assert_equal(self.Ftest1.df_num, 5)
class TestTtest(object):
    """
    Test individual t-tests.  Ie., are the coefficients significantly
    different than zero.
    """
    @classmethod
    def setupClass(cls):
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        cls.res1 = OLS(data.endog, data.exog).fit()
        # Identity restriction matrix tests each coefficient against zero;
        # the string hypothesis must give identical results.
        R = np.identity(7)
        cls.Ttest = cls.res1.t_test(R)
        hyp = 'x1 = 0, x2 = 0, x3 = 0, x4 = 0, x5 = 0, x6 = 0, const = 0'
        cls.NewTTest = cls.res1.t_test(hyp)

    def test_new_tvalue(self):
        assert_equal(self.NewTTest.tvalue, self.Ttest.tvalue)

    def test_tvalue(self):
        assert_almost_equal(self.Ttest.tvalue, self.res1.tvalues, DECIMAL_4)

    def test_sd(self):
        assert_almost_equal(self.Ttest.sd, self.res1.bse, DECIMAL_4)

    def test_pvalue(self):
        # Two-sided p-values from the t distribution.
        assert_almost_equal(self.Ttest.pvalue, student_t.sf(
                            np.abs(self.res1.tvalues), self.res1.model.df_resid)*2,
                            DECIMAL_4)

    def test_df_denom(self):
        assert_equal(self.Ttest.df_denom, self.res1.model.df_resid)

    def test_effect(self):
        assert_almost_equal(self.Ttest.effect, self.res1.params)
class TestTtest2(object):
    """
    Tests the hypothesis that the coefficients on POP and YEAR
    are equal.

    Results from RPy using 'car' package.
    """
    @classmethod
    def setupClass(cls):
        # Single contrast: coefficient 4 minus coefficient 5.
        R = np.zeros(7)
        R[4:6] = [1,-1]
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        res1 = OLS(data.endog, data.exog).fit()
        cls.Ttest1 = res1.t_test(R)

    def test_tvalue(self):
        assert_almost_equal(self.Ttest1.tvalue, -4.0167754636397284,
                            DECIMAL_4)

    def test_sd(self):
        assert_almost_equal(self.Ttest1.sd, 455.39079425195314, DECIMAL_4)

    def test_pvalue(self):
        assert_almost_equal(self.Ttest1.pvalue, 2*0.0015163772380932246,
                            DECIMAL_4)

    def test_df_denom(self):
        assert_equal(self.Ttest1.df_denom, 9)

    def test_effect(self):
        assert_almost_equal(self.Ttest1.effect, -1829.2025687186533, DECIMAL_4)
class TestGLS(object):
    """
    These test results were obtained by replication with R.
    """
    @classmethod
    def setupClass(cls):
        from .results.results_regression import LongleyGls

        data = longley.load()
        exog = add_constant(np.column_stack((data.exog[:,1],
                                             data.exog[:,4])), prepend=False)
        tmp_results = OLS(data.endog, exog).fit()
        # AR(1) error structure: estimate rho from lag-1 OLS residuals and
        # build the Toeplitz covariance sigma = rho**|i-j|.
        rho = np.corrcoef(tmp_results.resid[1:],
                          tmp_results.resid[:-1])[0][1] # by assumption
        order = toeplitz(np.arange(16))
        sigma = rho**order
        GLS_results = GLS(data.endog, exog, sigma=sigma).fit()
        cls.res1 = GLS_results
        cls.res2 = LongleyGls()
        # attach for test_missing
        cls.sigma = sigma
        cls.exog = exog
        cls.endog = data.endog

    def test_aic(self):
        # +2 adjusts for a constant-term difference vs. R's AIC definition.
        assert_approx_equal(self.res1.aic+2, self.res2.aic, 3)

    def test_bic(self):
        assert_approx_equal(self.res1.bic, self.res2.bic, 2)

    def test_loglike(self):
        assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_0)

    def test_params(self):
        assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_1)

    def test_resid(self):
        assert_almost_equal(self.res1.resid, self.res2.resid, DECIMAL_4)

    def test_scale(self):
        assert_almost_equal(self.res1.scale, self.res2.scale, DECIMAL_4)

    def test_tvalues(self):
        assert_almost_equal(self.res1.tvalues, self.res2.tvalues, DECIMAL_4)

    def test_standarderrors(self):
        assert_almost_equal(self.res1.bse, self.res2.bse, DECIMAL_4)

    def test_fittedvalues(self):
        assert_almost_equal(self.res1.fittedvalues, self.res2.fittedvalues,
                            DECIMAL_4)

    def test_pvalues(self):
        assert_almost_equal(self.res1.pvalues, self.res2.pvalues, DECIMAL_4)

    def test_missing(self):
        # Dropping NaN rows must shrink endog, exog and sigma consistently.
        endog = self.endog.copy() # copy or changes endog for other methods
        endog[[4,7,14]] = np.nan
        mod = GLS(endog, self.exog, sigma=self.sigma, missing='drop')
        assert_equal(mod.endog.shape[0], 13)
        assert_equal(mod.exog.shape[0], 13)
        assert_equal(mod.sigma.shape, (13,13))
class TestGLS_alt_sigma(CheckRegressionResults):
    """
    Test that GLS with no argument is equivalent to OLS.
    """
    @classmethod
    def setupClass(cls):
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        ols_res = OLS(data.endog, data.exog).fit()
        gls_res = GLS(data.endog, data.exog).fit()
        # sigma=1 (scalar) should also reduce to OLS; kept unfitted, only
        # used for the shape-validation tests below.
        gls_res_scalar = GLS(data.endog, data.exog, sigma=1)
        cls.endog = data.endog
        cls.exog = data.exog
        cls.res1 = gls_res
        cls.res2 = ols_res
        cls.res3 = gls_res_scalar
#        self.res2.conf_int = self.res2.conf_int()

    def test_wrong_size_sigma_1d(self):
        # A 1d sigma of the wrong length must be rejected.
        n = len(self.endog)
        assert_raises(ValueError, GLS, self.endog, self.exog, sigma=np.ones(n-1))

    def test_wrong_size_sigma_2d(self):
        # A 2d sigma of the wrong shape must be rejected.
        n = len(self.endog)
        assert_raises(ValueError, GLS, self.endog, self.exog, sigma=np.ones((n-1,n-1)))

#    def check_confidenceintervals(self, conf1, conf2):
#        assert_almost_equal(conf1, conf2, DECIMAL_4)
class TestLM(object):
    """Compare hand-computed Lagrange Multiplier statistics with
    RegressionResults.compare_lm_test under several covariance choices."""
    @classmethod
    def setupClass(cls):
        # TODO: Test HAC method
        # Simulated regression with 3 regressors; the restricted model
        # keeps only the first column.
        X = np.random.randn(100,3)
        b = np.ones((3,1))
        e = np.random.randn(100,1)
        y = np.dot(X,b) + e
        # Cases?
        # Homoskedastic
        # HC0
        cls.res1_full = OLS(y,X).fit()
        cls.res1_restricted = OLS(y,X[:,0]).fit()
        cls.res2_full = cls.res1_full.get_robustcov_results('HC0')
        cls.res2_restricted = cls.res1_restricted.get_robustcov_results('HC0')
        cls.X = X
        cls.Y = y
    def test_LM_homoskedastic(self):
        """LM statistic under the homoskedastic covariance estimate."""
        resid = self.res1_restricted.wresid
        n = resid.shape[0]
        X = self.X
        # S = sigma2_hat * X'X / n under homoskedasticity.
        S = np.dot(resid,resid) / n * np.dot(X.T,X) / n
        Sinv = np.linalg.inv(S)
        s = np.mean(X * resid[:,None], 0)
        LMstat = n * np.dot(np.dot(s,Sinv),s.T)
        LMstat_OLS = self.res1_full.compare_lm_test(self.res1_restricted)
        LMstat2 = LMstat_OLS[0]
        assert_almost_equal(LMstat, LMstat2, DECIMAL_7)
    def test_LM_heteroskedastic_nodemean(self):
        """LM statistic with HC0 scores, without demeaning the scores."""
        resid = self.res1_restricted.wresid
        n = resid.shape[0]
        X = self.X
        scores = X * resid[:,None]
        S = np.dot(scores.T,scores) / n
        Sinv = np.linalg.inv(S)
        s = np.mean(scores, 0)
        LMstat = n * np.dot(np.dot(s,Sinv),s.T)
        LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted, demean=False)
        LMstat2 = LMstat_OLS[0]
        assert_almost_equal(LMstat, LMstat2, DECIMAL_7)
    def test_LM_heteroskedastic_demean(self):
        """LM statistic with HC0 scores, demeaning the scores (the default)."""
        resid = self.res1_restricted.wresid
        n = resid.shape[0]
        X = self.X
        scores = X * resid[:,None]
        scores_demean = scores - scores.mean(0)
        S = np.dot(scores_demean.T,scores_demean) / n
        Sinv = np.linalg.inv(S)
        s = np.mean(scores, 0)
        LMstat = n * np.dot(np.dot(s,Sinv),s.T)
        LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted)
        LMstat2 = LMstat_OLS[0]
        assert_almost_equal(LMstat, LMstat2, DECIMAL_7)
    def test_LM_heteroskedastic_LRversion(self):
        """LR-style LM variant: scores from the restricted residuals but
        covariance built from the full-model residuals (use_lr=True)."""
        resid = self.res1_restricted.wresid
        resid_full = self.res1_full.wresid
        n = resid.shape[0]
        X = self.X
        scores = X * resid[:,None]
        s = np.mean(scores, 0)
        scores = X * resid_full[:,None]
        S = np.dot(scores.T,scores) / n
        Sinv = np.linalg.inv(S)
        LMstat = n * np.dot(np.dot(s,Sinv),s.T)
        LMstat_OLS = self.res2_full.compare_lm_test(self.res2_restricted, use_lr = True)
        LMstat2 = LMstat_OLS[0]
        assert_almost_equal(LMstat, LMstat2, DECIMAL_7)
    def test_LM_nonnested(self):
        """Comparing in the wrong direction (restricted vs full) must raise."""
        assert_raises(ValueError, self.res2_restricted.compare_lm_test, self.res2_full)
class TestOLS_GLS_WLS_equivalence(object):
    """OLS, WLS with unit weights, and GLS with (scaled) identity sigma must
    all produce identical estimates and fit statistics."""
    @classmethod
    def setupClass(cls):
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        y = data.endog
        X = data.exog
        n = y.shape[0]
        w = np.ones(n)
        cls.results = []
        cls.results.append(OLS(y, X).fit())
        cls.results.append(WLS(y, X, w).fit())
        # Scaling sigma by a constant must not change the results.
        cls.results.append(GLS(y, X, 100*w).fit())
        cls.results.append(GLS(y, X, np.diag(0.1*w)).fit())
    def test_ll(self):
        """llf, AIC and BIC must be identical across all four fits."""
        llf = np.array([r.llf for r in self.results])
        llf_1 = np.ones_like(llf) * self.results[0].llf
        assert_almost_equal(llf, llf_1, DECIMAL_7)
        ic = np.array([r.aic for r in self.results])
        ic_1 = np.ones_like(ic) * self.results[0].aic
        assert_almost_equal(ic, ic_1, DECIMAL_7)
        ic = np.array([r.bic for r in self.results])
        ic_1 = np.ones_like(ic) * self.results[0].bic
        assert_almost_equal(ic, ic_1, DECIMAL_7)
    def test_params(self):
        """Coefficient vectors must be identical across all four fits."""
        params = np.array([r.params for r in self.results])
        params_1 = np.array([self.results[0].params] * len(self.results))
        assert_allclose(params, params_1)
    def test_ss(self):
        """Standard errors must be identical across all four fits."""
        bse = np.array([r.bse for r in self.results])
        bse_1 = np.array([self.results[0].bse] * len(self.results))
        assert_allclose(bse, bse_1)
    def test_rsquared(self):
        """R-squared must be identical across all four fits."""
        rsquared = np.array([r.rsquared for r in self.results])
        rsquared_1 = np.array([self.results[0].rsquared] * len(self.results))
        assert_almost_equal(rsquared, rsquared_1, DECIMAL_7)
class TestGLS_WLS_equivalence(TestOLS_GLS_WLS_equivalence):
    # reuse test methods
    """Same invariance checks as the parent, but with non-constant weights:
    WLS(w) and GLS(1/w) must agree, and scaling must not matter."""
    @classmethod
    def setupClass(cls):
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        y = data.endog
        X = data.exog
        n = y.shape[0]
        np.random.seed(5)
        w = np.random.uniform(0.5, 1, n)
        # GLS sigma is the inverse of the WLS weight.
        w_inv = 1. / w
        cls.results = []
        cls.results.append(WLS(y, X, w).fit())
        cls.results.append(WLS(y, X, 0.01 * w).fit())
        cls.results.append(GLS(y, X, 100 * w_inv).fit())
        cls.results.append(GLS(y, X, np.diag(0.1 * w_inv)).fit())
    def test_rsquared(self):
        # TODO: WLS rsquared is ok, GLS might have wrong centered_tss
        # We only check that WLS and GLS rsquared is invariant to scaling
        # WLS and GLS have different rsquared
        assert_almost_equal(self.results[1].rsquared, self.results[0].rsquared,
                            DECIMAL_7)
        assert_almost_equal(self.results[3].rsquared, self.results[2].rsquared,
                            DECIMAL_7)
class TestNonFit(object):
    """Check attributes that are available on an OLS model before fitting."""
    @classmethod
    def setupClass(cls):
        data = longley.load()
        data.exog = add_constant(data.exog, prepend=False)
        cls.endog = data.endog
        cls.exog = data.exog
        cls.ols_model = OLS(data.endog, data.exog)
    def test_df_resid(self):
        """df_resid must equal nobs minus the number of regressors.

        Fix: assert against the locally computed value -- previously the
        computed ``df_resid`` was dead code and the assertion compared
        against a hard-coded Python-2 ``long(9)`` literal.
        """
        df_resid = self.endog.shape[0] - self.exog.shape[1]
        assert_equal(self.ols_model.df_resid, df_resid)
class TestWLS_CornerCases(object):
    """Degenerate WLS inputs: a single observation and mismatched weights."""
    @classmethod
    def setupClass(cls):
        # Smallest possible problem: one observation, one regressor,
        # scalar weight.
        cls.exog = np.ones((1,))
        cls.endog = np.ones((1,))
        scalar_weight = 1
        cls.wls_res = WLS(cls.endog, cls.exog, weights=scalar_weight).fit()
    def test_wrong_size_weights(self):
        """Weights whose shape cannot match nobs must raise ValueError."""
        bad_weights = np.ones((10,10))
        assert_raises(ValueError, WLS, self.endog, self.exog, weights=bad_weights)
class TestWLSExogWeights(CheckRegressionResults):
    #Test WLS with Greene's credit card data
    #reg avgexp age income incomesq ownrent [aw=1/incomesq]
    def __init__(self):
        """Fit WLS on the ccard data and align the Stata reference results.

        The reference (CCardWLS) was produced with Stata analytic weights,
        so the weights are rescaled to sum to nobs, and the reference
        llf/aic/bic are shifted to match this package's loglike definition.
        """
        from .results.results_regression import CCardWLS
        from statsmodels.datasets.ccard import load
        dta = load()
        dta.exog = add_constant(dta.exog, prepend=False)
        nobs = 72.
        weights = 1/dta.exog[:,2]
        # for comparison with stata analytic weights
        scaled_weights = ((weights * nobs)/weights.sum())
        self.res1 = WLS(dta.endog, dta.exog, weights=scaled_weights).fit()
        self.res2 = CCardWLS()
        self.res2.wresid = scaled_weights ** .5 * self.res2.resid
        # correction because we use different definition for loglike/llf
        # NOTE: corr_ic is computed from the *uncorrected* reference llf;
        # the llf shift below must therefore come after the aic/bic shifts.
        corr_ic = 2 * (self.res1.llf - self.res2.llf)
        self.res2.aic -= corr_ic
        self.res2.bic -= corr_ic
        self.res2.llf += 0.5 * np.sum(np.log(self.res1.model.weights))
def test_wls_example():
    """Regression test for the WLS docstring example.

    There was once a note about a bug here; reference numbers are taken
    from R's lm summary output.
    """
    endog = [1, 3, 4, 5, 2, 3, 4]
    exog = add_constant(lrange(1, 8), prepend=False)
    wls_model = WLS(endog, exog, weights=lrange(1, 8)).fit()
    assert_almost_equal(wls_model.fvalue, 0.127337843215, 6)
    assert_almost_equal(wls_model.scale, 2.44608530786**2, 6)
def test_wls_tss():
    """WLS with frequency weights must reproduce the centered TSS of OLS
    on the corresponding expanded data set."""
    y_full = np.array([22, 22, 22, 23, 23, 23])
    x_full = [[1, 0], [1, 0], [1, 1], [0, 1], [0, 1], [0, 1]]
    ols_mod = OLS(y_full, add_constant(x_full, prepend=False)).fit()
    # Collapsed data: identical rows merged, with their counts as weights.
    y_collapsed = np.array([22, 22, 23.])
    x_collapsed = [[1, 0], [1, 1], [0, 1]]
    counts = np.array([2, 1, 3.])
    wls_mod = WLS(y_collapsed, add_constant(x_collapsed, prepend=False),
                  weights=counts).fit()
    assert_equal(ols_mod.centered_tss, wls_mod.centered_tss)
class TestWLSScalarVsArray(CheckRegressionResults):
    """A scalar weight must be equivalent to a constant weight array."""
    @classmethod
    def setupClass(cls):
        from statsmodels.datasets.longley import load
        dta = load()
        dta.exog = add_constant(dta.exog, prepend=True)
        # The same constant weight, expressed as a scalar and as a list.
        cls.res1 = WLS(dta.endog, dta.exog, weights=1./3).fit()
        cls.res2 = WLS(dta.endog, dta.exog,
                       weights=[1/3.] * len(dta.endog)).fit()
#class TestWLS_GLS(CheckRegressionResults):
# @classmethod
# def setupClass(cls):
# from statsmodels.datasets.ccard import load
# data = load()
# cls.res1 = WLS(data.endog, data.exog, weights = 1/data.exog[:,2]).fit()
# cls.res2 = GLS(data.endog, data.exog, sigma = data.exog[:,2]).fit()
#
# def check_confidenceintervals(self, conf1, conf2):
# assert_almost_equal(conf1, conf2(), DECIMAL_4)
def test_wls_missing():
    """WLS(missing='drop') must drop NaN rows from endog, exog and weights."""
    from statsmodels.datasets.ccard import load
    data = load()
    endog = data.endog
    # NOTE: ``endog`` aliases ``data.endog`` (no copy), so this assignment
    # inserts the NaNs into the very array passed to WLS below.
    endog[[10, 25]] = np.nan
    mod = WLS(data.endog, data.exog, weights = 1/data.exog[:,2], missing='drop')
    # 2 NaNs inserted, 70 rows should remain (presumably from 72 ccard
    # observations -- TODO confirm against the data set).
    assert_equal(mod.endog.shape[0], 70)
    assert_equal(mod.exog.shape[0], 70)
    assert_equal(mod.weights.shape[0], 70)
class TestWLS_OLS(CheckRegressionResults):
    """With default (unit) weights, WLS must coincide with OLS."""
    @classmethod
    def setupClass(cls):
        dataset = longley.load()
        dataset.exog = add_constant(dataset.exog, prepend=False)
        y, X = dataset.endog, dataset.exog
        cls.res1 = OLS(y, X).fit()
        cls.res2 = WLS(y, X).fit()
    def check_confidenceintervals(self, conf1, conf2):
        """conf2 is a callable here; evaluate it before comparing."""
        assert_almost_equal(conf1, conf2(), DECIMAL_4)
class TestGLS_OLS(CheckRegressionResults):
    """With no sigma supplied, GLS must coincide with OLS."""
    @classmethod
    def setupClass(cls):
        dataset = longley.load()
        dataset.exog = add_constant(dataset.exog, prepend=False)
        y, X = dataset.endog, dataset.exog
        cls.res1 = GLS(y, X).fit()
        cls.res2 = OLS(y, X).fit()
    def check_confidenceintervals(self, conf1, conf2):
        """conf2 is a callable here; evaluate it before comparing."""
        assert_almost_equal(conf1, conf2(), DECIMAL_4)
#TODO: test AR
# why the two-stage in AR?
#class test_ar(object):
# from statsmodels.datasets.sunspots import load
# data = load()
# model = AR(data.endog, rho=4).fit()
# R_res = RModel(data.endog, aic="FALSE", order_max=4)
# def test_params(self):
# assert_almost_equal(self.model.rho,
# pass
# def test_order(self):
# In R this can be defined or chosen by minimizing the AIC if aic=True
# pass
class TestYuleWalker(object):
    """Yule-Walker AR(4) coefficient estimates must match R's values."""
    @classmethod
    def setupClass(cls):
        from statsmodels.datasets.sunspots import load
        data = load()
        cls.rho, cls.sigma = yule_walker(data.endog, order=4,
                                         method="mle")
        # Reference AR coefficients computed in R.
        cls.R_params = [1.2831003105694765, -0.45240924374091945,
                        -0.20770298557575195, 0.047943648089542337]
    def test_params(self):
        assert_almost_equal(self.rho, self.R_params, DECIMAL_4)
class TestDataDimensions(CheckRegressionResults):
    """Base class: results must be invariant to endog/exog being shaped
    (n,) versus (n, 1).  Subclasses swap in the various combinations."""
    @classmethod
    def setupClass(cls):
        np.random.seed(54321)
        cls.endog_n_ = np.random.uniform(0,20,size=30)       # shape (30,)
        cls.endog_n_one = cls.endog_n_[:,None]               # shape (30, 1)
        cls.exog_n_ = np.random.uniform(0,20,size=30)        # shape (30,)
        cls.exog_n_one = cls.exog_n_[:,None]                 # shape (30, 1)
        cls.degen_exog = cls.exog_n_one[:-1]
        cls.mod1 = OLS(cls.endog_n_one, cls.exog_n_one)
        # NOTE(review): df_model is bumped manually here -- presumably to
        # compensate for the missing constant; confirm.
        cls.mod1.df_model += 1
        cls.res1 = cls.mod1.fit()
        # Note that these are created for every subclass..
        # A little extra overhead probably
        cls.mod2 = OLS(cls.endog_n_one, cls.exog_n_one)
        cls.mod2.df_model += 1
        cls.res2 = cls.mod2.fit()
    def check_confidenceintervals(self, conf1, conf2):
        """conf2 is a callable here; evaluate it before comparing."""
        assert_almost_equal(conf1, conf2(), DECIMAL_4)
class TestGLS_large_data(TestDataDimensions):
    """On a larger simulated data set, GLS with unit/scalar/absent sigma
    must agree with OLS."""
    @classmethod
    def setupClass(cls):
        nobs = 1000
        y = np.random.randn(nobs,1)
        X = np.random.randn(nobs,20)
        sigma = np.ones_like(y)
        cls.gls_res = GLS(y, X, sigma=sigma).fit()
        cls.gls_res_scalar = GLS(y, X, sigma=1).fit()
        cls.gls_res_none= GLS(y, X).fit()
        cls.ols_res = OLS(y, X).fit()
    def test_large_equal_params(self):
        """GLS(sigma=ones) parameters must equal OLS parameters."""
        assert_almost_equal(self.ols_res.params, self.gls_res.params, DECIMAL_7)
    def test_large_equal_loglike(self):
        """GLS(sigma=ones) log-likelihood must equal OLS log-likelihood."""
        assert_almost_equal(self.ols_res.llf, self.gls_res.llf, DECIMAL_7)
    def test_large_equal_params_none(self):
        """GLS with sigma omitted must match GLS with unit sigma."""
        assert_almost_equal(self.gls_res.params, self.gls_res_none.params,
                            DECIMAL_7)
class TestNxNx(TestDataDimensions):
    """Both endog and exog 1-d: must match the (n,1)/(n,1) baseline."""
    @classmethod
    def setupClass(cls):
        super(TestNxNx, cls).setupClass()
        model = OLS(cls.endog_n_, cls.exog_n_)
        model.df_model += 1
        cls.mod2 = model
        cls.res2 = model.fit()
class TestNxOneNx(TestDataDimensions):
    """endog (n,1) with exog 1-d: must match the (n,1)/(n,1) baseline."""
    @classmethod
    def setupClass(cls):
        super(TestNxOneNx, cls).setupClass()
        model = OLS(cls.endog_n_one, cls.exog_n_)
        model.df_model += 1
        cls.mod2 = model
        cls.res2 = model.fit()
class TestNxNxOne(TestDataDimensions):
    """endog 1-d with exog (n,1): must match the (n,1)/(n,1) baseline."""
    @classmethod
    def setupClass(cls):
        super(TestNxNxOne, cls).setupClass()
        model = OLS(cls.endog_n_, cls.exog_n_one)
        model.df_model += 1
        cls.mod2 = model
        cls.res2 = model.fit()
def test_bad_size():
    """OLS must reject endog and exog with differing numbers of rows."""
    np.random.seed(54321)
    sample = np.random.uniform(0, 20, 31)
    # 31 responses against 30 regressor rows -> ValueError.
    assert_raises(ValueError, OLS, sample, sample[1:])
def test_const_indicator():
    """R-squared must agree whether the constant is explicit or implied by
    a full set of category indicator columns (hasconst=True)."""
    np.random.seed(12345)
    X = np.random.randint(0, 3, size=30)
    X = categorical(X, drop=True)
    y = np.dot(X, [1., 2., 3.]) + np.random.normal(size=30)
    # Explicit constant plus 2 dummies vs. all 3 dummies with hasconst=True.
    modc = OLS(y, add_constant(X[:,1:], prepend=True)).fit()
    mod = OLS(y, X, hasconst=True).fit()
    assert_almost_equal(modc.rsquared, mod.rsquared, 12)
def test_706():
    # make sure one regressor pandas Series gets passed to DataFrame
    # for conf_int.
    """Regression test for gh-706: conf_int must return a (1, 2) DataFrame
    for a single-regressor model fit from pandas Series inputs."""
    y = pandas.Series(np.random.randn(10))
    x = pandas.Series(np.ones(10))
    res = OLS(y,x).fit()
    conf_int = res.conf_int()
    np.testing.assert_equal(conf_int.shape, (1, 2))
    np.testing.assert_(isinstance(conf_int, pandas.DataFrame))
def test_summary():
    # test 734
    """Regression test for gh-734: the LaTeX rendering of an OLS summary
    must match a frozen reference, up to the run date/time."""
    import re
    dta = longley.load_pandas()
    X = dta.exog
    X["constant"] = 1
    y = dta.endog
    with warnings.catch_warnings(record=True):
        res = OLS(y, X).fit()
    table = res.summary().as_latex()
    # replace the date and time with the fixed values in the reference
    table = re.sub("(?<=\n\\\\textbf\{Date:\} &).+?&",
                   " Sun, 07 Apr 2013 &", table)
    table = re.sub("(?<=\n\\\\textbf\{Time:\} &).+?&",
                   " 13:46:07 &", table)
    # Frozen reference output; must stay byte-identical to what
    # Summary.as_latex() produces.
    expected = """\\begin{center}
\\begin{tabular}{lclc}
\\toprule
\\textbf{Dep. Variable:} & TOTEMP & \\textbf{ R-squared: } & 0.995 \\\\
\\textbf{Model:} & OLS & \\textbf{ Adj. R-squared: } & 0.992 \\\\
\\textbf{Method:} & Least Squares & \\textbf{ F-statistic: } & 330.3 \\\\
\\textbf{Date:} & Sun, 07 Apr 2013 & \\textbf{ Prob (F-statistic):} & 4.98e-10 \\\\
\\textbf{Time:} & 13:46:07 & \\textbf{ Log-Likelihood: } & -109.62 \\\\
\\textbf{No. Observations:} & 16 & \\textbf{ AIC: } & 233.2 \\\\
\\textbf{Df Residuals:} & 9 & \\textbf{ BIC: } & 238.6 \\\\
\\textbf{Df Model:} & 6 & \\textbf{ } & \\\\
\\bottomrule
\\end{tabular}
\\begin{tabular}{lccccc}
 & \\textbf{coef} & \\textbf{std err} & \\textbf{t} & \\textbf{P$>$$|$t$|$} & \\textbf{[95.0\\% Conf. Int.]} \\\\
\\midrule
\\textbf{GNPDEFL} & 15.0619 & 84.915 & 0.177 & 0.863 & -177.029 207.153 \\\\
\\textbf{GNP} & -0.0358 & 0.033 & -1.070 & 0.313 & -0.112 0.040 \\\\
\\textbf{UNEMP} & -2.0202 & 0.488 & -4.136 & 0.003 & -3.125 -0.915 \\\\
\\textbf{ARMED} & -1.0332 & 0.214 & -4.822 & 0.001 & -1.518 -0.549 \\\\
\\textbf{POP} & -0.0511 & 0.226 & -0.226 & 0.826 & -0.563 0.460 \\\\
\\textbf{YEAR} & 1829.1515 & 455.478 & 4.016 & 0.003 & 798.788 2859.515 \\\\
\\textbf{constant} & -3.482e+06 & 8.9e+05 & -3.911 & 0.004 & -5.5e+06 -1.47e+06 \\\\
\\bottomrule
\\end{tabular}
\\begin{tabular}{lclc}
\\textbf{Omnibus:} & 0.749 & \\textbf{ Durbin-Watson: } & 2.559 \\\\
\\textbf{Prob(Omnibus):} & 0.688 & \\textbf{ Jarque-Bera (JB): } & 0.684 \\\\
\\textbf{Skew:} & 0.420 & \\textbf{ Prob(JB): } & 0.710 \\\\
\\textbf{Kurtosis:} & 2.434 & \\textbf{ Cond. No. } & 4.86e+09 \\\\
\\bottomrule
\\end{tabular}
%\\caption{OLS Regression Results}
\\end{center}"""
    assert_equal(table, expected)
class TestRegularizedFit(object):
    # Make sure there are no issues when there are no selected
    # variables.
    def test_empty_model(self):
        """With a huge penalty, every coefficient must be shrunk to zero."""
        np.random.seed(742)
        n = 100
        endog = np.random.normal(size=n)
        exog = np.random.normal(size=(n, 3))
        model = OLS(endog, exog)
        result = model.fit_regularized(alpha=1000)
        assert_equal(result.params, 0.)
        assert_equal(result.bse, 0.)
    def test_regularized(self):
        """Elastic-net fits must match glmnet reference results.

        Each ``rslt_`` entry packs [n, p, L1_wt, lam, params...]; the data
        are standardized before fitting, matching glmnet's convention.
        """
        import os
        from . import glmnet_r_results
        cur_dir = os.path.dirname(os.path.abspath(__file__))
        data = np.loadtxt(os.path.join(cur_dir, "results", "lasso_data.csv"),
                          delimiter=",")
        tests = [x for x in dir(glmnet_r_results) if x.startswith("rslt_")]
        for test in tests:
            vec = getattr(glmnet_r_results, test)
            n = vec[0]
            p = vec[1]
            L1_wt = float(vec[2])
            lam = float(vec[3])
            params = vec[4:].astype(np.float64)
            endog = data[0:n, 0]
            exog = data[0:n, 1:(p+1)]
            # Standardize endog and exog (glmnet convention).
            endog = endog - endog.mean()
            endog /= endog.std(ddof=1)
            exog = exog - exog.mean(0)
            exog /= exog.std(0, ddof=1)
            mod = OLS(endog, exog)
            rslt = mod.fit_regularized(L1_wt=L1_wt, alpha=lam)
            assert_almost_equal(rslt.params, params, decimal=3)
            # Smoke test for summary
            smry = rslt.summary()
def test_formula_missing_cat():
    # gh-805
    """A NaN in a categorical regressor must be handled by the formula API.

    Fix: use ``DataFrame.loc`` instead of the long-deprecated (and since
    removed) ``DataFrame.ix`` indexer; behavior is identical for the
    default integer index used here.
    """
    import statsmodels.api as sm
    from statsmodels.formula.api import ols
    from patsy import PatsyError
    dta = sm.datasets.grunfeld.load_pandas().data
    # Insert a missing value into the categorical 'firm' column.
    dta.loc[dta.index[0], 'firm'] = np.nan
    # Pre-dropping the NaN row and letting the formula machinery drop it
    # must give identical estimates.
    mod = ols(formula='value ~ invest + capital + firm + year',
              data=dta.dropna())
    res = mod.fit()
    mod2 = ols(formula='value ~ invest + capital + firm + year',
               data=dta)
    res2 = mod2.fit()
    assert_almost_equal(res.params.values, res2.params.values)
    # With missing='raise' the NaN must surface as a PatsyError instead.
    assert_raises(PatsyError, ols, 'value ~ invest + capital + firm + year',
                  data=dta, missing='raise')
def test_missing_formula_predict():
    # see 2171
    """Smoke test (gh-2171): predicting through the formula interface must
    tolerate a NaN row appended to the estimation data."""
    nsample = 30
    data = pandas.DataFrame({'x': np.linspace(0, 10, nsample)})
    null = pandas.DataFrame({'x': np.array([np.nan])})
    data = pandas.concat([data, null])
    beta = np.array([1, 0.1])
    e = np.random.normal(size=nsample+1)
    data['y'] = beta[0] + beta[1] * data['x'] + e
    model = OLS.from_formula('y ~ x', data=data)
    fit = model.fit()
    # Result intentionally unused: not raising is the assertion.
    pred = fit.predict(exog=data[:-1])
if __name__=="__main__":
    # Run this module directly under nose with verbose output and
    # post-mortem debugging on errors and failures.
    import nose
    # run_module_suite()
    nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
                   exit=False)
    # nose.runmodule(argv=[__file__,'-vvs','-x'], exit=False) #, '--pdb'
| bsd-3-clause |
leighpauls/k2cro4 | native_client/build/buildbot_chrome_nacl_stage.py | 1 | 11521 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Do all the steps required to build and test against nacl."""
import optparse
import os.path
import re
import shutil
import subprocess
import sys
# Copied from buildbot/buildbot_lib.py
def TryToCleanContents(path, file_name_filter=lambda fn: True):
    """
    Remove the contents of a directory without touching the directory itself.
    Ignores all failures.
    """
    if not os.path.exists(path):
        return
    for entry in os.listdir(path):
        TryToCleanPath(os.path.join(path, entry), file_name_filter)
# Copied from buildbot/buildbot_lib.py
def TryToCleanPath(path, file_name_filter=lambda fn: True):
    """
    Removes a file or directory.
    Ignores all failures.

    Only paths accepted by file_name_filter are touched; rejected paths are
    reported and skipped.  Fix: use function-style print calls -- a single
    parenthesized argument behaves identically under Python 2's print
    statement, and the module now also parses under Python 3.
    """
    if os.path.exists(path):
        if file_name_filter(path):
            print('Trying to remove %s' % path)
            if os.path.isdir(path):
                # Directories are removed recursively, errors suppressed.
                shutil.rmtree(path, ignore_errors=True)
            else:
                try:
                    os.remove(path)
                except Exception:
                    # Best-effort cleanup: ignore permission/race failures.
                    pass
        else:
            print('Skipping %s' % path)
def FindChrome(src_dir, options):
    """Locate a Chrome binary to test against.

    Returns options.browser_path when given; otherwise probes a list of
    well-known build-output locations under src_dir and returns the first
    existing one.  Raises Exception when no binary is found.
    (Fix: corrected 'chome' -> 'chrome' typo in the error message.)
    """
    if options.browser_path:
        return options.browser_path
    # List of places that chrome could live.
    # In theory we should be more careful about what platform we're actually
    # building for.
    # As currently constructed, this will also hork people who have debug and
    # release builds sitting side by side who build locally.
    mode = options.mode
    chrome_locations = [
        'build/%s/chrome.exe' % mode,
        'chrome/%s/chrome.exe' % mode,
        # Windows Chromium ninja builder
        'out/%s/chrome.exe' % mode,
        'out/%s/chrome' % mode,
        # Mac Chromium make builder
        'out/%s/Chromium.app/Contents/MacOS/Chromium' % mode,
        # Mac release make builder
        'out/%s/Google Chrome.app/Contents/MacOS/Google Chrome' % mode,
        # Mac Chromium xcode builder
        'xcodebuild/%s/Chromium.app/Contents/MacOS/Chromium' % mode,
        # Mac release xcode builder
        'xcodebuild/%s/Google Chrome.app/Contents/MacOS/Google Chrome' % mode,
    ]
    # Pick the first one we find.
    for chrome in chrome_locations:
        chrome_filename = os.path.join(src_dir, chrome)
        if os.path.exists(chrome_filename):
            return chrome_filename
    raise Exception('Cannot find a chrome binary - specify one with '
                    '--browser_path?')
# TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem.
def CleanTempDir():
    """Best-effort removal of stale temp files left behind by tests.

    Fix: parenthesized print calls -- identical output under Python 2
    (print("") prints an empty line, like a bare print statement) and valid
    syntax under Python 3.
    """
    # Only delete files and directories like:
    # a) C:\temp\83C4.tmp
    # b) /tmp/.org.chromium.Chromium.EQrEzl
    file_name_re = re.compile(
        r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
    file_name_filter = lambda fn: file_name_re.search(fn) is not None
    path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp'))
    # The length guard avoids wiping a bogus short path like 'C:\'.
    if len(path) >= 4 and os.path.isdir(path):
        print("")
        print("Cleaning out the temp directory.")
        print("")
        TryToCleanContents(path, file_name_filter)
    else:
        print("")
        print("Cannot find temp directory, not cleaning it.")
        print("")
def RunCommand(cmd, cwd, env):
    """Run cmd in cwd with env; on non-zero exit, report and exit the script."""
    banner = ' '.join(cmd)
    sys.stdout.write('\nRunning %s\n\n' % banner)
    sys.stdout.flush()
    retcode = subprocess.call(cmd, cwd=cwd, env=env)
    if retcode == 0:
        return
    sys.stdout.write('\nFailed: %s\n\n' % banner)
    sys.exit(retcode)
def RunTests(name, cmd, nacl_dir, env):
    """Build the artifacts needed by a test suite, then run the suite."""
    sys.stdout.write('\n\nBuilding files needed for %s testing...\n\n' % name)
    build_cmd = cmd + ['do_not_run_tests=1', '-j8']
    RunCommand(build_cmd, nacl_dir, env)
    sys.stdout.write('\n\nRunning %s tests...\n\n' % name)
    RunCommand(cmd, nacl_dir, env)
def BuildAndTest(options):
    """Build NaCl against the local Chrome and run the integration suites.

    Determines platform/bitness, assembles the scons command line, cleans
    previous output, downloads toolchains, and runs the newlib/glibc/pnacl
    test passes as enabled by ``options``.
    """
    # Refuse to run under cygwin.
    if sys.platform == 'cygwin':
        raise Exception('I do not work under cygwin, sorry.')
    # By default, use the version of Python is being used to run this script.
    python = sys.executable
    if sys.platform == 'darwin':
        # Mac 10.5 bots tend to use a particularlly old version of Python, look for
        # a newer version.
        macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python'
        if os.path.exists(macpython27):
            python = macpython27
    script_dir = os.path.dirname(os.path.abspath(__file__))
    nacl_dir = os.path.dirname(script_dir)
    src_dir = os.path.dirname(nacl_dir)
    # Decide platform specifics.
    env = dict(os.environ)
    if sys.platform in ['win32', 'cygwin']:
        # Bitness: explicit flag wins, else sniff the processor-architecture
        # environment variables.
        if options.bits == 64:
            bits = 64
        elif options.bits == 32:
            bits = 32
        elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
             '64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
            bits = 64
        else:
            bits = 32
        # Make both VS 8 and VS 9 tool locations visible to scons.
        msvs_path = ';'.join([
            r'c:\Program Files\Microsoft Visual Studio 9.0\VC',
            r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC',
            r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools',
            r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools',
            r'c:\Program Files\Microsoft Visual Studio 8\VC',
            r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC',
            r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools',
            r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools',
        ])
        env['PATH'] += ';' + msvs_path
        scons = [python, 'scons.py']
    elif sys.platform == 'darwin':
        bits = 32
        scons = [python, 'scons.py']
    else:
        # Linux: normalize `uname -m` output to ia32/x64/arm.
        p = subprocess.Popen(
            'uname -m | '
            'sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/"',
            shell=True, stdout=subprocess.PIPE)
        (p_stdout, _) = p.communicate()
        assert p.returncode == 0
        if options.bits == 64:
            bits = 64
        elif options.bits == 32:
            bits = 32
        elif p_stdout.find('64') >= 0:
            bits = 64
        else:
            bits = 32
        # xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap
        # the entire build step rather than each test (browser_headless=1).
        scons = ['xvfb-run', '--auto-servernum', python, 'scons.py']
    chrome_filename = FindChrome(src_dir, options)
    if options.jobs > 1:
        scons.append('-j%d' % options.jobs)
    scons.append('disable_tests=%s' % options.disable_tests)
    if options.buildbot is not None:
        scons.append('buildbot=%s' % (options.buildbot,))
    # Clean the output of the previous build.
    # Incremental builds can get wedged in weird ways, so we're trading speed
    # for reliability.
    shutil.rmtree(os.path.join(nacl_dir, 'scons-out'), True)
    # check that the HOST (not target) is 64bit
    # this is emulating what msvs_env.bat is doing
    if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
       '64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
        # 64bit HOST
        env['VS90COMNTOOLS'] = 'c:\\Program Files (x86)\\Microsoft Visual Studio 9.0\\Common7\\Tools\\'
        env['VS80COMNTOOLS'] = 'c:\\Program Files (x86)\\Microsoft Visual Studio 8.0\\Common7\\Tools\\'
    else:
        # 32bit HOST
        env['VS90COMNTOOLS'] = 'c:\\Program Files\\Microsoft Visual Studio 9.0\\Common7\\Tools\\'
        env['VS80COMNTOOLS'] = 'c:\\Program Files\\Microsoft Visual Studio 8.0\\Common7\\Tools\\'
    # Run nacl/chrome integration tests.
    # Note that we have to add nacl_irt_test to --mode in order to get
    # inbrowser_test_runner to run.
    # TODO(mseaborn): Change it so that inbrowser_test_runner is not a
    # special case.
    cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits,
                   '--mode=opt-host,nacl,nacl_irt_test',
                   'chrome_browser_path=%s' % chrome_filename,
                   ]
    if not options.integration_bot and not options.morenacl_bot:
        cmd.append('disable_flaky_tests=1')
    cmd.append('chrome_browser_tests')
    # Download the toolchain(s).
    if options.enable_pnacl:
        pnacl_toolchain = []
    else:
        pnacl_toolchain = ['--no-pnacl']
    RunCommand([python,
                os.path.join(script_dir, 'download_toolchains.py'),
                '--no-arm-trusted'] + pnacl_toolchain + ['TOOL_REVISIONS'],
               nacl_dir, os.environ)
    CleanTempDir()
    if options.enable_newlib:
        RunTests('nacl-newlib', cmd, nacl_dir, env)
    if options.enable_glibc:
        RunTests('nacl-glibc', cmd + ['--nacl_glibc'], nacl_dir, env)
    if options.enable_pnacl:
        # TODO(dschuff): remove this when streaming is the default
        os.environ['NACL_STREAMING_TRANSLATION'] = 'true'
        RunTests('pnacl', cmd + ['bitcode=1'], nacl_dir, env)
def MakeCommandLineParser():
    """Build the optparse parser for this script.

    Tri-state options default to -1 meaning "decide later in Main() based
    on the bot type".  Defaults for the bot-type flags are sniffed from the
    BUILDBOT_BUILDERNAME environment variable.
    """
    parser = optparse.OptionParser()
    parser.add_option('-m', '--mode', dest='mode', default='Debug',
                      help='Debug/Release mode')
    parser.add_option('-j', dest='jobs', default=1, type='int',
                      help='Number of parallel jobs')
    parser.add_option('--enable_newlib', dest='enable_newlib', default=-1,
                      type='int', help='Run newlib tests?')
    parser.add_option('--enable_glibc', dest='enable_glibc', default=-1,
                      type='int', help='Run glibc tests?')
    parser.add_option('--enable_pnacl', dest='enable_pnacl', default=-1,
                      type='int', help='Run pnacl tests?')
    # Deprecated, but passed to us by a script in the Chrome repo.
    # Replaced by --enable_glibc=0
    parser.add_option('--disable_glibc', dest='disable_glibc',
                      action='store_true', default=False,
                      help='Do not test using glibc.')
    parser.add_option('--disable_tests', dest='disable_tests',
                      type='string', default='',
                      help='Comma-separated list of tests to omit')
    builder_name = os.environ.get('BUILDBOT_BUILDERNAME', '')
    is_integration_bot = 'nacl-chrome' in builder_name
    parser.add_option('--integration_bot', dest='integration_bot',
                      type='int', default=int(is_integration_bot),
                      help='Is this an integration bot?')
    is_morenacl_bot = (
        'More NaCl' in builder_name or
        'naclmore' in builder_name)
    parser.add_option('--morenacl_bot', dest='morenacl_bot',
                      type='int', default=int(is_morenacl_bot),
                      help='Is this a morenacl bot?')
    # Not used on the bots, but handy for running the script manually.
    parser.add_option('--bits', dest='bits', action='store',
                      type='int', default=None,
                      help='32/64')
    parser.add_option('--browser_path', dest='browser_path', action='store',
                      type='string', default=None,
                      help='Path to the chrome browser.')
    parser.add_option('--buildbot', dest='buildbot', action='store',
                      type='string', default=None,
                      help='Value passed to scons as buildbot= option.')
    return parser
def Main():
    """Parse arguments, resolve tri-state defaults, and run BuildAndTest."""
    parser = MakeCommandLineParser()
    options, args = parser.parse_args()
    if options.integration_bot and options.morenacl_bot:
        parser.error('ERROR: cannot be both an integration bot and a morenacl bot')
    # Set defaults for enabling newlib.
    if options.enable_newlib == -1:
        options.enable_newlib = 1
    # Set defaults for enabling glibc.
    if options.enable_glibc == -1:
        if options.integration_bot or options.morenacl_bot:
            options.enable_glibc = 1
        else:
            options.enable_glibc = 0
    # Set defaults for enabling pnacl.
    if options.enable_pnacl == -1:
        if options.integration_bot or options.morenacl_bot:
            options.enable_pnacl = 1
        else:
            options.enable_pnacl = 0
    # Positional arguments are not accepted.
    if args:
        parser.error('ERROR: invalid argument')
    BuildAndTest(options)
if __name__ == '__main__':
    # Script entry point.
    Main()
| bsd-3-clause |
mrjacobagilbert/gnuradio | gr-filter/examples/reconstruction.py | 5 | 4279 | #!/usr/bin/env python
#
# Copyright 2010,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, digital
from gnuradio import filter
from gnuradio import blocks
from gnuradio.fft import window
import sys
import numpy
try:
from gnuradio import channels
except ImportError:
print("Error: Program requires gr-channels.")
sys.exit(1)
try:
from matplotlib import pyplot
except ImportError:
print("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).")
sys.exit(1)
fftlen = 8192
def main():
    """Channelize a modulated signal with a PFB, resynthesize it, and plot
    the original, per-channel, and reconstructed spectra."""
    N = 10000
    fs = 2000.0
    Ts = 1.0 / fs
    t = numpy.arange(0, N*Ts, Ts)
    # When playing with the number of channels, be careful about the filter
    # specs and the channel map of the synthesizer set below.
    nchans = 10
    # Build the filter(s)
    bw = 1000
    tb = 400
    proto_taps = filter.firdes.low_pass_2(1, nchans*fs,
                                          bw, tb, 80,
                                          window.WIN_BLACKMAN_hARRIS)
    print("Filter length: ", len(proto_taps))
    # Create a modulated signal
    npwr = 0.01
    data = numpy.random.randint(0, 256, N)
    rrc_taps = filter.firdes.root_raised_cosine(1, 2, 1, 0.35, 41)
    src = blocks.vector_source_b(data.astype(numpy.uint8).tolist(), False)
    mod = digital.bpsk_mod(samples_per_symbol=2)
    chan = channels.channel_model(npwr)
    rrc = filter.fft_filter_ccc(1, rrc_taps)
    # Split it up into pieces
    channelizer = filter.pfb.channelizer_ccf(nchans, proto_taps, 2)
    # Put the pieces back together again
    syn_taps = [nchans*t for t in proto_taps]
    synthesizer = filter.pfb_synthesizer_ccf(nchans, syn_taps, True)
    src_snk = blocks.vector_sink_c()
    snk = blocks.vector_sink_c()
    # Remap the location of the channels
    # Can be done in synth or channelizer (watch out for rotattions in
    # the channelizer)
    synthesizer.set_channel_map([ 0,  1,  2,  3,  4,
                                 15, 16, 17, 18, 19])
    # NOTE(review): 'tb' is reused here -- above it was the filter
    # transition band (400), now it becomes the top_block.  Harmless but
    # confusing; consider renaming one of them.
    tb = gr.top_block()
    tb.connect(src, mod, chan, rrc, channelizer)
    tb.connect(rrc, src_snk)
    vsnk = []
    for i in range(nchans):
        tb.connect((channelizer,i), (synthesizer, i))
        vsnk.append(blocks.vector_sink_c())
        tb.connect((channelizer,i), vsnk[i])
    tb.connect(synthesizer, snk)
    tb.run()
    # Discard the filter transient at the start of each capture.
    sin = numpy.array(src_snk.data()[1000:])
    sout = numpy.array(snk.data()[1000:])
    # Plot original signal
    fs_in = nchans*fs
    f1 = pyplot.figure(1, figsize=(16,12), facecolor='w')
    s11 = f1.add_subplot(2,2,1)
    s11.psd(sin, NFFT=fftlen, Fs=fs_in)
    s11.set_title("PSD of Original Signal")
    s11.set_ylim([-200, -20])
    s12 = f1.add_subplot(2,2,2)
    s12.plot(sin.real[1000:1500], "o-b")
    s12.plot(sin.imag[1000:1500], "o-r")
    s12.set_title("Original Signal in Time")
    start = 1
    skip = 2
    s13 = f1.add_subplot(2,2,3)
    s13.plot(sin.real[start::skip], sin.imag[start::skip], "o")
    s13.set_title("Constellation")
    s13.set_xlim([-2, 2])
    s13.set_ylim([-2, 2])
    # Plot channels
    nrows = int(numpy.sqrt(nchans))
    ncols = int(numpy.ceil(float(nchans) / float(nrows)))
    f2 = pyplot.figure(2, figsize=(16,12), facecolor='w')
    for n in range(nchans):
        s = f2.add_subplot(nrows, ncols, n+1)
        s.psd(vsnk[n].data(), NFFT=fftlen, Fs=fs_in)
        s.set_title("Channel {0}".format(n))
        s.set_ylim([-200, -20])
    # Plot reconstructed signal
    fs_out = 2*nchans*fs
    f3 = pyplot.figure(3, figsize=(16,12), facecolor='w')
    s31 = f3.add_subplot(2,2,1)
    s31.psd(sout, NFFT=fftlen, Fs=fs_out)
    s31.set_title("PSD of Reconstructed Signal")
    s31.set_ylim([-200, -20])
    s32 = f3.add_subplot(2,2,2)
    s32.plot(sout.real[1000:1500], "o-b")
    s32.plot(sout.imag[1000:1500], "o-r")
    s32.set_title("Reconstructed Signal in Time")
    start = 0
    skip = 4
    s33 = f3.add_subplot(2,2,3)
    s33.plot(sout.real[start::skip], sout.imag[start::skip], "o")
    s33.set_title("Constellation")
    s33.set_xlim([-2, 2])
    s33.set_ylim([-2, 2])
    pyplot.show()
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Allow clean Ctrl-C termination without a traceback.
        pass
| gpl-3.0 |
mkness/TheCannon | code/makeplot_test.py | 1 | 3991 | #!/usr/bin/python
import scipy
import numpy
from numpy import *
from scipy import ndimage
from scipy import interpolate
from numpy import loadtxt
import os
import numpy as np
from numpy import *
from matplotlib import pyplot
import matplotlib.pyplot as plt
from matplotlib.pyplot import axes
from matplotlib.pyplot import colorbar
from matplotlib.ticker import NullFormatter
import pyfits
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
# --- Global matplotlib styling: LaTeX text, serif font, tick sizes/padding.
# NOTE(review): `rc`, `rcParams` and (below) `cm` are not bound by the imports
# visible in this file; the script appears to rely on a pylab-style session
# (e.g. `from pylab import *`) -- confirm before running stand-alone.
rc('text', usetex=True)
rc('font', family='serif')
plt.rcParams['xtick.major.pad'] = 9
rcParams["xtick.labelsize"] = 16
rcParams["ytick.labelsize"] = 16
rcParams['xtick.major.pad']='8'
rcParams['ytick.major.pad']='8'
#rcParams['figure.figsize'] = 10, 10
rcParams['figure.figsize'] = 12.0, 14.0
# Three stacked panels: fitted Teff, logg, [Fe/H] against ASPCAP values.
fig, temp = pyplot.subplots(3,1, sharex=False, sharey=False)
ax1 = temp[0]
ax2 = temp[1]
ax3 = temp[2]
# NOTE(review): `params` (fitted labels) and `covs_params` (their covariance
# matrices) are not defined in this file -- presumably left over from a
# previous run in the same interactive session; verify.
params_labels = [params[:,0], params[:,1], params[:,2] , covs_params[:,0,0]**0.5, covs_params[:,1,1]**0.5, covs_params[:,2,2]**0.5 ]
testdir = "/Users/ness/Downloads/Apogee_raw/calibration_fields/4332/apogee/spectro/redux/r3/s3/a3/v304/4332/"
file2 = '4332_data_all_more.txt'
file2in = testdir+file2
# ASPCAP reference labels and their errors for the same stars.
t,t_err,g,g_err,feh,feh_err = loadtxt(file2in, usecols = (1,2,3,4,5,6), unpack =1)
# Keep stars with physical gravities, small Teff errors and sane metallicity.
pick = logical_and(g > 0, logical_and(t_err < 300, feh > -4.0) )
cval = ['k', 'b', 'r']
input_ASPCAP = [t, g, feh, t_err, g_err, feh_err]
cind = array(input_ASPCAP[1][pick])
listit_1 = [0,1,2]
listit_2 = [1,0,0]
axs = [ax1,ax2,ax3]
labels = ["ASPCAP log g", "ASPCAP Teff", "ASPCAP Teff"]
# One panel per label: scatter of fit vs. ASPCAP coloured by a second label,
# with x/y error bars tinted to match the colour map, plus a mean-error note.
for ax, num,num2,label1,x1,y1 in zip(axs, listit_1,listit_2,labels, [4800,3.0,0.3], [3400,1,-1.5]):
    cind = array(input_ASPCAP[num2][pick]).flatten()
    s1 = ax.scatter(input_ASPCAP[num][pick], params_labels[num][pick], c = cind, s = 30,alpha = 1.0, linewidths = 0 ,cmap = cm.jet )
    c_T = fig.colorbar(s1,ax=ax)
    c_T.set_label(label1,fontsize = 14,labelpad = 10 )
    a,b,c1 = ax.errorbar(input_ASPCAP[num][pick], params_labels[num][pick],yerr= params_labels[num+3][pick],marker='',ls='',zorder=0, fmt = None,elinewidth = 1,capsize = 0)
    a,b,c2 = ax.errorbar(input_ASPCAP[num][pick], params_labels[num][pick], xerr=input_ASPCAP[num+3][pick],marker='',ls='',zorder=0, fmt = None,elinewidth = 1,capsize = 0)
    g_color = c_T.to_rgba(cind)
    c1[0].set_color(g_color)
    c2[0].set_color(g_color)
    ax.text(x1,y1,"y-axis, $<\sigma>$ = "+str(round(mean(params_labels[num+3][pick]),2)),fontsize = 14)
# 1:1 reference lines and axis limits/labels for each panel.
ax1.plot([0,6000], [0,6000], linewidth = 1.5, color = 'k' )
ax2.plot([0,5], [0,5], linewidth = 1.5, color = 'k' )
ax3.plot([-3,2], [-3,2], linewidth = 1.5, color = 'k' )
ax1.set_xlim(3500, 5500)
ax2.set_xlim(0, 5)
ax3.set_xlim(-3, 2)
ax1.set_xlabel("ASPCAP Teff, [K]", fontsize = 14,labelpad = 5)
ax1.set_ylabel("NHR+ Teff, [K]", fontsize = 14,labelpad = 5)
ax2.set_xlabel("ASPCAP logg, [dex]", fontsize = 14,labelpad = 5)
ax2.set_ylabel("NHR+ logg, [dex]", fontsize = 14,labelpad = 5)
ax3.set_xlabel("ASPCAP [Fe/H], [dex]", fontsize = 14,labelpad = 5)
ax3.set_ylabel("NHR+ [Fe/H], [dex]", fontsize = 14,labelpad = 5)
# NOTE(review): `tme`, `gme`, `fehme` are also undefined in this file --
# presumably the fitted Teff/logg/[Fe/H] arrays from the session; confirm.
ax1.set_ylim(min(tme)-250, max(tme)+250)
ax2.set_ylim(round(min(gme)-0.2,1), round(max(gme)+0.2,1))
ax3.set_ylim(min(fehme)-0.4, max(fehme)+0.4)
# attach lines to plots
fig.subplots_adjust(hspace=0.22)
#fig.savefig('/Users/ness/Downloads/Apogee_Raw/calibration_apogeecontinuum/documents/plots/fits_all3.eps', transparent=True, bbox_inches='tight', pad_inches=0)
#fig.savefig('/Users/ness/Downloads/Apogee_Raw/calibration_apogeecontinuum/documents/plots/fits_all3_continuumcut.eps', transparent=True, bbox_inches='tight', pad_inches=0.2)
#fig.savefig('/Users/ness/Downloads/Apogee_Raw/calibration_apogeecontinuum/documents/plots/fits_all3_continuumcut2.eps', transparent=True, bbox_inches='tight', pad_inches=0.2)
#fig.savefig('/Users/ness/Downloads/Apogee_Raw/calibration_apogeecontinuum/documents/plots/fits_all3.eps', transparent=True, bbox_inches='tight', pad_inches=0.2)
#fig.subplots_adjust(wspace=0.3)
| mit |
dkasak/pacal | pacal/depvars/copulas.py | 1 | 35176 | """Set of copulas different types"""
from pacal.integration import *
from pacal.interpolation import *
from matplotlib.collections import PolyCollection
import pacal.distr
#from pacal import *
from pacal.segments import PiecewiseDistribution, MInfSegment, PInfSegment, Segment, _segint
from pacal.segments import PiecewiseFunction
from pacal.distr import Distr
from pacal.standard_distr import *
#from pacal.nddistr import NDDistr, NDInterpolatedDistr, NDFun
from pacal.utils import epsunique, bisect, fmin2
from pacal.indeparith import _findSegList, convdiracs
from pacal.integration import integrate_fejer2, integrate_iter
from pacal.depvars.nddistr import NDDistr, NDFun
import pylab as plt
import sympy
import numpy as np
from sympy import Symbol, diff, pprint, simplify
from pylab import meshgrid, contour, xlabel, ylabel, gca, figure, axis
import mpl_toolkits.mplot3d.axes3d as p3
# Optional SciPy dependency: fminbound provides bounded 1-D minimization.
# have_Scipy_optimize records whether the import succeeded so callers can
# fall back gracefully when SciPy is absent.
try:
    from scipy.optimize.optimize import fminbound
    have_Scipy_optimize = True
except ImportError:
    have_Scipy_optimize = False
class Copula(NDDistr):
    """Base class for copulas: multivariate distributions with the joint
    dependence structure separated from the 1-D marginals.

    Subclasses implement ``cpdf``/``ccdf`` (copula density/CDF on the unit
    cube); ``pdf``/``cdf`` compose them with the marginals' CDFs.
    """
    def __init__(self, marginals=None):
        # marginals: sequence of 1-D distribution objects, one per dimension.
        self.marginals = marginals
        super(Copula, self).__init__(len(self.marginals), Vars=self.marginals)
        self.a, self.b = self.ranges()
    def ranges(self):
        """Return per-dimension (lower, upper) support bounds of the marginals."""
        vars = self.marginals
        a = zeros_like(vars)
        b = zeros_like(vars)
        for i in range(len(vars)):
            a[i], b[i] = vars[i].range()
        return a, b
    def setMarginals(self, *marginals):
        # Replace marginals only if at least one Distr instance was passed.
        if len(marginals) > 0 and isinstance(marginals[0], pacal.distr.Distr):
            self.marginals = marginals
    def pdf(self, *X):
        """joint probability density function with marginals *X"""
        if self.marginals is None or len(self.marginals) == 0:
            # No marginals configured: treat each coordinate as U(0,1).
            U = UniformDistr()
            F = [U.get_piecewise_cdf_interp()(X[i]) for i in range(len(X))]
            return self.cpdf(*F)
        else:
            #assert len(self.marginals) >= len(X)
            # Sklar transform: c(F_1(x_1),...,F_d(x_d)) * prod_i f_i(x_i).
            mi = ones_like(X[0])
            for i in range(len(X)):
                mi = mi * self.marginals[i].get_piecewise_pdf()(X[i])
            F = [self.marginals[i].get_piecewise_cdf_interp()(X[i]) for i in range(len(X))]
            return np.nan_to_num(self.cpdf(*F) * mi)
            #return self.cpdf(*F) * mi
    def cdf(self, *X):
        """joint cumulative distribution function with given marginals at point (x,y)"""
        if self.marginals is None or len(self.marginals) == 0:
            return self.ccdf(*X)
        else:
            F = [self.marginals[i].get_piecewise_cdf_interp()(X[i]) for i in range(len(X))]
            return self.ccdf(*F)
    def dualcdf(self, *X):
        # Dual of the CDF: sum of marginal CDFs minus the copula CDF.
        si = zeros_like(X[0])
        for i in range(len(X)):
            si += self.marginals[i].get_piecewise_cdf_interp()(X[i])
        return si - self.ccdf(*X)
    def jpdf_(self, f, g, x, y):
        """joint probability density function with marginals f, g at point (x, y);
        f and g may be Distr objects or plain piecewise densities."""
        if isinstance(f, Distr):
            return self.cpdf(f.get_piecewise_cdf_interp()(x), g.get_piecewise_cdf_interp()(y)) * f.get_piecewise_pdf()(x) * g.get_piecewise_pdf()(y)
        else:
            return self.cpdf(f.cumint()(x), g.cumint()(y)) * f(x) * g(y)
    def jcdf_(self, f, g, x, y):
        """joint cumulative distribution function with marginals f, g at point (x,y)"""
        #return self.ccdf(f.get_piecewise_cdf()(X), g.get_piecewise_cdf()(Y))
        return self.ccdf(f.get_piecewise_cdf()(x), g.get_piecewise_cdf()(y))
    def cpdf(self, *X):
        """Copula density, joint probability density function with uniform U[0,1] marginals"""
        # Abstract: subclasses provide the actual density.
        #pass
        pass #return zeros_like(X[0])
    def ccdf(self, *X):
        """Copula, joint cumulative distribution function with uniform U[0,1] marginals"""
        # Abstract: subclasses provide the actual CDF.
        pass
    def debug_plot(self, n=40, show_pdf=False, azim=210, elev=30):
        """3-D wireframe plot of the joint CDF (default) or PDF, with the two
        marginal curves drawn on the back walls."""
        #Z = self.cdf(f.get_piecewise_cdf()(X), g.get_piecewise_cdf()(Y))
        #Z = self.jcdf(f, g, X, Y)
        if self.marginals is not None and len(self.marginals) > 1:
            f, g = self.marginals[:2]
            self.setMarginals((f, g))
        else:
            f, g = UniformDistr(), UniformDistr()
        Lf, Uf = f.ci(0.01)
        Lg, Ug = g.ci(0.01)
        deltaf = (Uf - Lf) / n
        deltag = (Ug - Lg) / n
        X, Y = meshgrid(arange(Lf, Uf, deltaf), arange(Lg, Ug, deltag))
        if not show_pdf:
            Z = self.cdf(X, Y)
            fig = figure(figsize=plt.figaspect(1))
            ax = fig.add_subplot(111, projection='3d', azim=azim, elev=elev)
            #ax = p3.Axes3D(fig)
            xf = arange(Lf, Uf, deltaf)
            xg = arange(Lg, Ug, deltag)
            cf = f.cdf(xf)
            cg = g.cdf(xg)
            ax.plot(xf, cf, zs=Ug, zdir='y', linewidth=3.0, color="k")
            ax.plot(xg, cg, zs=Uf, zdir='x', linewidth=3.0, color="k")
            ax.plot_wireframe(X, Y, Z, rstride=1, cstride=1, color='k', antialiased=True)#cmap=cm.jet
            cset = ax.contour(X, Y, Z, zdir='z', color='k', offset=0)
            ax.set_xlabel('$X$')
            ax.set_xlim3d(Lf, Uf)
            ax.set_ylabel('$Y$')
            ax.set_ylim3d(Lg, Ug)
            ax.set_zlabel('$Z$')
            ax.set_zlim3d(0, 1)
        else:
            fig = figure(figsize=plt.figaspect(1))
            ax2 = fig.add_subplot(111, projection='3d', azim=azim, elev=elev)
            Z2 = self.pdf(X, Y)
            xf = arange(Lf, Uf, deltaf)
            xg = arange(Lg, Ug, deltag)
            cf = f.pdf(xf)
            cg = g.pdf(xg)
            ax2.plot(xf, cf, zs=Ug, zdir='y', linewidth=3.0, color="k")
            ax2.plot(xg, cg, zs=Uf, zdir='x', linewidth=3.0, color="k")
            ax2.plot_wireframe(X, Y, Z2, rstride=1, cstride=1, color='k', antialiased=True)
            cset = ax2.contour(X, Y, Z2, color='k', zdir='z', offset=0)
            ax2.set_xlabel('$X$')
            ax2.set_xlim3d(Lf, Uf)
            ax2.set_ylabel('$Y$')
            ax2.set_ylim3d(Lg, Ug)
            ax2.set_zlabel('$Z$')
            zlim = 1.01*np.max(array([np.max(Z2), max(cf), max(cg)]))
            ax2.set_zlim3d(0,zlim)
    def _segint(self, fun, L, U, force_minf = False, force_pinf = False, force_poleL = False, force_poleU = False,
                debug_info = False, debug_plot = False):
        """Integrate fun over [L, U], dispatching on (in)finite endpoints and
        forced pole/infinity handling; returns (integral, error estimate)."""
        #print params.integration_infinite.exponent
        if L > U:
            if params.segments.debug_info:
                print "Warning: reversed integration interval, returning 0"
            return 0, 0
        if L == U:
            return 0, 0
        if force_minf:
            #i, e = integrate_fejer2_minf(fun, U, a = L, debug_info = debug_info, debug_plot = True)
            i, e = integrate_wide_interval(fun, L, U, debug_info = debug_info, debug_plot = debug_plot)
        elif force_pinf:
            #i, e = integrate_fejer2_pinf(fun, L, b = U, debug_info = debug_info, debug_plot = debug_plot)
            i, e = integrate_wide_interval(fun, L, U, debug_info = debug_info, debug_plot = debug_plot)
        elif not isinf(L) and not isinf(U):
            # Finite interval: variable transforms absorb endpoint poles.
            if force_poleL and force_poleU:
                i1, e1 = integrate_fejer2_Xn_transformP(fun, L, (L+U)*0.5, debug_info = debug_info, debug_plot = debug_plot)
                i2, e2 = integrate_fejer2_Xn_transformN(fun, (L+U)*0.5, U, debug_info = debug_info, debug_plot = debug_plot)
                i, e = i1+i2, e1+e2
            elif force_poleL:
                i, e = integrate_fejer2_Xn_transformP(fun, L, U, debug_info = debug_info, debug_plot = debug_plot)
            elif force_poleU:
                i, e = integrate_fejer2_Xn_transformN(fun, L, U, debug_info = debug_info, debug_plot = debug_plot)
            else:
                #i, e = integrate_fejer2(fun, L, U, debug_info = debug_info, debug_plot = debug_plot)
                i, e = integrate_wide_interval(fun, L, U, debug_info = debug_info, debug_plot = debug_plot)
        elif isinf(L) and isfinite(U) :
            #i, e = integrate_wide_interval(fun, L, U, debug_info = debug_info, debug_plot = debug_plot)
            i, e = integrate_fejer2_minf(fun, U, debug_info = debug_info, debug_plot = debug_plot, exponent = params.integration_infinite.exponent,)
        elif isfinite(L) and isinf(U) :
            #i, e = integrate_wide_interval(fun, L, U, debug_info = debug_info, debug_plot = debug_plot)
            i, e = integrate_fejer2_pinf(fun, L, debug_info = debug_info, debug_plot = debug_plot, exponent = params.integration_infinite.exponent,)
        elif L<U:
            i, e = integrate_fejer2_pminf(fun, debug_info = debug_info, debug_plot = debug_plot, exponent = params.integration_infinite.exponent,)
        else:
            # NOTE(review): i, e are undefined on this path -- the return below
            # would raise NameError after the diagnostic print; confirm intent.
            print "errors in _conv_div: x, segi, segj, L, U =", L, U
        return i,e
    def cov(self, i=None, j=None):
        """Covariance of marginals i and j; full d x d matrix when called
        without arguments."""
        if i is not None and j is not None:
            var, c_var = self.prepare_var([i, j])
            dij = self.eliminate(c_var)
            # NOTE(review): mixing dij.marginals[0] with self.marginals[1]
            # looks suspicious (dij.marginals[1] seems intended); confirm.
            f, g = dij.marginals[0], self.marginals[1]
            fmean = f.mean()
            gmean = g.mean()
            f0, f1 = f.get_piecewise_pdf().range()
            g0, g1 = g.get_piecewise_pdf().range()
            # Leftover debugging output (Python 2 print).
            print fmean, gmean, var, c_var, f0, f1, g0, g1
            if i == j:
                # NOTE(review): duplicated "c, e =" target kept as-is.
                c, e = c, e = integrate_fejer2(lambda x: (x - fmean) ** 2 * f.pdf(x), f0, f1)
            else:
                c, e = integrate_iter(lambda x, y: (x - fmean) * (y - gmean) * dij.pdf(x, y), f0, f1, g0, g1)
            return c
        else:
            c = zeros((self.d, self.d))
            for i in range(self.d):
                for j in range(self.d):
                    c[i, j] = self.cov(i, j)
            return c
    def corrcoef(self, i=None, j=None):
        """Pearson correlation of marginals i and j (cov normalized by the
        standard deviations); full matrix when called without arguments."""
        if i is not None and j is not None:
            var, c_var = self.prepare_var([i, j])
            dij = self.eliminate(c_var)
            f, g = dij.marginals[0], self.marginals[1]
            return self.cov(i, j)/f.std()/g.std()
        else:
            c = zeros((self.d, self.d))
            for i in range(self.d):
                for j in range(self.d):
                    c[i, j] = self.corrcoef(i, j)
            return c
    def tau(self, i=None, j=None):
        """Kendall's tau: 4*\int C(x,y) dC(x,y)-1
        """
        if i is not None and j is not None:
            var, c_var = self.prepare_var([i, j])
            dij = self.eliminate(c_var)
            f, g = dij.marginals[0], self.marginals[1]
            f0, f1 = f.get_piecewise_pdf().range()
            g0, g1 = g.get_piecewise_pdf().range()
            if i == j:
                # tau of a variable with itself is 1 by definition.
                c, e = 1, 0
            else:
                c, e = integrate_iter(lambda x, y: dij.cdf(x, y) * dij.pdf(x, y), f0, f1, g0, g1)
                c = 4 * c - 1
            return c
        else:
            # NOTE(review): the matrix form delegates to ctau (copula-space
            # tau) rather than tau itself; confirm this is intended.
            c = zeros((self.d, self.d))
            for i in range(self.d):
                for j in range(self.d):
                    c[i, j] = self.ctau(i, j)
            return c
    def beta(self, i=None, j=None):
        """Blomqvist's beta: 4 * C(0.5, 0.5) - 1
        """
        return 4*self.ccdf(0.5,0.5)-1
    def rho_s(self, i=None, j=None):
        """Spearman's rho: 12*\int x*y dC(x,y)-3 = 12 \int C(x,y)dxdy - 3
        """
        if i is not None and j is not None:
            var, c_var = self.prepare_var([i, j])
            dij = self.eliminate(c_var)
            if i == j:
                c, e = 1, 0
            else:
                #c, e = integrate_iter(lambda x, y: x * y * dij.cpdf(x, y), 0.0, 1.0, 0.0, 1.0)
                c, e = integrate_iter(lambda x, y: dij.ccdf(x, y), 0.0, 1.0, 0.0, 1.0)
                c = 12 * c - 3
            return c
        else:
            c = zeros((self.d, self.d))
            for i in range(self.d):
                for j in range(self.d):
                    c[i, j] = self.rho_s(i, j)
            return c
    def ctau(self, i=None, j=None):
        """Kendall's tau: 4*\int C(x,y) dC(x,y)-1
        (computed directly on the copula, i.e. with uniform marginals)
        """
        if i is not None and j is not None:
            var, c_var = self.prepare_var([i, j])
            dij = self.eliminate(c_var)
            if i == j:
                c, e = 1, 0
            else:
                #c, e = integrate_iter(lambda x, y: x * y * dij.cpdf(x, y), 0.0, 1.0, 0.0, 1.0)
                c, e = integrate_iter(lambda x, y: dij.ccdf(x, y) * dij.cpdf(x, y), 0.0, 1.0, 0.0, 1.0)
                c = 4 * c - 1
            return c
        else:
            c = zeros((self.d, self.d))
            for i in range(self.d):
                for j in range(self.d):
                    c[i, j] = self.ctau(i, j)
            return c
class PiCopula(Copula):
    """Independence (product) copula: C(u_1, ..., u_d) = u_1 * ... * u_d."""
    def __init__(self, marginals=None):
        super(PiCopula, self).__init__(marginals=marginals)
    def cpdf(self, *X):
        # The density of the independence copula is identically 1.
        return ones_like(X[0])
    def ccdf(self, *X):
        # Plain coordinate-wise product.
        prod = ones_like(X[0])
        for coord in X:
            prod = prod * coord
        return prod
class MCopula(Copula):
    """Upper Frechet-Hoeffding bound copula M (comonotonicity):
    ccdf computes the coordinate-wise minimum."""
    def __init__(self, marginals=None):
        super(MCopula, self).__init__(marginals)
        # M is singular; "integration" degenerates to a 1-D minimization.
        self._segint = self._segmin
    def cpdf(self, *X):
        # M has no absolutely continuous density; report 0 everywhere.
        return zeros_like(X[0])#self.ccdf(*X)
    def ccdf(self, *X):
        # Running minimum across the coordinates.
        mi = zeros_like(X[0])+1
        for xi in X[0:]:
            xia = array(xi)
            ind = xia < mi
            if isscalar(mi) | size(mi)==1:
                # NOTE(review): in the size-1 case mi is overwritten
                # unconditionally (ind is ignored), so the minimum is not
                # actually taken -- compare WCopula.ccdf; confirm.
                mi = xia
            else:
                mi[ind] = xia[ind]
        return mi
    def _segmin(self, fun, L, U, force_minf = False, force_pinf = False, force_poleL = False, force_poleU = False,
                debug_info = False, debug_plot = False):
        # Locate the minimizer of fun on [L, U]; error estimate is 0.
        xopt = fmin2(lambda x: fun(float(x)), L, U, xtol = 1e-16)
        return xopt, 0#fun(xopt), 0
    def debug_plot(self, n=40, show_pdf=False, azim=210, elev=30):
        """Plot the joint CDF wireframe, or (show_pdf=True) the singular mass
        concentrated on the curve F(x) = G(y)."""
        #Z = self.cdf(f.get_piecewise_cdf()(X), g.get_piecewise_cdf()(Y))
        #Z = self.jcdf(f, g, X, Y)
        if self.marginals is not None and len(self.marginals) > 1:
            f, g = self.marginals[:2]
            self.setMarginals((f, g))
        else:
            f, g = UniformDistr(), UniformDistr()
        Lf, Uf = f.ci(0.01)
        Lg, Ug = g.ci(0.01)
        deltaf = (Uf - Lf) / n
        deltag = (Ug - Lg) / n
        X, Y = meshgrid(arange(Lf, Uf, deltaf), arange(Lg, Ug, deltag))
        if not show_pdf:
            Z = self.cdf(X, Y)
            Z2 = self.pdf(X, Y)
            fig = figure(figsize=plt.figaspect(1))
            ax = fig.add_subplot(111, projection='3d', azim=azim, elev=elev)
            #ax = p3.Axes3D(fig)
            xf = arange(Lf, Uf, deltaf)
            xg = arange(Lg, Ug, deltag)
            cf = f.cdf(xf)
            cg = g.cdf(xg)
            ax.plot(xf, cf, zs=Ug, zdir='y', linewidth=3.0, color="k")
            ax.plot(xg, cg, zs=Uf, zdir='x', linewidth=3.0, color="k")
            cset = ax.contour(X, Y, Z, zdir='z', offset=0)
            ax.plot_wireframe(X, Y, Z, rstride=1, cstride=1, color='k', antialiased=True)#cmap=cm.jet
            ax.set_xlabel('$X$')
            ax.set_xlim3d(Lf, Uf)
            ax.set_ylabel('$Y$')
            ax.set_ylim3d(Lg, Ug)
            ax.set_zlabel('$Z$')
            ax.set_zlim3d(0, 1)
        # plot of F(x)=G(Y)
        else:
            fig = figure(figsize=plt.figaspect(1))
            ax = fig.add_subplot(111, projection='3d')
            #ax = fig.add_subplot(122, projection='3d')
            t = linspace(0.01, 0.99,40)
            X = f.quantile(t)
            Y = g.quantile(t)
            Z = f(X)*g(Y)
            # NOTE(review): xf and xg are not defined on this branch (only in
            # the CDF branch above) -- the two lines below would raise
            # NameError; confirm and fix upstream.
            cf = f.pdf(xf)
            cg = g.pdf(xg)
            ax.plot(xf, cf, zs=Ug, zdir='y', linewidth=3.0, color="k")
            ax.plot(xg, cg, zs=Uf, zdir='x', linewidth=3.0, color="k")
            ax.plot_surface(np.vstack([X,X]), np.vstack([Y,Y]), np.vstack([np.zeros_like(Z),Z]),
                            cstride = 1, rstride = 1,# cmap=cm.jet,
                            linewidth = -1, edgecolor="k", color = "c", alpha=0.7, antialiased = True)
            ax.axis((Lf, Uf, Lg, Ug))
            zlim = 1.01*np.max(array([max(Z), max(cf), max(cg)]))
            ax.set_zlim3d(0,zlim)
class WCopula(MCopula):
    """Lower Frechet-Hoeffding bound copula W (countermonotonicity):
    ccdf computes max(u_1 + ... + u_d - 1, 0)."""
    def __init__(self, marginals=None):
        super(WCopula, self).__init__(marginals)
        # For W the segment "integral" is a bounded maximization instead.
        self._segint = self._segmax
    def cpdf(self, *X):
        # W is singular; there is no absolutely continuous density.
        return zeros_like(X[0])#self.ccdf(*X)
    def ccdf(self, *X):
        total = zeros_like(X[0])
        for coord in X[0:]:
            total += array(coord)
        total = total - 1
        below = (total < 0)
        if isscalar(total) | size(total) == 1:
            if below:
                total = 0.0
        else:
            total[below] = 0
        return total
    def _segmax(self, fun, L, U, force_minf = False, force_pinf = False, force_poleL = False, force_poleU = False,
                debug_info = False, debug_plot = False):
        # Locate the maximizer of fun on [L, U] by minimizing 1 - fun.
        xopt = fmin2(lambda x: 1-fun(float(x)), L, U, xtol = 1e-16)
        return xopt, 0
    def _segmin(self, fun, L, U, force_minf = False, force_pinf = False, force_poleL = False, force_poleU = False,
                debug_info = False, debug_plot = False):
        # Locate the minimizer of fun on [L, U].
        xopt = fmin2(lambda x: fun(float(x)), L, U, xtol = 1e-16)
        return xopt, 0
class ArchimedeanCopula(Copula):
    """Archimedean copula built from a generator ``fi``:
    C(u_1,...,u_d) = fi_inv(fi(u_1) + ... + fi(u_d)).
    ``fi_deriv`` is fi'; ``fi_inv_nth_deriv`` is the d-th derivative of
    fi_inv, used to assemble the copula density."""
    # TODO
    def __init__(self, fi=log, fi_deriv=lambda s: 1 / s,
                 fi_inv=exp, fi_inv_nth_deriv=exp,
                 marginals=None):
        super(ArchimedeanCopula, self).__init__(marginals)
        #self.theta = Symbol('theta')
        self.fi = fi
        self.fi_deriv = fi_deriv
        self.fi_inv = fi_inv
        self.fi_inv_nth_deriv = fi_inv_nth_deriv
        #self.debug_info_()
    def debug_info_(self):
        """Pretty-print the symbolic generator sums and the resulting copula
        CDF/density (debugging aid; Python 2 print statements)."""
        vars = self.symVars
        #for i in range(self.d):
        # vars.append(sympy.Symbol("u{0}".format(i + 1)))
        si = 0
        for i in range(self.d):
            si += self.fi(vars[i])
        pi = 1;
        for i in range(self.d):
            pi *= self.fi_deriv(vars[i])
        print "si=\n", pprint(si)
        print "pi=\n", pprint(pi)
        print "C=\n", pprint(self.fi_inv(si))
        #print "C=\n", pprint(self.ccdf(*tuple(vars)))
        #print "c=\n", pprint(sympy.simplify(self.fi_inv_nth_deriv(si) * pi))
        print "c=\n", pprint(self.fi_inv_nth_deriv(si) * pi)
        #print "c=\n", pprint(self.cpdf(*tuple(vars)))
    def tau_c(self):
        # Kendall's tau via the generator: 1 + 4 * int_0^1 fi(t)/fi'(t) dt.
        return 1 + 4 * integrate_fejer2(lambda t : self.fi(t) / self.fi_deriv(t), 0, 1)[0]
    def cpdf(self, *X):
        """Copula density: fi_inv^(d)(sum_i fi(u_i)) * prod_i fi'(u_i)."""
        assert len(X) == len(self.marginals), "incorrect copula dimension"
        si = zeros_like(X[0])
        for xi in X:
            si = si + self.fi(xi)
        si = self.fi_inv_nth_deriv(si)
        pi = ones_like(X[0])
        for xi in X:
            pi = pi * self.fi_deriv(xi)
        return si * pi
    def ccdf(self, *X):
        """Copula CDF: fi_inv(sum_i fi(u_i)), with negative sums clipped to 0."""
        assert len(X) == len(self.marginals), "incorrect copula dimension"
        si = zeros_like(X[0])
        for xi in X:
            si += self.fi(xi)
        ind = (si < 0) # or isnan(si)
        #if len(ind)>0:
        si[ind] = 0.0
        si = self.fi_inv(si)
        return si
class ArchimedeanSymbolicCopula(ArchimedeanCopula):
    """Archimedean copula whose generator is given symbolically (sympy);
    the derivative, inverse generator and its d-th derivative are derived
    symbolically and lambdified for numeric evaluation."""
    # TODO
    def __init__(self,
                 fi=lambda t, theta: log(t),
                 fi_inv=None, #lambda t, theta:(-sympy.log(t)) ** theta,
                 theta=2,
                 marginals=None):
        # fi(t, theta): symbolic generator; fi_inv may be None, in which case
        # sympy.solve inverts the generator automatically.
        self.theta = float(theta)#Symbol('theta')
        self.t = Symbol('t')
        self.s = Symbol('s')
        self.d = len(marginals)
        self.fi_ = fi
        self.fi_inv_ = fi_inv
        self.sym_fi = fi(self.t, self.theta)
        self.sym_fi_deriv = sympy.diff(self.sym_fi, self.t)
        if fi_inv is None:
            self.sym_fi_inv = sympy.solve(self.sym_fi - self.s, self.t)[0]
        else:
            self.sym_fi_inv = fi_inv(self.s, self.theta)
        self.sym_fi_inv_nth_deriv = sympy.diff(self.sym_fi_inv, self.s, self.d)
        #self.debug_info()
        super(ArchimedeanSymbolicCopula, self).__init__(fi=sympy.lambdify(self.t, self.sym_fi, "numpy"),
                                                        fi_deriv=sympy.lambdify(self.t, self.sym_fi_deriv, "numpy"),
                                                        fi_inv=sympy.lambdify(self.s, self.sym_fi_inv, "numpy"),
                                                        fi_inv_nth_deriv=sympy.lambdify(self.s, self.sym_fi_inv_nth_deriv, "numpy"),
                                                        marginals=marginals)
        # Symbolic copula CDF C(u_1,...,u_d), kept for differentiation later.
        vars = self.symVars
        si = 0
        for i in range(self.d):
            si += self.fi_(vars[i], self.theta)
        self.sym_C = self.fi_inv_(si, self.theta)
    def eliminate(self, var):
        """Marginalize out the variables in `var`, returning a lower-dimensional
        copula of the same family (same generator and theta)."""
        var, c_var = self.prepare_var(var)
        c_marginals = [self.marginals[i] for i in c_var]
        if len(var) == 0:
            return self
        return ArchimedeanSymbolicCopula(fi=self.fi_,
                                         fi_inv=self.fi_inv_,
                                         theta=self.theta,
                                         marginals=c_marginals)
    def ccond(self, var):
        """It returns conditional copula f([var, c_vars]) = C(c_var | var)
        """
        var, c_var = self.prepare_var(var)
        symvars = [self.symVars[i] for i in var]
        # Differentiate the symbolic CDF w.r.t. each conditioning variable.
        DC = self.sym_C
        for i in range(len(self.Vars)):
            if i in set(var):
                DC = sympy.diff(DC, self.symVars[i])
            else:
                pass
        # NOTE(review): dC is computed but never used (lambdify is called a
        # second time in the return); harmless duplication.
        dC = sympy.lambdify(self.symVars, DC, "numpy")
        return NDFun(self.d, self.Vars, sympy.lambdify(self.symVars, DC, "numpy"))
    def condition(self, var, *X):
        """It returns conditional pdf for given copula
        f(c_var) = Pr(c_var | var=X)
        """
        var, c_var = self.prepare_var(var)
        num = self.pdf
        den = self.eliminate(c_var)
        def fun_(*Y_):
            # Interleave the fixed conditioning values X with the free
            # arguments Y_ in the original variable order.
            j, k = 0, 0
            Y, Yvar = [], []
            #dF = ones_like(X[0])
            for i in range(len(self.Vars)):
                if i in set(var):
                    Y.append(X[j])
                    Yvar.append(X[j])
                    j += 1
                else:
                    Y.append(Y_[k])
                    k += 1
            # NOTE(review): the denominator evaluates the eliminated copula at
            # X (the conditioning values) -- looks intended as the marginal
            # density of the conditioning variables; confirm.
            return num(*Y) / den.pdf(*X)
        return NDFun(len(c_var), [self.Vars[i] for i in c_var], fun_)
    def conditionCDF(self, var, *X):
        """It returns conditional cdf for given copula
        f(c_var) = Pr(Y<c_var | var=X)
        """
        funcond = self.ccond(var)
        var, c_var = self.prepare_var(var)
        new_cond = var
        def fun_(*Y_):
            # Map all coordinates through their marginal CDFs, fixing the
            # conditioning coordinates to X.
            j, k = 0, 0
            Y = []
            # NOTE(review): dF is accumulated but not used in the return.
            dF = ones_like(X[0])
            for i in range(len(self.Vars)):
                if i in set(var):
                    Y.append(self.marginals[i].get_piecewise_cdf()(X[j]))
                    j += 1
                else:
                    Y.append(self.marginals[i].get_piecewise_cdf()(Y_[k]))
                    dF *= self.marginals[i].get_piecewise_pdf()(Y_[k])
                    k += 1
            return funcond(*Y)
        return NDFun(len(new_cond), [self.Vars[i] for i in c_var], fun_)
    def condfun(self, var):
        """It returns conditional cdf function f([var, c_vars]) = Pr(Y<c_var | var)
        """
        funcond = self.ccond(var)
        var, c_var = self.prepare_var(var)
        new_cond = var
        def fun_(*X):
            j, k = 0, 0
            Y = []
            # NOTE(review): dF is accumulated but not used in the return.
            dF = ones_like(X[0])
            for i in range(len(self.Vars)):
                Y.append(self.marginals[i].get_piecewise_cdf()(X[i]))
                if i in set(var):
                    pass
                else:
                    dF *= self.marginals[i].get_piecewise_pdf()(X[i])
            return funcond(*Y)
        return NDFun(self.d, self.Vars, fun_)
    def debug_info(self):
        """Print the symbolic generator, its derivative and inverse, both
        pretty-printed and as LaTeX (Python 2 print statements)."""
        #self.fi_inv_defiv = simplify(sympy.diff(self.sym_fi_inv(self.s, self.theta), self.s))
        print "theta=", self.theta
        print "fi(theta)=", self.fi_(self.t, sympy.Symbol("theta"))
        print "fi=\n", pprint(self.sym_fi)
        print "fi_deriv=\n", pprint(self.sym_fi_deriv)
        print "fi_inv=\n", self.sym_fi_inv, ",\n", pprint(self.sym_fi_inv)
        print "fi_inv_nth_deriv=\n", pprint(self.sym_fi_inv_nth_deriv)
        print "fi=\n", sympy.latex(self.sym_fi)
        print "fi_deriv=\n", sympy.latex(self.sym_fi_deriv)
        print "fi_inv=\n", self.sym_fi_inv, ",\n", sympy.latex(self.sym_fi_inv)
        print "fi_inv_nth_deriv=\n", sympy.latex(self.sym_fi_inv_nth_deriv)
    def rand2d_invcdf(self, n):
        """Draw n samples from the bivariate copula by conditional inversion:
        sample u from the first marginal, then invert the conditional CDF."""
        u = self.marginals[0].rand_invcdf(n)
        t = UniformDistr().rand(n)
        v = zeros_like(t)
        for i in range(len(u)):
            #Cd = self.condition([0],u[i])
            #print i
            v[i] = self.conditionCDF([0], u[i]).distr_pdf.inverse(t[i])
            #v[i] = bisect(lambda x : condition(x,u[i])-t[i], 1e-50,1)
        return u, v
class GumbelCopula2d(Copula):
    """Bivariate Gumbel copula with explicit (non-symbolic) generator
    (-log t)**theta and hand-derived density."""
    def __init__(self, theta=3.1, marginals=None):
        super(GumbelCopula2d, self).__init__(marginals)
        self.theta = theta
        # Cache quantities reused by fi_inv / cpdf.
        self.one_over_theta = 1.0 / theta
        self.theta_square = theta ** 2
    def fi(self, t):
        # Gumbel generator.
        return pow(-np.log(t), self.theta)
    def fi_inv(self, s):
        # Inverse generator: exp(-s**(1/theta)).
        return exp(-s ** self.one_over_theta)
    def cpdf(self, *X):
        # Sum of generator values over the coordinates.
        gen_sum = zeros_like(X[0])
        for u in X:
            gen_sum += self.fi(u)
        # Closed-form second mixed derivative of fi_inv at gen_sum ...
        dens = (self.fi_inv(gen_sum)
                * (gen_sum ** (self.one_over_theta - 2.0)
                   * (-1.0 + self.theta + gen_sum ** self.one_over_theta))
                / self.theta_square)
        # ... times the product of the generator derivatives.
        for u in X:
            dens *= self.theta * self.fi(u) ** (1 - self.one_over_theta) / u
        return dens
    def ccdf(self, *X):
        gen_sum = zeros_like(X[0])
        for u in X:
            gen_sum += self.fi(u)
        return self.fi_inv(gen_sum)
class GumbelCopula(ArchimedeanSymbolicCopula):
    """Gumbel copula with generator (-log t)**theta; C(theta=1) = Pi and
    C(theta=+Inf) = M.  (The previous docstring said "Clayton copula" --
    clearly a copy/paste error; the usual Gumbel parameterization requires
    theta >= 1, which is not validated here.)"""
    def __init__(self, theta=3.1, marginals=None):
        super(GumbelCopula, self).__init__(fi=self.fi_, fi_inv=self.fi_inv_,
                                           theta=theta, marginals=marginals)
    def fi_(self, t, theta):
        # Gumbel generator (symbolic).
        return (-sympy.log(t)) ** theta
    def fi_inv_(self, s, theta):
        # Inverse generator: exp(-s**(1/theta)) (symbolic).
        return sympy.exp(-(s ** (1 / theta)))
class ClaytonCopula(ArchimedeanSymbolicCopula):
    """Clayton copula, C(theta=-1) = W, C(theta=0) = Pi, C(theta=+Inf) = M"""
    def __init__(self, theta=3.1, marginals=None):
        super(ClaytonCopula, self).__init__(fi=self.fi_, fi_inv=self.fi_inv_,
                                            theta=theta, marginals=marginals)
    def fi_(self, t, theta):
        # Clayton generator: (t**(-theta) - 1) / theta.
        return 1 / theta * (t ** (-theta) - 1)
    def fi_inv_(self, s, theta):
        # Inverse generator: (1 + theta*s)**(-1/theta).
        return (1 + s * theta) ** (-1 / theta)
class FrankCopula(ArchimedeanSymbolicCopula):
    """Frank copula, C(theta=-Inf) = W, C(theta=0) ~ Pi, C(theta=+Inf) = M.
    (The previous docstring said "Clayton copula" -- a copy/paste error;
    these limits match FrankCopula2d below.)"""
    def __init__(self, theta=3.1, marginals=None):
        # NOTE(review): const2 is computed with numpy's exp before the theta
        # attribute exists on the base class, and fi_ below mixes numpy exp
        # (on self.theta) with sympy expressions -- confirm this is intended.
        self.const2 = exp(-theta) - 1.0
        super(FrankCopula, self).__init__(fi=self.fi_, fi_inv=self.fi_inv_,
                                          theta=theta, marginals=marginals)
    def fi_(self, t, theta):
        # Frank generator: -log((exp(-t*theta) - 1) / (exp(-theta) - 1)).
        return -sympy.log((sympy.exp(-t * theta) - 1) / (exp(-self.theta) - 1.0))
    # def fi_(self, t, theta):
    #     return - log((exp(-t * theta) - 1) / (exp(-self.theta) - 1.0))
    def fi_inv_(self, s, theta):
        # Inverse generator (symbolic).
        return -sympy.log(sympy.exp(-s - theta) - sympy.exp(-s) + 1) / theta
class FrankCopula2d(Copula):
    """Frank copula, C(theta=-Inf) = W, C(theta=0)~Pi, C(theta=+Inf)=M
    B3 in H. Joe pp. 139-
    Numerically careful explicit implementation using the logexp_* helpers
    defined below.
    """
    def __init__(self, theta=1.0, marginals=None):
        self.theta = theta # delta
        # eta = 1 - exp(-theta), computed stably via expm1.
        self.eta = -expm1(-self.theta)
        self.one_over_theta = 1.0 / theta
        self.theta_square = theta ** 2
        super(FrankCopula2d, self).__init__(marginals)
    def fi(self, t):
        # Frank generator, expressed through the stable logexp_m1 helper.
        return logexp_m1(t * self.theta) - logexp_m1(self.theta)
    def fi_inv(self, s):
        # Inverse generator; branch on the sign of expm1(-theta) to pick the
        # numerically stable log-form.
        if expm1(-self.theta) > 0:
            return -1.0 / self.theta * logexp_p1(-s, expm1(-self.theta))
        elif expm1(-self.theta) < 0:
            return -1.0 / self.theta * log_1m_exp(-s, expm1(-self.theta))
        else:
            return -1.0 / self.theta * logexp_p1(-s, 0)
    def cpdf(self, *X):
        """Frank copula density at the points in *X."""
        si = zeros_like(X[0])
        pi = ones_like(X[0])
        n = len(X)
        for xi in X:
            si += xi
            pi *= -np.expm1(-self.theta * xi)
        yi = self.theta * self.eta * np.exp(-self.theta * si) / (self.eta - pi) ** n
        return yi
    def ccdf(self, *X):
        """Frank copula CDF at the points in *X."""
        pi = ones_like(X[0])
        for xi in X:
            pi *= -expm1(-self.theta * xi)
        yi = -self.one_over_theta * np.log1p(-pi / self.eta)
        return yi
def logexp_p1(x, a=1.0):
    """Numerically guarded evaluation of log(|a|*exp(x) + 1).

    The multiplier's magnitude is folded into the argument via log|a|, then
    log1p(exp(.)) is computed; extreme arguments are replaced by their
    asymptotic forms so intermediate exp() overflow/underflow is harmless.

    Parameters
    ----------
    x : scalar or ndarray
    a : scalar, default 1.0 (only |a| is used)

    Returns
    -------
    Same shape as x.
    """
    x = x + log(abs(a))
    yy = log1p(exp(x))
    if isscalar(x):
        # Fixed: the original always used boolean-array indexing (yy[ind]),
        # which raises TypeError for scalar inputs; handle scalars the same
        # way logexp_m1 below does.
        if exp(x) > 1e16:
            yy = x
        if exp(x) < 1e-16:
            yy = exp(x)
    else:
        ind = exp(x) > 1e16
        yy[ind] = x[ind]
        ind = exp(x) < 1e-16
        yy[ind] = exp(x[ind])
    return yy
def logexp_m1(x, a=1.0):
    """Numerically guarded evaluation of -log(|expm1(-(x + log|a|))|).

    Scalars and arrays are both supported; extreme arguments fall back to
    their asymptotic forms (x itself, or exp(-x)) to sidestep overflow and
    catastrophic cancellation.
    """
    shifted = x + log(abs(a))
    result = -log(abs(expm1(-shifted)))
    if isscalar(shifted):
        if exp(-shifted) > 1e16:
            result = shifted
        if exp(-shifted) < 1e-16:
            result = exp(-shifted)
    else:
        big = exp(-shifted) > 1e16
        result[big] = shifted[big]
        tiny = exp(-shifted) < 1e-16
        result[tiny] = exp(-shifted[tiny])
    return result
def log_1m_exp(x, a=1.0):
    """Numerically guarded evaluation of log(|expm1(x + log|a|)|).

    For large x this tends to x + log|a|; for very negative x it tends to
    -exp(x + log|a|); both limits are substituted explicitly.

    NOTE(review): the historical docstring claimed "-log(1-a*exp(-x))",
    which does not match the computation performed here; this documents what
    the code actually does.

    Fixed: scalar inputs previously raised TypeError because the result was
    always indexed with a boolean array; scalars are now handled the same
    way logexp_m1 handles them.
    """
    x = x + log(abs(a))
    yy = log(abs(expm1(x)))
    if isscalar(x):
        if exp(x) > 1e16:
            yy = x
        if exp(x) < 1e-16:
            yy = -exp(x)
    else:
        ind = exp(x) > 1e16
        yy[ind] = x[ind]
        ind = exp(x) < 1e-16
        yy[ind] = -exp(x[ind])
    return yy
def convmean(F, G, p=0.5, q=0.5, theta=1.0):
    """Probabilistic weighted mean of f and g

    Builds the piecewise distribution of p*X + q*Y where X ~ F, Y ~ G and
    the dependence between X and Y is supplied by convmeanx (a Frank copula
    with parameter theta).  Weights are renormalized to sum to 1.

    Parameters
    ----------
    F, G : distribution objects providing get_piecewise_pdf()
    p, q : weights of F and G (renormalized by |p|+|q| if p+q != 1)
    theta : copula parameter forwarded to convmeanx

    Returns
    -------
    PiecewiseDistribution of the weighted mean.
    """
    f = F.get_piecewise_pdf()
    g = G.get_piecewise_pdf()
    # Fixed: `<>` is Python-2-only syntax; `!=` behaves identically and is
    # valid in both Python 2 and 3.
    if p + q != 1.0:
        p1 = abs(p) / (abs(p) + abs(q))
        q = abs(q) / (abs(p) + abs(q))
        p = p1
    if q == 0:
        return f
    # Candidate breakpoints of the result: all pairwise weighted sums of the
    # operands' breakpoints.
    bf = f.getBreaks()
    bg = g.getBreaks()
    b = add.outer(bf * p, bg * q)
    # Late binding: segList is assigned below before fun is ever called.
    fun = lambda x : convmeanx(F, G, segList, x, p, q, theta=theta)
    ub = epsunique(b)
    fg = PiecewiseDistribution([])
    op = lambda x, y : p * x + q * y
    if isinf(ub[0]):
        segList = _findSegList(f, g, ub[1] - 1, op)
        seg = MInfSegment(ub[1], fun)
        segint = seg.toInterpolatedSegment()
        fg.addSegment(segint)
        ub = ub[1:]
    if isinf(ub[-1]):
        segList = _findSegList(f, g, ub[-2] + 1, op)
        seg = PInfSegment(ub[-2], fun)
        segint = seg.toInterpolatedSegment()
        fg.addSegment(segint)
        ub = ub[0:-1]
    for i in range(len(ub) - 1) :
        segList = _findSegList(f, g, (ub[i] + ub[i + 1]) / 2, op)
        seg = Segment(ub[i], ub[i + 1], fun)
        segint = seg.toInterpolatedSegment()
        fg.addSegment(segint)
    # Discrete parts of distributions
    fg_discr = convdiracs(f, g, fun=lambda x, y : x * p + y * q)
    for seg in fg_discr.getDiracs():
        fg.addSegment(seg)
    return fg
def convmeanx(F, G, segList, xx, p=0.5, q=0.5, theta=2):
    """Probabilistic weighted mean of f and g, integral at points xx

    For each x in xx, integrates the joint density of (X, Y) along the line
    p*t + q*(x-t)/q ... i.e. over t with X=t/p, Y=(x-t)/q, restricted to the
    segment pairs in segList.  Dependence is given by a Frank copula.
    """
    if size(xx) == 1:
        xx = asfarray([xx])
    wyn = zeros_like(xx)
    #P = PiCopula()
    #P = GumbelCopula(theta)
    P = FrankCopula2d(theta)
    #P.corrcoef()
    #P = ClaytonCopula(theta)
    #fun = lambda t : P.fun(segi( t / p)/q, segj((x - t)/q)/q)
    #W = PiCopula()
    #fun = lambda t : P.ccdf(segi(t / p) / p / q, segj((x - t) / q) / p / q)
    # NOTE(review): Copula defines jpdf_ (trailing underscore), not jpdf --
    # this call would raise AttributeError unless jpdf is provided elsewhere;
    # confirm the intended method name.
    fun = lambda t : P.jpdf(F, G, (t / p), (x - t) / q) / p / q
    for j in range(len(xx)) :
        x = xx[j]
        I = 0
        err = 0
        for segi, segj in segList:
            if segi.isSegment() and segj.isSegment():
                # Intersect the integration range with both segments' supports.
                L = max(segi.a * p, (x - segj.b * q))
                U = min(segi.b * p, (x - segj.a * q))
                i, e = _segint(fun, L, U)
            #elif segi.isDirac() and segj.isSegment():
            # i = segi.f*segj((x-segi.a)/q)/q # TODO
            # e=0;
            #elif segi.isSegment() and segj.isDirac():
            # i = segj.f*segi((x-segj.a)/p)/p # TODO
            # e=0;
            #elif segi.isDirac() and segj.isDirac():
            # pass
            # #i = segi(x-segj.a)/p/q # TODO
            # #e=0;
            I += i
            err += e
        wyn[j] = I
    return wyn
if __name__ == "__main__":
    # Ad-hoc demo / manual test code for the copula classes.
    from pylab import *
    from nddistr import plot_2d_distr
    # # ========= ArchimedeanCopulas tests ============================
    # A = ArchimedeanSymbolicCopula(fi=lambda t, theta : 1 / theta * (t ** (-theta) - 1),
    # #fi_inv=lambda s, theta : (1+ theta*s) ** (-1/theta),
    # theta=1.0,
    # marginals=[BetaDistr(4, 4, sym="X"), BetaDistr(2, 4, sym="Y"), BetaDistr(5, 3, sym="Z")])
    # #BetaDistr(2, 3).summary()
    from pacal.depvars.nddistr import *
    c = ClaytonCopula(theta = 0.2, marginals=[UniformDistr(), UniformDistr()])
    c.plot()
    d = IJthOrderStatsNDDistr(UniformDistr(), 10, 1, 10)
    plot_2d_distr(d)
    show()
    # Deliberate ZeroDivisionError to stop the demo here; the code below is
    # effectively disabled.
    0/0
    marginals = [BetaDistr(5, 2, sym="X"), BetaDistr(3, 6, sym="Y")]
    C = FrankCopula(10, marginals)
    C.plot()
    plot_2d_distr(C)
    C_condition_y_05 = C.condition([1], 0.5)
    figure()
    C_condition_y_05.distr_pdf.plot()
    #print C_condition_y_05.distr_pdf.summary()
    show()
    0 / 0
| gpl-3.0 |
dsm054/pandas | pandas/core/strings.py | 1 | 100158 | # -*- coding: utf-8 -*-
import numpy as np
from pandas.compat import zip
from pandas.core.dtypes.generic import ABCSeries, ABCIndex
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.common import (
ensure_object,
is_bool_dtype,
is_categorical_dtype,
is_object_dtype,
is_string_like,
is_list_like,
is_scalar,
is_integer,
is_re)
import pandas.core.common as com
from pandas.core.algorithms import take_1d
import pandas.compat as compat
from pandas.core.base import NoNewAttributesMixin
from pandas.util._decorators import Appender
import re
import pandas._libs.lib as lib
import pandas._libs.ops as libops
import warnings
import textwrap
import codecs
# Encodings for which CPython's str.encode has fast-path C implementations;
# bytes.decode additionally has optimized handling for UTF-16/32.
_cpython_optimized_encoders = (
    "utf-8", "utf8", "latin-1", "latin1", "iso-8859-1", "mbcs", "ascii"
)
_cpython_optimized_decoders = _cpython_optimized_encoders + (
    "utf-16", "utf-32"
)

# Shared docstring fragments, filled in by the string-method definitions below.
_shared_docs = dict()
def cat_core(list_of_columns, sep):
    """
    Auxiliary function for :meth:`str.cat`

    Parameters
    ----------
    list_of_columns : list of numpy arrays
        List of arrays to be concatenated with sep;
        these arrays may not contain NaNs!
    sep : string
        The separator string for concatenating the columns

    Returns
    -------
    nd.array
        The concatenation of list_of_columns with sep
    """
    # Interleave the separator between consecutive columns, then let numpy's
    # elementwise string addition fold the whole list together in one pass.
    n_cols = len(list_of_columns)
    joined = [sep] * (2 * n_cols - 1)
    joined[::2] = list_of_columns
    return np.sum(joined, axis=0)
def _na_map(f, arr, na_result=np.nan, dtype=object):
    """Apply `f` elementwise over `arr`, masking NA entries and substituting
    `na_result` for them (thin wrapper over _map with na_mask=True)."""
    # should really _check_ for NA
    return _map(f, arr, na_mask=True, na_value=na_result, dtype=dtype)
def _map(f, arr, na_mask=False, na_value=np.nan, dtype=object):
    """Apply callable `f` over the elements of `arr`.

    With na_mask=True, NA elements are skipped and filled with `na_value`;
    if `f` raises TypeError/AttributeError for some element (other than a
    wrong-arity call), the whole map is retried with a wrapper that maps
    failing elements to `na_value`.
    """
    if not len(arr):
        # Empty input: return an empty array of the requested dtype.
        return np.ndarray(0, dtype=dtype)
    if isinstance(arr, ABCSeries):
        arr = arr.values
    if not isinstance(arr, np.ndarray):
        arr = np.asarray(arr, dtype=object)
    if na_mask:
        mask = isna(arr)
        try:
            # Only attempt dtype inference when at least one non-NA exists.
            convert = not all(mask)
            result = lib.map_infer_mask(arr, f, mask.view(np.uint8), convert)
        except (TypeError, AttributeError) as e:
            # Reraise the exception if callable `f` got wrong number of args.
            # The user may want to be warned by this, instead of getting NaN
            if compat.PY2:
                p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
            else:
                p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
                         r'(?(3)required )positional arguments?')
            if len(e.args) >= 1 and re.search(p_err, e.args[0]):
                raise e

            def g(x):
                # Per-element fallback: failures become na_value.
                try:
                    return f(x)
                except (TypeError, AttributeError):
                    return na_value

            return _map(g, arr, dtype=dtype)
        if na_value is not np.nan:
            np.putmask(result, mask, na_value)
        if result.dtype == object:
            result = lib.maybe_convert_objects(result)
        return result
    else:
        return lib.map_infer(arr, f)
def str_count(arr, pat, flags=0):
    """
    Count occurrences of pattern in each string of the Series/Index.

    Parameters
    ----------
    pat : str
        Valid regular expression.
    flags : int, default 0, meaning no flags
        Flags for the `re` module, e.g. ``re.IGNORECASE``.

    Returns
    -------
    counts : Series or Index
        Same type as the calling object containing the integer counts.

    Notes
    -----
    Some characters need to be escaped when passing in `pat`, e.g. ``'$'``
    has a special meaning in regex and must be escaped to match literally.

    See Also
    --------
    re : Standard library module for regular expressions.
    str.count : Standard library version, without regular expression support.

    Examples
    --------
    >>> s = pd.Series(['A', 'B', 'Aaba', 'Baca', np.nan, 'CABA', 'cat'])
    >>> s.str.count('a')
    0    0.0
    1    0.0
    2    2.0
    3    2.0
    4    NaN
    5    0.0
    6    1.0
    dtype: float64
    """
    compiled = re.compile(pat, flags=flags)

    def counter(x):
        return len(compiled.findall(x))

    return _na_map(counter, arr, dtype=int)
def str_contains(arr, pat, case=True, flags=0, na=np.nan, regex=True):
    """
    Test if pattern or regex is contained within a string of a Series or
    Index.

    Parameters
    ----------
    pat : str
        Character sequence or regular expression.
    case : bool, default True
        If True, case sensitive.
    flags : int, default 0 (no flags)
        Flags to pass through to the re module, e.g. re.IGNORECASE.
    na : default NaN
        Fill value for missing values.
    regex : bool, default True
        If True, assumes the pat is a regular expression.
        If False, treats the pat as a literal string.

    Returns
    -------
    Series or Index of boolean values
        A Series or Index of boolean values indicating whether the
        given pattern is contained within the string of each element
        of the Series or Index.

    See Also
    --------
    match : analogous, but stricter, relying on re.match instead of re.search
    Series.str.startswith : Test if the start of each string element matches a
        pattern.
    Series.str.endswith : Same as startswith, but tests the end of string.

    Examples
    --------
    >>> s1 = pd.Series(['Mouse', 'dog', 'house and parrot', '23', np.NaN])
    >>> s1.str.contains('og', regex=False)
    0    False
    1     True
    2    False
    3    False
    4      NaN
    dtype: object
    """
    if not regex:
        # Literal substring test; for case-insensitive matching, upper-case
        # both the pattern and (NA-aware) the values first.
        if case:
            return _na_map(lambda x: pat in x, arr, na, dtype=bool)
        upper_pat = pat.upper()
        uppered = _na_map(lambda x: x.upper(), arr)
        return _na_map(lambda x: upper_pat in x, uppered, na, dtype=bool)

    if not case:
        flags |= re.IGNORECASE
    compiled = re.compile(pat, flags=flags)
    if compiled.groups > 0:
        warnings.warn("This pattern has match groups. To actually get the"
                      " groups, use str.extract.", UserWarning,
                      stacklevel=3)
    return _na_map(lambda x: bool(compiled.search(x)), arr, na, dtype=bool)
def str_startswith(arr, pat, na=np.nan):
    """
    Test if the start of each string element matches a pattern.

    Equivalent to :meth:`str.startswith`.

    Parameters
    ----------
    pat : str
        Character sequence. Regular expressions are not accepted.
    na : object, default NaN
        Object shown if element tested is not a string.

    Returns
    -------
    Series or Index of bool
        A Series of booleans indicating whether the given pattern matches
        the start of each string element.

    See Also
    --------
    str.startswith : Python standard library string method.
    Series.str.endswith : Same as startswith, but tests the end of string.
    Series.str.contains : Tests if string element contains a pattern.

    Examples
    --------
    >>> s = pd.Series(['bat', 'Bear', 'cat', np.nan])
    >>> s.str.startswith('b')
    0     True
    1    False
    2    False
    3      NaN
    dtype: object
    """
    return _na_map(lambda x: x.startswith(pat), arr, na, dtype=bool)
def str_endswith(arr, pat, na=np.nan):
    """
    Test if the end of each string element matches a pattern.

    Equivalent to :meth:`str.endswith`.

    Parameters
    ----------
    pat : str
        Character sequence. Regular expressions are not accepted.
    na : object, default NaN
        Object shown if element tested is not a string.

    Returns
    -------
    Series or Index of bool
        A Series of booleans indicating whether the given pattern matches
        the end of each string element.

    See Also
    --------
    str.endswith : Python standard library string method.
    Series.str.startswith : Same as endswith, but tests the start of string.
    Series.str.contains : Tests if string element contains a pattern.

    Examples
    --------
    >>> s = pd.Series(['bat', 'bear', 'caT', np.nan])
    >>> s.str.endswith('t')
    0     True
    1    False
    2    False
    3      NaN
    dtype: object
    """
    return _na_map(lambda x: x.endswith(pat), arr, na, dtype=bool)
def str_replace(arr, pat, repl, n=-1, case=None, flags=0, regex=True):
    r"""
    Replace occurrences of pattern/regex in the Series/Index with
    some other string. Equivalent to :meth:`str.replace` or
    :func:`re.sub`.

    Parameters
    ----------
    pat : string or compiled regex
        String can be a character sequence or regular expression.
    repl : string or callable
        Replacement string or a callable. The callable is passed the regex
        match object and must return a replacement string to be used.
        See :func:`re.sub`.
    n : int, default -1 (all)
        Number of replacements to make from start
    case : boolean, default None
        - If True, case sensitive (the default if `pat` is a string)
        - Set to False for case insensitive
        - Cannot be set if `pat` is a compiled regex
    flags : int, default 0 (no flags)
        - re module flags, e.g. re.IGNORECASE
        - Cannot be set if `pat` is a compiled regex
    regex : boolean, default True
        - If True, assumes the passed-in pattern is a regular expression.
        - If False, treats the pattern as a literal string
        - Cannot be set to False if `pat` is a compiled regex or `repl` is
          a callable.

    Returns
    -------
    Series or Index of object
        A copy of the object with all matching occurrences of `pat` replaced
        by `repl`.

    Raises
    ------
    ValueError
        * if `regex` is False and `repl` is a callable or `pat` is a compiled
          regex
        * if `pat` is a compiled regex and `case` or `flags` is set

    Notes
    -----
    When `pat` is a compiled regex, all flags should be included in the
    compiled regex. Use of `case`, `flags`, or `regex=False` with a compiled
    regex will raise an error.

    Examples
    --------
    >>> pd.Series(['foo', 'fuz', np.nan]).str.replace('f.', 'ba', regex=True)
    0    bao
    1    baz
    2    NaN
    dtype: object
    """
    # Check whether repl is valid (GH 13438, GH 15055)
    if not (is_string_like(repl) or callable(repl)):
        raise TypeError("repl must be a string or callable")

    is_compiled_re = is_re(pat)

    if not regex:
        # Literal-string mode: neither a compiled regex pattern nor a
        # callable replacement makes sense here.
        if is_compiled_re:
            raise ValueError("Cannot use a compiled regex as replacement "
                             "pattern with regex=False")
        if callable(repl):
            raise ValueError("Cannot use a callable replacement when "
                             "regex=False")
        return _na_map(lambda x: x.replace(pat, repl, n), arr)

    if is_compiled_re:
        # All matching options must live inside the compiled pattern.
        if (case is not None) or (flags != 0):
            raise ValueError("case and flags cannot be set"
                             " when pat is a compiled regex")
    else:
        # Plain string pattern: case-sensitivity defaults to True and is
        # folded into the regex flags.
        if case is None:
            case = True
        if case is False:
            flags |= re.IGNORECASE

    if is_compiled_re or len(pat) > 1 or flags or callable(repl):
        # re.sub interprets a negative count differently from str.replace,
        # so normalize "replace all" to 0 here.
        n = n if n >= 0 else 0
        compiled = re.compile(pat, flags=flags)
        replacer = lambda x: compiled.sub(repl=repl, string=x, count=n)
    else:
        # Fast path: a single-character pattern with no flags is replaced
        # literally via str.replace.
        replacer = lambda x: x.replace(pat, repl, n)
    return _na_map(replacer, arr)
def str_repeat(arr, repeats):
    """
    Duplicate each string in the Series or Index.
    Parameters
    ----------
    repeats : int or sequence of int
        Same value for all (int) or different value per (sequence).
    Returns
    -------
    Series or Index of object
        Series or Index of repeated string objects specified by
        input parameter repeats.
    Examples
    --------
    >>> s = pd.Series(['a', 'b', 'c'])
    >>> s
    0    a
    1    b
    2    c
    Single int repeats string in Series
    >>> s.str.repeat(repeats=2)
    0    aa
    1    bb
    2    cc
    Sequence of int repeats corresponding string in Series
    >>> s.str.repeat(repeats=[1, 2, 3])
    0    a
    1    bb
    2    ccc
    """
    if is_scalar(repeats):
        # Same repeat count for every element: call the string type's
        # __mul__ directly — bytes first, falling back to text on
        # TypeError (Py2/Py3 compatibility).
        def rep(x):
            try:
                return compat.binary_type.__mul__(x, repeats)
            except TypeError:
                return compat.text_type.__mul__(x, repeats)
        return _na_map(rep, arr)
    else:
        # Per-element repeat counts: pair each value with its count via
        # libops.vec_binop over two object arrays.
        def rep(x, r):
            try:
                return compat.binary_type.__mul__(x, r)
            except TypeError:
                return compat.text_type.__mul__(x, r)
        repeats = np.asarray(repeats, dtype=object)
        result = libops.vec_binop(com.values_from_object(arr), repeats, rep)
        return result
def str_match(arr, pat, case=True, flags=0, na=np.nan):
    """
    Determine if each string matches a regular expression.

    Parameters
    ----------
    pat : string
        Character sequence or regular expression
    case : boolean, default True
        If True, case sensitive
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE
    na : default NaN, fill value for missing values

    Returns
    -------
    Series/array of boolean values

    See Also
    --------
    contains : analogous, but less strict, relying on re.search instead of
        re.match
    extract : extract matched groups
    """
    if not case:
        flags |= re.IGNORECASE
    compiled = re.compile(pat, flags=flags)
    return _na_map(lambda x: bool(compiled.match(x)), arr, na, dtype=bool)
def _get_single_group_name(rx):
try:
return list(rx.groupindex.keys()).pop()
except IndexError:
return None
def _groups_or_na_fun(regex):
    """Used in both extract_noexpand and extract_frame"""
    if regex.groups == 0:
        raise ValueError("pattern contains no capture groups")
    empty_row = [np.nan] * regex.groups

    def f(x):
        # Non-strings (e.g. NaN) and non-matches both yield an all-NA row.
        if not isinstance(x, compat.string_types):
            return empty_row
        m = regex.search(x)
        if m is None:
            return empty_row
        return [np.nan if item is None else item for item in m.groups()]

    return f
def _str_extract_noexpand(arr, pat, flags=0):
    """
    Find groups in each string in the Series using passed regular
    expression. This function is called from
    str_extract(expand=False), and can return Series, DataFrame, or
    Index.
    """
    from pandas import DataFrame, Index

    regex = re.compile(pat, flags=flags)
    groups_or_na = _groups_or_na_fun(regex)

    if regex.groups == 1:
        # Single group: a flat object array, named after the group if any.
        values = np.array([groups_or_na(val)[0] for val in arr], dtype=object)
        return values, _get_single_group_name(regex)

    # Multiple groups require a DataFrame result, which an Index cannot hold.
    if isinstance(arr, Index):
        raise ValueError("only one regex group is supported with Index")
    names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
    columns = [names.get(1 + i, i) for i in range(regex.groups)]
    if arr.empty:
        result = DataFrame(columns=columns, dtype=object)
    else:
        result = DataFrame([groups_or_na(val) for val in arr],
                           columns=columns,
                           index=arr.index,
                           dtype=object)
    return result, None
def _str_extract_frame(arr, pat, flags=0):
    """
    For each subject string in the Series, extract groups from the
    first match of regular expression pat. This function is called from
    str_extract(expand=True), and always returns a DataFrame.
    """
    from pandas import DataFrame

    regex = re.compile(pat, flags=flags)
    groups_or_na = _groups_or_na_fun(regex)
    # Column labels: group name where provided, 0-based position otherwise.
    names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
    columns = [names.get(1 + i, i) for i in range(regex.groups)]

    if len(arr) == 0:
        return DataFrame(columns=columns, dtype=object)
    # Plain sequences (no .index) get a default RangeIndex.
    result_index = getattr(arr, 'index', None)
    return DataFrame([groups_or_na(val) for val in arr],
                     columns=columns,
                     index=result_index,
                     dtype=object)
def str_extract(arr, pat, flags=0, expand=True):
    r"""
    Extract capture groups in the regex `pat` as columns in a DataFrame.

    For each subject string in the Series, extract groups from the
    first match of regular expression `pat`.

    Parameters
    ----------
    pat : string
        Regular expression pattern with capturing groups.
    flags : int, default 0 (no flags)
        Flags from the ``re`` module, e.g. ``re.IGNORECASE``, that
        modify regular expression matching for things like case,
        spaces, etc. For more details, see :mod:`re`.
    expand : bool, default True
        If True, return DataFrame with one column per capture group.
        If False, return a Series/Index if there is one capture group
        or DataFrame if there are multiple capture groups.

    Returns
    -------
    DataFrame or Series or Index
        A DataFrame with one row for each subject string, and one
        column for each group. Any capture group names in regular
        expression pat will be used for column names; otherwise
        capture group numbers will be used. The dtype of each result
        column is always object, even when no match is found. If
        ``expand=False`` and pat has only one capture group, then
        return a Series (if subject is a Series) or Index (if subject
        is an Index).

    See Also
    --------
    extractall : returns all matches (not just the first match)

    Examples
    --------
    >>> s = pd.Series(['a1', 'b2', 'c3'])
    >>> s.str.extract(r'(?P<letter>[ab])(?P<digit>\d)')
      letter digit
    0      a     1
    1      b     2
    2    NaN   NaN
    """
    if not isinstance(expand, bool):
        raise ValueError("expand must be True or False")
    if expand:
        return _str_extract_frame(arr._orig, pat, flags=flags)
    result, name = _str_extract_noexpand(arr._parent, pat, flags=flags)
    return arr._wrap_result(result, name=name, expand=expand)
def str_extractall(arr, pat, flags=0):
    r"""
    For each subject string in the Series, extract groups from all
    matches of regular expression pat. When each subject string in the
    Series has exactly one match, extractall(pat).xs(0, level='match')
    is the same as extract(pat).

    Parameters
    ----------
    pat : str
        Regular expression pattern with capturing groups.
    flags : int, default 0 (no flags)
        A ``re`` module flag, for example ``re.IGNORECASE``. These allow
        to modify regular expression matching for things like case, spaces,
        etc. Multiple flags can be combined with the bitwise OR operator,
        for example ``re.IGNORECASE | re.MULTILINE``.

    Returns
    -------
    DataFrame
        A ``DataFrame`` with one row for each match, and one column for each
        group. Its rows have a ``MultiIndex`` with first levels that come from
        the subject ``Series``. The last level is named 'match' and indexes
        the matches in each item of the ``Series``. Any capture group names
        in regular expression pat will be used for column names; otherwise
        capture group numbers will be used.

    See Also
    --------
    extract : returns first match only (not all matches)

    Examples
    --------
    >>> s = pd.Series(["a1a2", "b1", "c1"], index=["A", "B", "C"])
    >>> s.str.extractall(r"[ab](\d)")
             0
      match
    A 0      1
      1      2
    B 0      1
    """
    regex = re.compile(pat, flags=flags)
    # the regex must contain capture groups.
    if regex.groups == 0:
        raise ValueError("pattern contains no capture groups")

    if isinstance(arr, ABCIndex):
        arr = arr.to_series().reset_index(drop=True)

    names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
    columns = [names.get(1 + i, i) for i in range(regex.groups)]

    match_list = []
    index_list = []
    is_mi = arr.index.nlevels > 1

    for subject_key, subject in arr.iteritems():
        # Non-strings (NaN etc.) contribute no rows at all.
        if not isinstance(subject, compat.string_types):
            continue
        if not is_mi:
            # Normalize flat index keys to 1-tuples so concatenation with
            # the match number below is uniform.
            subject_key = (subject_key, )
        for match_i, match_tuple in enumerate(regex.findall(subject)):
            if isinstance(match_tuple, compat.string_types):
                # findall returns bare strings for single-group patterns.
                match_tuple = (match_tuple,)
            match_list.append([np.NaN if group == "" else group
                               for group in match_tuple])
            index_list.append(tuple(subject_key + (match_i, )))

    from pandas import MultiIndex
    index = MultiIndex.from_tuples(
        index_list, names=arr.index.names + ["match"])
    return arr._constructor_expanddim(match_list, index=index,
                                      columns=columns)
def str_get_dummies(arr, sep='|'):
    """
    Split each string in the Series by sep and return a frame of
    dummy/indicator variables.

    Parameters
    ----------
    sep : string, default "|"
        String to split on.

    Returns
    -------
    dummies : DataFrame

    Examples
    --------
    >>> pd.Series(['a|b', 'a', 'a|c']).str.get_dummies()
       a  b  c
    0  1  1  0
    1  1  0  0
    2  1  0  1

    See Also
    --------
    pandas.get_dummies
    """
    # Surround every value with sep so each tag appears as sep+tag+sep,
    # making the membership test below unambiguous.
    arr = arr.fillna('')
    try:
        arr = sep + arr + sep
    except TypeError:
        arr = sep + arr.astype(str) + sep

    tags = set()
    for ts in arr.str.split(sep):
        tags.update(ts)
    tags = sorted(tags - {""})

    dummies = np.empty((len(arr), len(tags)), dtype=np.int64)
    for col, tag in enumerate(tags):
        marker = sep + tag + sep
        dummies[:, col] = lib.map_infer(arr.values, lambda x: marker in x)
    return dummies, tags
def str_join(arr, sep):
    """
    Join lists contained as elements in the Series/Index with passed
    delimiter.

    If the elements of a Series are lists themselves, join the content of
    these lists using the delimiter passed to the function.
    This function is an equivalent to :meth:`str.join`.

    Parameters
    ----------
    sep : str
        Delimiter to use between list entries.

    Returns
    -------
    Series/Index: object
        The list entries concatenated by intervening occurrences of the
        delimiter.

    Raises
    -------
    AttributeError
        If the supplied Series contains neither strings nor lists.

    Notes
    -----
    If any of the list items is not a string object, the result of the join
    will be `NaN`.

    See Also
    --------
    str.join : Standard library version of this method.
    Series.str.split : Split strings around given separator/delimiter.

    Examples
    --------
    >>> s = pd.Series([['lion', 'elephant', 'zebra'],
    ...                ['cat', np.nan, 'dog']])
    >>> s.str.join('-')
    0    lion-elephant-zebra
    1                    NaN
    dtype: object
    """
    # sep.join itself is the element-wise callable; _na_map supplies the
    # NaN handling (non-string list items raise TypeError -> NaN).
    return _na_map(sep.join, arr)
def str_findall(arr, pat, flags=0):
    """
    Find all occurrences of pattern or regular expression in the
    Series/Index.

    Equivalent to applying :func:`re.findall` to all the elements in the
    Series/Index.

    Parameters
    ----------
    pat : string
        Pattern or regular expression.
    flags : int, default 0
        ``re`` module flags, e.g. `re.IGNORECASE` (default is 0, which means
        no flags).

    Returns
    -------
    Series/Index of lists of strings
        All non-overlapping matches of pattern or regular expression in each
        string of this Series/Index.

    See Also
    --------
    count : Count occurrences of pattern or regular expression in each string
        of the Series/Index.
    extractall : For each string in the Series, extract groups from all
        matches of regular expression and return a DataFrame with one row for
        each match and one column for each group.
    re.findall : The equivalent ``re`` function to all non-overlapping
        matches of pattern or regular expression in string, as a list of
        strings.

    Examples
    --------
    >>> s = pd.Series(['Lion', 'Monkey', 'Rabbit'])
    >>> s.str.findall('Monkey')
    0          []
    1    [Monkey]
    2          []
    dtype: object
    """
    compiled = re.compile(pat, flags=flags)
    return _na_map(compiled.findall, arr)
def str_find(arr, sub, start=0, end=None, side='left'):
    """
    Return indexes in each strings in the Series/Index where the
    substring is fully contained between [start:end]. Return -1 on failure.

    Parameters
    ----------
    sub : str
        Substring being searched
    start : int
        Left edge index
    end : int
        Right edge index
    side : {'left', 'right'}, default 'left'
        Specifies a starting side, equivalent to ``find`` or ``rfind``

    Returns
    -------
    found : Series/Index of integer values
    """
    if not isinstance(sub, compat.string_types):
        msg = 'expected a string object, not {0}'
        raise TypeError(msg.format(type(sub).__name__))

    if side == 'left':
        searcher = 'find'
    elif side == 'right':
        searcher = 'rfind'
    else:  # pragma: no cover
        raise ValueError('Invalid side')

    if end is None:
        def locate(x):
            return getattr(x, searcher)(sub, start)
    else:
        def locate(x):
            return getattr(x, searcher)(sub, start, end)

    return _na_map(locate, arr, dtype=int)
def str_index(arr, sub, start=0, end=None, side='left'):
    """
    Like str_find, but uses str.index/str.rindex, which raise ValueError
    when the substring is not found instead of returning -1.
    """
    if not isinstance(sub, compat.string_types):
        msg = 'expected a string object, not {0}'
        raise TypeError(msg.format(type(sub).__name__))

    if side == 'left':
        searcher = 'index'
    elif side == 'right':
        searcher = 'rindex'
    else:  # pragma: no cover
        raise ValueError('Invalid side')

    if end is None:
        def locate(x):
            return getattr(x, searcher)(sub, start)
    else:
        def locate(x):
            return getattr(x, searcher)(sub, start, end)

    return _na_map(locate, arr, dtype=int)
def str_pad(arr, width, side='left', fillchar=' '):
    """
    Pad strings in the Series/Index up to width.

    Parameters
    ----------
    width : int
        Minimum width of resulting string; additional characters will be
        filled with character defined in `fillchar`.
    side : {'left', 'right', 'both'}, default 'left'
        Side from which to fill resulting string.
    fillchar : str, default ' '
        Additional character for filling, default is whitespace.

    Returns
    -------
    Series or Index of object
        Returns Series or Index with minimum number of char in object.

    See Also
    --------
    Series.str.rjust: Fills the left side of strings with an arbitrary
        character. Equivalent to ``Series.str.pad(side='left')``.
    Series.str.ljust: Fills the right side of strings with an arbitrary
        character. Equivalent to ``Series.str.pad(side='right')``.
    Series.str.center: Fills boths sides of strings with an arbitrary
        character. Equivalent to ``Series.str.pad(side='both')``.
    Series.str.zfill: Pad strings in the Series/Index by prepending '0'
        character. Equivalent to ``Series.str.pad(side='left',
        fillchar='0')``.

    Examples
    --------
    >>> s = pd.Series(["caribou", "tiger"])
    >>> s.str.pad(width=10, side='right', fillchar='-')
    0    caribou---
    1    tiger-----
    dtype: object
    """
    if not isinstance(fillchar, compat.string_types):
        msg = 'fillchar must be a character, not {0}'
        raise TypeError(msg.format(type(fillchar).__name__))
    if len(fillchar) != 1:
        raise TypeError('fillchar must be a character, not str')
    if not is_integer(width):
        msg = 'width must be of integer type, not {0}'
        raise TypeError(msg.format(type(width).__name__))

    if side == 'left':
        padder = lambda x: x.rjust(width, fillchar)
    elif side == 'right':
        padder = lambda x: x.ljust(width, fillchar)
    elif side == 'both':
        padder = lambda x: x.center(width, fillchar)
    else:  # pragma: no cover
        raise ValueError('Invalid side')

    return _na_map(padder, arr)
def str_split(arr, pat=None, n=None):
    """Split each string around ``pat`` (whitespace when None)."""
    if pat is not None and len(pat) > 1:
        # Multi-character patterns are treated as regular expressions;
        # re.split uses maxsplit=0 to mean "no limit".
        if n is None or n == -1:
            n = 0
        compiled = re.compile(pat)
        splitter = lambda x: compiled.split(x, maxsplit=n)
    else:
        # None (whitespace) or a single character uses plain str.split,
        # where -1 means "no limit".
        if n is None or n == 0:
            n = -1
        splitter = lambda x: x.split(pat, n)
    return _na_map(splitter, arr)
def str_rsplit(arr, pat=None, n=None):
    """Split each string around ``pat`` starting from the end."""
    if n is None or n == 0:
        n = -1
    return _na_map(lambda x: x.rsplit(pat, n), arr)
def str_slice(arr, start=None, stop=None, step=None):
    """
    Slice substrings from each element in the Series or Index.

    Parameters
    ----------
    start : int, optional
        Start position for slice operation.
    stop : int, optional
        Stop position for slice operation.
    step : int, optional
        Step size for slice operation.

    Returns
    -------
    Series or Index of object
        Series or Index from sliced substring from original string object.

    See Also
    --------
    Series.str.slice_replace : Replace a slice with a string.
    Series.str.get : Return element at position.
        Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i`
        being the position.

    Examples
    --------
    >>> s = pd.Series(["koala", "fox", "chameleon"])
    >>> s.str.slice(start=0, stop=5, step=3)
    0    kl
    1     f
    2    cm
    dtype: object
    """
    slicer = slice(start, stop, step)
    return _na_map(lambda x: x[slicer], arr)
def str_slice_replace(arr, start=None, stop=None, repl=None):
    """
    Replace a positional slice of a string with another value.

    Parameters
    ----------
    start : int, optional
        Left index position to use for the slice. If not specified (None),
        the slice is unbounded on the left, i.e. slice from the start
        of the string.
    stop : int, optional
        Right index position to use for the slice. If not specified (None),
        the slice is unbounded on the right, i.e. slice until the
        end of the string.
    repl : str, optional
        String for replacement. If not specified (None), the sliced region
        is replaced with an empty string.

    Returns
    -------
    replaced : Series or Index
        Same type as the original object.

    See Also
    --------
    Series.str.slice : Just slicing without replacement.

    Examples
    --------
    >>> s = pd.Series(['a', 'ab', 'abc', 'abdc', 'abcde'])
    >>> s.str.slice_replace(start=1, stop=3, repl='X')
    0       aX
    1       aX
    2       aX
    3      aXc
    4    aXde
    dtype: object
    """
    if repl is None:
        repl = ''

    def splice(x):
        # When the selected slice is empty (e.g. start beyond the string
        # end), insert `repl` at `start` and keep the tail from `start`,
        # rather than dropping the characters up to `stop`.
        if x[start:stop] == '':
            local_stop = start
        else:
            local_stop = stop
        pieces = []
        if start is not None:
            pieces.append(x[:start])
        pieces.append(repl)
        if stop is not None:
            pieces.append(x[local_stop:])
        return ''.join(pieces)

    return _na_map(splice, arr)
def str_strip(arr, to_strip=None, side='both'):
    """
    Strip whitespace (including newlines) from each string in the
    Series/Index.

    Parameters
    ----------
    to_strip : str or unicode
    side : {'left', 'right', 'both'}, default 'both'

    Returns
    -------
    stripped : Series/Index of objects
    """
    strippers = {
        'both': lambda x: x.strip(to_strip),
        'left': lambda x: x.lstrip(to_strip),
        'right': lambda x: x.rstrip(to_strip),
    }
    if side not in strippers:  # pragma: no cover
        raise ValueError('Invalid side')
    return _na_map(strippers[side], arr)
def str_wrap(arr, width, **kwargs):
    r"""
    Wrap long strings in the Series/Index to be formatted in
    paragraphs with length less than a given width.

    This method has the same keyword parameters and defaults as
    :class:`textwrap.TextWrapper`.

    Parameters
    ----------
    width : int
        Maximum line-width.
    expand_tabs : bool, optional
        If true, tab characters will be expanded to spaces (default: True).
    replace_whitespace : bool, optional
        If true, each whitespace character (as defined by string.whitespace)
        remaining after tab expansion will be replaced by a single space
        (default: True).
    drop_whitespace : bool, optional
        If true, whitespace that, after wrapping, happens to end up at the
        beginning or end of a line is dropped (default: True).
    break_long_words : bool, optional
        If true, then words longer than width will be broken in order to
        ensure that no lines are longer than width. If it is false, long
        words will not be broken, and some lines may be longer than width.
        (default: True)
    break_on_hyphens : bool, optional
        If true, wrapping will occur preferably on whitespace and right
        after hyphens in compound words, as it is customary in English.
        If false, only whitespaces will be considered as potentially good
        places for line breaks, but you need to set break_long_words to
        false if you want truly insecable words. (default: True)

    Returns
    -------
    wrapped : Series/Index of objects

    Notes
    -----
    Internally, this method uses a :class:`textwrap.TextWrapper` instance
    with default settings. To achieve behavior matching R's stringr
    library str_wrap function, use the arguments:

    - expand_tabs = False
    - replace_whitespace = True
    - drop_whitespace = True
    - break_long_words = False
    - break_on_hyphens = False

    Examples
    --------
    >>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])
    >>> s.str.wrap(12)
    0             line to be\nwrapped
    1    another line\nto be\nwrapped
    """
    # Build a single TextWrapper up front and reuse it for every element.
    kwargs['width'] = width
    wrapper = textwrap.TextWrapper(**kwargs)

    def _wrap_one(s):
        return '\n'.join(wrapper.wrap(s))

    return _na_map(_wrap_one, arr)
def str_translate(arr, table, deletechars=None):
    """
    Map all characters in the string through the given mapping table.

    Equivalent to standard :meth:`str.translate`. Note that the optional
    argument deletechars is only valid if you are using python 2. For
    python 3, character deletion should be specified via the table
    argument.

    Parameters
    ----------
    table : dict (python 3), str or None (python 2)
        In python 3, table is a mapping of Unicode ordinals to Unicode
        ordinals, strings, or None. Unmapped characters are left
        untouched; characters mapped to None are deleted.
        :meth:`str.maketrans` is a helper for building such tables.
        In python 2, table is either a string of length 256 or None; a
        None table means only the deletechars removal is applied.
        :func:`string.maketrans` is the python 2 helper.
    deletechars : str, optional (python 2)
        A string of characters to delete. This argument is only valid
        in python 2.

    Returns
    -------
    translated : Series/Index of objects
    """
    if deletechars is None:
        return _na_map(lambda x: x.translate(table), arr)
    # deletechars only exists in the python 2 signature of str.translate
    if compat.PY3:
        raise ValueError("deletechars is not a valid argument for "
                         "str.translate in python 3. You should simply "
                         "specify character deletions in the table "
                         "argument")
    return _na_map(lambda x: x.translate(table, deletechars), arr)
def str_get(arr, i):
    """
    Extract element from each component at specified position.

    Extract element from lists, tuples, or strings in each element in the
    Series/Index; dicts are looked up by key.

    Parameters
    ----------
    i : int
        Position (or, for dicts, key) of element to extract.

    Returns
    -------
    items : Series/Index of objects
        ``NaN`` where the position is out of bounds; for dicts, the value
        for key ``i`` (``None`` when the key is missing).

    Examples
    --------
    >>> s = pd.Series(["String",
                       (1, 2, 3),
                       ["a", "b", "c"],
                       123, -456,
                       {1:"Hello", "2":"World"}])
    >>> s.str.get(1)
    0        t
    1        2
    2        b
    3      NaN
    4      NaN
    5    Hello
    dtype: object

    >>> s.str.get(-1)
    0      g
    1      3
    2      c
    3    NaN
    4    NaN
    5    NaN
    dtype: object
    """
    def _get_one(obj):
        # dicts are looked up by key rather than positionally
        if isinstance(obj, dict):
            return obj.get(i)
        # honour negative indices, but never wrap past the start
        if -len(obj) <= i < len(obj):
            return obj[i]
        return np.nan

    return _na_map(_get_one, arr)
def str_decode(arr, encoding, errors="strict"):
    """
    Decode character string in the Series/Index using indicated encoding.

    Equivalent to :meth:`str.decode` in python2 and :meth:`bytes.decode`
    in python3.

    Parameters
    ----------
    encoding : str
        Name of the codec to decode with.
    errors : str, optional
        Error handling scheme, as for :meth:`bytes.decode`.

    Returns
    -------
    decoded : Series/Index of objects
    """
    if encoding in _cpython_optimized_decoders:
        # the bound method hits CPython's fast path for these codecs
        def _decode_one(x):
            return x.decode(encoding, errors)
    else:
        decoder = codecs.getdecoder(encoding)

        def _decode_one(x):
            # codec functions return (decoded, consumed); keep the payload
            return decoder(x, errors)[0]

    return _na_map(_decode_one, arr)
def str_encode(arr, encoding, errors="strict"):
    """
    Encode character string in the Series/Index using indicated encoding.

    Equivalent to :meth:`str.encode`.

    Parameters
    ----------
    encoding : str
        Name of the codec to encode with.
    errors : str, optional
        Error handling scheme, as for :meth:`str.encode`.

    Returns
    -------
    encoded : Series/Index of objects
    """
    if encoding in _cpython_optimized_encoders:
        # the bound method hits CPython's fast path for these codecs
        def _encode_one(x):
            return x.encode(encoding, errors)
    else:
        encoder = codecs.getencoder(encoding)

        def _encode_one(x):
            # codec functions return (encoded, consumed); keep the payload
            return encoder(x, errors)[0]

    return _na_map(_encode_one, arr)
def _noarg_wrapper(f, docstring=None, **kargs):
def wrapper(self):
result = _na_map(f, self._parent, **kargs)
return self._wrap_result(result)
wrapper.__name__ = f.__name__
if docstring is not None:
wrapper.__doc__ = docstring
else:
raise ValueError('Provide docstring')
return wrapper
def _pat_wrapper(f, flags=False, na=False, **kwargs):
def wrapper1(self, pat):
result = f(self._parent, pat)
return self._wrap_result(result)
def wrapper2(self, pat, flags=0, **kwargs):
result = f(self._parent, pat, flags=flags, **kwargs)
return self._wrap_result(result)
def wrapper3(self, pat, na=np.nan):
result = f(self._parent, pat, na=na)
return self._wrap_result(result)
wrapper = wrapper3 if na else wrapper2 if flags else wrapper1
wrapper.__name__ = f.__name__
if f.__doc__:
wrapper.__doc__ = f.__doc__
return wrapper
def copy(source):
    """Copy the docstring from *source* onto the decorated function."""
    def _apply(target):
        doc = source.__doc__
        if doc:
            target.__doc__ = doc
        return target
    return _apply
class StringMethods(NoNewAttributesMixin):
"""
Vectorized string functions for Series and Index. NAs stay NA unless
handled otherwise by a particular method. Patterned after Python's string
methods, with some inspiration from R's stringr package.
Examples
--------
>>> s.str.split('_')
>>> s.str.replace('_', '')
"""
def __init__(self, data):
    """Build the accessor for *data* (a Series or Index)."""
    # Reject inputs that .str cannot handle before touching them.
    self._validate(data)
    self._is_categorical = is_categorical_dtype(data)
    # .values.categories works for both Series/Index
    self._parent = data.values.categories if self._is_categorical else data
    # save orig to blow up categoricals to the right type
    self._orig = data
    # NOTE: must stay last -- freezes the instance against new attributes.
    self._freeze()
@staticmethod
def _validate(data):
    """
    Raise ``AttributeError`` if *data* cannot back a ``.str`` accessor.

    Accepted inputs are object-dtype Series (or categorical Series whose
    categories are object dtype) and single-level Index objects whose
    inferred type is string-like.
    """
    from pandas.core.index import Index

    if (isinstance(data, ABCSeries) and
            not ((is_categorical_dtype(data.dtype) and
                  is_object_dtype(data.values.categories)) or
                 (is_object_dtype(data.dtype)))):
        # it's neither a string series not a categorical series with
        # strings inside the categories.
        # this really should exclude all series with any non-string values
        # (instead of test for object dtype), but that isn't practical for
        # performance reasons until we have a str dtype (GH 9343)
        raise AttributeError("Can only use .str accessor with string "
                             "values, which use np.object_ dtype in "
                             "pandas")
    elif isinstance(data, Index):
        # can't use ABCIndex to exclude non-str
        # see src/inference.pyx which can contain string values
        allowed_types = ('string', 'unicode', 'mixed', 'mixed-integer')
        if is_categorical_dtype(data.dtype):
            inf_type = data.categories.inferred_type
        else:
            inf_type = data.inferred_type
        if inf_type not in allowed_types:
            message = ("Can only use .str accessor with string values "
                       "(i.e. inferred_type is 'string', 'unicode' or "
                       "'mixed')")
            raise AttributeError(message)
        # .str on a MultiIndex is ambiguous -- refuse it outright
        if data.nlevels > 1:
            message = ("Can only use .str accessor with Index, not "
                       "MultiIndex")
            raise AttributeError(message)
def __getitem__(self, key):
    """Support ``s.str[i]`` and ``s.str[a:b:c]`` element-wise indexing."""
    if not isinstance(key, slice):
        # scalar key: positional element extraction
        return self.get(key)
    return self.slice(start=key.start, stop=key.stop, step=key.step)
def __iter__(self):
    """Yield ``self.get(0)``, ``self.get(1)``, ... until all-NA."""
    i = 0
    while True:
        g = self.get(i)
        # stop once no element has a character at position i
        if not g.notna().any():
            break
        yield g
        i += 1
def _wrap_result(self, result, use_codes=True,
                 name=None, expand=None):
    """
    Box a raw result back into a pandas container matching the caller.

    Parameters
    ----------
    result : array-like
        Raw output of a string operation over ``self._parent``.
    use_codes : bool, default True
        For categorical callers, re-expand the per-category result to
        full length via the categorical codes. Pass False when the
        operation already ran on the full values.
    name : label or list of labels, optional
        Name (or column labels when expanding) for the wrapped result.
    expand : bool or None, optional
        Expand list-like elements into separate columns/levels; when
        None it is inferred from ``result.ndim``.

    Returns
    -------
    Series, Index, DataFrame, MultiIndex, or ``result`` unchanged when
    it does not look like an array (no ``ndim``/``dtype``).
    """
    from pandas.core.index import Index, MultiIndex

    # for category, we do the stuff on the categories, so blow it up
    # to the full series again
    # But for some operations, we have to do the stuff on the full values,
    # so make it possible to skip this step as the method already did this
    # before the transformation...
    if use_codes and self._is_categorical:
        result = take_1d(result, self._orig.cat.codes)

    if not hasattr(result, 'ndim') or not hasattr(result, 'dtype'):
        return result
    assert result.ndim < 3

    if expand is None:
        # infer from ndim if expand is not specified
        expand = False if result.ndim == 1 else True

    elif expand is True and not isinstance(self._orig, Index):
        # required when expand=True is explicitly specified
        # not needed when inferred
        def cons_row(x):
            if is_list_like(x):
                return x
            else:
                return [x]

        result = [cons_row(x) for x in result]
        if result:
            # propagate nan values to match longest sequence (GH 18450)
            max_len = max(len(x) for x in result)
            result = [x * max_len if len(x) == 0 or x[0] is np.nan
                      else x for x in result]

    if not isinstance(expand, bool):
        raise ValueError("expand must be True or False")

    if expand is False:
        # if expand is False, result should have the same name
        # as the original otherwise specified
        if name is None:
            name = getattr(result, 'name', None)
        if name is None:
            # do not use logical or, _orig may be a DataFrame
            # which has "name" column
            name = self._orig.name

    # Wait until we are sure result is a Series or Index before
    # checking attributes (GH 12180)
    if isinstance(self._orig, Index):
        # if result is a boolean np.array, return the np.array
        # instead of wrapping it into a boolean Index (GH 8875)
        if is_bool_dtype(result):
            return result

        if expand:
            result = list(result)
            out = MultiIndex.from_tuples(result, names=name)
            if out.nlevels == 1:
                # We had all tuples of length-one, which are
                # better represented as a regular Index.
                out = out.get_level_values(0)
            return out
        else:
            return Index(result, name=name)
    else:
        index = self._orig.index
        if expand:
            cons = self._orig._constructor_expanddim
            return cons(result, columns=name, index=index)
        else:
            # Must be a Series
            cons = self._orig._constructor
            return cons(result, name=name, index=index)
def _get_series_list(self, others, ignore_index=False):
    """
    Auxiliary function for :meth:`str.cat`. Turn potentially mixed input
    into a list of Series (elements without an index must match the length
    of the calling Series/Index).

    Parameters
    ----------
    others : Series, Index, DataFrame, np.ndarray, list-like or list-like
        of objects that are Series, Index or np.ndarray (1-dim)
    ignore_index : boolean, default False
        Determines whether to forcefully align others with index of caller

    Returns
    -------
    tuple : (others transformed into list of Series,
             boolean whether FutureWarning should be raised)
    """
    # Once str.cat defaults to alignment, this function can be simplified;
    # will not need `ignore_index` and the second boolean output anymore
    from pandas import Index, Series, DataFrame

    # self._orig is either Series or Index
    idx = self._orig if isinstance(self._orig, Index) else self._orig.index

    err_msg = ('others must be Series, Index, DataFrame, np.ndarrary or '
               'list-like (either containing only strings or containing '
               'only objects of type Series/Index/list-like/np.ndarray)')

    # Generally speaking, all objects without an index inherit the index
    # `idx` of the calling Series/Index - i.e. must have matching length.
    # Objects with an index (i.e. Series/Index/DataFrame) keep their own
    # index, *unless* ignore_index is set to True.
    if isinstance(others, Series):
        # warn flag signals that indexes differ (alignment would matter)
        warn = not others.index.equals(idx)
        # only reconstruct Series when absolutely necessary
        los = [Series(others.values, index=idx)
               if ignore_index and warn else others]
        return (los, warn)
    elif isinstance(others, Index):
        warn = not others.equals(idx)
        los = [Series(others.values,
                      index=(idx if ignore_index else others))]
        return (los, warn)
    elif isinstance(others, DataFrame):
        warn = not others.index.equals(idx)
        if ignore_index and warn:
            # without copy, this could change "others"
            # that was passed to str.cat
            others = others.copy()
            others.index = idx
        return ([others[x] for x in others], warn)
    elif isinstance(others, np.ndarray) and others.ndim == 2:
        # 2-d arrays are treated column-wise, like a DataFrame
        others = DataFrame(others, index=idx)
        return ([others[x] for x in others], False)
    elif is_list_like(others, allow_sets=False):
        others = list(others)  # ensure iterators do not get read twice etc

        # in case of list-like `others`, all elements must be
        # either one-dimensional list-likes or scalars
        if all(is_list_like(x, allow_sets=False) for x in others):
            los = []
            join_warn = False
            depr_warn = False
            # iterate through list and append list of series for each
            # element (which we check to be one-dimensional and non-nested)
            while others:
                nxt = others.pop(0)  # nxt is guaranteed list-like by above

                # GH 21950 - DeprecationWarning
                # only allowing Series/Index/np.ndarray[1-dim] will greatly
                # simply this function post-deprecation.
                if not (isinstance(nxt, (Series, Index)) or
                        (isinstance(nxt, np.ndarray) and nxt.ndim == 1)):
                    depr_warn = True

                if not isinstance(nxt, (DataFrame, Series,
                                        Index, np.ndarray)):
                    # safety for non-persistent list-likes (e.g. iterators)
                    # do not map indexed/typed objects; info needed below
                    nxt = list(nxt)

                # known types for which we can avoid deep inspection
                no_deep = ((isinstance(nxt, np.ndarray) and nxt.ndim == 1)
                           or isinstance(nxt, (Series, Index)))
                # nested list-likes are forbidden:
                # -> elements of nxt must not be list-like
                is_legal = ((no_deep and nxt.dtype == object)
                            or all(not is_list_like(x) for x in nxt))

                # DataFrame is false positive of is_legal
                # because "x in df" returns column names
                if not is_legal or isinstance(nxt, DataFrame):
                    raise TypeError(err_msg)

                # recurse: each legal element becomes one or more Series
                nxt, wnx = self._get_series_list(nxt,
                                                 ignore_index=ignore_index)
                los = los + nxt
                join_warn = join_warn or wnx

            if depr_warn:
                warnings.warn('list-likes other than Series, Index, or '
                              'np.ndarray WITHIN another list-like are '
                              'deprecated and will be removed in a future '
                              'version.', FutureWarning, stacklevel=3)
            return (los, join_warn)
        elif all(not is_list_like(x) for x in others):
            # flat list of scalars: treat as one column aligned to idx
            return ([Series(others, index=idx)], False)
    raise TypeError(err_msg)
def cat(self, others=None, sep=None, na_rep=None, join=None):
    """
    Concatenate strings in the Series/Index with given separator.
    If `others` is specified, this function concatenates the Series/Index
    and elements of `others` element-wise.
    If `others` is not passed, then all values in the Series/Index are
    concatenated into a single string with a given `sep`.
    Parameters
    ----------
    others : Series, Index, DataFrame, np.ndarrary or list-like
    Series, Index, DataFrame, np.ndarray (one- or two-dimensional) and
    other list-likes of strings must have the same length as the
    calling Series/Index, with the exception of indexed objects (i.e.
    Series/Index/DataFrame) if `join` is not None.
    If others is a list-like that contains a combination of Series,
    Index or np.ndarray (1-dim), then all elements will be unpacked and
    must satisfy the above criteria individually.
    If others is None, the method returns the concatenation of all
    strings in the calling Series/Index.
    sep : str, default ''
    The separator between the different elements/columns. By default
    the empty string `''` is used.
    na_rep : str or None, default None
    Representation that is inserted for all missing values:
    - If `na_rep` is None, and `others` is None, missing values in the
    Series/Index are omitted from the result.
    - If `na_rep` is None, and `others` is not None, a row containing a
    missing value in any of the columns (before concatenation) will
    have a missing value in the result.
    join : {'left', 'right', 'outer', 'inner'}, default None
    Determines the join-style between the calling Series/Index and any
    Series/Index/DataFrame in `others` (objects without an index need
    to match the length of the calling Series/Index). If None,
    alignment is disabled, but this option will be removed in a future
    version of pandas and replaced with a default of `'left'`. To
    disable alignment, use `.values` on any Series/Index/DataFrame in
    `others`.
    .. versionadded:: 0.23.0
    Returns
    -------
    concat : str or Series/Index of objects
    If `others` is None, `str` is returned, otherwise a `Series/Index`
    (same type as caller) of objects is returned.
    See Also
    --------
    split : Split each string in the Series/Index
    join : Join lists contained as elements in the Series/Index
    Examples
    --------
    When not passing `others`, all values are concatenated into a single
    string:
    >>> s = pd.Series(['a', 'b', np.nan, 'd'])
    >>> s.str.cat(sep=' ')
    'a b d'
    By default, NA values in the Series are ignored. Using `na_rep`, they
    can be given a representation:
    >>> s.str.cat(sep=' ', na_rep='?')
    'a b ? d'
    If `others` is specified, corresponding values are concatenated with
    the separator. Result will be a Series of strings.
    >>> s.str.cat(['A', 'B', 'C', 'D'], sep=',')
    0    a,A
    1    b,B
    2    NaN
    3    d,D
    dtype: object
    Missing values will remain missing in the result, but can again be
    represented using `na_rep`
    >>> s.str.cat(['A', 'B', 'C', 'D'], sep=',', na_rep='-')
    0    a,A
    1    b,B
    2    -,C
    3    d,D
    dtype: object
    If `sep` is not specified, the values are concatenated without
    separation.
    >>> s.str.cat(['A', 'B', 'C', 'D'], na_rep='-')
    0    aA
    1    bB
    2    -C
    3    dD
    dtype: object
    Series with different indexes can be aligned before concatenation. The
    `join`-keyword works as in other methods.
    >>> t = pd.Series(['d', 'a', 'e', 'c'], index=[3, 0, 4, 2])
    >>> s.str.cat(t, join='left', na_rep='-')
    0    aa
    1    b-
    2    -c
    3    dd
    dtype: object
    >>>
    >>> s.str.cat(t, join='outer', na_rep='-')
    0    aa
    1    b-
    2    -c
    3    dd
    4    -e
    dtype: object
    >>>
    >>> s.str.cat(t, join='inner', na_rep='-')
    0    aa
    2    -c
    3    dd
    dtype: object
    >>>
    >>> s.str.cat(t, join='right', na_rep='-')
    3    dd
    0    aa
    4    -e
    2    -c
    dtype: object
    For more examples, see :ref:`here <text.concatenate>`.
    """
    from pandas import Index, Series, concat

    # a bare string for `others` is almost always a mistaken `sep`
    if isinstance(others, compat.string_types):
        raise ValueError("Did you mean to supply a `sep` keyword?")
    if sep is None:
        sep = ''

    if isinstance(self._orig, Index):
        # treat the Index values as a Series indexed by themselves so the
        # alignment machinery below works uniformly
        data = Series(self._orig, index=self._orig)
    else:  # Series
        data = self._orig

    # concatenate Series/Index with itself if no "others"
    if others is None:
        data = ensure_object(data)
        na_mask = isna(data)
        if na_rep is None and na_mask.any():
            # drop NAs entirely when no replacement is given
            data = data[~na_mask]
        elif na_rep is not None and na_mask.any():
            data = np.where(na_mask, na_rep, data)
        return sep.join(data)

    try:
        # turn anything in "others" into lists of Series
        others, warn = self._get_series_list(others,
                                             ignore_index=(join is None))
    except ValueError:  # do not catch TypeError raised by _get_series_list
        if join is None:
            raise ValueError('All arrays must be same length, except '
                             'those having an index if `join` is not None')
        else:
            raise ValueError('If `others` contains arrays or lists (or '
                             'other list-likes without an index), these '
                             'must all be of the same length as the '
                             'calling Series/Index.')

    if join is None and warn:
        warnings.warn("A future version of pandas will perform index "
                      "alignment when `others` is a Series/Index/"
                      "DataFrame (or a list-like containing one). To "
                      "disable alignment (the behavior before v.0.23) and "
                      "silence this warning, use `.values` on any Series/"
                      "Index/DataFrame in `others`. To enable alignment "
                      "and silence this warning, pass `join='left'|"
                      "'outer'|'inner'|'right'`. The future default will "
                      "be `join='left'`.", FutureWarning, stacklevel=2)

    # if join is None, _get_series_list already force-aligned indexes
    join = 'left' if join is None else join

    # align if required
    if any(not data.index.equals(x.index) for x in others):
        # Need to add keys for uniqueness in case of duplicate columns
        others = concat(others, axis=1,
                        join=(join if join == 'inner' else 'outer'),
                        keys=range(len(others)), copy=False)
        data, others = data.align(others, join=join)
        others = [others[x] for x in others]  # again list of Series

    all_cols = [ensure_object(x) for x in [data] + others]
    na_masks = np.array([isna(x) for x in all_cols])
    union_mask = np.logical_or.reduce(na_masks, axis=0)

    if na_rep is None and union_mask.any():
        # no na_rep means NaNs for all rows where any column has a NaN
        # only necessary if there are actually any NaNs
        result = np.empty(len(data), dtype=object)
        np.putmask(result, union_mask, np.nan)

        not_masked = ~union_mask
        result[not_masked] = cat_core([x[not_masked] for x in all_cols],
                                      sep)
    elif na_rep is not None and union_mask.any():
        # fill NaNs with na_rep in case there are actually any NaNs
        all_cols = [np.where(nm, na_rep, col)
                    for nm, col in zip(na_masks, all_cols)]
        result = cat_core(all_cols, sep)
    else:
        # no NaNs - can just concatenate
        result = cat_core(all_cols, sep)

    if isinstance(self._orig, Index):
        result = Index(result, name=self._orig.name)
    else:  # Series
        result = Series(result, index=data.index, name=self._orig.name)
    return result
# Shared docstring template for split/rsplit; the %(side)s / %(method)s
# placeholders are filled in by the @Appender decorators below.
_shared_docs['str_split'] = ("""
Split strings around given separator/delimiter.
Splits the string in the Series/Index from the %(side)s,
at the specified delimiter string. Equivalent to :meth:`str.%(method)s`.
Parameters
----------
pat : str, optional
String or regular expression to split on.
If not specified, split on whitespace.
n : int, default -1 (all)
Limit number of splits in output.
``None``, 0 and -1 will be interpreted as return all splits.
expand : bool, default False
Expand the splitted strings into separate columns.
* If ``True``, return DataFrame/MultiIndex expanding dimensionality.
* If ``False``, return Series/Index, containing lists of strings.
Returns
-------
Series, Index, DataFrame or MultiIndex
Type matches caller unless ``expand=True`` (see Notes).
See Also
--------
Series.str.split : Split strings around given separator/delimiter.
Series.str.rsplit : Splits string around given separator/delimiter,
starting from the right.
Series.str.join : Join lists contained as elements in the Series/Index
with passed delimiter.
str.split : Standard library version for split.
str.rsplit : Standard library version for rsplit.
Notes
-----
The handling of the `n` keyword depends on the number of found splits:
- If found splits > `n`, make first `n` splits only
- If found splits <= `n`, make all splits
- If for a certain row the number of found splits < `n`,
append `None` for padding up to `n` if ``expand=True``
If using ``expand=True``, Series and Index callers return DataFrame and
MultiIndex objects, respectively.
Examples
--------
>>> s = pd.Series(["this is a regular sentence",
"https://docs.python.org/3/tutorial/index.html", np.nan])
In the default setting, the string is split by whitespace.
>>> s.str.split()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
Without the `n` parameter, the outputs of `rsplit` and `split`
are identical.
>>> s.str.rsplit()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
The `n` parameter can be used to limit the number of splits on the
delimiter. The outputs of `split` and `rsplit` are different.
>>> s.str.split(n=2)
0 [this, is, a regular sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
>>> s.str.rsplit(n=2)
0 [this is a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
The `pat` parameter can be used to split by other characters.
>>> s.str.split(pat = "/")
0 [this is a regular sentence]
1 [https:, , docs.python.org, 3, tutorial, index...
2 NaN
dtype: object
When using ``expand=True``, the split elements will expand out into
separate columns. If NaN is present, it is propagated throughout
the columns during the split.
>>> s.str.split(expand=True)
0 1 2 3
0 this is a regular
1 https://docs.python.org/3/tutorial/index.html None None None
2 NaN NaN NaN NaN \
4
0 sentence
1 None
2 NaN
For slightly more complex use cases like splitting the html document name
from a url, a combination of parameter settings can be used.
>>> s.str.rsplit("/", n=1, expand=True)
0 1
0 this is a regular sentence None
1 https://docs.python.org/3/tutorial index.html
2 NaN NaN
""")

@Appender(_shared_docs['str_split'] % {
    'side': 'beginning',
    'method': 'split'})
def split(self, pat=None, n=-1, expand=False):
    # Thin wrapper: the real work is in the module-level str_split.
    result = str_split(self._parent, pat, n=n)
    return self._wrap_result(result, expand=expand)

@Appender(_shared_docs['str_split'] % {
    'side': 'end',
    'method': 'rsplit'})
def rsplit(self, pat=None, n=-1, expand=False):
    # Thin wrapper: the real work is in the module-level str_rsplit.
    result = str_rsplit(self._parent, pat, n=n)
    return self._wrap_result(result, expand=expand)
# Shared docstring template for partition/rpartition; the %(side)s,
# %(return)s and %(also)s placeholders are filled by @Appender below.
_shared_docs['str_partition'] = ("""
Split the string at the %(side)s occurrence of `sep`.
This method splits the string at the %(side)s occurrence of `sep`,
and returns 3 elements containing the part before the separator,
the separator itself, and the part after the separator.
If the separator is not found, return %(return)s.
Parameters
----------
pat : str, default whitespace
String to split on.
expand : bool, default True
If True, return DataFrame/MultiIndex expanding dimensionality.
If False, return Series/Index.
Returns
-------
DataFrame/MultiIndex or Series/Index of objects
See Also
--------
%(also)s
Series.str.split : Split strings around given separators.
str.partition : Standard library version.
Examples
--------
>>> s = pd.Series(['Linda van der Berg', 'George Pitt-Rivers'])
>>> s
0 Linda van der Berg
1 George Pitt-Rivers
dtype: object
>>> s.str.partition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
To partition by the last space instead of the first one:
>>> s.str.rpartition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
To partition by something different than a space:
>>> s.str.partition('-')
0 1 2
0 Linda van der Berg
1 George Pitt - Rivers
To return a Series containining tuples instead of a DataFrame:
>>> s.str.partition('-', expand=False)
0 (Linda van der Berg, , )
1 (George Pitt, -, Rivers)
dtype: object
Also available on indices:
>>> idx = pd.Index(['X 123', 'Y 999'])
>>> idx
Index(['X 123', 'Y 999'], dtype='object')
Which will create a MultiIndex:
>>> idx.str.partition()
MultiIndex(levels=[['X', 'Y'], [' '], ['123', '999']],
labels=[[0, 1], [0, 0], [0, 1]])
Or an index with tuples with ``expand=False``:
>>> idx.str.partition(expand=False)
Index([('X', ' ', '123'), ('Y', ' ', '999')], dtype='object')
""")

@Appender(_shared_docs['str_partition'] % {
    'side': 'first',
    'return': '3 elements containing the string itself, followed by two '
              'empty strings',
    'also': 'rpartition : Split the string at the last occurrence of `sep`'
})
def partition(self, pat=' ', expand=True):
    # delegate per-element work to str.partition via _na_map
    f = lambda x: x.partition(pat)
    result = _na_map(f, self._parent)
    return self._wrap_result(result, expand=expand)

@Appender(_shared_docs['str_partition'] % {
    'side': 'last',
    'return': '3 elements containing two empty strings, followed by the '
              'string itself',
    'also': 'partition : Split the string at the first occurrence of `sep`'
})
def rpartition(self, pat=' ', expand=True):
    # delegate per-element work to str.rpartition via _na_map
    f = lambda x: x.rpartition(pat)
    result = _na_map(f, self._parent)
    return self._wrap_result(result, expand=expand)
# Thin delegating wrappers: docstrings are copied from the module-level
# implementations via the @copy decorator.
@copy(str_get)
def get(self, i):
    result = str_get(self._parent, i)
    return self._wrap_result(result)

@copy(str_join)
def join(self, sep):
    result = str_join(self._parent, sep)
    return self._wrap_result(result)

@copy(str_contains)
def contains(self, pat, case=True, flags=0, na=np.nan, regex=True):
    result = str_contains(self._parent, pat, case=case, flags=flags, na=na,
                          regex=regex)
    return self._wrap_result(result)

@copy(str_match)
def match(self, pat, case=True, flags=0, na=np.nan):
    result = str_match(self._parent, pat, case=case, flags=flags, na=na)
    return self._wrap_result(result)

@copy(str_replace)
def replace(self, pat, repl, n=-1, case=None, flags=0, regex=True):
    result = str_replace(self._parent, pat, repl, n=n, case=case,
                         flags=flags, regex=regex)
    return self._wrap_result(result)

@copy(str_repeat)
def repeat(self, repeats):
    result = str_repeat(self._parent, repeats)
    return self._wrap_result(result)

@copy(str_pad)
def pad(self, width, side='left', fillchar=' '):
    result = str_pad(self._parent, width, side=side, fillchar=fillchar)
    return self._wrap_result(result)
# Shared docstring template for the pad-family convenience methods;
# %(side)s / %(method)s are filled by the @Appender decorators below.
_shared_docs['str_pad'] = ("""
Filling %(side)s side of strings in the Series/Index with an
additional character. Equivalent to :meth:`str.%(method)s`.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with ``fillchar``
fillchar : str
Additional character for filling, default is whitespace
Returns
-------
filled : Series/Index of objects
""")

@Appender(_shared_docs['str_pad'] % dict(side='left and right',
                                         method='center'))
def center(self, width, fillchar=' '):
    # convenience alias for pad(side='both')
    return self.pad(width, side='both', fillchar=fillchar)

@Appender(_shared_docs['str_pad'] % dict(side='right', method='ljust'))
def ljust(self, width, fillchar=' '):
    # convenience alias for pad(side='right')
    return self.pad(width, side='right', fillchar=fillchar)

@Appender(_shared_docs['str_pad'] % dict(side='left', method='rjust'))
def rjust(self, width, fillchar=' '):
    # convenience alias for pad(side='left')
    return self.pad(width, side='left', fillchar=fillchar)

def zfill(self, width):
    """
    Pad strings in the Series/Index by prepending '0' characters.
    Strings in the Series/Index are padded with '0' characters on the
    left of the string to reach a total string length `width`. Strings
    in the Series/Index with length greater or equal to `width` are
    unchanged.
    Parameters
    ----------
    width : int
    Minimum length of resulting string; strings with length less
    than `width` be prepended with '0' characters.
    Returns
    -------
    Series/Index of objects
    See Also
    --------
    Series.str.rjust: Fills the left side of strings with an arbitrary
    character.
    Series.str.ljust: Fills the right side of strings with an arbitrary
    character.
    Series.str.pad: Fills the specified sides of strings with an arbitrary
    character.
    Series.str.center: Fills boths sides of strings with an arbitrary
    character.
    Notes
    -----
    Differs from :meth:`str.zfill` which has special handling
    for '+'/'-' in the string.
    Examples
    --------
    >>> s = pd.Series(['-1', '1', '1000', 10, np.nan])
    >>> s
    0 -1
    1 1
    2 1000
    3 10
    4 NaN
    dtype: object
    Note that ``10`` and ``NaN`` are not strings, therefore they are
    converted to ``NaN``. The minus sign in ``'-1'`` is treated as a
    regular character and the zero is added to the left of it
    (:meth:`str.zfill` would have moved it to the left). ``1000``
    remains unchanged as it is longer than `width`.
    >>> s.str.zfill(3)
    0 0-1
    1 001
    2 1000
    3 NaN
    4 NaN
    dtype: object
    """
    # implemented via left-pad with '0' rather than str.zfill -- this is
    # why the sign handling differs from the builtin (see Notes above)
    result = str_pad(self._parent, width, side='left', fillchar='0')
    return self._wrap_result(result)
# Thin delegating wrappers: docstrings are copied from the module-level
# implementations via the @copy decorator.
@copy(str_slice)
def slice(self, start=None, stop=None, step=None):
    result = str_slice(self._parent, start, stop, step)
    return self._wrap_result(result)

@copy(str_slice_replace)
def slice_replace(self, start=None, stop=None, repl=None):
    result = str_slice_replace(self._parent, start, stop, repl)
    return self._wrap_result(result)

@copy(str_decode)
def decode(self, encoding, errors="strict"):
    result = str_decode(self._parent, encoding, errors)
    return self._wrap_result(result)

@copy(str_encode)
def encode(self, encoding, errors="strict"):
    result = str_encode(self._parent, encoding, errors)
    return self._wrap_result(result)
_shared_docs['str_strip'] = (r"""
Remove leading and trailing characters.
Strip whitespaces (including newlines) or a set of specified characters
from each string in the Series/Index from %(side)s.
Equivalent to :meth:`str.%(method)s`.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters will be stripped.
If None then whitespaces are removed.
Returns
-------
Series/Index of objects
See Also
--------
Series.str.strip : Remove leading and trailing characters in Series/Index
Series.str.lstrip : Remove leading characters in Series/Index
Series.str.rstrip : Remove trailing characters in Series/Index
Examples
--------
>>> s = pd.Series(['1. Ant. ', '2. Bee!\n', '3. Cat?\t', np.nan])
>>> s
0 1. Ant.
1 2. Bee!\n
2 3. Cat?\t
3 NaN
dtype: object
>>> s.str.strip()
0 1. Ant.
1 2. Bee!
2 3. Cat?
3 NaN
dtype: object
>>> s.str.lstrip('123.')
0 Ant.
1 Bee!\n
2 Cat?\t
3 NaN
dtype: object
>>> s.str.rstrip('.!? \n\t')
0 1. Ant
1 2. Bee
2 3. Cat
3 NaN
dtype: object
>>> s.str.strip('123.!? \n\t')
0 Ant
1 Bee
2 Cat
3 NaN
dtype: object
""")
@Appender(_shared_docs['str_strip'] % dict(side='left and right sides',
                                           method='strip'))
def strip(self, to_strip=None):
    # Both-sided strip, sharing the templated docstring above.
    return self._wrap_result(str_strip(self._parent, to_strip, side='both'))
@Appender(_shared_docs['str_strip'] % dict(side='left side',
                                           method='lstrip'))
def lstrip(self, to_strip=None):
    # Left-sided strip, sharing the templated docstring above.
    return self._wrap_result(str_strip(self._parent, to_strip, side='left'))
@Appender(_shared_docs['str_strip'] % dict(side='right side',
                                           method='rstrip'))
def rstrip(self, to_strip=None):
    # Right-sided strip, sharing the templated docstring above.
    return self._wrap_result(str_strip(self._parent, to_strip, side='right'))
@copy(str_wrap)
def wrap(self, width, **kwargs):
    # textwrap-style line wrapping applied element-wise; extra kwargs
    # are forwarded to the underlying helper untouched.
    return self._wrap_result(str_wrap(self._parent, width, **kwargs))
@copy(str_get_dummies)
def get_dummies(self, sep='|'):
    # Dummy/indicator extraction needs the full complement of string
    # methods, so categorical data is first cast from the original
    # values to a plain Series of str.
    if self._is_categorical:
        data = self._orig.astype(str)
    else:
        data = self._parent
    result, name = str_get_dummies(data, sep)
    return self._wrap_result(result, use_codes=(not self._is_categorical),
                             name=name, expand=True)
@copy(str_translate)
def translate(self, table, deletechars=None):
    # Character-table translation applied element-wise.
    return self._wrap_result(str_translate(self._parent, table, deletechars))
# Pattern-accepting accessor methods generated by _pat_wrapper; the
# keyword toggles declare which extra argument (regex `flags` or `na`
# fill value) each generated method exposes.
count = _pat_wrapper(str_count, flags=True)
startswith = _pat_wrapper(str_startswith, na=True)
endswith = _pat_wrapper(str_endswith, na=True)
findall = _pat_wrapper(str_findall, flags=True)
@copy(str_extract)
def extract(self, pat, flags=0, expand=True):
    # Passes the accessor itself (not self._parent): str_extract does
    # its own wrapping/expansion of the result.
    return str_extract(self, pat, flags=flags, expand=expand)
@copy(str_extractall)
def extractall(self, pat, flags=0):
    # Operates on the original object rather than _parent — presumably
    # so multiple matches per element can be indexed; verify against
    # str_extractall.
    return str_extractall(self._orig, pat, flags=flags)
_shared_docs['find'] = ("""
Return %(side)s indexes in each strings in the Series/Index
where the substring is fully contained between [start:end].
Return -1 on failure. Equivalent to standard :meth:`str.%(method)s`.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
Returns
-------
found : Series/Index of integer values
See Also
--------
%(also)s
""")
@Appender(_shared_docs['find'] %
          dict(side='lowest', method='find',
               also='rfind : Return highest indexes in each strings'))
def find(self, sub, start=0, end=None):
    # Left-to-right search, mirroring str.find (-1 on failure).
    return self._wrap_result(
        str_find(self._parent, sub, start=start, end=end, side='left'))
@Appender(_shared_docs['find'] %
          dict(side='highest', method='rfind',
               also='find : Return lowest indexes in each strings'))
def rfind(self, sub, start=0, end=None):
    # Right-to-left search, mirroring str.rfind (-1 on failure).
    return self._wrap_result(
        str_find(self._parent, sub, start=start, end=end, side='right'))
def normalize(self, form):
    """Return the Unicode normal form for the strings in the Series/Index.
    For more information on the forms, see the
    :func:`unicodedata.normalize`.
    Parameters
    ----------
    form : {'NFC', 'NFKC', 'NFD', 'NFKD'}
        Unicode form
    Returns
    -------
    normalized : Series/Index of objects
    """
    import unicodedata

    def _normalize(value):
        # compat.u_safe coerces to unicode text where needed.
        return unicodedata.normalize(form, compat.u_safe(value))

    return self._wrap_result(_na_map(_normalize, self._parent))
_shared_docs['index'] = ("""
Return %(side)s indexes in each strings where the substring is
fully contained between [start:end]. This is the same as
``str.%(similar)s`` except instead of returning -1, it raises a ValueError
when the substring is not found. Equivalent to standard ``str.%(method)s``.
Parameters
----------
sub : str
Substring being searched
start : int
Left edge index
end : int
Right edge index
Returns
-------
found : Series/Index of objects
See Also
--------
%(also)s
""")
@Appender(_shared_docs['index'] %
          dict(side='lowest', similar='find', method='index',
               also='rindex : Return highest indexes in each strings'))
def index(self, sub, start=0, end=None):
    # Like find(), but the underlying str.index raises ValueError on a
    # missing substring instead of returning -1.
    return self._wrap_result(
        str_index(self._parent, sub, start=start, end=end, side='left'))
@Appender(_shared_docs['index'] %
          dict(side='highest', similar='rfind', method='rindex',
               also='index : Return lowest indexes in each strings'))
def rindex(self, sub, start=0, end=None):
    # Like rfind(), but the underlying str.rindex raises ValueError on
    # a missing substring instead of returning -1.
    return self._wrap_result(
        str_index(self._parent, sub, start=start, end=end, side='right'))
_shared_docs['len'] = ("""
Computes the length of each element in the Series/Index. The element may be
a sequence (such as a string, tuple or list) or a collection
(such as a dictionary).
Returns
-------
Series or Index of int
A Series or Index of integer values indicating the length of each
element in the Series or Index.
See Also
--------
str.len : Python built-in function returning the length of an object.
Series.size : Returns the length of the Series.
Examples
--------
Returns the length (number of characters) in a string. Returns the
number of entries for dictionaries, lists or tuples.
>>> s = pd.Series(['dog',
... '',
... 5,
... {'foo' : 'bar'},
... [2, 3, 5, 7],
... ('one', 'two', 'three')])
>>> s
0 dog
1
2 5
3 {'foo': 'bar'}
4 [2, 3, 5, 7]
5 (one, two, three)
dtype: object
>>> s.str.len()
0 3.0
1 0.0
2 NaN
3 1.0
4 4.0
5 3.0
dtype: float64
""")
len = _noarg_wrapper(len, docstring=_shared_docs['len'], dtype=int)
_shared_docs['casemethods'] = ("""
Convert strings in the Series/Index to %(type)s.
Equivalent to :meth:`str.%(method)s`.
Returns
-------
Series/Index of objects
See Also
--------
Series.str.lower : Converts all characters to lowercase.
Series.str.upper : Converts all characters to uppercase.
Series.str.title : Converts first character of each word to uppercase and
remaining to lowercase.
Series.str.capitalize : Converts first character to uppercase and
remaining to lowercase.
Series.str.swapcase : Converts uppercase to lowercase and lowercase to
uppercase.
Examples
--------
>>> s = pd.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe'])
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: object
>>> s.str.lower()
0 lower
1 capitals
2 this is a sentence
3 swapcase
dtype: object
>>> s.str.upper()
0 LOWER
1 CAPITALS
2 THIS IS A SENTENCE
3 SWAPCASE
dtype: object
>>> s.str.title()
0 Lower
1 Capitals
2 This Is A Sentence
3 Swapcase
dtype: object
>>> s.str.capitalize()
0 Lower
1 Capitals
2 This is a sentence
3 Swapcase
dtype: object
>>> s.str.swapcase()
0 LOWER
1 capitals
2 THIS IS A SENTENCE
3 sWaPcAsE
dtype: object
""")
# Per-method substitutions for the shared 'casemethods' docstring
# template above.
_shared_docs['lower'] = dict(type='lowercase', method='lower')
_shared_docs['upper'] = dict(type='uppercase', method='upper')
_shared_docs['title'] = dict(type='titlecase', method='title')
_shared_docs['capitalize'] = dict(type='be capitalized',
                                  method='capitalize')
_shared_docs['swapcase'] = dict(type='be swapcased', method='swapcase')
# Case-conversion accessors generated from the corresponding str
# methods; each carries the templated docstring.
lower = _noarg_wrapper(lambda x: x.lower(),
                       docstring=_shared_docs['casemethods'] %
                       _shared_docs['lower'])
upper = _noarg_wrapper(lambda x: x.upper(),
                       docstring=_shared_docs['casemethods'] %
                       _shared_docs['upper'])
title = _noarg_wrapper(lambda x: x.title(),
                       docstring=_shared_docs['casemethods'] %
                       _shared_docs['title'])
capitalize = _noarg_wrapper(lambda x: x.capitalize(),
                            docstring=_shared_docs['casemethods'] %
                            _shared_docs['capitalize'])
swapcase = _noarg_wrapper(lambda x: x.swapcase(),
                          docstring=_shared_docs['casemethods'] %
                          _shared_docs['swapcase'])
_shared_docs['ismethods'] = ("""
Check whether all characters in each string are %(type)s.
This is equivalent to running the Python string method
:meth:`str.%(method)s` for each element of the Series/Index. If a string
has zero characters, ``False`` is returned for that check.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same length as the original
Series/Index.
See Also
--------
Series.str.isalpha : Check whether all characters are alphabetic.
Series.str.isnumeric : Check whether all characters are numeric.
Series.str.isalnum : Check whether all characters are alphanumeric.
Series.str.isdigit : Check whether all characters are digits.
Series.str.isdecimal : Check whether all characters are decimal.
Series.str.isspace : Check whether all characters are whitespace.
Series.str.islower : Check whether all characters are lowercase.
Series.str.isupper : Check whether all characters are uppercase.
Series.str.istitle : Check whether all characters are titlecase.
Examples
--------
**Checks for Alphabetic and Numeric Characters**
>>> s1 = pd.Series(['one', 'one1', '1', ''])
>>> s1.str.isalpha()
0 True
1 False
2 False
3 False
dtype: bool
>>> s1.str.isnumeric()
0 False
1 False
2 True
3 False
dtype: bool
>>> s1.str.isalnum()
0 True
1 True
2 True
3 False
dtype: bool
Note that checks against characters mixed with any additional punctuation
or whitespace will evaluate to false for an alphanumeric check.
>>> s2 = pd.Series(['A B', '1.5', '3,000'])
>>> s2.str.isalnum()
0 False
1 False
2 False
dtype: bool
**More Detailed Checks for Numeric Characters**
There are several different but overlapping sets of numeric characters that
can be checked for.
>>> s3 = pd.Series(['23', '³', '⅕', ''])
The ``s3.str.isdecimal`` method checks for characters used to form numbers
in base 10.
>>> s3.str.isdecimal()
0 True
1 False
2 False
3 False
dtype: bool
The ``s.str.isdigit`` method is the same as ``s3.str.isdecimal`` but also
includes special digits, like superscripted and subscripted digits in
unicode.
>>> s3.str.isdigit()
0 True
1 True
2 False
3 False
dtype: bool
The ``s.str.isnumeric`` method is the same as ``s3.str.isdigit`` but also
includes other characters that can represent quantities such as unicode
fractions.
>>> s3.str.isnumeric()
0 True
1 True
2 True
3 False
dtype: bool
**Checks for Whitespace**
>>> s4 = pd.Series([' ', '\\t\\r\\n ', ''])
>>> s4.str.isspace()
0 True
1 True
2 False
dtype: bool
**Checks for Character Case**
>>> s5 = pd.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
>>> s5.str.islower()
0 True
1 False
2 False
3 False
dtype: bool
>>> s5.str.isupper()
0 False
1 False
2 True
3 False
dtype: bool
The ``s5.str.istitle`` method checks for whether all words are in title
case (whether only the first letter of each word is capitalized). Words are
assumed to be any sequence of non-numeric characters separated by
whitespace characters.
>>> s5.str.istitle()
0 False
1 True
2 False
3 False
dtype: bool
""")
# Per-method substitutions for the shared 'ismethods' docstring
# template above.
_shared_docs['isalnum'] = dict(type='alphanumeric', method='isalnum')
_shared_docs['isalpha'] = dict(type='alphabetic', method='isalpha')
_shared_docs['isdigit'] = dict(type='digits', method='isdigit')
_shared_docs['isspace'] = dict(type='whitespace', method='isspace')
_shared_docs['islower'] = dict(type='lowercase', method='islower')
_shared_docs['isupper'] = dict(type='uppercase', method='isupper')
_shared_docs['istitle'] = dict(type='titlecase', method='istitle')
_shared_docs['isnumeric'] = dict(type='numeric', method='isnumeric')
_shared_docs['isdecimal'] = dict(type='decimal', method='isdecimal')
# Predicate accessors generated from the corresponding str methods.
isalnum = _noarg_wrapper(lambda x: x.isalnum(),
                         docstring=_shared_docs['ismethods'] %
                         _shared_docs['isalnum'])
isalpha = _noarg_wrapper(lambda x: x.isalpha(),
                         docstring=_shared_docs['ismethods'] %
                         _shared_docs['isalpha'])
isdigit = _noarg_wrapper(lambda x: x.isdigit(),
                         docstring=_shared_docs['ismethods'] %
                         _shared_docs['isdigit'])
isspace = _noarg_wrapper(lambda x: x.isspace(),
                         docstring=_shared_docs['ismethods'] %
                         _shared_docs['isspace'])
islower = _noarg_wrapper(lambda x: x.islower(),
                         docstring=_shared_docs['ismethods'] %
                         _shared_docs['islower'])
isupper = _noarg_wrapper(lambda x: x.isupper(),
                         docstring=_shared_docs['ismethods'] %
                         _shared_docs['isupper'])
istitle = _noarg_wrapper(lambda x: x.istitle(),
                         docstring=_shared_docs['ismethods'] %
                         _shared_docs['istitle'])
# isnumeric/isdecimal exist only on unicode text, hence the
# compat.u_safe coercion.
isnumeric = _noarg_wrapper(lambda x: compat.u_safe(x).isnumeric(),
                           docstring=_shared_docs['ismethods'] %
                           _shared_docs['isnumeric'])
isdecimal = _noarg_wrapper(lambda x: compat.u_safe(x).isdecimal(),
                           docstring=_shared_docs['ismethods'] %
                           _shared_docs['isdecimal'])
@classmethod
def _make_accessor(cls, data):
    # Entry point for the accessor machinery: reject unsupported
    # dtypes up front, then build the accessor bound to `data`.
    cls._validate(data)
    return cls(data)
| bsd-3-clause |
wy36101299/NCKU_Machine-Learning-and-Bioinformatics | hw4_predictData/creatPredictdata.py | 1 | 2986 | import glob
import os
import pandas as pd
class CTD(object):
    """Build svmlight-format prediction data from per-stock CSV files.

    Reads every file under ./stock/, derives one numeric feature per
    3-row window, writes rows under ./predict/data/, then shells out to
    svm-scale and rvkde.  NOTE: run() uses Python 2 print statements.
    """
    def __init__(self):
        self.format_l = []    # parsed rows of the current stock file
        self.td_l = []        # accumulated output rows
        self.iternum = 0      # number of usable row windows
        self.formatname = ""  # basename of the file being processed
    def feature(self,index):
        # Combined relative change of columns 1 and 4 between rows
        # index+1 and index+3 (column meanings depend on the CSV
        # layout -- TODO confirm against the source files).
        format_l = self.format_l
        feature = ((float(format_l[index+1][1])-float(format_l[index+3][1]))/float(format_l[index+1][1]))+((float(format_l[index+1][4])-float(format_l[index+3][4]))/float(format_l[index+1][4]))
        if (feature == 0):
            # Avoid an exact zero (a zero-valued svmlight feature would
            # be indistinguishable from a missing one).
            feature = 0.0001
        return feature
    def format(self,path):
        # Load one CSV: drop the header line and the first column of
        # every remaining row.
        a = path.split('/')
        self.formatname = a[2]
        with open(path, 'r') as f:
            a = f.read()
        f = a.split('\n')
        f.pop(0)
        self.iternum = len(f)-3
        for a in range(len(f)):
            a = f[a].split(',')
            a.pop(0)
            self.format_l.append(a)
    def trainData(self):
        # Build one row per window.  `classify` is computed but unused:
        # the emitted label is always '0' (unknown, prediction data).
        for index in range(self.iternum):
            try:
                format_l = self.format_l
                classify = (float(format_l[index][3])-float(format_l[index+1][3]))/float(format_l[index+1][3])*100
                feature = self.feature(index)
                a = ['0']+format_l[index+1]+format_l[index+2]+format_l[index+3]+[feature]
                self.td_l.append(a)
            except:
                # Swallows short windows and unparsable rows alike.
                pass
    def storage_csv(self):
        # NOTE(review): the column names place 'feature' second, but
        # trainData() appends the feature LAST -- verify before relying
        # on this CSV.  (Not called from run().)
        rowname=['classify','feature','1-open','1-high','1-low','1-close','1-volume','1-adj close','2-open','2-high','2-low','2-close','2-volume','2-adj close','3-open','3-high','3-low','3-close','3-volume','3-adj close']
        df = pd.DataFrame(self.td_l,columns=rowname)
        with open('./traindata/td_'+self.formatname+'.csv', 'w') as f:
            df.to_csv(f)
        print('td_'+self.formatname+'.csv is creat!')
    def storage_txt(self,pathname):
        # Append rows in svmlight format: "<label>\t1:v1\t2:v2...\t".
        with open('./predict/data/'+pathname,'ab') as f:
            for a in self.td_l:
                b = str(a[0])+'\t'
                for c in range(1,20):
                    d = str(c)+':'+str(a[c])+'\t'
                    b += d
                f.write(b+'\n')
    def run(self):
        # Process every stock file, then invoke the external scaling
        # and classification tools (Python 2 print statements below).
        path = './stock/*'
        paths=glob.glob(path)
        for index,path in enumerate(paths,1):
            print(index)
            self.format_l = []
            self.td_l = []
            self.format(path)
            self.trainData()
            path = path.split('/')
            pathname = path[2]
            self.storage_txt(pathname)
            print os.popen("./bin/svm-scale -s predict_scale_model ./predict/data/"+pathname+" > ./predict/scale/"+pathname+"predict_data.scale").read()
            print os.popen("./bin/rvkde --best --predict --classify -v ./train/scale/"+pathname+"train_data.scale -V ./predict/scale/"+pathname+"predict_data.scale > ./predict/result/"+pathname+"predict_result").read()
def main():
    """Script entry point: build prediction data for every stock file."""
    CTD().run()


if __name__ == '__main__':
    main()
| mit |
mumuwoyou/vnpy-master | vnpy/trader/gateway/tkproGateway/DataApi/data_api.py | 4 | 18709 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
import time
import numpy as np
from . import jrpc_py
# import jrpc
from . import utils
# def set_log_dir(log_dir):
# if log_dir:
# jrpc.set_log_dir(log_dir)
class DataApiCallback(object):
    """Container for user-supplied DataApi callbacks.

    Attributes
    ----------
    on_quote : callable or None
        Invoked as ``on_quote(quote)`` for every pushed quote.
    """
    def __init__(self):
        # No callback until the user assigns one.
        self.on_quote = None
class DataApi(object):
"""
Abstract base class providing both historic and live data
from various data sources.
Current API version: 1.0
Attributes
----------
Methods
-------
subscribe
quote
daily
bar
bar_quote
"""
def __init__(self, addr="tcp://data.tushare.org:8910", use_jrpc=False):
    """Create DataApi client.
    If use_jrpc, try to load the C version of JsonRpc. If failed, use pure
    Python version of JsonRpc.
    (The C path is currently disabled; the pure-Python client is
    always used and `use_jrpc` is ignored.)
    """
    self._remote = None
    # if use_jrpc:
    #     try:
    #         import jrpc
    #         self._remote = jrpc.JRpcClient()
    #     except Exception as e:
    #         print "Can't load jrpc", e.message
    if not self._remote:
        self._remote = jrpc_py.JRpcClient()
    # Wire transport-level events into this object's handlers, then
    # start the (asynchronous) connection.
    self._remote.on_rpc_callback = self._on_rpc_callback
    self._remote.on_disconnected = self._on_disconnected
    self._remote.on_connected = self._on_connected
    self._remote.connect(addr)
    self._on_jsq_callback = None    # user quote-push handler
    self._connected = False
    self._loggined = False
    self._username = ""
    self._password = ""
    self._data_format = "default"   # see set_data_format()
    self._callback = None           # user connection-event handler
    self._schema = []               # quote field schema from server
    self._schema_id = 0
    self._schema_map = {}           # field id -> schema entry
    self._sub_hash = ""             # server-side subscription hash
    self._subscribed_set = set()    # locally tracked subscriptions
    self._timeout = 20              # RPC timeout in seconds
def login(self, username, password):
    """
    Login before using data api.
    Parameters
    ----------
    username : str
        username
    password : str
        password
    Returns
    -------
    (result, msg) where result is None/False on failure and msg carries
    "errcode,errmsg".
    """
    # Give the background connection up to ~3 seconds to come up.
    for i in range(3):
        if self._connected:
            break
        time.sleep(1)
    if not self._connected:
        return (None, "-1,no connection")
    # Remember credentials so _check_session() can re-login lazily
    # after a reconnect.
    self._username = username
    self._password = password
    return self._do_login()
def logout(self):
    """
    Logout to stop using the data api or switch users.
    """
    # None (rather than False) marks "explicitly logged out"; any
    # falsy value is treated the same by _check_session().
    self._loggined = None
    rpc_params = {}
    cr = self._remote.call("auth.logout", rpc_params)
    return utils.extract_result(cr)
def close(self):
    """
    Close the data api.
    """
    # Shut down the RPC connection; the object is not reusable after.
    self._remote.close()
# def set_callback(self, callback):
# self._callback = callback
def set_timeout(self, timeout):
    """
    Set timeout for data api.
    Default timeout is 20s.
    Parameters
    ----------
    timeout : int
        the max waiting time (seconds) for each RPC call to return
    """
    self._timeout = timeout
def set_data_format(self, format):
    """Set queried data format.
    Available formats are:
    ""       -- Don't convert data, usually the type is map
    "pandas" -- Convert table likely data to DataFrame
    """
    # The stored sentinel "default" means: use each call's own
    # per-method default (see _get_format()).
    self._data_format = format
def set_heartbeat(self, interval, timeout):
    # Forward keep-alive tuning to the RPC client ("hearbeat" [sic] is
    # the remote client's own method name).
    self._remote.set_hearbeat_options(interval, timeout)
def quote(self, symbol, fields="", data_format="", **kwargs):
r, msg = self._call_rpc("jsq.query",
self._get_format(data_format, "pandas"),
"Quote",
_index_column="symbol",
symbol=str(symbol),
fields=fields,
**kwargs)
return (r, msg)
def bar(self, symbol, start_time=200000, end_time=160000,
        trade_date=0, freq="1m", fields="", data_format="", **kwargs):
    """
    Query minute bars of various type, return DataFrame.
    Parameters
    ----------
    symbol : str
        support multiple securities, separated by comma.
    start_time : int (HHMMSS) or str ('HH:MM:SS')
        Default is market open time.
    end_time : int (HHMMSS) or str ('HH:MM:SS')
        Default is market close time.
    trade_date : int (YYYMMDD) or str ('YYYY-MM-DD')
        Default is current trade_date.
    fields : str, optional
        separated by comma ',', default "" (all fields included).
    freq : trade.common.MINBAR_TYPE, optional
        {'1m', '5m', '15m'}, Minute bar type, default is '1m'
    Returns
    -------
    df : pd.DataFrame
        columns:
            symbol, code, date, time, trade_date, freq, open, high, low, close, volume, turnover, vwap, oi
    msg : str
        error code and error message joined by comma
    Examples
    --------
    df, msg = api.bar("000001.SH,cu1709.SHF", start_time="09:56:00", end_time="13:56:00",
                      trade_date="20170823", fields="open,high,low,last,volume", freq="5m")
    """
    # Validate/normalize time and date arguments; helpers return -1 on
    # bad input.  NOTE(review): these error paths return (-1, msg)
    # while connection errors return (False/None, msg).
    begin_time = utils.to_time_int(start_time)
    if (begin_time == -1):
        return (-1, "Begin time format error")
    end_time = utils.to_time_int(end_time)
    if (end_time == -1):
        return (-1, "End time format error")
    trade_date = utils.to_date_int(trade_date)
    if (trade_date == -1):
        return (-1, "Trade date format error")
    return self._call_rpc("jsi.query",
                          self._get_format(data_format, "pandas"),
                          "Bar",
                          symbol=str(symbol),
                          fields=fields,
                          freq=freq,
                          trade_date=trade_date,
                          begin_time=begin_time,
                          end_time=end_time,
                          **kwargs)
def bar_quote(self, symbol, start_time=200000, end_time=160000,
              trade_date=0, freq="1m", fields="", data_format="", **kwargs):
    """
    Query minute bars of various type, return DataFrame.
    It will also return ask/bid informations of the last quote in this bar
    Parameters
    ----------
    symbol : str
        support multiple securities, separated by comma.
    start_time : int (HHMMSS) or str ('HH:MM:SS')
        Default is market open time.
    end_time : int (HHMMSS) or str ('HH:MM:SS')
        Default is market close time.
    trade_date : int (YYYMMDD) or str ('YYYY-MM-DD')
        Default is current trade_date.
    fields : str, optional
        separated by comma ',', default "" (all fields included).
    freq : trade.common.MINBAR_TYPE, optional
        {'1m', '5m', '15m'}, Minute bar type, default is '1m'
    Returns
    -------
    df : pd.DataFrame
        columns:
            symbol, code, date, time, trade_date, freq, open, high, low, close, volume, turnover, vwap, oi,
            askprice1, askprice2, askprice3, askprice4, askprice5,
            bidprice1, bidprice2, bidprice3, bidprice4, bidprice5,
            askvolume1, askvolume2, askvolume3, askvolume4, askvolume5,
            bidvolume1, bidvolume2, bidvolume3, bidvolume4, bidvolume5
    msg : str
        error code and error message joined by comma
    Examples
    --------
    df, msg = api.bar_quote("000001.SH,cu1709.SHF", start_time="09:56:00", end_time="13:56:00",
                            trade_date="20170823", fields="open,high,low,last,volume", freq="5m")
    """
    # Same validation as bar(); only the RPC endpoint and result class
    # differ.
    begin_time = utils.to_time_int(start_time)
    if (begin_time == -1):
        return (-1, "Begin time format error")
    end_time = utils.to_time_int(end_time)
    if (end_time == -1):
        return (-1, "End time format error")
    trade_date = utils.to_date_int(trade_date)
    if (trade_date == -1):
        return (-1, "Trade date format error")
    return self._call_rpc("jsi.bar_view",
                          self._get_format(data_format, "pandas"),
                          "BarQuote",
                          symbol=str(symbol),
                          fields=fields,
                          freq=freq,
                          trade_date=trade_date,
                          begin_time=begin_time,
                          end_time=end_time,
                          **kwargs)
def daily(self, symbol, start_date, end_date,
          adjust_mode=None, freq="1d", fields="",
          data_format="", **kwargs):
    """
    Query dar bar,
    support auto-fill suspended securities data,
    support auto-adjust for splits, dividends and distributions.
    Parameters
    ----------
    symbol : str
        support multiple securities, separated by comma.
    start_date : int or str
        YYYMMDD or 'YYYY-MM-DD'
    end_date : int or str
        YYYMMDD or 'YYYY-MM-DD'
    fields : str, optional
        separated by comma ',', default "" (all fields included).
    adjust_mode : str or None, optional
        None for no adjust;
        'pre' for forward adjust;
        'post' for backward adjust.
    Returns
    -------
    df : pd.DataFrame
        columns:
            symbol, code, trade_date, open, high, low, close, volume, turnover, vwap, oi, suspended
    msg : str
        error code and error message joined by comma
    Examples
    --------
    df, msg = api.daily("000001.SH,cu1709.SHF",start_date=20170503, end_date=20170708,
                        fields="open,high,low,last,volume", adjust_mode = "post")
    """
    # Fix: compare against None with `is`, not `==` (PEP 8 E711; `==`
    # can be hijacked by operator overloading).
    if adjust_mode is None:
        adjust_mode = "none"
    # Validate/normalize dates; the helper returns -1 on bad input.
    begin_date = utils.to_date_int(start_date)
    if begin_date == -1:
        return (-1, "Begin date format error")
    end_date = utils.to_date_int(end_date)
    if end_date == -1:
        return (-1, "End date format error")
    return self._call_rpc("jsd.query",
                          self._get_format(data_format, "pandas"),
                          "Daily",
                          symbol=str(symbol),
                          fields=fields,
                          begin_date=begin_date,
                          end_date=end_date,
                          adjust_mode=adjust_mode,
                          freq=freq,
                          **kwargs)
def query(self, view, filter="", fields="", data_format="", **kwargs):
    """
    Get various reference data.
    Parameters
    ----------
    view : str
        data source.
    fields : str
        Separated by ','
    filter : str
        filter expressions.
    kwargs
    Returns
    -------
    df : pd.DataFrame
    msg : str
        error code and error message, joined by ','
    Examples
    --------
    res3, msg3 = ds.query("lb.secDailyIndicator", fields="price_level,high_52w_adj,low_52w_adj",\
                          filter="start_date=20170907&end_date=20170907",\
                          data_format='pandas')
    view does not change. fileds can be any field predefined in reference data api.
    """
    # Thin pass-through to the generic "jset" query endpoint.
    return self._call_rpc("jset.query",
                          self._get_format(data_format, "pandas"),
                          "JSetData",
                          view=view,
                          fields=fields,
                          filter=filter,
                          **kwargs)
def subscribe(self, symbol, func=None, fields=""):
    """Subscribe securites
    This function adds new securities to subscribed list on the server. If
    success, return subscribed codes.
    If securities is empty, return current subscribed codes.
    """
    r, msg = self._check_session()
    if not r:
        return (r, msg)
    if func:
        # Replace the quote-push handler for all subscriptions.
        self._on_jsq_callback = func
    rpc_params = {"symbol": symbol,
                  "fields": fields}
    cr = self._remote.call("jsq.subscribe", rpc_params)
    rsp, msg = utils.extract_result(cr, data_format="", class_name="SubRsp")
    if not rsp:
        return (rsp, msg)
    # Track the cumulative subscription locally so _do_subscribe() can
    # replay it after a reconnect.
    new_codes = [x.strip() for x in symbol.split(',') if x]
    self._subscribed_set = self._subscribed_set.union(set(new_codes))
    # Cache the field schema used to decode pushed quote indications.
    self._schema_id = rsp['schema_id']
    self._schema = rsp['schema']
    self._sub_hash = rsp['sub_hash']
    self._make_schema_map()
    return (rsp['symbols'], msg)
def unsubscribe(self, symbol):
"""Unsubscribe securities.
Unscribe codes and return list of subscribed code.
"""
assert False, "NOT IMPLEMENTED"
def __del__(self):
    # Best-effort close of the RPC connection at finalization time.
    self._remote.close()
def _on_disconnected(self):
    """JsonRpc callback: transport lost; notify the user callback."""
    # print "DataApi: _on_disconnected"
    self._connected = False
    if self._callback:
        self._callback("connection", False)
def _on_connected(self):
    """JsonRpc callback: transport (re)established."""
    self._connected = True
    # Replay login and subscriptions that predate the reconnect.
    self._do_login()
    self._do_subscribe()
    if self._callback:
        self._callback("connection", True)
def _check_session(self):
if not self._connected:
return (False, "no connection")
elif self._loggined:
return (True, "")
elif self._username and self._password:
return self._do_login()
else:
return (False, "no login session")
def _get_format(self, format, default_format):
if format:
return format
elif self._data_format != "default":
return self._data_format
else:
return default_format
def set_callback(self, callback):
    # Register the connection-event handler, invoked as
    # callback("connection", True/False) on connect/disconnect.
    # NOTE(review): duplicates the commented-out definition earlier in
    # the class.
    self._callback = callback
def _convert_quote_ind(self, quote_ind):
    """Convert original quote_ind to a map.
    The original quote_ind contains field index instead of field name!
    Returns None when the indication was produced against a different
    schema than the one we currently hold.
    """
    if quote_ind['schema_id'] != self._schema_id:
        return None
    indicators = quote_ind['indicators']
    values = quote_ind['values']
    max_index = len(self._schema)
    quote = {}
    for i in range(len(indicators)):
        if indicators[i] < max_index:
            # Known field id -> translate to its schema name.
            quote[self._schema_map[indicators[i]]['name']] = values[i]
        else:
            # Unknown id: keep it, keyed by the raw id as a string.
            quote[str(indicators[i])] = values[i]
    return quote
def _on_rpc_callback(self, method, data):
# print "_on_rpc_callback:", method, data
try:
if method == "jsq.quote_ind":
if self._on_jsq_callback:
q = self._convert_quote_ind(data)
if q:
self._on_jsq_callback("quote", q)
elif method == ".sys.heartbeat":
if 'sub_hash' in data:
if self._sub_hash and self._sub_hash != data['sub_hash']:
print("sub_hash is not same", self._sub_hash, data['sub_hash'])
self._do_subscribe()
except Exception as e:
print("Can't load jrpc", e.message)
def _call_rpc(self, method, data_format, data_class, **kwargs):
    # Shared RPC invocation path for all query methods: verify the
    # session, marshal kwargs, call, and unpack the result.
    r, msg = self._check_session()
    if not r:
        return (r, msg)
    index_column = None
    rpc_params = {}
    for key, value in kwargs.items():
        if key == '_index_column':
            # Meta-argument consumed locally, not sent to the server.
            index_column = value
        else:
            # numpy integers are not serializable; coerce to plain int.
            if isinstance(value, (int, np.integer)):
                value = int(value)
            rpc_params[key] = value
    cr = self._remote.call(method, rpc_params, timeout=self._timeout)
    return utils.extract_result(cr, data_format=data_format, index_column=index_column, class_name=data_class)
def _make_schema_map(self):
self._schema_map = {}
for schema in self._schema:
self._schema_map[schema['id']] = schema
def _do_login(self):
    # Shouldn't check connected flag here. ZMQ is a message queue!
    #   if !self._connected :
    #       return (False, "-1,no connection")
    if self._username and self._password:
        rpc_params = {"username": self._username,
                      "password": self._password}
        cr = self._remote.call("auth.login", rpc_params)
        r, msg = utils.extract_result(cr, data_format="", class_name="UserInfo")
        # Cache login state so _check_session() can short-circuit.
        self._loggined = r
        return (r, msg)
    else:
        self._loggined = None
        return (False, "-1,empty username or password")
def _do_subscribe(self):
    """Subscribe again when reconnected or hash_code is not same"""
    if not self._subscribed_set: return
    # Sorted order gives a deterministic symbol list (and hash).
    codes = list(self._subscribed_set)
    codes.sort()
    # XXX subscribe with default fields!
    rpc_params = {"symbol": ",".join(codes),
                  "fields": ""}
    cr = self._remote.call("jsq.subscribe", rpc_params)
    rsp, msg = utils.extract_result(cr, data_format="", class_name="SubRsp")
    if not rsp:
        # return (rsp, msg)
        return
    # Refresh the cached schema used to decode quote pushes.
    self._schema_id = rsp['schema_id']
    self._schema = rsp['schema']
    self._sub_hash = rsp['sub_hash']
    # return (rsp.securities, msg)
    self._make_schema_map()
| mit |
AlexGrig/GPy | GPy/core/parameterization/transformations.py | 10 | 20673 | # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from .domains import _POSITIVE,_NEGATIVE, _BOUNDED
import weakref
import sys
# Largest finite float64 -- guard value for exp overflow.
_exp_lim_val = np.finfo(np.float64).max
# Clip point used by the exp/log1p transformations below: exp(36) is
# large but comfortably finite.
_lim_val = 36.0
# Smallest resolvable float64 increment; added to keep transformed
# values strictly inside an open domain.
epsilon = np.finfo(np.float64).resolution
#===============================================================================
# Fixing constants
__fixed__ = "fixed"
FIXED = False
UNFIXED = True
#===============================================================================
class Transformation(object):
    """Base class for invertible reparameterizations of constrained parameters.

    A Transformation maps an unconstrained optimizer-space value ``x`` to a
    (possibly constrained) model parameter ``f(x)`` and back.  Subclasses
    implement ``f``/``finv``/``gradfactor`` and friends.  Instances are
    cached so each concrete transformation class behaves as a singleton.
    """
    domain = None
    _instance = None

    def __new__(cls, *args, **kwargs):
        # Per-class singleton cache.  Fix: object.__new__ must be called
        # with the class only -- forwarding *args/**kwargs raises
        # TypeError on Python 3 for subclasses whose __init__ takes
        # arguments; any constructor args are handled by __init__.
        if not cls._instance or cls._instance.__class__ is not cls:
            cls._instance = super(Transformation, cls).__new__(cls)
        return cls._instance

    def f(self, opt_param):
        """Map an optimizer-space value to the model-parameter space."""
        raise NotImplementedError

    def finv(self, model_param):
        """Map a model-parameter value back to optimizer space."""
        raise NotImplementedError

    def log_jacobian(self, model_param):
        """
        compute the log of the jacobian of f, evaluated at f(x)= model_param
        """
        raise NotImplementedError

    def log_jacobian_grad(self, model_param):
        """
        compute the derivative of the log of the jacobian of f, evaluated at f(x)= model_param
        """
        raise NotImplementedError

    def gradfactor(self, model_param, dL_dmodel_param):
        """df(opt_param)_dopt_param evaluated at self.f(opt_param)=model_param,
        times the gradient dL_dmodel_param (chain rule through f).
        """
        raise NotImplementedError

    def gradfactor_non_natural(self, model_param, dL_dmodel_param):
        # Default: same as the ordinary gradient factor.
        return self.gradfactor(model_param, dL_dmodel_param)

    def initialize(self, f):
        """ produce a sensible initial value for f(x)"""
        raise NotImplementedError

    def plot(self, xlabel=r'transformed $\theta$', ylabel=r'$\theta$', axes=None, *args, **kw):
        # Requires matplotlib; plots f over a fixed window of x values.
        assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
        import matplotlib.pyplot as plt
        from ...plotting.matplot_dep import base_plots
        x = np.linspace(-8, 8)
        base_plots.meanplot(x, self.f(x), *args, ax=axes, **kw)
        axes = plt.gca()
        axes.set_xlabel(xlabel)
        axes.set_ylabel(ylabel)

    def __str__(self):
        raise NotImplementedError

    def __repr__(self):
        return self.__class__.__name__
class Logexp(Transformation):
    # Maps R -> (0, inf) via the softplus f(x) = log(1 + exp(x)).
    domain = _POSITIVE
    def f(self, x):
        # For large x, softplus(x) ~= x; clipping avoids overflow in
        # exp, and epsilon keeps the result strictly positive.
        return np.where(x>_lim_val, x, np.log1p(np.exp(np.clip(x, -_lim_val, _lim_val)))) + epsilon
        #raises overflow warning: return np.where(x>_lim_val, x, np.log(1. + np.exp(x)))
    def finv(self, f):
        # Inverse softplus: log(exp(f) - 1); the 1e-20 shift guards
        # against log(0).  NOTE(review): np.log(np.expm1(f)) would be
        # more accurate for small f.
        return np.where(f>_lim_val, f, np.log(np.exp(f+1e-20) - 1.))
    def gradfactor(self, f, df):
        # d softplus / dx expressed in terms of f: 1 - exp(-f).
        return np.einsum('i,i->i', df, np.where(f>_lim_val, 1., 1. - np.exp(-f)))
    def initialize(self, f):
        # Project an initial value into the positive domain.
        if np.any(f < 0.):
            print("Warning: changing parameters to satisfy constraints")
        return np.abs(f)
    def log_jacobian(self, model_param):
        # log |df/dx| at f(x) = model_param, i.e. finv(p) - p.
        return np.where(model_param>_lim_val, model_param, np.log(np.exp(model_param+1e-20) - 1.)) - model_param
    def log_jacobian_grad(self, model_param):
        return 1./(np.exp(model_param)-1.)
    def __str__(self):
        return '+ve'
class Exponent(Transformation):
    """Positive-domain constraint via f(x) = exp(x), saturated at
    exp(+/-_lim_val) to stay finite."""
    domain = _POSITIVE

    def f(self, x):
        # Fix: the previous nested np.where evaluated np.exp(x) on the
        # full (unclipped) input, emitting spurious overflow warnings
        # for large x.  Clipping first yields identical values without
        # the warnings, matching the style already used in Logexp.f.
        return np.exp(np.clip(x, -_lim_val, _lim_val))

    def finv(self, x):
        # Inverse of exp on the (unsaturated) range.
        return np.log(x)

    def gradfactor(self, f, df):
        # d exp(x)/dx = exp(x) = f.
        return np.einsum('i,i->i', df, f)

    def initialize(self, f):
        # Project an initial value into the positive domain.
        if np.any(f < 0.):
            print("Warning: changing parameters to satisfy constraints")
        return np.abs(f)

    def log_jacobian(self, model_param):
        # log |df/dx| at f(x) = model_param is log(model_param).
        return np.log(model_param)

    def log_jacobian_grad(self, model_param):
        return 1./model_param

    def __str__(self):
        return '+ve'
class NormalTheta(Transformation):
    "Do not use, not officially supported!"
    # Weakref cache of live instances, one per (mu_indices, var_indices) pair.
    _instances = []
    def __new__(cls, mu_indices=None, var_indices=None):
        "Do not use, not officially supported!"
        if cls._instances:
            # Drop dead weakrefs, then reuse a matching live instance.
            cls._instances[:] = [instance for instance in cls._instances if instance()]
            for instance in cls._instances:
                if np.all(instance().mu_indices==mu_indices, keepdims=False) and np.all(instance().var_indices==var_indices, keepdims=False):
                    return instance()
        # NOTE(review): passing extra args to object.__new__ is an error on
        # Python 3 -- compare the guarded version in Logistic.__new__; confirm.
        o = super(Transformation, cls).__new__(cls, mu_indices, var_indices)
        cls._instances.append(weakref.ref(o))
        return cls._instances[-1]()
    def __init__(self, mu_indices, var_indices):
        # Index arrays selecting the mean and variance entries of the
        # flattened parameter vector.
        self.mu_indices = mu_indices
        self.var_indices = var_indices
    def f(self, theta):
        # In here abs is only a trick to make sure the numerics are ok.
        # The variance will never go below zero, but at initialization we need to make sure
        # that the values are ok
        # Before:
        theta[self.var_indices] = np.abs(-.5/theta[self.var_indices])
        #theta[self.var_indices] = np.exp(-.5/theta[self.var_indices])
        theta[self.mu_indices] *= theta[self.var_indices]
        return theta # which is now {mu, var}
    def finv(self, muvar):
        # before:
        varp = muvar[self.var_indices]
        muvar[self.mu_indices] /= varp
        muvar[self.var_indices] = -.5/varp
        #muvar[self.var_indices] = -.5/np.log(varp)
        return muvar # which is now {theta1, theta2}
    def gradfactor(self, muvar, dmuvar):
        # Rescale {mu, var} gradients into {theta1, theta2} gradients, in place.
        mu = muvar[self.mu_indices]
        var = muvar[self.var_indices]
        #=======================================================================
        # theta gradients
        # This works and the gradient checks!
        dmuvar[self.mu_indices] *= var
        dmuvar[self.var_indices] *= 2*(var)**2
        dmuvar[self.var_indices] += 2*dmuvar[self.mu_indices]*mu
        #=======================================================================
        return dmuvar # which is now the gradient multiplicator for {theta1, theta2}
    def initialize(self, f):
        # Variances must start non-negative; flip offending entries.
        if np.any(f[self.var_indices] < 0.):
            print("Warning: changing parameters to satisfy constraints")
            f[self.var_indices] = np.abs(f[self.var_indices])
        return f
    def __str__(self):
        return "theta"
    def __getstate__(self):
        # Pickle only the index arrays; the instance cache is rebuilt lazily.
        return [self.mu_indices, self.var_indices]
    def __setstate__(self, state):
        self.mu_indices = state[0]
        self.var_indices = state[1]
class NormalNaturalAntti(NormalTheta):
    "Do not use, not officially supported!"
    # Weakref cache of live instances, one per (mu_indices, var_indices) pair.
    _instances = []
    def __new__(cls, mu_indices=None, var_indices=None):
        "Do not use, not officially supported!"
        if cls._instances:
            cls._instances[:] = [instance for instance in cls._instances if instance()]
            for instance in cls._instances:
                if np.all(instance().mu_indices==mu_indices, keepdims=False) and np.all(instance().var_indices==var_indices, keepdims=False):
                    return instance()
        o = super(Transformation, cls).__new__(cls, mu_indices, var_indices)
        cls._instances.append(weakref.ref(o))
        return cls._instances[-1]()
    def __init__(self, mu_indices, var_indices):
        self.mu_indices = mu_indices
        self.var_indices = var_indices
    def gradfactor(self, muvar, dmuvar):
        # Like NormalTheta.gradfactor but without the mean/variance cross term.
        mu = muvar[self.mu_indices]
        var = muvar[self.var_indices]
        #=======================================================================
        # theta gradients
        # This works and the gradient checks!
        dmuvar[self.mu_indices] *= var
        dmuvar[self.var_indices] *= 2*var**2#np.einsum('i,i,i,i->i', dmuvar[self.var_indices], [2], var, var)
        #=======================================================================
        return dmuvar # which is now the gradient multiplicator
    def initialize(self, f):
        if np.any(f[self.var_indices] < 0.):
            print("Warning: changing parameters to satisfy constraints")
            f[self.var_indices] = np.abs(f[self.var_indices])
        return f
    def __str__(self):
        return "natantti"
class NormalEta(Transformation):
    "Do not use, not officially supported!"
    # Weakref cache of live instances, one per (mu_indices, var_indices) pair.
    _instances = []
    def __new__(cls, mu_indices=None, var_indices=None):
        "Do not use, not officially supported!"
        if cls._instances:
            cls._instances[:] = [instance for instance in cls._instances if instance()]
            for instance in cls._instances:
                if np.all(instance().mu_indices==mu_indices, keepdims=False) and np.all(instance().var_indices==var_indices, keepdims=False):
                    return instance()
        o = super(Transformation, cls).__new__(cls, mu_indices, var_indices)
        cls._instances.append(weakref.ref(o))
        return cls._instances[-1]()
    def __init__(self, mu_indices, var_indices):
        self.mu_indices = mu_indices
        self.var_indices = var_indices
    def f(self, theta):
        # var = eta2 - mu^2; abs only guards numerics at initialization.
        theta[self.var_indices] = np.abs(theta[self.var_indices] - theta[self.mu_indices]**2)
        return theta # which is now {mu, var}
    def finv(self, muvar):
        # Second expectation parameter: eta2 = var + mu^2.
        muvar[self.var_indices] += muvar[self.mu_indices]**2
        return muvar # which is now {eta1, eta2}
    def gradfactor(self, muvar, dmuvar):
        mu = muvar[self.mu_indices]
        #=======================================================================
        # Lets try natural gradients instead: Not working with bfgs... try stochastic!
        dmuvar[self.mu_indices] -= 2*mu*dmuvar[self.var_indices]
        #=======================================================================
        return dmuvar # which is now the gradient multiplicator
    def initialize(self, f):
        if np.any(f[self.var_indices] < 0.):
            print("Warning: changing parameters to satisfy constraints")
            f[self.var_indices] = np.abs(f[self.var_indices])
        return f
    def __str__(self):
        return "eta"
class NormalNaturalThroughTheta(NormalTheta):
    "Do not use, not officially supported!"
    # Weakref cache of live instances, one per (mu_indices, var_indices) pair.
    _instances = []
    def __new__(cls, mu_indices=None, var_indices=None):
        "Do not use, not officially supported!"
        if cls._instances:
            cls._instances[:] = [instance for instance in cls._instances if instance()]
            for instance in cls._instances:
                if np.all(instance().mu_indices==mu_indices, keepdims=False) and np.all(instance().var_indices==var_indices, keepdims=False):
                    return instance()
        o = super(Transformation, cls).__new__(cls, mu_indices, var_indices)
        cls._instances.append(weakref.ref(o))
        return cls._instances[-1]()
    def __init__(self, mu_indices, var_indices):
        self.mu_indices = mu_indices
        self.var_indices = var_indices
    def gradfactor(self, muvar, dmuvar):
        mu = muvar[self.mu_indices]
        var = muvar[self.var_indices]
        #=======================================================================
        # This is just eta direction:
        dmuvar[self.mu_indices] -= 2*mu*dmuvar[self.var_indices]
        #=======================================================================
        #=======================================================================
        # This is by going through theta fully and then going into eta direction:
        #dmu = dmuvar[self.mu_indices]
        #dmuvar[self.var_indices] += dmu*mu*(var + 4/var)
        #=======================================================================
        return dmuvar # which is now the gradient multiplicator
    def gradfactor_non_natural(self, muvar, dmuvar):
        # Plain theta-parameterisation scaling, identical to NormalTheta.gradfactor.
        mu = muvar[self.mu_indices]
        var = muvar[self.var_indices]
        #=======================================================================
        # theta gradients
        # This works and the gradient checks!
        dmuvar[self.mu_indices] *= var
        dmuvar[self.var_indices] *= 2*(var)**2
        dmuvar[self.var_indices] += 2*dmuvar[self.mu_indices]*mu
        #=======================================================================
        return dmuvar # which is now the gradient multiplicator for {theta1, theta2}
    def __str__(self):
        return "natgrad"
class NormalNaturalWhooot(NormalTheta):
    "Do not use, not officially supported!"
    # Weakref cache of live instances, one per (mu_indices, var_indices) pair.
    _instances = []
    def __new__(cls, mu_indices=None, var_indices=None):
        "Do not use, not officially supported!"
        if cls._instances:
            cls._instances[:] = [instance for instance in cls._instances if instance()]
            for instance in cls._instances:
                if np.all(instance().mu_indices==mu_indices, keepdims=False) and np.all(instance().var_indices==var_indices, keepdims=False):
                    return instance()
        o = super(Transformation, cls).__new__(cls, mu_indices, var_indices)
        cls._instances.append(weakref.ref(o))
        return cls._instances[-1]()
    def __init__(self, mu_indices, var_indices):
        self.mu_indices = mu_indices
        self.var_indices = var_indices
    def gradfactor(self, muvar, dmuvar):
        # Every candidate rescaling is commented out, so the gradient passes
        # through unchanged.
        #mu = muvar[self.mu_indices]
        #var = muvar[self.var_indices]
        #=======================================================================
        # This is just eta direction:
        #dmuvar[self.mu_indices] -= 2*mu*dmuvar[self.var_indices]
        #=======================================================================
        #=======================================================================
        # This is by going through theta fully and then going into eta direction:
        #dmu = dmuvar[self.mu_indices]
        #dmuvar[self.var_indices] += dmu*mu*(var + 4/var)
        #=======================================================================
        return dmuvar # which is now the gradient multiplicator
    def __str__(self):
        return "natgrad"
class NormalNaturalThroughEta(NormalEta):
    "Do not use, not officially supported!"
    # Weakref cache of live instances, one per (mu_indices, var_indices) pair.
    _instances = []
    def __new__(cls, mu_indices=None, var_indices=None):
        "Do not use, not officially supported!"
        if cls._instances:
            cls._instances[:] = [instance for instance in cls._instances if instance()]
            for instance in cls._instances:
                if np.all(instance().mu_indices==mu_indices, keepdims=False) and np.all(instance().var_indices==var_indices, keepdims=False):
                    return instance()
        o = super(Transformation, cls).__new__(cls, mu_indices, var_indices)
        cls._instances.append(weakref.ref(o))
        return cls._instances[-1]()
    def __init__(self, mu_indices, var_indices):
        self.mu_indices = mu_indices
        self.var_indices = var_indices
    def gradfactor(self, muvar, dmuvar):
        # Theta-style scaling applied on top of the eta parameterisation.
        mu = muvar[self.mu_indices]
        var = muvar[self.var_indices]
        #=======================================================================
        # theta gradients
        # This works and the gradient checks!
        dmuvar[self.mu_indices] *= var
        dmuvar[self.var_indices] *= 2*(var)**2
        dmuvar[self.var_indices] += 2*dmuvar[self.mu_indices]*mu
        #=======================================================================
        return dmuvar
    def __str__(self):
        return "natgrad"
class LogexpNeg(Transformation):
    """Negated softplus map f(x) = -log(1 + exp(x)).

    NOTE(review): domain is declared _POSITIVE and __str__ reports '+ve'
    even though f maps onto negative values -- looks inconsistent with the
    forward map; confirm the intended use before relying on it.
    """
    domain = _POSITIVE
    def f(self, x):
        return np.where(x>_lim_val, -x, -np.log(1. + np.exp(np.clip(x, -np.inf, _lim_val))))
        #raises overflow warning: return np.where(x>_lim_val, x, np.log(1. + np.exp(x)))
    def finv(self, f):
        # Inverse of -log(1 + exp(x)): x = log(exp(-f) - 1).
        return np.where(f>_lim_val, 0, np.log(np.exp(-f) - 1.))
    def gradfactor(self, f, df):
        return np.einsum('i,i->i', df, np.where(f>_lim_val, -1, -1 + np.exp(-f)))
    def initialize(self, f):
        if np.any(f < 0.):
            print("Warning: changing parameters to satisfy constraints")
        return np.abs(f)
    def __str__(self):
        return '+ve'
class NegativeLogexp(Transformation):
    """Negativity constraint via the negated softplus: f(x) = -log(1 + exp(x))."""
    domain = _NEGATIVE
    # Delegate the numerics to the (shared) positive softplus transformation.
    logexp = Logexp()

    def f(self, x):
        return -self.logexp.f(x)  # -log(1. + exp(x))

    def finv(self, f):
        # Undo the negation, then invert the softplus.
        return self.logexp.finv(-f)

    def gradfactor(self, f, df):
        # Chain rule through the negation: with g(x) = softplus(x) and
        # f = -g, df * f'(x) = (-df) * g'(x) evaluated at g = -f.
        # BUG FIX: Logexp.gradfactor takes (f, df); the old code called it
        # with a single argument inside an einsum, raising a TypeError.
        return self.logexp.gradfactor(-f, -df)

    def initialize(self, f):
        return -self.logexp.initialize(f)  # -np.abs(f)

    def __str__(self):
        return '-ve'
class LogexpClipped(Logexp):
    """Softplus positivity constraint with hard clipping of both the exponent
    and the resulting value into [min_bound, max_bound]."""
    max_bound = 1e100
    min_bound = 1e-10
    log_max_bound = np.log(max_bound)
    log_min_bound = np.log(min_bound)
    domain = _POSITIVE
    # Weakref cache: one live instance per `lower` value.
    _instances = []

    def __new__(cls, lower=1e-6, *args, **kwargs):
        if cls._instances:
            # Drop dead weakrefs, then reuse a matching live instance.
            cls._instances[:] = [instance for instance in cls._instances if instance()]
            for instance in cls._instances:
                if instance().lower == lower:
                    return instance()
        o = super(Transformation, cls).__new__(cls, lower, *args, **kwargs)
        cls._instances.append(weakref.ref(o))
        return cls._instances[-1]()

    def __init__(self, lower=1e-6):
        self.lower = lower

    def f(self, x):
        # Clip the exponent first so np.exp cannot overflow, then clip the
        # softplus output into the representable range.
        exp = np.exp(np.clip(x, self.log_min_bound, self.log_max_bound))
        f = np.log(1. + exp)
        return np.clip(f, self.min_bound, self.max_bound)

    def finv(self, f):
        # Inverse softplus: x = log(exp(f) - 1).
        # BUG FIX: was np.log(np.exp(f - 1.)), which simplifies to f - 1 and
        # is not the inverse of f(x) = log(1 + exp(x)).
        return np.log(np.exp(f) - 1.)

    def gradfactor(self, f, df):
        # df/dx = exp(x)/(1+exp(x)) = (exp(f) - 1)/exp(f) at f = softplus(x).
        ef = np.exp(f) # np.clip(f, self.min_bound, self.max_bound))
        gf = (ef - 1.) / ef
        return np.einsum('i,i->i', df, gf) # np.where(f < self.lower, 0, gf)

    def initialize(self, f):
        if np.any(f < 0.):
            print("Warning: changing parameters to satisfy constraints")
        return np.abs(f)

    def __str__(self):
        return '+ve_c'
class NegativeExponent(Exponent):
    """Negativity constraint via f(x) = -exp(x)."""
    domain = _NEGATIVE

    def f(self, x):
        # BUG FIX: Exponent.f is an instance method; calling it unbound as
        # Exponent.f(x) raised a TypeError. Pass self explicitly.
        return -Exponent.f(self, x)

    def finv(self, f):
        # BUG FIX: same unbound-call problem as f().
        return Exponent.finv(self, -f)

    def gradfactor(self, f, df):
        # d(-exp(x))/dx = -exp(x) = f, so the factor is f itself.
        return np.einsum('i,i->i', df, f)

    def initialize(self, f):
        # BUG FIX: same unbound-call problem; yields -np.abs(f).
        return -Exponent.initialize(self, f)

    def __str__(self):
        return '-ve'
class Square(Transformation):
    """Positivity constraint obtained by squaring: f(x) = x**2."""
    domain = _POSITIVE

    def f(self, x):
        # Forward map: square the unconstrained value.
        return x * x

    def finv(self, x):
        # Inverse on the positive branch of the parabola.
        return np.sqrt(x)

    def gradfactor(self, f, df):
        # df/dx = 2x = 2*sqrt(f) on the positive branch.
        return np.einsum('i,i->i', df, 2.0 * np.sqrt(f))

    def initialize(self, f):
        # Fold negative starting values onto the positive axis.
        return np.abs(f)

    def __str__(self):
        return '+sq'
class Logistic(Transformation):
    """Bounded constraint mapping R onto (lower, upper) via a logistic sigmoid."""
    domain = _BOUNDED
    # Weakref cache: one live instance per (lower, upper) pair.
    _instances = []
    def __new__(cls, lower=1e-6, upper=1e-6, *args, **kwargs):
        if cls._instances:
            # Drop dead weakrefs, then reuse a matching live instance.
            cls._instances[:] = [instance for instance in cls._instances if instance()]
            for instance in cls._instances:
                if instance().lower == lower and instance().upper == upper:
                    return instance()
        # object.__new__ accepts no extra arguments; only forward them when a
        # custom __new__ further up the MRO expects them.
        newfunc = super(Transformation, cls).__new__
        if newfunc is object.__new__:
            o = newfunc(cls)
        else:
            o = newfunc(cls, lower, upper, *args, **kwargs)
        cls._instances.append(weakref.ref(o))
        return cls._instances[-1]()
    def __init__(self, lower, upper):
        assert lower < upper
        self.lower, self.upper = float(lower), float(upper)
        self.difference = self.upper - self.lower
    def f(self, x):
        # Clamp very negative inputs so np.exp(-x) cannot overflow.
        if (x<-300.).any():
            x = x.copy()
            x[x<-300.] = -300.
        return self.lower + self.difference / (1. + np.exp(-x))
    def finv(self, f):
        # Logit inverse; clipping keeps both log arguments strictly positive.
        return np.log(np.clip(f - self.lower, 1e-10, np.inf) / np.clip(self.upper - f, 1e-10, np.inf))
    def gradfactor(self, f, df):
        return np.einsum('i,i->i', df, (f - self.lower) * (self.upper - f) / self.difference)
    def initialize(self, f):
        if np.any(np.logical_or(f < self.lower, f > self.upper)):
            print("Warning: changing parameters to satisfy constraints")
        #return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(f * 0.), f)
        #FIXME: Max, zeros_like right?
        # Out-of-range entries are reset to the sigmoid midpoint f(0).
        return np.where(np.logical_or(f < self.lower, f > self.upper), self.f(np.zeros_like(f)), f)
    def __str__(self):
        return '{},{}'.format(self.lower, self.upper)
| bsd-3-clause |
tawsifkhan/scikit-learn | sklearn/cluster/setup.py | 263 | 1449 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
    """numpy.distutils configuration for the sklearn.cluster C/C++ extensions."""
    from numpy.distutils.misc_util import Configuration

    cblas_libs, blas_info = get_blas_info()

    libraries = []
    if os.name == 'posix':
        # Link libm explicitly on POSIX systems.
        cblas_libs.append('m')
        libraries.append('m')

    config = Configuration('cluster', parent_package, top_path)
    config.add_extension('_dbscan_inner',
                         sources=['_dbscan_inner.cpp'],
                         include_dirs=[numpy.get_include()],
                         language="c++")
    config.add_extension('_hierarchical',
                         sources=['_hierarchical.cpp'],
                         language="c++",
                         include_dirs=[numpy.get_include()],
                         libraries=libraries)
    # _k_means links against CBLAS; the pops below remove keys from blas_info
    # so the remaining entries can be splatted as **kwargs without duplicates.
    config.add_extension(
        '_k_means',
        libraries=cblas_libs,
        sources=['_k_means.c'],
        # NOTE(review): the popped 'include_dirs' entry is itself a list
        # nested inside this list -- presumably numpy.distutils flattens it;
        # confirm before touching.
        include_dirs=[join('..', 'src', 'cblas'),
                      numpy.get_include(),
                      blas_info.pop('include_dirs', [])],
        extra_compile_args=blas_info.pop('extra_compile_args', []),
        **blas_info
    )
    return config
if __name__ == '__main__':
    # Allow building the cluster subpackage standalone.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| bsd-3-clause |
voxlol/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
    """Minkowski-style distance, used below as a user-supplied metric callback."""
    diff = x1 - x2
    total = np.sum(diff ** p)
    return total ** (1.0 / p)
def cmp_version(version1, version2):
    """Three-way compare two dotted version strings on (major, minor).

    Returns -1, 0 or 1, like the classic cmp(). Any components beyond the
    first two are ignored.
    """
    v1 = tuple(int(part) for part in version1.split('.')[:2])
    v2 = tuple(int(part) for part in version2.split('.')[:2])
    # bool arithmetic gives the -1/0/1 three-way result directly.
    return (v1 > v2) - (v1 < v2)
class TestMetrics:
    """Nose generator tests comparing DistanceMetric against scipy's cdist."""
    def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
                 rseed=0, dtype=np.float64):
        np.random.seed(rseed)
        self.X1 = np.random.random((n1, d)).astype(dtype)
        self.X2 = np.random.random((n2, d)).astype(dtype)
        # make boolean arrays: ones and zeros
        self.X1_bool = self.X1.round(0)
        self.X2_bool = self.X2.round(0)
        # Random symmetric matrix used as the mahalanobis VI parameter.
        V = np.random.random((d, d))
        VI = np.dot(V, V.T)
        # metric name -> {parameter name: tuple of values to sweep}.
        self.metrics = {'euclidean': {},
                        'cityblock': {},
                        'minkowski': dict(p=(1, 1.5, 2, 3)),
                        'chebyshev': {},
                        'seuclidean': dict(V=(np.random.random(d),)),
                        'wminkowski': dict(p=(1, 1.5, 3),
                                           w=(np.random.random(d),)),
                        'mahalanobis': dict(VI=(VI,)),
                        'hamming': {},
                        'canberra': {},
                        'braycurtis': {}}
        # Metrics that only make sense on boolean input.
        self.bool_metrics = ['matching', 'jaccard', 'dice',
                             'kulsinski', 'rogerstanimoto', 'russellrao',
                             'sokalmichener', 'sokalsneath']
    def test_cdist(self):
        """Yield cross-distance checks for every metric/parameter combination."""
        for metric, argdict in self.metrics.items():
            keys = argdict.keys()
            for vals in itertools.product(*argdict.values()):
                kwargs = dict(zip(keys, vals))
                D_true = cdist(self.X1, self.X2, metric, **kwargs)
                yield self.check_cdist, metric, kwargs, D_true
        for metric in self.bool_metrics:
            D_true = cdist(self.X1_bool, self.X2_bool, metric)
            yield self.check_cdist_bool, metric, D_true
    def check_cdist(self, metric, kwargs, D_true):
        if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
            raise SkipTest("Canberra distance incorrect in scipy < 0.9")
        dm = DistanceMetric.get_metric(metric, **kwargs)
        D12 = dm.pairwise(self.X1, self.X2)
        assert_array_almost_equal(D12, D_true)
    def check_cdist_bool(self, metric, D_true):
        dm = DistanceMetric.get_metric(metric)
        D12 = dm.pairwise(self.X1_bool, self.X2_bool)
        assert_array_almost_equal(D12, D_true)
    def test_pdist(self):
        """Yield self-distance checks; pairwise with one argument acts as pdist."""
        for metric, argdict in self.metrics.items():
            keys = argdict.keys()
            for vals in itertools.product(*argdict.values()):
                kwargs = dict(zip(keys, vals))
                # Reference computed as cdist(X1, X1) for a full square matrix.
                D_true = cdist(self.X1, self.X1, metric, **kwargs)
                yield self.check_pdist, metric, kwargs, D_true
        for metric in self.bool_metrics:
            D_true = cdist(self.X1_bool, self.X1_bool, metric)
            yield self.check_pdist_bool, metric, D_true
    def check_pdist(self, metric, kwargs, D_true):
        if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
            raise SkipTest("Canberra distance incorrect in scipy < 0.9")
        dm = DistanceMetric.get_metric(metric, **kwargs)
        D12 = dm.pairwise(self.X1)
        assert_array_almost_equal(D12, D_true)
    def check_pdist_bool(self, metric, D_true):
        dm = DistanceMetric.get_metric(metric)
        D12 = dm.pairwise(self.X1_bool)
        assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
    """Compare the haversine metric to a direct formula and check dist_to_rdist."""
    def haversine_slow(x1, x2):
        # Great-circle distance on the unit sphere, coordinates in radians.
        return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
                                     + np.cos(x1[0]) * np.cos(x2[0]) *
                                     np.sin(0.5 * (x1[1] - x2[1])) ** 2))
    X = np.random.random((10, 2))
    haversine = DistanceMetric.get_metric("haversine")
    D1 = haversine.pairwise(X)
    # Reference matrix computed pair by pair with the slow formula.
    D2 = np.zeros_like(D1)
    for i, x1 in enumerate(X):
        for j, x2 in enumerate(X):
            D2[i, j] = haversine_slow(x1, x2)
    assert_array_almost_equal(D1, D2)
    # The reduced ("rank-preserving") distance is sin^2(d / 2).
    assert_array_almost_equal(haversine.dist_to_rdist(D1),
                              np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
    """A user-supplied callable metric should match the builtin euclidean
    metric (with p=2) and survive pickling."""
    X = np.random.random((10, 3))
    euclidean = DistanceMetric.get_metric("euclidean")
    pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
    # Check if both callable metric and predefined metric initialized
    # DistanceMetric object is picklable
    euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
    pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
    D1 = euclidean.pairwise(X)
    D2 = pyfunc.pairwise(X)
    D1_pkl = euclidean_pkl.pairwise(X)
    D2_pkl = pyfunc_pkl.pairwise(X)
    assert_array_almost_equal(D1, D2)
    assert_array_almost_equal(D1_pkl, D2_pkl)
| bsd-3-clause |
anirudhjayaraman/Dato-Core | src/unity/python/graphlab/test/test_sarray.py | 13 | 60654 | # -*- coding: utf-8 -*-
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
from graphlab.data_structures.sarray import SArray
from graphlab_util.timezone import GMT
import pandas as pd
import numpy as np
import unittest
import random
import datetime as dt
import copy
import os
import math
import shutil
import array
import util
import time
import itertools
import warnings
import functools
#######################################################
# Metrics tracking tests are in test_usage_metrics.py #
#######################################################
class SArrayTest(unittest.TestCase):
def setUp(self):
    """Build small fixture datasets covering every supported SArray element type."""
    self.int_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    self.bool_data = [x % 2 == 0 for x in range(10)]
    self.datetime_data = [dt.datetime(2013, 5, 7, 10, 4, 10),dt.datetime(1902, 10, 21, 10, 34, 10),None]
    self.float_data = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]
    self.string_data = ["abc", "def", "hello", "world", "pika", "chu", "hello", "world"]
    self.vec_data = [array.array('d', [i, i+1]) for i in self.int_data]
    self.list_data = [[i, str(i), i * 1.0] for i in self.int_data]
    self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
    # Remote fixture used by URL-loading tests.
    self.url = "http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz"
def __test_equal(self, _sarray, _data, _type):
    """Assert that _sarray has dtype _type and exactly the elements in _data."""
    self.assertEqual(_sarray.dtype(), _type)
    self.assertSequenceEqual(list(_sarray.head(_sarray.size())), _data)
def __test_creation(self, data, dtype, expected):
    """
    Create an SArray from `data` with an explicit `dtype` -- both from the
    raw value and from a pandas Series -- and check it equals `expected`.
    """
    s = SArray(data, dtype)
    self.__test_equal(s, expected, dtype)
    s = SArray(pd.Series(data), dtype)
    self.__test_equal(s, expected, dtype)
def __test_creation_type_inference(self, data, expected_dtype, expected):
    """
    Create an SArray from `data` letting the dtype be inferred, and check
    both the inferred dtype and the contents against the expectations.
    """
    s = SArray(data)
    self.__test_equal(s, expected, expected_dtype)
    s = SArray(pd.Series(data))
    self.__test_equal(s, expected, expected_dtype)
def test_creation(self):
    """Construction with explicit dtypes, invalid casts, URLs, and inference."""
    self.__test_creation(self.int_data, int, self.int_data)
    self.__test_creation(self.int_data, float, [float(x) for x in self.int_data])
    self.__test_creation(self.int_data, str, [str(x) for x in self.int_data])
    self.__test_creation(self.float_data, float, self.float_data)
    self.assertRaises(TypeError, self.__test_creation, [self.float_data, int])
    self.__test_creation(self.string_data, str, self.string_data)
    self.assertRaises(TypeError, self.__test_creation, [self.string_data, int])
    self.assertRaises(TypeError, self.__test_creation, [self.string_data, float])
    # Loading from a URL parses one element per line: the letters a..z.
    expected_output = [chr(x) for x in range(ord('a'), ord('a') + 26)]
    self.__test_equal(SArray(self.url, str), expected_output, str)
    self.__test_creation(self.vec_data, array.array, self.vec_data)
    self.__test_creation(self.list_data, list, self.list_data)
    self.__test_creation(self.dict_data, dict, self.dict_data)
    # test with type inference
    self.__test_creation_type_inference(self.int_data, int, self.int_data)
    self.__test_creation_type_inference(self.float_data, float, self.float_data)
    # Booleans (python and numpy) are inferred as integers.
    self.__test_creation_type_inference(self.bool_data, int, [int(x) for x in self.bool_data])
    self.__test_creation_type_inference(self.string_data, str, self.string_data)
    self.__test_creation_type_inference(self.vec_data, array.array, self.vec_data)
    self.__test_creation_type_inference([np.bool_(True),np.bool_(False)],int,[1,0])
def test_list_with_none_creation(self):
    """Nested lists containing None round-trip element-wise."""
    tlist=[[2,3,4],[5,6],[4,5,10,None]]
    g=SArray(tlist)
    self.assertEqual(len(g), len(tlist))
    for i in range(len(tlist)):
        self.assertEqual(g[i], tlist[i])
def test_list_with_array_creation(self):
    """array.array input: typecode 'd' maps to float, 'i' maps to int."""
    import array
    t = array.array('d',[1.1,2,3,4,5.5])
    g=SArray(t)
    self.assertEqual(len(g), len(t))
    self.assertEqual(g.dtype(), float)
    glist = list(g)
    for i in range(len(glist)):
        # Floats compare approximately.
        self.assertAlmostEqual(glist[i], t[i])
    t = array.array('i',[1,2,3,4,5])
    g=SArray(t)
    self.assertEqual(len(g), len(t))
    self.assertEqual(g.dtype(), int)
    glist = list(g)
    for i in range(len(glist)):
        self.assertEqual(glist[i], t[i])
def test_save_load(self):
    """Save every element type to .sidx and read it back unchanged; also
    exercise failure paths for missing files and bad permissions."""
    # Make sure these files don't exist before testing
    self._remove_sarray_files("intarr")
    self._remove_sarray_files("fltarr")
    self._remove_sarray_files("strarr")
    self._remove_sarray_files("vecarr")
    self._remove_sarray_files("listarr")
    self._remove_sarray_files("dictarr")
    sint = SArray(self.int_data, int)
    sflt = SArray([float(x) for x in self.int_data], float)
    sstr = SArray([str(x) for x in self.int_data], str)
    svec = SArray(self.vec_data, array.array)
    slist = SArray(self.list_data, list)
    sdict = SArray(self.dict_data, dict)
    sint.save('intarr.sidx')
    sflt.save('fltarr.sidx')
    sstr.save('strarr.sidx')
    svec.save('vecarr.sidx')
    slist.save('listarr.sidx')
    sdict.save('dictarr.sidx')
    # Reload each saved array and verify contents below.
    sint2 = SArray('intarr.sidx')
    sflt2 = SArray('fltarr.sidx')
    sstr2 = SArray('strarr.sidx')
    svec2 = SArray('vecarr.sidx')
    slist2 = SArray('listarr.sidx')
    sdict2 = SArray('dictarr.sidx')
    self.assertRaises(IOError, lambda: SArray('__no_such_file__.sidx'))
    self.__test_equal(sint2, self.int_data, int)
    self.__test_equal(sflt2, [float(x) for x in self.int_data], float)
    self.__test_equal(sstr2, [str(x) for x in self.int_data], str)
    self.__test_equal(svec2, self.vec_data, array.array)
    self.__test_equal(slist2, self.list_data, list)
    self.__test_equal(sdict2, self.dict_data, dict)
    # Bad permission
    test_dir = 'test_dir'
    if os.path.exists(test_dir):
        os.removedirs(test_dir)
    # NOTE(review): mode=0000 is a Python 2 octal literal (0o000 in Py3);
    # an unreadable/unwritable directory makes the saves below fail.
    os.makedirs(test_dir, mode=0000)
    with self.assertRaises(IOError):
        sint.save(os.path.join(test_dir, 'bad.sidx'))
    # Permissions will affect this test first, so no need
    # to write something here
    with self.assertRaises(IOError):
        sint3 = SArray(os.path.join(test_dir, 'bad.sidx'))
    os.removedirs(test_dir)
    #cleanup
    del sint2
    del sflt2
    del sstr2
    del svec2
    del slist2
    del sdict2
    self._remove_sarray_files("intarr")
    self._remove_sarray_files("fltarr")
    self._remove_sarray_files("strarr")
    self._remove_sarray_files("vecarr")
    self._remove_sarray_files("listarr")
    self._remove_sarray_files("dictarr")
def test_save_load_text(self):
    """Saving with a .txt extension (or format='text') writes one value per line."""
    self._remove_single_file('txt_int_arr.txt')
    sint = SArray(self.int_data, int)
    sint.save('txt_int_arr.txt')
    self.assertTrue(os.path.exists('txt_int_arr.txt'))
    f = open('txt_int_arr.txt')
    lines = f.readlines()
    for i in range(len(sint)):
        self.assertEquals(int(lines[i]), sint[i])
    self._remove_single_file('txt_int_arr.txt')
    self._remove_single_file('txt_int_arr')
    # The explicit format argument overrides the missing extension.
    sint.save('txt_int_arr', format='text')
    self.assertTrue(os.path.exists('txt_int_arr'))
    f = open('txt_int_arr')
    lines = f.readlines()
    for i in range(len(sint)):
        self.assertEquals(int(lines[i]), sint[i])
    self._remove_single_file('txt_int_arr')
def _remove_single_file(self, filename):
    """Best-effort removal of a single file; a missing file is not an error."""
    try:
        os.remove(filename)
    # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # only filesystem errors (missing file, permissions) are expected here.
    except OSError:
        pass
def _remove_sarray_files(self, prefix):
    """Delete every entry in the CWD whose name starts with `prefix`.

    NOTE(review): shutil.rmtree assumes each match is a directory (SArray
    .sidx saves appear to be directories given this usage); a plain file
    with a matching prefix would make this raise -- confirm.
    """
    filelist = [ f for f in os.listdir(".") if f.startswith(prefix) ]
    for f in filelist:
        shutil.rmtree(f)
def test_transform(self):
    """apply() with an explicit output type, randomness, and missing values."""
    sa_char = SArray(self.url, str)
    sa_int = sa_char.apply(lambda char: ord(char), int)
    expected_output = [x for x in range(ord('a'), ord('a') + 26)]
    self.__test_equal(sa_int, expected_output, int)
    # Test randomness across segments, randomized sarray should have different elemetns.
    sa_random = SArray(range(0, 16), int).apply(lambda x: random.randint(0, 1000), int)
    vec = list(sa_random.head(sa_random.size()))
    self.assertFalse(all([x == vec[0] for x in vec]))
    # test transform with missing values
    sa = SArray([1,2,3,None,4,5])
    sa1 = sa.apply(lambda x : x + 1)
    # None entries stay None in the output.
    self.__test_equal(sa1, [2,3,4,None,5,6], int)
def test_transform_with_multiple_lambda(self):
    """Chained apply() calls each execute their own lambda correctly."""
    sa_char = SArray(self.url, str)
    sa_int = sa_char.apply(lambda char: ord(char), int)
    sa2_int = sa_int.apply(lambda val: val + 1, int)
    expected_output = [x for x in range(ord('a') + 1, ord('a') + 26 + 1)]
    self.__test_equal(sa2_int, expected_output, int)
def test_transform_with_exception(self):
    """Errors raised inside the lambda surface as Python exceptions."""
    # NOTE(review): xrange is Python 2 only.
    sa_char = SArray(['a' for i in xrange(10000)], str)
    # # type mismatch exception
    self.assertRaises(TypeError, lambda: sa_char.apply(lambda char: char, int).head(1))
    # # divide by 0 exception
    self.assertRaises(ZeroDivisionError, lambda: sa_char.apply(lambda char: ord(char) / 0, float))
def test_transform_with_type_inference(self):
    """apply() without an output type infers it from the lambda's results."""
    sa_char = SArray(self.url, str)
    sa_int = sa_char.apply(lambda char: ord(char))
    expected_output = [x for x in range(ord('a'), ord('a') + 26)]
    self.__test_equal(sa_int, expected_output, int)
    # Boolean lambda results are inferred as int.
    sa_bool = sa_char.apply(lambda char: ord(char) > ord('c'))
    expected_output = [int(x > ord('c')) for x in range(ord('a'), ord('a') + 26)]
    self.__test_equal(sa_bool, expected_output, int)
    # # divide by 0 exception
    self.assertRaises(ZeroDivisionError, lambda: sa_char.apply(lambda char: ord(char) / 0))
    # Test randomness across segments, randomized sarray should have different elemetns.
    sa_random = SArray(range(0, 16), int).apply(lambda x: random.randint(0, 1000))
    vec = list(sa_random.head(sa_random.size()))
    self.assertFalse(all([x == vec[0] for x in vec]))
def test_transform_on_lists(self):
    """apply() converting between int, list, array.array and dict elements."""
    sa_int = SArray(self.int_data, int)
    sa_vec2 = sa_int.apply(lambda x: [x, x+1, str(x)])
    expected = [[i, i + 1, str(i)] for i in self.int_data]
    self.__test_equal(sa_vec2, expected, list)
    sa_int_again = sa_vec2.apply(lambda x: int(x[0]))
    self.__test_equal(sa_int_again, self.int_data, int)
    # transform from vector to vector
    sa_vec = SArray(self.vec_data, array.array)
    sa_vec2 = sa_vec.apply(lambda x: x)
    self.__test_equal(sa_vec2, self.vec_data, array.array)
    # transform on list
    sa_list = SArray(self.list_data, list)
    sa_list2 = sa_list.apply(lambda x: x)
    self.__test_equal(sa_list2, self.list_data, list)
    # transform dict to list
    sa_dict = SArray(self.dict_data, dict)
    # NOTE(review): dict.keys() returning a list is Python 2 behaviour.
    sa_list = sa_dict.apply(lambda x: x.keys())
    self.__test_equal(sa_list, [x.keys() for x in self.dict_data], list)
def test_transform_dict(self):
    """Lambdas may both consume and produce dict elements."""
    # lambda accesses dict
    sa_dict = SArray([{'a':1}, {1:2}, {'c': 'a'}, None], dict)
    # NOTE(review): dict.has_key is Python 2 only.
    sa_bool_r = sa_dict.apply(lambda x: x.has_key('a') if x != None else None, skip_undefined=False)
    expected_output = [1, 0, 0, None]
    self.__test_equal(sa_bool_r, expected_output, int)
    # lambda returns dict
    expected_output = [{'a':1}, {1:2}, None, {'c': 'a'}]
    sa_dict = SArray(expected_output, dict)
    lambda_out = sa_dict.apply(lambda x: x)
    self.__test_equal(lambda_out, expected_output, dict)
def test_filter_dict(self):
    """filter() keeps only the dict elements for which the predicate holds."""
    # NOTE(review): `data` is assigned but never used below.
    data = [{'a':1}, {1:2}, None, {'c': 'a'}]
    expected_output = [{'a':1}]
    sa_dict = SArray(expected_output, dict)
    ret = sa_dict.filter(lambda x: x.has_key('a'))
    self.__test_equal(ret, expected_output, dict)
    # try second time to make sure the lambda system still works
    expected_output = [{1:2}]
    sa_dict = SArray(expected_output, dict)
    lambda_out = sa_dict.filter(lambda x: x.has_key(1))
    self.__test_equal(lambda_out, expected_output, dict)
def test_filter(self):
    """filter() with lambdas, named functions, and boolean-SArray indexing."""
    # test empty
    s = SArray([], float)
    no_change = s.filter(lambda x : x == 0)
    self.assertEqual(no_change.size(), 0)
    # test normal case
    s = SArray(self.int_data, int)
    middle_of_array = s.filter(lambda x: x > 3 and x < 8)
    self.assertEqual(list(middle_of_array.head(10)), [x for x in range(4,8)])
    # test normal string case
    s = SArray(self.string_data, str)
    exp_val_list = [x for x in self.string_data if x != 'world']
    # Remove all words whose second letter is not in the first half of the alphabet
    second_letter = s.filter(lambda x: len(x) > 1 and (ord(x[1]) > ord('a')) and (ord(x[1]) < ord('n')))
    self.assertEqual(list(second_letter.head(10)), exp_val_list)
    # test not-a-lambda
    def a_filter_func(x):
        return ((x > 4.4) and (x < 6.8))
    s = SArray(self.int_data, float)
    another = s.filter(a_filter_func)
    self.assertEqual(list(another.head(10)), [5.,6.])
    sa = SArray(self.float_data)
    # filter by self
    sa2 = sa[sa]
    self.assertEquals(list(sa.head(10)), list(sa2.head(10)))
    # filter by zeros
    sa_filter = SArray([0,0,0,0,0,0,0,0,0,0])
    sa2 = sa[sa_filter]
    self.assertEquals(len(sa2), 0)
    # filter by wrong size
    sa_filter = SArray([0,2,5])
    with self.assertRaises(IndexError):
        sa2 = sa[sa_filter]
def test_any_all(self):
    """any()/all() semantics across element types, including empty arrays."""
    s = SArray([0,1,2,3,4,5,6,7,8,9], int)
    self.assertEqual(s.any(), True)
    self.assertEqual(s.all(), False)
    s = SArray([0,0,0,0,0], int)
    self.assertEqual(s.any(), False)
    self.assertEqual(s.all(), False)
    s = SArray(self.string_data, str)
    self.assertEqual(s.any(), True)
    self.assertEqual(s.all(), True)
    s = SArray(self.int_data, int)
    self.assertEqual(s.any(), True)
    self.assertEqual(s.all(), True)
    # test empty
    s = SArray([], int)
    # Vacuous truth: any() of nothing is False, all() of nothing is True.
    self.assertEqual(s.any(), False)
    self.assertEqual(s.all(), True)
    s = SArray([[], []], array.array)
    self.assertEqual(s.any(), False)
    self.assertEqual(s.all(), False)
    s = SArray([[],[1.0]], array.array)
    self.assertEqual(s.any(), True)
    self.assertEqual(s.all(), False)
def test_astype(self):
    """astype(): dtype conversions, undefined-on-failure mode, and parsing of
    string representations into vector/list/dict values.

    Fixed: replaced map() with list comprehensions — on Python 3 map()
    returns an iterator, so SArray(map(...)) and assertEqual(list, map-obj)
    would misbehave; the comprehensions yield identical lists on Python 2.
    """
    # test empty
    s = SArray([], int)
    as_out = s.astype(float)
    self.assertEqual(as_out.dtype(), float)
    # test float -> int (fractional part is dropped)
    s = SArray([x + 0.2 for x in self.float_data], float)
    as_out = s.astype(int)
    self.assertEqual(list(as_out.head(10)), self.int_data)
    # test int->string, and that the round-trip back to int is lossless
    s = SArray(self.int_data, int)
    as_out = s.astype(str)
    self.assertEqual(list(as_out.head(10)), [str(x) for x in self.int_data])
    i_out = as_out.astype(int)
    self.assertEqual(list(i_out.head(10)), list(s.head(10)))
    # vectors cannot be cast to scalar types
    s = SArray(self.vec_data, array.array)
    with self.assertRaises(RuntimeError):
        s.astype(int)
    with self.assertRaises(RuntimeError):
        s.astype(float)
    # unparsable strings raise, unless undefined_on_failure=True maps them to None
    s = SArray(["a","1","2","3"])
    with self.assertRaises(RuntimeError):
        s.astype(int)
    self.assertEqual(list(s.astype(int,True).head(4)), [None,1,2,3])
    # strings parsed into vectors (space- or semicolon-separated)
    s = SArray(["[1 2 3]","[4;5]"])
    ret = list(s.astype(array.array).head(2))
    self.assertEqual(ret, [array.array('d',[1,2,3]),array.array('d',[4,5])])
    # strings parsed into lists
    s = SArray(["[1,\"b\",3]","[4,5]"])
    ret = list(s.astype(list).head(2))
    self.assertEqual(ret, [[1,"b",3],[4,5]])
    # strings parsed into dicts
    s = SArray(["{\"a\":2,\"b\":3}","{}"])
    ret = list(s.astype(dict).head(2))
    self.assertEqual(ret, [{"a":2,"b":3},{}])
    # tokens that are not valid numbers fall back to strings
    s = SArray(["[1abc]"])
    ret = list(s.astype(list).head(1))
    self.assertEqual(ret, [["1abc"]])
    s = SArray(["{1xyz:1a,2b:2}"])
    ret = list(s.astype(dict).head(1))
    self.assertEqual(ret, [{"1xyz":"1a","2b":2}])
def test_clip(self):
    """clip()/clip_lower()/clip_upper(): type errors on strings, int/float
    bound combinations and result-type preservation, and elementwise clipping
    of vector values."""
    # invalid types
    s = SArray(self.string_data, str)
    with self.assertRaises(RuntimeError):
        s.clip(25,26)
    with self.assertRaises(RuntimeError):
        s.clip_lower(25)
    with self.assertRaises(RuntimeError):
        s.clip_upper(26)
    # int w/ int, test lower and upper functions too
    # int w/float, no change
    s = SArray(self.int_data, int)
    clip_out = s.clip(3,7).head(10)
    # test that our list isn't cast to float if nothing happened
    clip_out_nc = s.clip(0.2, 10.2).head(10)
    lclip_out = s.clip_lower(3).head(10)
    rclip_out = s.clip_upper(7).head(10)
    self.assertEqual(len(clip_out), len(self.int_data))
    self.assertEqual(len(lclip_out), len(self.int_data))
    self.assertEqual(len(rclip_out), len(self.int_data))
    # self.int_data is 1..10, so indices < 2 are below the lower bound
    # and indices > 6 are above the upper bound
    for i in range(0,len(clip_out)):
        if i < 2:
            self.assertEqual(clip_out[i], 3)
            self.assertEqual(lclip_out[i], 3)
            self.assertEqual(rclip_out[i], self.int_data[i])
            self.assertEqual(clip_out_nc[i], self.int_data[i])
        elif i > 6:
            self.assertEqual(clip_out[i], 7)
            self.assertEqual(lclip_out[i], self.int_data[i])
            self.assertEqual(rclip_out[i], 7)
            self.assertEqual(clip_out_nc[i], self.int_data[i])
        else:
            self.assertEqual(clip_out[i], self.int_data[i])
            self.assertEqual(clip_out_nc[i], self.int_data[i])
    # int w/float, change
    # float w/int
    # float w/float
    clip_out = s.clip(2.8, 7.2).head(10)
    fs = SArray(self.float_data, float)
    ficlip_out = fs.clip(3, 7).head(10)
    ffclip_out = fs.clip(2.8, 7.2).head(10)
    for i in range(0,len(clip_out)):
        if i < 2:
            self.assertAlmostEqual(clip_out[i], 2.8)
            self.assertAlmostEqual(ffclip_out[i], 2.8)
            self.assertAlmostEqual(ficlip_out[i], 3.)
        elif i > 6:
            self.assertAlmostEqual(clip_out[i], 7.2)
            self.assertAlmostEqual(ffclip_out[i], 7.2)
            self.assertAlmostEqual(ficlip_out[i], 7.)
        else:
            self.assertAlmostEqual(clip_out[i], self.float_data[i])
            self.assertAlmostEqual(ffclip_out[i], self.float_data[i])
            self.assertAlmostEqual(ficlip_out[i], self.float_data[i])
    # vector clip: each element of each vector is clipped independently
    vs = SArray(self.vec_data, array.array);
    clipvs = vs.clip(3, 7).head(100)
    self.assertEqual(len(clipvs), len(self.vec_data));
    for i in range(0, len(clipvs)):
        a = clipvs[i]
        b = self.vec_data[i]
        self.assertEqual(len(a), len(b))
        # clip the reference vector in Python and compare
        for j in range(0, len(b)):
            if b[j] < 3:
                b[j] = 3
            elif b[j] > 7:
                b[j] = 7
        self.assertEqual(a, b)
def test_missing(self):
s=SArray(self.int_data, int)
self.assertEqual(s.num_missing(), 0)
s=SArray(self.int_data + [None], int)
self.assertEqual(s.num_missing(), 1)
s=SArray(self.float_data, float)
self.assertEqual(s.num_missing(), 0)
s=SArray(self.float_data + [None], float)
self.assertEqual(s.num_missing(), 1)
s=SArray(self.string_data, str)
self.assertEqual(s.num_missing(), 0)
s=SArray(self.string_data + [None], str)
self.assertEqual(s.num_missing(), 1)
s=SArray(self.vec_data, array.array)
self.assertEqual(s.num_missing(), 0)
s=SArray(self.vec_data + [None], array.array)
self.assertEqual(s.num_missing(), 1)
def test_nonzero(self):
    """nnz() counts non-zero (and, for strings, non-empty) entries."""
    # empty array has no non-zeros
    self.assertEqual(SArray([], int).nnz(), 0)
    # every element of the float fixture is non-zero
    self.assertEqual(SArray(self.float_data, float).nnz(),
                     len(self.float_data))
    # ten zeros -> zero non-zeros
    self.assertEqual(SArray([0] * 10, int).nnz(), 0)
    # the empty string counts as "zero"
    strings_plus_empty = copy.deepcopy(self.string_data)
    strings_plus_empty.append("")
    self.assertEqual(SArray(strings_plus_empty, str).nnz(),
                     len(self.string_data))
def test_std_var(self):
    """std()/var(): None on empty input, population variance by default,
    the ddof parameter, type errors on strings, and big-int inputs."""
    # test empty
    s = SArray([], int)
    self.assertTrue(s.std() is None)
    self.assertTrue(s.var() is None)
    # increasing ints (1..10): population variance is 8.25
    s = SArray(self.int_data, int)
    self.assertAlmostEqual(s.var(), 8.25)
    self.assertAlmostEqual(s.std(), 2.8722813)
    # increasing floats
    s = SArray(self.float_data, float)
    self.assertAlmostEqual(s.var(), 8.25)
    self.assertAlmostEqual(s.std(), 2.8722813)
    # vary ddof (denominator becomes n - ddof)
    self.assertAlmostEqual(s.var(ddof=3), 11.7857143)
    self.assertAlmostEqual(s.var(ddof=6), 20.625)
    self.assertAlmostEqual(s.var(ddof=9), 82.5)
    self.assertAlmostEqual(s.std(ddof=3), 3.4330328)
    self.assertAlmostEqual(s.std(ddof=6), 4.5414755)
    self.assertAlmostEqual(s.std(ddof=9), 9.08295106)
    # bad ddof (>= number of elements)
    with self.assertRaises(RuntimeError):
        s.var(ddof=11)
    with self.assertRaises(RuntimeError):
        s.std(ddof=11)
    # bad type
    s = SArray(self.string_data, str)
    with self.assertRaises(RuntimeError):
        s.std()
    with self.assertRaises(RuntimeError):
        s.var()
    # overflow test: variance computed in floating point, no int overflow
    huge_int = 9223372036854775807
    s = SArray([1, huge_int], int)
    self.assertAlmostEqual(s.var(), 21267647932558653957237540927630737409.0)
    self.assertAlmostEqual(s.std(), 4611686018427387900.0)
def test_tail(self):
    """tail() returns the last rows (default 10); requests larger than the
    array clamp to the whole array."""
    # empty array -> empty tail
    self.assertEqual(len(SArray([], int).tail()), 0)
    data = [x for x in range(0, 40)]
    s = SArray(data, int)
    self.assertEqual(s.tail(), data[30:])    # default is the last 10
    self.assertEqual(s.tail(3), data[37:])   # smaller amount
    self.assertEqual(s.tail(40), data)       # exactly everything
    self.assertEqual(s.tail(81), data)       # more than everything
def test_max_min_sum_mean(self):
    """max()/min()/sum()/mean(): signed data, type errors on strings, empty
    input returning None, int64 overflow behavior, and elementwise vector
    sum/mean (with max/min unsupported for vectors).

    Fixed: the all-negative fixture used map(), which returns an iterator on
    Python 3; a list comprehension yields the identical list on Python 2.
    """
    # negative and positive
    s = SArray([-2,-1,0,1,2], int)
    self.assertEqual(s.max(), 2)
    self.assertEqual(s.min(), -2)
    self.assertEqual(s.sum(), 0)
    self.assertAlmostEqual(s.mean(), 0.)
    # test valid and invalid types
    s = SArray(self.string_data, str)
    with self.assertRaises(RuntimeError):
        s.max()
    with self.assertRaises(RuntimeError):
        s.min()
    with self.assertRaises(RuntimeError):
        s.sum()
    with self.assertRaises(RuntimeError):
        s.mean()
    s = SArray(self.int_data, int)
    self.assertEqual(s.max(), 10)
    self.assertEqual(s.min(), 1)
    self.assertEqual(s.sum(), 55)
    self.assertAlmostEqual(s.mean(), 5.5)
    s = SArray(self.float_data, float)
    self.assertEqual(s.max(), 10.)
    self.assertEqual(s.min(), 1.)
    self.assertEqual(s.sum(), 55.)
    self.assertAlmostEqual(s.mean(), 5.5)
    # test all negative
    s = SArray([-x for x in self.int_data], int)
    self.assertEqual(s.max(), -1)
    self.assertEqual(s.min(), -10)
    self.assertEqual(s.sum(), -55)
    self.assertAlmostEqual(s.mean(), -5.5)
    # test empty
    s = SArray([], float)
    self.assertTrue(s.max() is None)
    self.assertTrue(s.min() is None)
    self.assertTrue(s.sum() is None)
    self.assertTrue(s.mean() is None)
    # test big ints
    huge_int = 9223372036854775807
    s = SArray([1, huge_int], int)
    self.assertEqual(s.max(), huge_int)
    self.assertEqual(s.min(), 1)
    # yes, we overflow: int64 sum wraps around
    self.assertEqual(s.sum(), (huge_int+1)*-1)
    # ...but not here: mean is computed in floating point
    self.assertAlmostEqual(s.mean(), 4611686018427387904.)
    # vectors: sum and mean are elementwise, max/min are unsupported
    a = SArray([[1,2],[1,2],[1,2]], array.array)
    self.assertEqual(a.sum(), array.array('d', [3,6]))
    self.assertEqual(a.mean(), array.array('d', [1,2]))
    with self.assertRaises(RuntimeError):
        a.max()
    with self.assertRaises(RuntimeError):
        a.min()
    # ragged vectors cannot be summed or averaged
    a = SArray([[1,2],[1,2],[1,2,3]], array.array)
    with self.assertRaises(RuntimeError):
        a.sum()
    with self.assertRaises(RuntimeError):
        a.mean()
def test_python_special_functions(self):
    """Python protocol hooks: len(), str(), bool(), and iteration all agree
    with the underlying data."""
    s = SArray([], int)
    self.assertEqual(len(s), 0)
    self.assertEqual(str(s), '[]')
    self.assertEqual(bool(s), False)
    # increasing ints
    s = SArray(self.int_data, int)
    self.assertEqual(len(s), len(self.int_data))
    self.assertEqual(str(s), str(self.int_data))
    self.assertEqual(bool(s), True)
    # three ways of summing must agree: iteration, sum(), and apply().sum()
    realsum = sum(self.int_data)
    sum1 = sum([x for x in s])
    sum2 = s.sum()
    sum3 = s.apply(lambda x:x, int).sum()
    self.assertEquals(sum1, realsum)
    self.assertEquals(sum2, realsum)
    self.assertEquals(sum3, realsum)
def test_scalar_operators(self):
s=np.array([1,2,3,4,5,6,7,8,9,10]);
t = SArray(s, int)
self.__test_equal(t + 1, list(s + 1), int)
self.__test_equal(t - 1, list(s - 1), int)
# we handle division differently. All divisions cast to float
self.__test_equal(t / 2, list(s / 2.0), float)
self.__test_equal(t * 2, list(s * 2), int)
self.__test_equal(t < 5, list(s < 5), int)
self.__test_equal(t > 5, list(s > 5), int)
self.__test_equal(t <= 5, list(s <= 5), int)
self.__test_equal(t >= 5, list(s >= 5), int)
self.__test_equal(t == 5, list(s == 5), int)
self.__test_equal(t != 5, list(s != 5), int)
self.__test_equal(1.5 + t, list(1.5 + s), float)
self.__test_equal(1.5 - t, list(1.5 - s), float)
self.__test_equal(2.0 / t, list(2.0 / s), float)
self.__test_equal(2 / t, list(2.0 / s), float)
self.__test_equal(2.5 * t, list(2.5 * s), float)
s=["a","b","c"]
t = SArray(s, str)
self.__test_equal(t + "x", [i + "x" for i in s], str)
with self.assertRaises(RuntimeError):
t - 'x'
with self.assertRaises(RuntimeError):
t * 'x'
with self.assertRaises(RuntimeError):
t / 'x'
s = SArray(self.vec_data, array.array)
self.__test_equal(s + 1, [array.array('d', [float(j) + 1 for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s - 1, [array.array('d', [float(j) - 1 for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s * 2, [array.array('d', [float(j) * 2 for j in i]) for i in self.vec_data], array.array)
self.__test_equal(s / 2, [array.array('d', [float(j) / 2 for j in i]) for i in self.vec_data], array.array)
s = SArray([1,2,3,4,None])
self.__test_equal(s == None, [0,0,0,0,1], int)
self.__test_equal(s != None, [1,1,1,1,0], int)
def test_vector_operators(self):
    """SArray op SArray: elementwise arithmetic and comparisons for ints,
    vector-vector and vector-scalar-column arithmetic, and self-comparison
    with missing values."""
    s=np.array([1,2,3,4,5,6,7,8,9,10]);
    s2=np.array([5,4,3,2,1,10,9,8,7,6]);
    t = SArray(s, int)
    t2 = SArray(s2, int)
    self.__test_equal(t + t2, list(s + s2), int)
    self.__test_equal(t - t2, list(s - s2), int)
    # we handle division differently. All divisions cast to float
    self.__test_equal(t / t2, list(s.astype(float) / s2), float)
    self.__test_equal(t * t2, list(s * s2), int)
    self.__test_equal(t < t2, list(s < s2), int)
    self.__test_equal(t > t2, list(s > s2), int)
    self.__test_equal(t <= t2, list(s <= s2), int)
    self.__test_equal(t >= t2, list(s >= s2), int)
    self.__test_equal(t == t2, list(s == s2), int)
    self.__test_equal(t != t2, list(s != s2), int)
    # vector-vector arithmetic is elementwise per row
    s = SArray(self.vec_data, array.array)
    self.__test_equal(s + s, [array.array('d', [float(j) + float(j) for j in i]) for i in self.vec_data], array.array)
    self.__test_equal(s - s, [array.array('d', [float(j) - float(j) for j in i]) for i in self.vec_data], array.array)
    self.__test_equal(s * s, [array.array('d', [float(j) * float(j) for j in i]) for i in self.vec_data], array.array)
    self.__test_equal(s / s, [array.array('d', [float(j) / float(j) for j in i]) for i in self.vec_data], array.array)
    # vector column op float column: the scalar broadcasts across each vector
    t = SArray(self.float_data, float)
    self.__test_equal(s + t, [array.array('d', [float(j) + i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
    self.__test_equal(s - t, [array.array('d', [float(j) - i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
    self.__test_equal(s * t, [array.array('d', [float(j) * i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
    self.__test_equal(s / t, [array.array('d', [float(j) / i[1] for j in i[0]]) for i in zip(self.vec_data, self.float_data)], array.array)
    # self-comparison: an array equals itself even with a missing value
    s = SArray([1,2,3,4,None])
    self.assertTrue((s==s).all())
    s = SArray([1,2,3,4,None])
    self.assertFalse((s!=s).any())
def test_logical_ops(self):
s=np.array([0,0,0,0,1,1,1,1]);
s2=np.array([0,1,0,1,0,1,0,1]);
t = SArray(s, int)
t2 = SArray(s2, int)
self.__test_equal(t & t2, list(((s & s2) > 0).astype(int)), int)
self.__test_equal(t | t2, list(((s | s2) > 0).astype(int)), int)
def test_string_operators(self):
s=["a","b","c","d","e","f","g","h","i","j"];
s2=["e","d","c","b","a","j","i","h","g","f"];
t = SArray(s, str)
t2 = SArray(s2, str)
self.__test_equal(t + t2, ["".join(x) for x in zip(s,s2)], str)
self.__test_equal(t + "x", [x + "x" for x in s], str)
self.__test_equal(t < t2, [x < y for (x,y) in zip(s,s2)], int)
self.__test_equal(t > t2, [x > y for (x,y) in zip(s,s2)], int)
self.__test_equal(t == t2, [x == y for (x,y) in zip(s,s2)], int)
self.__test_equal(t != t2, [x != y for (x,y) in zip(s,s2)], int)
self.__test_equal(t <= t2, [x <= y for (x,y) in zip(s,s2)], int)
self.__test_equal(t >= t2, [x >= y for (x,y) in zip(s,s2)], int)
def test_vector_operator_missing_propagation(self):
    """Arithmetic between two SArrays propagates missing values (None/NaN)
    from either operand."""
    lhs = SArray([1,2,3,4,None,6,7,8,9,None], float)    # missing 4th and 9th
    rhs = SArray([None,4,3,2,np.nan,10,9,8,7,6], float) # missing 0th and 4th
    # union of missing positions is {0, 4, 9}, so 7 valid results remain
    for combined in (lhs + rhs, lhs - rhs, lhs * rhs):
        self.assertEquals(len(combined.dropna()), 7)
def test_dropna(self):
    """dropna() removes only genuine missing values, not strings that merely
    look like them ('nan', 'None', ...)."""
    # none of these strings are actual missing values
    na_lookalikes = ['strings', 'yeah', 'nan', 'NaN', 'NA', 'None']
    strings = SArray(na_lookalikes)
    self.assertEquals(len(strings.dropna()), 6)
    self.assertEquals(list(strings.dropna()), na_lookalikes)
    # real missing values (None / NaN) are all dropped
    all_missing = SArray([None, np.nan])
    self.assertEquals(len(all_missing.dropna()), 0)
    # fully-valid numeric data passes through unchanged
    self.assertEquals(list(SArray(self.int_data).dropna()), self.int_data)
    self.assertEquals(list(SArray(self.float_data).dropna()), self.float_data)
def test_fillna(self):
    """fillna(): no-op on valid data, fills None/NaN with a (type-coerced)
    replacement for int/dict/list/vector dtypes, and handles empty arrays."""
    # fillna shouldn't fill anything
    no_nas = ['strings', 'yeah', 'nan', 'NaN', 'NA', 'None']
    t = SArray(no_nas)
    out = t.fillna('hello')
    self.assertEquals(list(out), no_nas)
    # Normal integer case (float auto casted to int)
    t = SArray([53,23,None,np.nan,5])
    self.assertEquals(list(t.fillna(-1.0)), [53,23,-1,-1,5])
    # dict type
    t = SArray(self.dict_data+[None])
    self.assertEquals(list(t.fillna({1:'1'})), self.dict_data+[{1:'1'}])
    # list type
    t = SArray(self.list_data+[None])
    self.assertEquals(list(t.fillna([0,0,0])), self.list_data+[[0,0,0]])
    # vec type
    t = SArray(self.vec_data+[None])
    self.assertEquals(list(t.fillna(array.array('f',[0.0,0.0]))), self.vec_data+[array.array('f',[0.0,0.0])])
    # empty sarray
    t = SArray()
    self.assertEquals(len(t.fillna(0)), 0)
def test_sample(self):
    """sample(): same seed gives the same sample, elements come from the
    source data, fractions > 1 are rejected, and sampling an empty array
    yields an empty array."""
    sa = SArray(data=self.int_data)
    # same fraction + same seed -> identical samples
    sa_sample = sa.sample(.5, 9)
    sa_sample2 = sa.sample(.5, 9)
    self.assertEqual(sa_sample.head(), sa_sample2.head())
    for i in sa_sample:
        self.assertTrue(i in self.int_data)
    # a sampling fraction greater than 1 is invalid
    with self.assertRaises(ValueError):
        sa.sample(3)
    sa_sample = SArray().sample(.5, 9)
    self.assertEqual(len(sa_sample), 0)
def test_vector_slice(self):
    """vector_slice(): extracts element(s) from each vector; rows shorter
    than the requested slice become None."""
    d=[[1],[1,2],[1,2,3]]
    g=SArray(d, array.array)
    self.assertEqual(list(g.vector_slice(0).head()), [1,1,1])
    # slicing [0, 2): rows with fewer than 2 elements yield None
    self.assertEqual(list(g.vector_slice(0,2).head()), [None,array.array('d', [1,2]),array.array('d', [1,2])])
    self.assertEqual(list(g.vector_slice(0,3).head()), [None,None,array.array('d', [1,2,3])])
    g=SArray(self.vec_data, array.array);
    self.__test_equal(g.vector_slice(0), self.float_data, float)
    self.__test_equal(g.vector_slice(0, 2), self.vec_data, array.array)
def test_lazy_eval(self):
    """A chain of lazy operations evaluates correctly when finally forced."""
    shifted = SArray(range(-10, 10)) + 1
    non_negative = shifted >= 0
    non_positive = shifted <= 0
    # only the single element equal to 0 satisfies both masks
    only_zero = shifted[non_negative & non_positive]
    self.assertEqual(only_zero.size(), 1)
def __test_append(self, data1, data2, dtype):
    """Helper: verify append() concatenates correctly in both orders."""
    sa1 = SArray(data1, dtype)
    sa2 = SArray(data2, dtype)
    sa3 = sa1.append(sa2)
    self.__test_equal(sa3, data1 + data2, dtype)
    sa3 = sa2.append(sa1)
    self.__test_equal(sa3, data2 + data1, dtype)
def test_append(self):
    """append(): split each fixture in half and verify both concatenation
    orders, for every supported dtype.

    Fixed: `n / 2` relied on Python 2 integer division; under Python 3 it
    yields a float and slicing raises TypeError. `n // 2` is identical on
    Python 2 and correct on Python 3.
    """
    n = len(self.int_data)
    m = n // 2
    self.__test_append(self.int_data[0:m], self.int_data[m:n], int)
    self.__test_append(self.bool_data[0:m], self.bool_data[m:n], int)
    self.__test_append(self.string_data[0:m], self.string_data[m:n], str)
    self.__test_append(self.float_data[0:m], self.float_data[m:n], float)
    self.__test_append(self.vec_data[0:m], self.vec_data[m:n], array.array)
    self.__test_append(self.dict_data[0:m], self.dict_data[m:n], dict)
def test_append_exception(self):
    """Appending an SArray of a different dtype raises RuntimeError."""
    ints = SArray([i for i in range(1, 1000)], int)
    strs = SArray([str(i) for i in range(-10, 1)], str)
    with self.assertRaises(RuntimeError):
        ints.append(strs)
def test_word_count(self):
    """_count_words(): bag-of-words dicts, with and without lowercasing,
    on ASCII and non-ASCII (CJK / Cyrillic) text; non-string input raises."""
    sa = SArray(["This is someurl http://someurl!!", "中文 应该也 行", 'Сблъсъкът между'])
    expected = [{"this": 1, "someurl": 2, "is": 1, "http": 1}, {"中文": 1, "应该也": 1, "行": 1}, {"Сблъсъкът": 1, "между": 1}]
    expected2 = [{"This": 1, "someurl": 2, "is": 1, "http": 1}, {"中文": 1, "应该也": 1, "行": 1}, {"Сблъсъкът": 1, "между": 1}]
    # default lowercases before counting
    sa1 = sa._count_words()
    self.assertEquals(sa1.dtype(), dict)
    self.__test_equal(sa1, expected, dict)
    # to_lower=False preserves original case
    sa1 = sa._count_words(to_lower=False)
    self.assertEquals(sa1.dtype(), dict)
    self.__test_equal(sa1, expected2, dict)
    #should fail if the input type is not string
    sa = SArray([1, 2, 3])
    with self.assertRaises(TypeError):
        sa._count_words()
def test_ngram_count(self):
    """_count_ngrams(): word and character n-grams with every combination of
    to_lower / ignore_space, plus argument validation (bad type, bad n,
    bad method) and the too-large-n warning."""
    sa_word = SArray(["I like big dogs. They are fun. I LIKE BIG DOGS", "I like.", "I like big"])
    sa_character = SArray(["Fun. is. fun","Fun is fun.","fu", "fun"])
    # Testing word n-gram functionality
    result = sa_word._count_ngrams(3)
    result2 = sa_word._count_ngrams(2)
    result3 = sa_word._count_ngrams(3,"word", to_lower=False)
    result4 = sa_word._count_ngrams(2,"word", to_lower=False)
    expected = [{'fun i like': 1, 'i like big': 2, 'they are fun': 1, 'big dogs they': 1, 'like big dogs': 2, 'are fun i': 1, 'dogs they are': 1}, {}, {'i like big': 1}]
    expected2 = [{'i like': 2, 'dogs they': 1, 'big dogs': 2, 'are fun': 1, 'like big': 2, 'they are': 1, 'fun i': 1}, {'i like': 1}, {'i like': 1, 'like big': 1}]
    expected3 = [{'I like big': 1, 'fun I LIKE': 1, 'I LIKE BIG': 1, 'LIKE BIG DOGS': 1, 'They are fun': 1, 'big dogs They': 1, 'like big dogs': 1, 'are fun I': 1, 'dogs They are': 1}, {}, {'I like big': 1}]
    expected4 = [{'I like': 1, 'like big': 1, 'I LIKE': 1, 'BIG DOGS': 1, 'are fun': 1, 'LIKE BIG': 1, 'big dogs': 1, 'They are': 1, 'dogs They': 1, 'fun I': 1}, {'I like': 1}, {'I like': 1, 'like big': 1}]
    self.assertEquals(result.dtype(), dict)
    self.__test_equal(result, expected, dict)
    self.assertEquals(result2.dtype(), dict)
    self.__test_equal(result2, expected2, dict)
    self.assertEquals(result3.dtype(), dict)
    self.__test_equal(result3, expected3, dict)
    self.assertEquals(result4.dtype(), dict)
    self.__test_equal(result4, expected4, dict)
    #Testing character n-gram functionality
    result5 = sa_character._count_ngrams(3, "character")
    result6 = sa_character._count_ngrams(2, "character")
    result7 = sa_character._count_ngrams(3, "character", to_lower=False)
    result8 = sa_character._count_ngrams(2, "character", to_lower=False)
    result9 = sa_character._count_ngrams(3, "character", to_lower=False, ignore_space=False)
    result10 = sa_character._count_ngrams(2, "character", to_lower=False, ignore_space=False)
    result11 = sa_character._count_ngrams(3, "character", to_lower=True, ignore_space=False)
    result12 = sa_character._count_ngrams(2, "character", to_lower=True, ignore_space=False)
    expected5 = [{'fun': 2, 'nis': 1, 'sfu': 1, 'isf': 1, 'uni': 1}, {'fun': 2, 'nis': 1, 'sfu': 1, 'isf': 1, 'uni': 1}, {}, {'fun': 1}]
    expected6 = [{'ni': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 2}, {'ni': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 2}, {'fu': 1}, {'un': 1, 'fu': 1}]
    expected7 = [{'sfu': 1, 'Fun': 1, 'uni': 1, 'fun': 1, 'nis': 1, 'isf': 1}, {'sfu': 1, 'Fun': 1, 'uni': 1, 'fun': 1, 'nis': 1, 'isf': 1}, {}, {'fun': 1}]
    expected8 = [{'ni': 1, 'Fu': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 1}, {'ni': 1, 'Fu': 1, 'is': 1, 'un': 2, 'sf': 1, 'fu': 1}, {'fu': 1}, {'un': 1, 'fu': 1}]
    expected9 = [{' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'Fun': 1, 'n i': 1, 'fun': 1, 'is ': 1}, {' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'Fun': 1, 'n i': 1, 'fun': 1, 'is ': 1}, {}, {'fun': 1}]
    expected10 = [{' f': 1, 'fu': 1, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1, 'Fu': 1}, {' f': 1, 'fu': 1, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1, 'Fu': 1}, {'fu': 1}, {'un': 1, 'fu': 1}]
    expected11 = [{' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'n i': 1, 'fun': 2, 'is ': 1}, {' fu': 1, ' is': 1, 's f': 1, 'un ': 1, 'n i': 1, 'fun': 2, 'is ': 1}, {}, {'fun': 1}]
    expected12 = [{' f': 1, 'fu': 2, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1}, {' f': 1, 'fu': 2, 'n ': 1, 'is': 1, ' i': 1, 'un': 2, 's ': 1}, {'fu': 1}, {'un': 1, 'fu': 1}]
    self.assertEquals(result5.dtype(), dict)
    self.__test_equal(result5, expected5, dict)
    self.assertEquals(result6.dtype(), dict)
    self.__test_equal(result6, expected6, dict)
    self.assertEquals(result7.dtype(), dict)
    self.__test_equal(result7, expected7, dict)
    self.assertEquals(result8.dtype(), dict)
    self.__test_equal(result8, expected8, dict)
    self.assertEquals(result9.dtype(), dict)
    self.__test_equal(result9, expected9, dict)
    self.assertEquals(result10.dtype(), dict)
    self.__test_equal(result10, expected10, dict)
    self.assertEquals(result11.dtype(), dict)
    self.__test_equal(result11, expected11, dict)
    self.assertEquals(result12.dtype(), dict)
    self.__test_equal(result12, expected12, dict)
    sa = SArray([1, 2, 3])
    with self.assertRaises(TypeError):
        #should fail if the input type is not string
        sa._count_ngrams()
    with self.assertRaises(TypeError):
        #should fail if n is not of type 'int'
        sa_word._count_ngrams(1.01)
    with self.assertRaises(ValueError):
        #should fail with invalid method
        sa_word._count_ngrams(3,"bla")
    with self.assertRaises(ValueError):
        #should fail with n <0
        sa_word._count_ngrams(0)
    # positional True is catch_warnings(record=True): captured warnings land in `context`
    with warnings.catch_warnings(True) as context:
        sa_word._count_ngrams(10)
        # an n larger than any document should emit exactly one warning
        assert len(context) == 1
def test_dict_keys(self):
    """dict_keys(): per-row key lists, None propagation, and empty-array
    behavior (typed empty works, untyped empty raises)."""
    # self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
    sa = SArray(self.dict_data)
    sa_keys = sa.dict_keys()
    self.assertEquals(sa_keys, [str(i) for i in self.int_data])
    # na value: None rows stay None; None keys are preserved
    d = [{'a': 1}, {None: 2}, {"b": None}, None]
    sa = SArray(d)
    sa_keys = sa.dict_keys()
    self.assertEquals(sa_keys, [['a'], [None], ['b'], None])
    #empty SArray
    sa = SArray()
    with self.assertRaises(RuntimeError):
        sa.dict_keys()
    # empty SArray with type
    sa = SArray([], dict)
    # NOTE(review): the trailing `list` argument is passed as assertEquals'
    # msg parameter and has no effect — likely a leftover
    self.assertEquals(list(sa.dict_keys().head(10)), [], list)
def test_dict_values(self):
    """dict_values(): per-row value lists, None propagation, and empty-array
    behavior (typed empty works, untyped empty raises)."""
    # self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
    sa = SArray(self.dict_data)
    sa_values = sa.dict_values()
    self.assertEquals(sa_values, [[i, float(i)] for i in self.int_data])
    # na value: None rows stay None; None values are preserved in the lists
    d = [{'a': 1}, {None: 'str'}, {"b": None}, None]
    sa = SArray(d)
    sa_values = sa.dict_values()
    self.assertEquals(sa_values, [[1], ['str'], [None], None])
    #empty SArray
    sa = SArray()
    with self.assertRaises(RuntimeError):
        sa.dict_values()
    # empty SArray with type
    sa = SArray([], dict)
    # NOTE(review): trailing `list` is assertEquals' msg parameter (no effect)
    self.assertEquals(list(sa.dict_values().head(10)), [], list)
def test_dict_trim_by_keys(self):
    """dict_trim_by_keys(): removes the listed keys from each dict row;
    None rows propagate; empty untyped arrays raise."""
    # self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
    d = [{'a':1, 'b': [1,2]}, {None: 'str'}, {"b": None, "c": 1}, None]
    sa = SArray(d)
    sa_values = sa.dict_trim_by_keys(['a', 'b'])
    self.assertEquals(sa_values, [{}, {None: 'str'}, {"c": 1}, None])
    #empty SArray
    sa = SArray()
    with self.assertRaises(RuntimeError):
        sa.dict_trim_by_keys([])
    sa = SArray([], dict)
    # NOTE(review): trailing `list` is assertEquals' msg parameter (no effect)
    self.assertEquals(list(sa.dict_trim_by_keys([]).head(10)), [], list)
def test_dict_trim_by_values(self):
    """dict_trim_by_values(): filters dict entries by value against optional
    lower/upper bounds; None rows propagate.

    NOTE(review): the expected outputs imply non-obvious semantics for
    None values and non-numeric entries (e.g. {'b': 20} surviving a [5,10]
    trim, and None:5 appearing as None:1 under upper=7) — verify against the
    dict_trim_by_values implementation before relying on these as a spec.
    """
    # self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]
    d = [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None]
    sa = SArray(d)
    sa_values = sa.dict_trim_by_values(5,10)
    self.assertEquals(sa_values, [{'b': 20, 'c':None}, {None:5}, None])
    # no upper key
    sa_values = sa.dict_trim_by_values(2)
    self.assertEquals(sa_values, [{'b': 20}, {"b": 4, None:5}, None])
    # no param
    sa_values = sa.dict_trim_by_values()
    self.assertEquals(sa_values, [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None])
    # no lower key
    sa_values = sa.dict_trim_by_values(upper=7)
    self.assertEquals(sa_values, [{'a':1, 'c':None}, {"b": 4, None: 1}, None])
    #empty SArray
    sa = SArray()
    with self.assertRaises(RuntimeError):
        sa.dict_trim_by_values()
    sa = SArray([], dict)
    # NOTE(review): trailing `list` is assertEquals' msg parameter (no effect)
    self.assertEquals(list(sa.dict_trim_by_values().head(10)), [], list)
def test_dict_has_any_keys(self):
    """dict_has_any_keys(): 0/1 per row for 'contains any of these keys';
    a single key auto-wraps into a list; missing argument raises."""
    d = [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None, {'a':0}]
    sa = SArray(d)
    # empty key list matches nothing
    sa_values = sa.dict_has_any_keys([])
    self.assertEquals(sa_values, [0,0,0,0])
    sa_values = sa.dict_has_any_keys(['a'])
    self.assertEquals(sa_values, [1,0,0,1])
    # one value is auto convert to list
    sa_values = sa.dict_has_any_keys("a")
    self.assertEquals(sa_values, [1,0,0,1])
    sa_values = sa.dict_has_any_keys(['a', 'b'])
    self.assertEquals(sa_values, [1,1,0,1])
    with self.assertRaises(TypeError):
        sa.dict_has_any_keys()
    #empty SArray
    sa = SArray()
    with self.assertRaises(TypeError):
        sa.dict_has_any_keys()
    sa = SArray([], dict)
    # NOTE(review): trailing `list` is assertEquals' msg parameter (no effect)
    self.assertEquals(list(sa.dict_has_any_keys([]).head(10)), [], list)
def test_dict_has_all_keys(self):
    """dict_has_all_keys(): 0/1 per row for 'contains all of these keys';
    the empty key list is vacuously true for non-None rows; a single key
    auto-wraps into a list; missing argument raises."""
    d = [{'a':1, 'b': 20, 'c':None}, {"b": 4, None: 5}, None, {'a':0}]
    sa = SArray(d)
    # vacuous truth: every non-None dict contains all of []
    sa_values = sa.dict_has_all_keys([])
    self.assertEquals(sa_values, [1,1,0,1])
    sa_values = sa.dict_has_all_keys(['a'])
    self.assertEquals(sa_values, [1,0,0,1])
    # one value is auto convert to list
    sa_values = sa.dict_has_all_keys("a")
    self.assertEquals(sa_values, [1,0,0,1])
    sa_values = sa.dict_has_all_keys(['a', 'b'])
    self.assertEquals(sa_values, [1,0,0,0])
    # None is a legal key to test for
    sa_values = sa.dict_has_all_keys([None, "b"])
    self.assertEquals(sa_values, [0,1,0,0])
    with self.assertRaises(TypeError):
        sa.dict_has_all_keys()
    #empty SArray
    sa = SArray()
    with self.assertRaises(TypeError):
        sa.dict_has_all_keys()
    sa = SArray([], dict)
    # NOTE(review): trailing `list` is assertEquals' msg parameter (no effect)
    self.assertEquals(list(sa.dict_has_all_keys([]).head(10)), [], list)
def test_save_load_cleanup_file(self):
    """save()/load lifecycle on disk: saving over an existing location keeps
    the previously-loaded SArray readable, and temp-file cleanup is checked
    after the old reference is dropped."""
    # simlarly for SArray
    with util.TempDirectory() as f:
        sa = SArray(range(1,1000000))
        sa.save(f)
        # 17 for each sarray, 1 object.bin, 1 ini
        file_count = len(os.listdir(f))
        self.assertTrue(file_count > 2)
        # sf1 now references the on disk file
        sa1 = SArray(f);
        # create another SFrame and save to the same location
        sa2 = SArray([str(i) for i in range(1,100000)])
        sa2.save(f)
        file_count = len(os.listdir(f))
        self.assertTrue(file_count > 2)
        # now sf1 should still be accessible
        self.__test_equal(sa1, list(sa), int)
        # and sf2 is correct too
        sa3 = SArray(f)
        self.__test_equal(sa3, list(sa2), str)
        # when sf1 goes out of scope, the tmp files should be gone
        sa1 = 1
        time.sleep(1) # give time for the files being deleted
        file_count = len(os.listdir(f))
        # NOTE(review): the comment above says files "should be gone" but the
        # assertion only checks > 2 (the new save's files remain) — confirm
        # whether a stronger cleanup check was intended
        self.assertTrue(file_count > 2)
# list_to_compare must have all unique values for this to work
def __generic_unique_test(self, list_to_compare):
    """Helper: duplicating the input must not change the set of unique values."""
    doubled = SArray(list_to_compare + list_to_compare)
    self.assertEquals(sorted(list(doubled.unique())), sorted(list_to_compare))
def test_unique(self):
    """unique(): empty, singleton, all-duplicate, and mixed inputs across
    dtypes; order is not preserved; dict dtype is unsupported."""
    # Test empty SArray
    test = SArray([])
    self.assertEquals(list(test.unique()), [])
    # Test one value
    test = SArray([1])
    self.assertEquals(list(test.unique()), [1])
    # Test many of one value
    test = SArray([1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
    self.assertEquals(list(test.unique()), [1])
    # Test all unique values
    test = SArray(self.int_data)
    self.assertEquals(sorted(list(test.unique())), self.int_data)
    # Test an interesting sequence
    interesting_ints = [4654,4352436,5453,7556,45435,4654,5453,4654,5453,1,1,1,5,5,5,8,66,7,7,77,90,-34]
    test = SArray(interesting_ints)
    u = test.unique()
    self.assertEquals(len(u), 13)
    # We do not preserve order
    self.assertEquals(sorted(list(u)), sorted(np.unique(interesting_ints)))
    # Test other types
    self.__generic_unique_test(self.string_data[0:6])
    # only works reliably because these are values that floats can perform
    # reliable equality tests
    self.__generic_unique_test(self.float_data)
    self.__generic_unique_test(self.list_data)
    self.__generic_unique_test(self.vec_data)
    # dicts are unhashable -> unique() is unsupported
    with self.assertRaises(TypeError):
        SArray(self.dict_data).unique()
def test_item_len(self):
    """item_length(): per-row element counts for vector/dict/list dtypes with
    None propagation; scalar and string dtypes raise TypeError."""
    # empty SArray
    test = SArray([])
    with self.assertRaises(TypeError):
        self.assertEquals(test.item_length())
    # wrong type
    test = SArray([1,2,3])
    with self.assertRaises(TypeError):
        self.assertEquals(test.item_length())
    test = SArray(['1','2','3'])
    with self.assertRaises(TypeError):
        self.assertEquals(test.item_length())
    # vector type
    test = SArray([[], [1], [1,2], [1,2,3], None])
    item_length = test.item_length();
    self.assertEquals(list(item_length), list([0, 1,2,3,None]))
    # dict type
    test = SArray([{}, {'key1': 1}, {'key2':1, 'key1':2}, None])
    self.assertEquals(list(test.item_length()), list([0, 1,2,None]))
    # list type
    test = SArray([[], [1,2], ['str', 'str2'], None])
    self.assertEquals(list(test.item_length()), list([0, 2,2,None]))
def test_random_access(self):
    """Indexing and slicing match Python list semantics: positive/negative
    slices with strides, single-element reads (including around the 32K
    read-cache boundary), degenerate slices, and out-of-range errors."""
    t = list(range(0,100000))
    s = SArray(t)
    # simple slices
    self.__test_equal(s[1:10000], t[1:10000], int)
    self.__test_equal(s[0:10000:3], t[0:10000:3], int)
    self.__test_equal(s[1:10000:3], t[1:10000:3], int)
    self.__test_equal(s[2:10000:3], t[2:10000:3], int)
    self.__test_equal(s[3:10000:101], t[3:10000:101], int)
    # negative slices
    self.__test_equal(s[-5:], t[-5:], int)
    self.__test_equal(s[-1:], t[-1:], int)
    self.__test_equal(s[-100:-10], t[-100:-10], int)
    self.__test_equal(s[-100:-10:2], t[-100:-10:2], int)
    # single element reads
    self.assertEquals(s[511], t[511])
    self.assertEquals(s[1912], t[1912])
    self.assertEquals(s[-1], t[-1])
    self.assertEquals(s[-10], t[-10])
    # A cache boundary
    self.assertEquals(s[32*1024-1], t[32*1024-1])
    self.assertEquals(s[32*1024], t[32*1024])
    # totally different
    self.assertEquals(s[19312], t[19312])
    # edge case odities (empty and degenerate slices)
    self.__test_equal(s[10:100:100], t[10:100:100], int)
    self.__test_equal(s[-100:len(s):10], t[-100:len(t):10], int)
    self.__test_equal(s[-1:-2], t[-1:-2], int)
    self.__test_equal(s[-1:-1000:2], t[-1:-1000:2], int)
    with self.assertRaises(IndexError):
        s[len(s)]
    # with caching abilities; these should be fast, as 32K
    # elements are cached.
    for i in range(0, 100000, 100):
        self.assertEquals(s[i], t[i])
    for i in range(0, 100000, 100):
        self.assertEquals(s[-i], t[-i])
def test_sort(self):
    """sort() orders ascending by default and descending with
    ascending=False; list-typed SArrays cannot be sorted."""
    unsorted = SArray([1,2,3,5,1,4])
    self.assertEqual(unsorted.sort(), SArray([1,1,2,3,4,5]))
    self.assertEqual(unsorted.sort(ascending = False), SArray([5,4,3,2,1,1]))
    with self.assertRaises(TypeError):
        SArray([[1,2], [2,3]]).sort()
def test_unicode_encode_should_not_fail(self):
    """Constructing SArrays from unicode values (in dicts, pure unicode
    lists, and mixed str/unicode lists) must not raise."""
    g=SArray([{'a':u'\u2019'}])
    g=SArray([u'123',u'\u2019'])
    g=SArray(['123',u'\u2019'])
def test_read_from_avro(self):
    # Raw bytes of a 2-record Avro container file (a Yelp "review" record
    # schema), captured verbatim; written with mode "wb" below, which works
    # because on Python 2 `str` is a byte string.
    data = """Obj\x01\x04\x16avro.schema\xec\x05{"fields": [{"type": "string", "name": "business_id"}, {"type": "string", "name": "date"}, {"type": "string", "name": "review_id"}, {"type": "int", "name": "stars"}, {"type": "string", "name": "text"}, {"type": "string", "name": "type"}, {"type": "string", "name": "user_id"}, {"type": {"type": "map", "values": "int"}, "name": "votes"}], "type": "record", "name": "review"}\x14avro.codec\x08null\x00\x0e7\x91\x0b#.\x8f\xa2H%<G\x9c\x89\x93\xfb\x04\xe8 ,sgBl3UDEcNYKwuUb92CYdA\x142009-01-25,Zj-R0ZZqIKFx56LY2su1iQ\x08\x80\x19The owner of China King had never heard of Yelp...until Jim W rolled up on China King!\n\nThe owner of China King, Michael, is very friendly and chatty. Be Prepared to chat for a few minutes if you strike up a conversation.\n\nThe service here was terrific. We had several people fussing over us but the primary server, Maggie was a gem. \n\nMy wife and the kids opted for the Americanized menu and went with specials like sweet and sour chicken, shrimp in white sauce and garlic beef. Each came came with soup, egg roll and rice. I sampled the garlic beef which they prepared with a kung pao brown sauce (a decision Maggie and my wife arrived at after several minutes of discussion) it had a nice robust flavor and the veggies were fresh and flavorful. I also sampled the shrimp which were succulent and the white sauce had a little more distinctiveness to it than the same sauce at many Chinese restaurants.\n\nI ordered from the traditional menu but went not too adventurous with sizzling plate with scallops and shrimp in black pepper sauce. Very enjoyable. Again, succulent shrimp. The scallops were tasty as well. Realizing that I moved here from Boston and I go into any seafood experience with diminished expectations now that I live in the west, I have to say the scallops are among the fresher and judiciously prepared that I have had in Phoenix.\n\nOverall China King delivered a very tasty and very fresh meal. 
They have a fairly extensive traditional menu which I look forward to exploring further.\n\nThanks to Christine O for her review...after reading that I knew China King was A-OK.\x0creview,P2kVk4cIWyK4e4h14RhK-Q\x06\nfunny\x08\x0cuseful\x12\x08cool\x0e\x00,arKckMf7lGNYjXjKo6DXcA\x142012-05-05,EyVfhRDlyip2ErKMOHEA-A\x08\xa4\x04We\'ve been here a few times and we love all the fresh ingredients. The pizza is good when you eat it fresh but if you like to eat your pizza cold then you\'ll be biting into hard dough. Their Nutella pizza is good. Take a menu and check out their menu and hours for specials.\x0creview,x1Yl1dpNcWCCEdpME9dg0g\x06\nfunny\x02\x0cuseful\x02\x08cool\x00\x00\x0e7\x91\x0b#.\x8f\xa2H%<G\x9c\x89\x93\xfb"""
    # NOTE(review): the file is written to the current working directory and
    # never removed afterwards — confirm whether cleanup is handled elsewhere.
    test_avro_file = open("test.avro", "wb")
    test_avro_file.write(data)
    test_avro_file.close()
    # from_avro should infer dict-typed rows, one per Avro record.
    sa = SArray.from_avro("test.avro")
    self.assertEqual(sa.dtype(), dict)
    self.assertEqual(len(sa), 2)
def test_from_const(self):
g = SArray.from_const('a', 100)
self.assertEqual(len(g), 100)
self.assertEqual(list(g), ['a']*100)
g = SArray.from_const(dt.datetime(2013, 5, 7, 10, 4, 10),10)
self.assertEqual(len(g), 10)
self.assertEqual(list(g), [dt.datetime(2013, 5, 7, 10, 4, 10,tzinfo=GMT(0))]*10)
g = SArray.from_const(0, 0)
self.assertEqual(len(g), 0)
g = SArray.from_const(None, 100)
self.assertEquals(list(g), [None] * 100)
self.assertEqual(g.dtype(), float)
def test_from_sequence(self):
    """from_sequence mirrors the builtin range() semantics."""
    with self.assertRaises(TypeError):
        SArray.from_sequence()

    self.assertEqual(list(SArray.from_sequence(100)), range(100))
    self.assertEqual(list(SArray.from_sequence(10, 100)), range(10, 100))
    # start > stop produces an empty sequence, exactly like range().
    self.assertEqual(list(SArray.from_sequence(100, 10)), range(100, 10))
def test_datetime_to_str(self):
    """datetime_to_str formats datetimes; non-datetime columns must raise."""
    formatted = SArray(self.datetime_data).datetime_to_str()
    self.__test_equal(formatted,
                      ['2013-05-07T10:04:10GMT+00',
                       '1902-10-21T10:34:10GMT+00',
                       None], str)

    # All-missing and empty datetime columns convert cleanly.
    sa = SArray([None, None, None], dtype=dt.datetime)
    self.__test_equal(sa.datetime_to_str(), [None, None, None], str)
    sa = SArray(dtype=dt.datetime)
    self.__test_equal(sa.datetime_to_str(), [], str)

    # Columns that are not datetime-typed are rejected.
    self.assertRaises(TypeError, SArray([None, None, None]).datetime_to_str)
    self.assertRaises(TypeError, SArray().datetime_to_str)
def test_str_to_datetime(self):
    """str_to_datetime parses strings under assorted format specifiers."""
    parsed = SArray(['2013-05-07T10:04:10GMT+00',
                     '1902-10-21T10:34:10GMT+00',
                     None]).str_to_datetime()
    self.__test_equal(parsed,
                      [dt.datetime(2013, 5, 7, 10, 4, 10, tzinfo=GMT(0)),
                       dt.datetime(1902, 10, 21, 10, 34, 10, tzinfo=GMT(0)),
                       None], dt.datetime)

    # Missing values and empty columns pass through untouched.
    self.__test_equal(SArray([None, None, None], str).str_to_datetime(),
                      [None, None, None], dt.datetime)
    self.__test_equal(SArray(dtype=str).str_to_datetime(), [], dt.datetime)

    # Only string-typed columns may be parsed.
    self.assertRaises(TypeError, SArray([None, None, None]).str_to_datetime)
    self.assertRaises(TypeError, SArray().str_to_datetime)

    # Hour without a leading zero.
    out = SArray(['10/30/2014 9:01']).str_to_datetime('%m/%d/%Y %H:%M')
    self.__test_equal(out,
                      [dt.datetime(2014, 10, 30, 9, 1, tzinfo=GMT(0))],
                      dt.datetime)

    # Completely undelimited date/time fields.
    out = SArray(['10302014 0901', '10302014 2001']).str_to_datetime('%m%d%Y %H%M')
    self.__test_equal(out,
                      [dt.datetime(2014, 10, 30, 9, 1, tzinfo=GMT(0)),
                       dt.datetime(2014, 10, 30, 20, 1, tzinfo=GMT(0))],
                      dt.datetime)

    # Undelimited ISO-like stamp using the %F/%q format-library extensions.
    out = SArray(['20110623T191001']).str_to_datetime("%Y%m%dT%H%M%S%F%q")
    self.__test_equal(out,
                      [dt.datetime(2011, 6, 23, 19, 10, 1, tzinfo=GMT(0))],
                      dt.datetime)

    # Lower- and upper-case AM/PM designators give identical results.
    ampm_expected = [dt.datetime(2014, 10, 30, 9, 1, tzinfo=GMT(0)),
                     dt.datetime(2014, 10, 30, 21, 1, tzinfo=GMT(0))]
    out = SArray(['10/30/2014 9:01am',
                  '10/30/2014 9:01pm']).str_to_datetime('%m/%d/%Y %H:%M%p')
    self.__test_equal(out, ampm_expected, dt.datetime)
    out = SArray(['10/30/2014 9:01AM',
                  '10/30/2014 9:01PM']).str_to_datetime('%m/%d/%Y %H:%M%P')
    self.__test_equal(out, ampm_expected, dt.datetime)

    # An hour of 13 is inconsistent with a pm designator.
    with self.assertRaises(RuntimeError):
        SArray(['10/30/2014 13:01pm']).str_to_datetime('%m/%d/%Y %H:%M%p')

    # %l / %L are 12-hour fields, so hour 13 must be rejected.
    bad = SArray(['10/30/2014 13:01'])
    with self.assertRaises(RuntimeError):
        bad.str_to_datetime('%m/%d/%Y %l:%M')
    with self.assertRaises(RuntimeError):
        bad.str_to_datetime('%m/%d/%Y %L:%M')
def test_apply_with_partial(self):
    """apply() accepts functools.partial objects as the mapped callable."""
    sa = SArray([1, 2, 3, 4, 5])

    def concat_fn(character, number):
        return '%s%d' % (character, number)

    prefixer = functools.partial(concat_fn, 'x')
    self.assertEqual(list(sa.apply(prefixer)),
                     ['x1', 'x2', 'x3', 'x4', 'x5'])
def test_apply_with_functor(self):
    """apply() accepts arbitrary callable objects (functors)."""
    sa = SArray([1, 2, 3, 4, 5])

    class Prefixer(object):
        # Callable object that prepends a fixed character to a number.
        def __init__(self, character):
            self.character = character

        def __call__(self, number):
            return '%s%d' % (self.character, number)

    self.assertEqual(list(sa.apply(Prefixer('x'))),
                     ['x1', 'x2', 'x3', 'x4', 'x5'])
| agpl-3.0 |
mhue/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 20 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause

import time
import matplotlib.pyplot as plt
import numpy as np

from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph

# Generate sample data: a noisy 2-D spiral.
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)

X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T

# Create a graph capturing local connectivity. A larger number of neighbors
# gives more homogeneous clusters at the cost of computation time. A very
# large number of neighbors yields more evenly distributed cluster sizes,
# but may not impose the local manifold structure of the data.
knn_graph = kneighbors_graph(X, 30, include_self=False)

for connectivity in (None, knn_graph):
    for n_clusters in (30, 3):
        plt.figure(figsize=(10, 4))
        for plot_idx, linkage in enumerate(('average', 'complete', 'ward')):
            plt.subplot(1, 3, plot_idx + 1)
            model = AgglomerativeClustering(linkage=linkage,
                                            connectivity=connectivity,
                                            n_clusters=n_clusters)
            start = time.time()
            model.fit(X)
            elapsed_time = time.time() - start
            plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
                        cmap=plt.cm.spectral)
            plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
                      fontdict=dict(verticalalignment='top'))
            plt.axis('equal')
            plt.axis('off')

            plt.subplots_adjust(bottom=0, top=.89, wspace=0,
                                left=0, right=1)
            plt.suptitle('n_cluster=%i, connectivity=%r' %
                         (n_clusters, connectivity is not None), size=17)

plt.show()
| bsd-3-clause |
JetBrains/intellij-community | python/helpers/pydev/pydev_ipython/inputhook.py | 21 | 19415 | # coding: utf-8
"""
Inputhook management for GUI event loop integration.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import select
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# Constants for identifying the GUI toolkits.
# These names serve both as keys into InputHookManager._apps and as the
# valid values of the ``gui`` argument accepted by enable_gui() below.
GUI_WX = 'wx'
GUI_QT = 'qt'
GUI_QT4 = 'qt4'
GUI_QT5 = 'qt5'
GUI_GTK = 'gtk'
GUI_TK = 'tk'
GUI_OSX = 'osx'
GUI_GLUT = 'glut'
GUI_PYGLET = 'pyglet'
GUI_GTK3 = 'gtk3'
GUI_NONE = 'none'  # i.e. disable
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def ignore_CTRL_C():
    """Ignore CTRL+C (no-op placeholder; not implemented here)."""


def allow_CTRL_C():
    """Take CTRL+C into account (no-op placeholder; not implemented here)."""
#-----------------------------------------------------------------------------
# Main InputHookManager class
#-----------------------------------------------------------------------------
class InputHookManager(object):
    """Manage PyOS_InputHook-style integration for different GUI toolkits.

    In the PyDev console we do not install a real ``PyOS_InputHook``;
    instead the registered callback is invoked while waiting for input,
    which keeps a GUI event loop responsive during interactive sessions.
    """

    def __init__(self):
        self._return_control_callback = None
        self._apps = {}
        self._reset()
        self.pyplot_imported = False

    def _reset(self):
        # Forget the installed hook and the currently active GUI.
        self._callback_pyfunctype = None
        self._callback = None
        self._current_gui = None

    def set_return_control_callback(self, return_control_callback):
        """Register the callable that tells a hook when to yield control."""
        self._return_control_callback = return_control_callback

    def get_return_control_callback(self):
        """Return the registered return-control callable (or None)."""
        return self._return_control_callback

    def return_control(self):
        """True when the active hook should hand control back to the console."""
        return self._return_control_callback()

    def get_inputhook(self):
        """Return the active input hook callable (or None)."""
        return self._callback

    def set_inputhook(self, callback):
        """Install *callback* as the input hook."""
        # We don't (in the context of PyDev console) actually set
        # PyOS_InputHook; this code runs while waiting for input on xmlrpc.
        self._callback = callback

    def clear_inputhook(self, app=None):
        """Clear the input hook.

        *app* is accepted (and ignored) only so this method shares the call
        signature of the ``enable_*`` methods, easing user-level dispatch.
        """
        self._reset()

    def clear_app_refs(self, gui=None):
        """Drop held application references.

        If *gui* is None all references are dropped, otherwise only the one
        for that toolkit (e.g. 'wx', 'qt4'). Apps are held because some
        toolkits misbehave when the user keeps no reference; gtk/tk have no
        app notion and are never stored.
        """
        if gui is None:
            self._apps = {}
        elif gui in self._apps:
            del self._apps[gui]

    def enable_wx(self, app=None):
        """Enable event loop integration with wxPython.

        Probes for an existing ``wx.App`` when *app* is not supplied, and
        creates ``wx.App(redirect=False, clearSigInt=False)`` if none is
        found. Requires wxPython >= 2.8.
        """
        import wx
        from distutils.version import LooseVersion as V
        wx_version = V(wx.__version__).version  # @UndefinedVariable
        if wx_version < [2, 8]:
            raise ValueError("requires wxPython >= 2.8, but you have %s" % wx.__version__)  # @UndefinedVariable

        from pydev_ipython.inputhookwx import inputhook_wx
        self.set_inputhook(inputhook_wx)
        self._current_gui = GUI_WX

        if app is None:
            app = wx.GetApp()  # @UndefinedVariable
            if app is None:
                app = wx.App(redirect=False, clearSigInt=False)  # @UndefinedVariable
        app._in_event_loop = True
        self._apps[GUI_WX] = app
        return app

    def disable_wx(self):
        """Disable wxPython integration (clears the hook)."""
        if GUI_WX in self._apps:
            self._apps[GUI_WX]._in_event_loop = False
        self.clear_inputhook()

    def enable_qt(self, app=None):
        """Dispatch to the Qt4 or Qt5 integration based on the detected API."""
        from pydev_ipython.qt_for_kernel import QT_API, QT_API_PYQT5
        if QT_API == QT_API_PYQT5:
            self.enable_qt5(app)
        else:
            self.enable_qt4(app)

    def enable_qt4(self, app=None):
        """Enable event loop integration with PyQt4.

        Probes for an existing ``QApplication`` when *app* is not given and
        creates one if needed.
        """
        from pydev_ipython.inputhookqt4 import create_inputhook_qt4
        app, inputhook_qt4 = create_inputhook_qt4(self, app)
        self.set_inputhook(inputhook_qt4)
        self._current_gui = GUI_QT4
        app._in_event_loop = True
        self._apps[GUI_QT4] = app
        return app

    def disable_qt4(self):
        """Disable PyQt4 integration (clears the hook)."""
        if GUI_QT4 in self._apps:
            self._apps[GUI_QT4]._in_event_loop = False
        self.clear_inputhook()

    def enable_qt5(self, app=None):
        """Enable event loop integration with PyQt5."""
        from pydev_ipython.inputhookqt5 import create_inputhook_qt5
        app, inputhook_qt5 = create_inputhook_qt5(self, app)
        self.set_inputhook(inputhook_qt5)
        self._current_gui = GUI_QT5
        app._in_event_loop = True
        self._apps[GUI_QT5] = app
        return app

    def disable_qt5(self):
        """Disable PyQt5 integration (clears the hook)."""
        if GUI_QT5 in self._apps:
            self._apps[GUI_QT5]._in_event_loop = False
        self.clear_inputhook()

    def enable_gtk(self, app=None):
        """Enable event loop integration with PyGTK (*app* is ignored)."""
        from pydev_ipython.inputhookgtk import create_inputhook_gtk
        # NOTE(review): self._stdin_file is never assigned anywhere in this
        # class — confirm where it is expected to come from.
        self.set_inputhook(create_inputhook_gtk(self._stdin_file))
        self._current_gui = GUI_GTK

    def disable_gtk(self):
        """Disable PyGTK integration (clears the hook)."""
        self.clear_inputhook()

    def enable_tk(self, app=None):
        """Enable event loop integration with Tk.

        If *app* (a toplevel ``Tk`` widget) is not supplied, a new hidden
        root window is created and registered.
        """
        self._current_gui = GUI_TK
        if app is None:
            try:
                import Tkinter as _TK
            except:
                # Python 3
                import tkinter as _TK  # @UnresolvedImport
            app = _TK.Tk()
            app.withdraw()
            self._apps[GUI_TK] = app

        from pydev_ipython.inputhooktk import create_inputhook_tk
        self.set_inputhook(create_inputhook_tk(app))
        return app

    def disable_tk(self):
        """Disable Tkinter integration (clears the hook)."""
        self.clear_inputhook()

    def enable_glut(self, app=None):
        """Enable event loop integration with GLUT (*app* is ignored).

        GLUT cannot run its loop without a window, so a tiny hidden window
        is created on first use and reused afterwards. The display mode is
        GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH.
        """
        import OpenGL.GLUT as glut  # @UnresolvedImport
        from pydev_ipython.inputhookglut import glut_display_mode, \
            glut_close, glut_display, \
            glut_idle, inputhook_glut

        if GUI_GLUT not in self._apps:
            glut.glutInit(sys.argv)
            glut.glutInitDisplayMode(glut_display_mode)
            # This is specific to freeglut: let glutMainLoop return.
            if bool(glut.glutSetOption):
                glut.glutSetOption(glut.GLUT_ACTION_ON_WINDOW_CLOSE,
                                   glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS)
            glut.glutCreateWindow(sys.argv[0])
            glut.glutReshapeWindow(1, 1)
            glut.glutHideWindow()
        # (Re)register the callbacks whether or not the window already existed.
        glut.glutWMCloseFunc(glut_close)
        glut.glutDisplayFunc(glut_display)
        glut.glutIdleFunc(glut_idle)

        self.set_inputhook(inputhook_glut)
        self._current_gui = GUI_GLUT
        self._apps[GUI_GLUT] = True

    def disable_glut(self):
        """Disable GLUT integration.

        Hides the GLUT window (processing that event immediately via
        glutMainLoopEvent) and clears the hook.
        """
        import OpenGL.GLUT as glut  # @UnresolvedImport
        from glut_support import glutMainLoopEvent  # @UnresolvedImport

        glut.glutHideWindow()  # This is an event to be processed below
        glutMainLoopEvent()
        self.clear_inputhook()

    def enable_pyglet(self, app=None):
        """Enable event loop integration with pyglet (*app* is ignored)."""
        from pydev_ipython.inputhookpyglet import inputhook_pyglet
        self.set_inputhook(inputhook_pyglet)
        self._current_gui = GUI_PYGLET
        return app

    def disable_pyglet(self):
        """Disable pyglet integration (clears the hook)."""
        self.clear_inputhook()

    def enable_gtk3(self, app=None):
        """Enable event loop integration with Gtk3 via gir (*app* ignored)."""
        from pydev_ipython.inputhookgtk3 import create_inputhook_gtk3
        # NOTE(review): self._stdin_file is never assigned anywhere in this
        # class — confirm where it is expected to come from.
        self.set_inputhook(create_inputhook_gtk3(self._stdin_file))
        self._current_gui = GUI_GTK

    def disable_gtk3(self):
        """Disable Gtk3 integration (clears the hook)."""
        self.clear_inputhook()

    def enable_mac(self, app=None):
        """Enable figure updates for the native MacOSX matplotlib backend.

        Instead of a toolkit hook we periodically call ``pyplot.pause``,
        but only once the user has imported matplotlib.pyplot themselves:
        importing it here would prematurely fix the backend choice.
        """
        def inputhook_mac(app=None):
            if self.pyplot_imported:
                pyplot = sys.modules['matplotlib.pyplot']
                try:
                    pyplot.pause(0.01)
                except:
                    pass
            else:
                if 'matplotlib.pyplot' in sys.modules:
                    self.pyplot_imported = True

        self.set_inputhook(inputhook_mac)
        self._current_gui = GUI_OSX

    def disable_mac(self):
        """Disable the MacOSX integration (clears the hook)."""
        self.clear_inputhook()

    def current_gui(self):
        """Return a string naming the currently active GUI, or None."""
        return self._current_gui
# Module-level singleton plus flat function aliases mirroring IPython's
# inputhook API, so callers need not touch the manager object directly.
inputhook_manager = InputHookManager()

enable_wx = inputhook_manager.enable_wx
disable_wx = inputhook_manager.disable_wx
enable_qt = inputhook_manager.enable_qt
enable_qt4 = inputhook_manager.enable_qt4
disable_qt4 = inputhook_manager.disable_qt4
enable_qt5 = inputhook_manager.enable_qt5
disable_qt5 = inputhook_manager.disable_qt5
enable_gtk = inputhook_manager.enable_gtk
disable_gtk = inputhook_manager.disable_gtk
enable_tk = inputhook_manager.enable_tk
disable_tk = inputhook_manager.disable_tk
enable_glut = inputhook_manager.enable_glut
disable_glut = inputhook_manager.disable_glut
enable_pyglet = inputhook_manager.enable_pyglet
disable_pyglet = inputhook_manager.disable_pyglet
enable_gtk3 = inputhook_manager.enable_gtk3
disable_gtk3 = inputhook_manager.disable_gtk3
enable_mac = inputhook_manager.enable_mac
disable_mac = inputhook_manager.disable_mac
clear_inputhook = inputhook_manager.clear_inputhook
set_inputhook = inputhook_manager.set_inputhook
current_gui = inputhook_manager.current_gui
clear_app_refs = inputhook_manager.clear_app_refs

# We maintain this as stdin_ready so that the individual inputhooks
# can diverge as little as possible from their IPython sources
stdin_ready = inputhook_manager.return_control
set_return_control_callback = inputhook_manager.set_return_control_callback
get_return_control_callback = inputhook_manager.get_return_control_callback
get_inputhook = inputhook_manager.get_inputhook
# Convenience function to switch amongst them
def enable_gui(gui=None, app=None):
    """Switch amongst GUI input hooks by name.

    Utility wrapper around the methods of the InputHookManager object.

    Parameters
    ----------
    gui : optional, string or None
        If None (or 'none'), clears the input hook; otherwise must be one
        of the recognized GUI names (see the ``GUI_*`` module constants).
    app : optional, existing application object.
        For toolkits with a global-app concept, an existing instance may be
        supplied; otherwise the toolkit is probed for one and a new one is
        created if none is found. GTK has no such concept, and passing an
        app with ``gui == "GTK"`` raises an error.

    Returns
    -------
    The output of the underlying gui switch routine, typically the actual
    PyOS_InputHook wrapper object or the GUI toolkit app created, if there
    was one.
    """
    if get_return_control_callback() is None:
        raise ValueError("A return_control_callback must be supplied as a reference before a gui can be enabled")

    guis = {
        GUI_NONE: clear_inputhook,
        GUI_OSX: enable_mac,
        GUI_TK: enable_tk,
        GUI_GTK: enable_gtk,
        GUI_WX: enable_wx,
        GUI_QT: enable_qt,
        GUI_QT4: enable_qt4,
        GUI_QT5: enable_qt5,
        GUI_GLUT: enable_glut,
        GUI_PYGLET: enable_pyglet,
        GUI_GTK3: enable_gtk3,
    }
    # Dict values are never None, so .get() safely signals an unknown name.
    gui_hook = guis.get(gui)
    if gui_hook is None:
        # A missing/empty name means "disable"; anything else is invalid.
        if gui is None or gui == '':
            gui_hook = clear_inputhook
        else:
            raise ValueError("Invalid GUI request %r, valid ones are:%s" % (gui, guis.keys()))
    return gui_hook(app)
__all__ = [
"GUI_WX",
"GUI_QT",
"GUI_QT4",
"GUI_QT5",
"GUI_GTK",
"GUI_TK",
"GUI_OSX",
"GUI_GLUT",
"GUI_PYGLET",
"GUI_GTK3",
"GUI_NONE",
"ignore_CTRL_C",
"allow_CTRL_C",
"InputHookManager",
"inputhook_manager",
"enable_wx",
"disable_wx",
"enable_qt",
"enable_qt4",
"disable_qt4",
"enable_qt5",
"disable_qt5",
"enable_gtk",
"disable_gtk",
"enable_tk",
"disable_tk",
"enable_glut",
"disable_glut",
"enable_pyglet",
"disable_pyglet",
"enable_gtk3",
"disable_gtk3",
"enable_mac",
"disable_mac",
"clear_inputhook",
"set_inputhook",
"current_gui",
"clear_app_refs",
"stdin_ready",
"set_return_control_callback",
"get_return_control_callback",
"get_inputhook",
"enable_gui"]
| apache-2.0 |
rafael-radkowski/ME325 | ME325Common/InputHelpers.py | 1 | 2465 | import platform
import matplotlib
if platform.system() == 'Darwin':
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.patches import Circle, Arc
# tkinter for the display
from tkinter import *
from tkinter import Canvas
from tkinter import Tk, BOTH, W, N, E, S
from tkinter.ttk import Frame, Button, Label, Scale, Checkbutton
import tkinter as tk
class DataEntryMenu():
    """Popup (Toplevel) window presenting labeled text-entry fields.

    ``create`` builds the window with one Entry row per item name; current
    values are exposed as a name -> StringVar mapping via ``get``/``set``.
    The "Use" button invokes the user-supplied callback, "Close" destroys
    the window.
    """

    # Class-level defaults; all are overwritten per-instance in __init__.
    __toplevel_frame = 0
    __window = 0
    __callback = None
    # dictionary for output data: item name -> StringVar
    __dict = 0
    # item name -> Entry widget, kept for enabling/disabling fields
    __entry_dict = 0

    def __init__(self, frame, callback):
        """frame: parent Tk widget; callback: invoked by the "Use" button."""
        self.__toplevel_frame = frame
        self.__callback = callback
        self.__dict = dict()
        self.__entry_dict = dict()

    def create(self, title, items):
        """Open the popup titled *title* with one entry row per name in *items*."""
        self.__window = Toplevel(self.__toplevel_frame)
        tk.Label(self.__window, text=title, background='white', font="Helvetica 14 bold").grid(sticky=NW, row=0, column=0)

        n = len(items)
        for i in range(n):
            self.__dict[items[i]] = StringVar()
            tk.Label(self.__window, text=items[i], background='white').grid(sticky=NW, row=i + 1, column=0)
            e = Entry(self.__window, textvariable=self.__dict[items[i]], width=15)
            e.grid(sticky=NW, row=i + 1, column=1)
            self.__entry_dict[items[i]] = e

        tk.Button(self.__window, text="Close", command=self.__destroyed_callback,
                  background='white').grid(sticky=NW, row=n + 1, column=0, padx=7, pady=7)
        tk.Button(self.__window, text="Use", command=self.__callback,
                  background='white').grid(sticky=NE, row=n + 1, column=1, padx=7, pady=7)

    def get(self):
        """Return the name -> StringVar mapping for all entry fields."""
        return self.__dict

    def set(self, items_dict):
        """Copy the values from *items_dict* into the matching entry fields."""
        try:
            for key, value in items_dict.items():
                self.__dict[key].set(str(value))
        except ValueError:
            print("Something went wrong - invalid values")
        except KeyError:
            # Unknown field name: stop, keeping already-copied values.
            return

    def set_readonly(self, readonly_key):
        """Disable the entry named *readonly_key*; enable all the others."""
        for key, value in self.__entry_dict.items():
            if key == readonly_key:
                value.configure(state='disabled')
            else:
                value.configure(state='normal')

    def __destroyed_callback(self):
        # Close the popup and drop the reference so it can be recreated.
        self.__window.destroy()
        self.__window = None
| mit |
MdAsifKhan/DNGR-Keras | example.py | 1 | 1675 | import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
import networkx as nx
from sklearn.cluster import KMeans
import matplotlib.colors as colors
from itertools import cycle
import time
import matplotlib.pyplot as plt
import subprocess
from utils import tsne
import pdb
import numpy as np
from sklearn.metrics import normalized_mutual_info_score as nmi
import scipy.io as sio
def cluster(data, true_labels, n_clusters=3):
    """Run k-means on *data* and plot a 2-D t-SNE projection for inspection.

    Parameters
    ----------
    data : array-like — assumed shape (n_samples, n_features); TODO confirm
    true_labels : sequence of int, ground-truth class per sample
        (used only to color the scatter plot)
    n_clusters : int, number of k-means clusters

    Returns
    -------
    The k-means cluster assignment for every sample.
    """
    # Fix: removed dead locals (cluster centers, unique labels, and an
    # unused matplotlib color cycle) that were computed but never read.
    km = KMeans(init='k-means++', n_clusters=n_clusters, n_init=10)
    km.fit(data)

    # Project to 2-D with t-SNE purely for visualization.
    initial_dim = np.shape(data)[1]
    data_2 = tsne(data, 2, initial_dim, 30)
    plt.figure(figsize=(12, 6))
    plt.scatter(data_2[:, 0], data_2[:, 1], c=true_labels)
    plt.title('True Labels')

    return km.labels_
# Demo: embed the wine similarity network with DNGR, then k-means cluster it.

# Load the adjacency matrix and the ground-truth labels.
data_mat = sio.loadmat('wine_network.mat')['adjMat']
labels = sio.loadmat('wine_label.mat')['wine_label']

# Serialize the graph as a weighted edge list for the DNGR script.
graph = nx.Graph(data_mat)
with open('wine.edgelist', 'wb') as f:
    nx.write_weighted_edgelist(graph, f)

# NOTE(review): shell=True with a fixed command string is fine for a local
# demo, but never build such a command from untrusted input.
subprocess.call('~/DNGR-Keras/DNGR.py --graph_type undirected --input wine.edgelist --output representation', shell=True)

# Read back the learned embeddings and align the labels with the node ids.
df = pd.read_pickle('representation.pkl')
node_idx = df['node_id'].values
reprsn = np.array([np.asarray(row, dtype='float32') for row in df['embedding'].values],
                  dtype='float32')
true_labels = np.asarray([labels[int(node)][0] for node in node_idx], dtype='int32')

cluster(reprsn, true_labels, n_clusters=3)
plt.show()
| mit |
zbanga/trading-with-python | lib/interactiveBrokers/histData.py | 76 | 6472 | '''
Created on May 8, 2013
Copyright: Jev Kuznetsov
License: BSD
Module for downloading historic data from IB
'''
import ib
import pandas as pd
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
import logger as logger
from pandas import DataFrame, Index
import os
import datetime as dt
import time
from time import sleep
from extra import timeFormat, dateFormat
class Downloader(object):
    """Historic bar-data downloader for Interactive Brokers via TWS (ibpy).

    NOTE(review): Python 2 source (print statements). Relies on names imported
    earlier in the file (ib, pd, logger, time, dt, ibConnection, message,
    timeFormat).
    """
    def __init__(self,debug=False):
        self._log = logger.getLogger('DLD')
        self._log.debug('Initializing data dwonloader. Pandas version={0}, ibpy version:{1}'.format(pd.__version__,ib.version))
        # live connection to the TWS gateway
        self.tws = ibConnection()
        self._dataHandler = _HistDataHandler(self.tws)
        if debug:
            # echo every incoming TWS message except the (noisy) historical bars
            self.tws.registerAll(self._debugHandler)
            self.tws.unregister(self._debugHandler,message.HistoricalData)
        self._log.debug('Connecting to tws')
        self.tws.connect()
        self._timeKeeper = TimeKeeper() # keep track of past requests
        self._reqId = 1 # current request id
    def _debugHandler(self,msg):
        # print every TWS message (debug mode only)
        print '[debug]', msg
    def requestData(self,contract,endDateTime,durationStr='1 D',barSizeSetting='30 secs',whatToShow='TRADES',useRTH=1,formatDate=1):
        """Request historical bars for `contract` ending at `endDateTime`.

        Blocks until the data handler signals completion or a timeout expires,
        then returns the collected bars as a DataFrame.
        """
        if isinstance(endDateTime,dt.datetime): # convert to string
            endDateTime = endDateTime.strftime(timeFormat)
        self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol,endDateTime))
        # honour IB's pacing rule: at most 60 historical requests per 10 minutes
        while self._timeKeeper.nrRequests(timeSpan=600) > 59:
            print 'Too many requests done. Waiting... '
            time.sleep(10)
        self._timeKeeper.addRequest()
        self._dataHandler.reset()
        self.tws.reqHistoricalData(self._reqId,contract,endDateTime,durationStr,barSizeSetting,whatToShow,useRTH,formatDate)
        self._reqId+=1
        #wait for data
        startTime = time.time()
        timeout = 3
        # NOTE(review): the 2 s poll interval is close to the 3 s timeout, so at
        # most two polls happen -- confirm the short timeout is intentional.
        while not self._dataHandler.dataReady and (time.time()-startTime < timeout):
            sleep(2)
        if not self._dataHandler.dataReady:
            self._log.error('Data timeout')
        print self._dataHandler.data
        return self._dataHandler.data
    # def getIntradayData(self,contract, dateTuple ):
    # ''' get full day data on 1-s interval
    # date: a tuple of (yyyy,mm,dd)
    # '''
    #
    # openTime = dt.datetime(*dateTuple)+dt.timedelta(hours=16)
    # closeTime = dt.datetime(*dateTuple)+dt.timedelta(hours=22)
    #
    # timeRange = pd.date_range(openTime,closeTime,freq='30min')
    #
    # datasets = []
    #
    # for t in timeRange:
    # datasets.append(self.requestData(contract,t.strftime(timeFormat)))
    #
    # return pd.concat(datasets)
    def disconnect(self):
        # close the TWS socket connection
        self.tws.disconnect()
class _HistDataHandler(object):
    ''' handles incoming messages '''
    def __init__(self,tws):
        self._log = logger.getLogger('DH')
        # subscribe only to HistoricalData messages from TWS
        tws.register(self.msgHandler,message.HistoricalData)
        self.reset()
    def reset(self):
        ''' clear any previously accumulated bars '''
        self._log.debug('Resetting data')
        self.dataReady = False  # set True once the 'finished' marker arrives
        self._timestamp = []    # one datetime per received bar
        self._data = {'open':[],'high':[],'low':[],'close':[],'volume':[],'count':[],'WAP':[]}
    def msgHandler(self,msg):
        ''' accumulate one HistoricalData message into the buffers '''
        #print '[msg]', msg
        # IB signals end-of-stream with a date field starting with 'finished'
        if msg.date[:8] == 'finished':
            self._log.debug('Data recieved')
            self.dataReady = True
            return
        # intraday bars carry a full timestamp, daily bars only a date string
        if len(msg.date) > 8:
            self._timestamp.append(dt.datetime.strptime(msg.date,timeFormat))
        else:
            self._timestamp.append(dt.datetime.strptime(msg.date,dateFormat))
        # copy every tracked field (open/high/low/...) off the message
        for k in self._data.keys():
            self._data[k].append(getattr(msg, k))
    @property
    def data(self):
        ''' return downloaded data as a DataFrame '''
        df = DataFrame(data=self._data,index=Index(self._timestamp))
        return df
class TimeKeeper(object):
    '''
    Tracks timestamps of past data requests in a text file under the user
    directory, so the IB pacing limit (max 60 requests / 10 min) can be
    honoured even across program restarts.
    '''
    def __init__(self):
        self._log = logger.getLogger('TK')
        dataDir = os.path.expanduser('~') + '/twpData'
        if not os.path.exists(dataDir):
            os.mkdir(dataDir)
        self._timeFormat = "%Y%m%d %H:%M:%S"
        self.dataFile = os.path.normpath(os.path.join(dataDir, 'requests.txt'))
        # create an empty log file on first run
        if not os.path.exists(self.dataFile):
            open(self.dataFile, 'w').close()
        self._log.debug('Data file: {0}'.format(self.dataFile))

    def addRequest(self):
        ''' adds a timestamp of current request'''
        with open(self.dataFile, 'a') as fh:
            fh.write(dt.datetime.now().strftime(self._timeFormat) + '\n')

    def nrRequests(self, timeSpan=600):
        ''' return number of requests in past timespan (s) '''
        cutoff = dt.timedelta(seconds=timeSpan)
        now = dt.datetime.now()
        with open(self.dataFile, 'r') as fh:
            stamps = fh.readlines()
        recent = sum(
            1 for stamp in stamps
            if now - dt.datetime.strptime(stamp.strip(), self._timeFormat) < cutoff)
        if recent == 0:  # erase all contents if no requests are relevant
            open(self.dataFile, 'w').close()
        self._log.debug('past requests: {0}'.format(recent))
        return recent
if __name__ == '__main__':
    # Demo: download 30-second SPY bars and dump them to CSV.
    from extra import createContract
    dl = Downloader(debug=True) # historic data downloader class
    contract = createContract('SPY') # create contract using defaults (STK,SMART,USD)
    data = dl.requestData(contract,"20141208 16:00:00 EST") # request 30-second data bars up till now
    data.to_csv('SPY.csv') # write data to csv
    print 'Done'
niamoto/niamoto-core | niamoto/data_providers/plantnote_provider/plantnote_plot_occurrence_provider.py | 2 | 1454 | # coding: utf-8
from sqlalchemy import *
import pandas as pd
from niamoto.data_providers.base_plot_occurrence_provider import \
BasePlotOccurrenceProvider
class PlantnotePlotOccurrenceProvider(BasePlotOccurrenceProvider):
    """
    Pl@ntnote Plot-Occurrence provider.

    Reads the plot/occurrence association table ('Inventaires') from a
    Pl@ntnote SQLite database and exposes it as a pandas DataFrame indexed
    by (plot_id, occurrence_id).
    """

    def __init__(self, data_provider, plantnote_db_path):
        super(PlantnotePlotOccurrenceProvider, self).__init__(data_provider)
        # Filesystem path to the Pl@ntnote SQLite database file.
        self.plantnote_db_path = plantnote_db_path

    def get_provider_plot_occurrence_dataframe(self):
        """Return a DataFrame with an 'occurrence_identifier' column,
        indexed by ('plot_id', 'occurrence_id')."""
        db_str = 'sqlite:///{}'.format(self.plantnote_db_path)
        eng = create_engine(db_str)
        connection = eng.connect()
        # The original wrapped this in a bare `except: raise`, which is a
        # no-op anti-pattern; a plain try/finally guarantees cleanup and
        # propagates any error unchanged.
        try:
            metadata = MetaData()
            metadata.reflect(eng)
            plot_occ_table = metadata.tables['Inventaires']
            sel = select([
                plot_occ_table.c["ID Parcelle"].label("plot_id"),
                plot_occ_table.c["ID Individus"].label(
                    "occurrence_id"
                ),
                plot_occ_table.c["Identifiant"].label(
                    "occurrence_identifier"
                ),
            ])
            df = pd.read_sql(
                sel,
                connection,
                index_col=["plot_id", "occurrence_id"]
            )
            return df
        finally:
            if connection:
                connection.close()
            eng.dispose()
| gpl-3.0 |
jreback/pandas | pandas/tests/series/methods/test_argsort.py | 3 | 2248 | import numpy as np
import pytest
from pandas import Series, Timestamp, isna
import pandas._testing as tm
class TestSeriesArgsort:
    """Tests for Series.argsort (pytest style; `datetime_series` is a fixture)."""

    def _check_accum_op(self, name, ser, check_dtype=True):
        # Compare the numpy ufunc applied to the Series against the same
        # ufunc applied to the underlying ndarray.
        func = getattr(np, name)
        tm.assert_numpy_array_equal(
            func(ser).values, func(np.array(ser)), check_dtype=check_dtype
        )
        # with missing values: NaNs are dropped before comparing
        ts = ser.copy()
        ts[::2] = np.NaN
        result = func(ts)[1::2]
        expected = func(np.array(ts.dropna()))
        tm.assert_numpy_array_equal(result.values, expected, check_dtype=False)

    def test_argsort(self, datetime_series):
        self._check_accum_op("argsort", datetime_series, check_dtype=False)
        argsorted = datetime_series.argsort()
        assert issubclass(argsorted.dtype.type, np.integer)
        # GH#2967 (introduced bug in 0.11-dev I think)
        s = Series([Timestamp(f"201301{i:02d}") for i in range(1, 6)])
        assert s.dtype == "datetime64[ns]"
        shifted = s.shift(-1)
        assert shifted.dtype == "datetime64[ns]"
        assert isna(shifted[4])
        result = s.argsort()
        expected = Series(range(5), dtype="int64")
        tm.assert_series_equal(result, expected)
        # missing values argsort to -1
        result = shifted.argsort()
        expected = Series(list(range(4)) + [-1], dtype="int64")
        tm.assert_series_equal(result, expected)

    def test_argsort_stable(self):
        # mergesort is stable; both kinds must agree with numpy's argsort
        s = Series(np.random.randint(0, 100, size=10000))
        mindexer = s.argsort(kind="mergesort")
        qindexer = s.argsort()
        mexpected = np.argsort(s.values, kind="mergesort")
        qexpected = np.argsort(s.values, kind="quicksort")
        tm.assert_series_equal(mindexer.astype(np.intp), Series(mexpected))
        tm.assert_series_equal(qindexer.astype(np.intp), Series(qexpected))
        # the two indexers need not agree element-wise on ties
        msg = (
            r"ndarray Expected type <class 'numpy\.ndarray'>, "
            r"found <class 'pandas\.core\.series\.Series'> instead"
        )
        with pytest.raises(AssertionError, match=msg):
            tm.assert_numpy_array_equal(qindexer, mindexer)

    def test_argsort_preserve_name(self, datetime_series):
        # argsort must carry the Series name through to the result
        result = datetime_series.argsort()
        assert result.name == datetime_series.name
| bsd-3-clause |
agiovann/Constrained_NMF | use_cases/CaImAnpaper/train_net_cifar_SNIPER.py | 2 | 10134 | #!/usr/bin/env python
"""
Created on Thu Aug 24 12:30:19 2017
@author: agiovann
"""
'''From keras example of convnet on the MNIST dataset.
TRAIN ON DATA EXTRACTED FROM RESIDUALS WITH generate_GT script
'''
#%%
import cv2
import glob
# Limit OpenCV's internal threading (avoids oversubscription alongside
# the parallel frameworks used below); older builds lack setNumThreads.
try:
    cv2.setNumThreads(1)
except:
    print('Open CV is naturally single threaded')
try:
    if __IPYTHON__:
        print(1)
        # this is used for debugging purposes only. allows to reload classes
        # when changed
        get_ipython().magic('load_ext autoreload')
        get_ipython().magic('autoreload 2')
except NameError:
    print('Not launched under iPython')
import caiman as cm
import numpy as np
import os
import time
import pylab as pl
import psutil
import sys
from ipyparallel import Client
from skimage.external.tifffile import TiffFile
import scipy
import copy
from caiman.utils.utils import download_demo
from caiman.base.rois import extract_binary_masks_blob
from caiman.utils.visualization import plot_contours, view_patches_bar
from caiman.source_extraction.cnmf import cnmf as cnmf
from caiman.motion_correction import MotionCorrect
from caiman.components_evaluation import estimate_components_quality
from caiman.components_evaluation import evaluate_components
from caiman.tests.comparison import comparison
from caiman.motion_correction import tile_and_correct, motion_correction_piecewise
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from sklearn.model_selection import train_test_split
from keras.layers import Dense, Dropout, Activation, Flatten
import json as simplejson
from keras.models import model_from_json
from sklearn.utils import class_weight as cw
from caiman.utils.image_preprocessing_keras import ImageDataGenerator
from keras.layers import merge
from keras.layers.core import Lambda
from keras.models import Model
import tensorflow as tf
#%%
def make_parallel(model, gpu_count):
    """Replicate a Keras model across `gpu_count` GPUs (data parallelism).

    Each GPU receives a slice of every input batch; the per-GPU outputs are
    concatenated back together on the CPU.

    NOTE(review): uses the legacy Keras 1 `merge`/`Model(input=..., output=...)`
    API -- will not run on Keras 2+ unchanged.
    """
    def get_slice(data, idx, parts):
        # Slice `data` along the batch axis into `parts` pieces; return piece `idx`.
        shape = tf.shape(data)
        size = tf.concat([shape[:1] // parts, shape[1:]], axis=0)
        stride = tf.concat([shape[:1] // parts, shape[1:] * 0], axis=0)
        start = stride * idx
        return tf.slice(data, start, size)
    outputs_all = []
    for i in range(len(model.outputs)):
        outputs_all.append([])
    # Place a copy of the model on each GPU, each getting a slice of the batch
    for i in range(gpu_count):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('tower_%d' % i) as scope:
                inputs = []
                # Slice each input into a piece for processing on this GPU
                for x in model.inputs:
                    input_shape = tuple(x.get_shape().as_list())[1:]
                    slice_n = Lambda(get_slice, output_shape=input_shape, arguments={
                        'idx': i, 'parts': gpu_count})(x)
                    inputs.append(slice_n)
                outputs = model(inputs)
                if not isinstance(outputs, list):
                    outputs = [outputs]
                # Save all the outputs for merging back together later
                for l in range(len(outputs)):
                    outputs_all[l].append(outputs[l])
    # merge outputs on CPU
    with tf.device('/cpu:0'):
        merged = []
        for outputs in outputs_all:
            merged.append(merge(outputs, mode='concat', concat_axis=0))
    return Model(input=model.inputs, output=merged)
#%%
# the data, shuffled and split between train and test sets
with np.load('use_cases/CaImAnpaper/groun_truth_sniper/residual_crops_sniper_selective.npz') as ld:
    all_masks_gt = ld['all_masks_gt'][:]
    labels_gt = ld['labels_gt']
# keep only the binary subset (labels 0 and 1)
all_masks_gt = all_masks_gt[labels_gt < 2]
labels_gt = labels_gt[labels_gt < 2]
#%%
# training hyper-parameters
batch_size = 128
num_classes = 2
epochs = 5
test_fraction = 0.25
augmentation = True
# input image dimensions
img_rows, img_cols = 50, 50
x_train, x_test, y_train, y_test = train_test_split(
    all_masks_gt, labels_gt, test_size=test_fraction)
# balance the loss for unequal class frequencies
class_weight = cw.compute_class_weight('balanced', np.unique(y_train), y_train)
# reshape to NCHW or NHWC depending on the Keras backend
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# NOTE(review): inputs are deliberately NOT rescaled to [0, 1] here
#x_train /= 255
#x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
#%%
# Small VGG-style CNN: two conv blocks + dense head, softmax over 2 classes.
model = Sequential()
# NOTE(review): activation='relu' inside Conv2D followed by another
# Activation('relu') applies ReLU twice -- harmless (idempotent) but
# probably unintended.
model.add(Conv2D(32, kernel_size=(3, 3),
                 activation='relu',
                 input_shape=input_shape))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
#model = make_parallel(model, 2)
# initiate RMSprop optimizer
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=opt,
              metrics=['accuracy'])
if augmentation:
    print('Using real-time data augmentation.')
    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        # featurewise_center=True,
        # featurewise_std_normalization=True,
        shear_range=0.3,
        rotation_range=360,
        width_shift_range=0.2,
        height_shift_range=0.2,
        zoom_range=[0.8, 1.2],
        horizontal_flip=True,
        vertical_flip=True,
        random_mult_range=[.25, 2]
    )
    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)
    # Fit the model on the batches generated by datagen.flow().
    history = model.fit_generator(datagen.flow(x_train, y_train,
                                               batch_size=batch_size),
                                  steps_per_epoch=x_train.shape[0] // batch_size,
                                  epochs=epochs,
                                  verbose=1,
                                  class_weight=class_weight,
                                  validation_data=(x_test, y_test))
else:
    history = model.fit(x_train, y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=1,
                        validation_data=(x_test, y_test))
#%%
# Final held-out evaluation.
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
#%% Save model and weights
import datetime
save_dir = 'use_cases/CaImAnpaper/net_models/'
# Timestamp-based file name (':' and ' ' are not filesystem-safe).
model_name = str(datetime.datetime.now()).replace(' ', '-').replace(':', '-')
# BUG FIX: ensure the output directory exists *before* writing the JSON
# architecture file. The original only created it right before the .h5
# save, so the json write below crashed on a fresh checkout.
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
# Save the architecture as pretty-printed JSON...
model_json = model.to_json()
json_path = os.path.join(save_dir, model_name + '.json')
with open(json_path, "w") as json_file:
    json_file.write(simplejson.dumps(simplejson.loads(model_json), indent=4))
print('Saved trained model at %s ' % json_path)
# ...and the full model (architecture + weights) as HDF5.
model_path = os.path.join(save_dir, model_name + '.h5')
model.save(model_path)
print('Saved trained model at %s ' % model_path)
#%% visualize_results
# BUG FIX: `montage2d` was first used in this section but only imported
# further down (after the first three montage calls), raising NameError
# when the script runs top-to-bottom. Import it before first use.
from skimage.util.montage import montage2d
num_sampl = 30000
predictions = model.predict(
    all_masks_gt[:num_sampl, :, :, None], batch_size=32, verbose=1)
# Play movies of crops confidently classified as class 0 / class 1.
cm.movie(np.squeeze(all_masks_gt[np.where(predictions[:num_sampl, 0] >= 0.95)[
    0]])).play(gain=3., magnification=5, fr=10)
#%%
cm.movie(np.squeeze(all_masks_gt[np.where(predictions[:num_sampl, 1] >= 0.95)[
    0]])).play(gain=3., magnification=5, fr=10)
#%%
# Montages of confident disagreements between labels and predictions.
pl.imshow(montage2d(all_masks_gt[np.where((labels_gt[:num_sampl] == 0) & (
    predictions[:num_sampl, 1] > 0.95))[0]].squeeze()))
#%%
pl.imshow(montage2d(all_masks_gt[np.where((labels_gt[:num_sampl] == 1) & (
    predictions[:num_sampl, 0] > 0.95))[0]].squeeze()))
#%%
# Montage of uncertain examples (score near the decision boundary).
pl.imshow(montage2d(all_masks_gt[np.where(
    (predictions[:num_sampl, 0] > 0.4) & (predictions[:num_sampl, 0] < 0.5))[0]].squeeze()))
#%% retrieve and test: reload architecture + weights from disk, re-evaluate
json_file = open(json_path, 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights(model_path)
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
loaded_model.compile(loss=keras.losses.categorical_crossentropy,
                     optimizer=opt,
                     metrics=['accuracy'])
print("Loaded model from disk")
score = loaded_model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
score = loaded_model.evaluate(x_train, y_train, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
#%%
predictions = loaded_model.predict(
    all_masks_gt[:num_sampl], batch_size=32, verbose=1)
cm.movie(np.squeeze(all_masks_gt[np.where(predictions[:num_sampl, 1] < 0.1)[0]])).play(
    gain=3., magnification=5, fr=10)
#%%
pl.imshow(montage2d(all_masks_gt[np.where((labels_gt[:num_sampl] == 0) & (
    predictions[:num_sampl, 1] >= 0.5))[0]].squeeze()))
#%%
pl.imshow(montage2d(all_masks_gt[np.where((labels_gt == 1) & (
    predictions[:num_sampl, 0] >= 0.5) & (predictions[:, 0] >= 0.5))[0]].squeeze()))
| gpl-2.0 |
isrohutamahopetechnik/MissionPlanner | Lib/site-packages/scipy/signal/filter_design.py | 53 | 63381 | """Filter design.
"""
import types
import warnings
import numpy
from numpy import atleast_1d, poly, polyval, roots, real, asarray, allclose, \
resize, pi, absolute, logspace, r_, sqrt, tan, log10, arctan, arcsinh, \
cos, exp, cosh, arccosh, ceil, conjugate, zeros, sinh
from numpy import mintypecode
from scipy import special, optimize
from scipy.misc import comb
class BadCoefficients(UserWarning):
    """Warning emitted when filter coefficients are badly conditioned."""
    pass

# Module-level alias: `abs` in this module is numpy's elementwise absolute
# value (deliberately shadows the builtin).
abs = absolute
def findfreqs(num, den, N):
    """Return N log-spaced frequencies spanning the interesting part of the
    response of the analog filter num/den, based on its pole/zero locations.
    """
    poles = atleast_1d(roots(den)) + 0j
    zeros_ = atleast_1d(roots(num)) + 0j
    if len(poles) == 0:
        # no poles: fall back to a far-away artificial pole
        poles = atleast_1d(-1000) + 0j
    # keep upper-half-plane poles, plus finite upper-half-plane zeros
    keep_z = (numpy.abs(zeros_) < 1e5) & (zeros_.imag >= 0)
    features = r_['-1',
                  numpy.compress(poles.imag >= 0, poles, axis=-1),
                  numpy.compress(keep_z, zeros_, axis=-1)]
    # treat near-origin features as magnitude 1 when choosing the range
    at_origin = numpy.abs(features) < 1e-10
    hfreq = numpy.around(
        numpy.log10(numpy.max(3 * numpy.abs(features.real + at_origin)
                              + 1.5 * features.imag)) + 0.5)
    lfreq = numpy.around(
        numpy.log10(0.1 * numpy.min(numpy.abs(real(features + at_origin))
                                    + 2 * features.imag)) - 0.5)
    return logspace(lfreq, hfreq, N)
def freqs(b, a, worN=None, plot=None):
    """
    Compute frequency response of analog filter.

    Given the numerator (b) and denominator (a) of a filter compute its
    frequency response::

        b[0]*(jw)**(nb-1) + b[1]*(jw)**(nb-2) + ... + b[nb-1]
    H(w) = -------------------------------------------------------
        a[0]*(jw)**(na-1) + a[1]*(jw)**(na-2) + ... + a[na-1]

    Parameters
    ----------
    b : ndarray
        Numerator of a linear filter.
    a : ndarray
        Denominator of a linear filter.
    worN : {None, int}, optional
        If None, then compute at 200 frequencies around the interesting parts
        of the response curve (determined by pole-zero locations). If a single
        integer, the compute at that many frequencies. Otherwise, compute the
        response at frequencies given in worN.
    plot : callable
        A callable that takes two arguments. If given, the return parameters
        `w` and `h` are passed to plot. Useful for plotting the frequency
        response inside `freqz`.

    Returns
    -------
    w : ndarray
        The frequencies at which h was computed.
    h : ndarray
        The frequency response.

    See Also
    --------
    freqz : Compute the frequency response of a digital filter.

    Notes
    -----
    Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results, this plots the real part of the complex transfer
    function, not the magnitude.
    """
    if worN is None:
        # pick 200 frequencies around the poles/zeros
        w = findfreqs(b,a,200)
    elif isinstance(worN, types.IntType):
        # NOTE(review): `types.IntType` is Python 2 only -- this branch
        # raises AttributeError on Python 3 (use `int` there).
        N = worN
        w = findfreqs(b,a,N)
    else:
        w = worN
    w = atleast_1d(w)
    # evaluate H(s) on the imaginary axis s = j*w
    s = 1j*w
    h = polyval(b, s) / polyval(a, s)
    if not plot is None:
        plot(w, h)
    return w, h
def freqz(b, a=1, worN=None, whole=0, plot=None):
    """
    Compute the frequency response of a digital filter.

    Given the numerator ``b`` and denominator ``a`` of a digital filter compute
    its frequency response::

        jw               -jw            -jmw
        jw  B(e)    b[0] + b[1]e + .... + b[m]e
    H(e) = ---- = ------------------------------------
        jw               -jw            -jnw
        A(e)    a[0] + a[1]e + .... + a[n]e

    Parameters
    ----------
    b : ndarray
        numerator of a linear filter
    a : ndarray
        denominator of a linear filter
    worN : {None, int}, optional
        If None, then compute at 512 frequencies around the unit circle.
        If a single integer, the compute at that many frequencies.
        Otherwise, compute the response at frequencies given in worN
    whole : bool, optional
        Normally, frequencies are computed from 0 to pi (upper-half of
        unit-circle. If whole is False, compute frequencies from 0 to 2*pi.
    plot : callable
        A callable that takes two arguments. If given, the return parameters
        `w` and `h` are passed to plot. Useful for plotting the frequency
        response inside `freqz`.

    Returns
    -------
    w : ndarray
        The frequencies at which h was computed.
    h : ndarray
        The frequency response.

    Notes
    -----
    Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results, this plots the real part of the complex transfer
    function, not the magnitude.

    Examples
    --------
    >>> b = firwin(80, 0.5, window=('kaiser', 8))
    >>> h, w = freqz(b)

    >>> import matplotlib.pyplot as plt
    >>> fig = plt.figure()
    >>> plt.title('Digital filter frequency response')
    >>> ax1 = fig.add_subplot(111)

    >>> plt.semilogy(h, np.abs(w), 'b')
    >>> plt.ylabel('Amplitude (dB)', color='b')
    >>> plt.xlabel('Frequency (rad/sample)')
    >>> plt.grid()
    >>> plt.legend()

    >>> ax2 = ax1.twinx()
    >>> angles = np.unwrap(np.angle(w))
    >>> plt.plot(h, angles, 'g')
    >>> plt.ylabel('Angle (radians)', color='g')
    >>> plt.show()
    """
    b, a = map(atleast_1d, (b,a))
    if whole:
        lastpoint = 2*pi
    else:
        lastpoint = pi
    if worN is None:
        # default: 512 evenly spaced points on [0, lastpoint)
        N = 512
        w = numpy.arange(0,lastpoint,lastpoint/N)
    elif isinstance(worN, types.IntType):
        # NOTE(review): `types.IntType` is Python 2 only -- this branch
        # raises AttributeError on Python 3 (use `int` there).
        N = worN
        w = numpy.arange(0,lastpoint,lastpoint/N)
    else:
        w = worN
    w = atleast_1d(w)
    # evaluate H(z) on the unit circle z = exp(j*w); polyval wants
    # ascending powers of z**-1, hence the coefficient reversal
    zm1 = exp(-1j*w)
    h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
    if not plot is None:
        plot(w, h)
    return w, h
def tf2zpk(b, a):
    """Return zero, pole, gain (z,p,k) representation from a numerator,
    denominator representation of a linear filter.

    Parameters
    ----------
    b : ndarray
        Numerator polynomial.
    a : ndarray
        Denominator polynomial.

    Returns
    -------
    z : ndarray
        Zeros of the transfer function.
    p : ndarray
        Poles of the transfer function.
    k : float
        System gain.

    If some values of b are too close to 0, they are removed. In that case, a
    BadCoefficients warning is emitted.
    """
    # strip leading zeros / make the denominator monic-normalizable
    b, a = normalize(b, a)
    lead = a[0]
    b = (b + 0.0) / lead
    a = (a + 0.0) / lead
    # overall gain is the leading numerator coefficient
    k = b[0]
    b = b / k
    z = roots(b)
    p = roots(a)
    return z, p, k
def zpk2tf(z, p, k):
    """Return polynomial transfer function representation from zeros
    and poles

    Parameters
    ----------
    z : ndarray
        Zeros of the transfer function.
    p : ndarray
        Poles of the transfer function.
    k : float
        System gain.

    Returns
    -------
    b : ndarray
        Numerator polynomial.
    a : ndarray
        Denominator polynomial.
    """
    z = atleast_1d(z)
    k = atleast_1d(k)
    if z.ndim > 1:
        # one row of zeros per output channel; broadcast a scalar gain
        first_poly = poly(z[0])
        b = zeros((z.shape[0], z.shape[1] + 1), first_poly.dtype.char)
        gains = [k[0]] * z.shape[0] if len(k) == 1 else k
        for row in range(z.shape[0]):
            b[row] = gains[row] * poly(z[row])
    else:
        b = k * poly(z)
    a = atleast_1d(poly(p))
    return b, a
def normalize(b, a):
    """Normalize polynomial representation of a transfer function.

    Scales both polynomials by the leading denominator coefficient, drops
    leading zeros of the denominator, and trims numerator columns that are
    numerically zero. If values of b are too close to 0, they are removed;
    in that case, a BadCoefficients warning is emitted.
    """
    b = atleast_1d(b)
    a = atleast_1d(a)
    if a.ndim != 1:
        raise ValueError("Denominator polynomial must be rank-1 array.")
    if b.ndim > 2:
        raise ValueError("Numerator polynomial must be rank-1 or rank-2 array.")
    if b.ndim == 1:
        # promote to 2-D: one row per output channel
        b = asarray([b], b.dtype.char)
    # drop leading zeros of the denominator (keep at least one coefficient)
    while len(a) > 1 and a[0] == 0.0:
        a = a[1:]
    lead = a[0]
    outb = b * (1.0) / lead
    outa = a * (1.0) / lead
    if allclose(outb[:, 0], 0, rtol=1e-14):
        warnings.warn("Badly conditioned filter coefficients (numerator): the "
                      "results may be meaningless", BadCoefficients)
        # trim numerically-zero leading numerator columns
        while allclose(outb[:, 0], 0, rtol=1e-14) and (outb.shape[-1] > 1):
            outb = outb[:, 1:]
    if outb.shape[0] == 1:
        # single-channel numerator collapses back to rank-1
        outb = outb[0]
    return outb, outa
def lp2lp(b, a, wo=1.0):
    """Return a low-pass filter with cutoff frequency `wo`
    from a low-pass filter prototype with unity cutoff frequency
    (substitution s -> s/wo).
    """
    a = atleast_1d(a)
    b = atleast_1d(b)
    try:
        wo = float(wo)
    except TypeError:
        # accept a length-1 sequence for wo
        wo = float(wo[0])
    n_den = len(a)
    n_num = len(b)
    order = max(n_den, n_num)
    # descending powers: wo**(order-1), ..., wo**0
    pwo = pow(wo, numpy.arange(order - 1, -1, -1))
    start_num = max(n_num - n_den, 0)
    start_den = max(n_den - n_num, 0)
    b = b * pwo[start_num] / pwo[start_den:]
    a = a * pwo[start_num] / pwo[start_num:]
    return normalize(b, a)
def lp2hp(b, a, wo=1.0):
    """Return a high-pass filter with cutoff frequency `wo`
    from a low-pass filter prototype with unity cutoff frequency
    (substitution s -> wo/s, which reverses coefficient order).
    """
    a = atleast_1d(a)
    b = atleast_1d(b)
    try:
        wo = float(wo)
    except TypeError:
        # accept a length-1 sequence for wo
        wo = float(wo[0])
    n_den = len(a)
    n_num = len(b)
    order = max(n_den, n_num)
    if wo != 1:
        pwo = pow(wo, numpy.arange(order))
    else:
        # avoid pointless exponentiation when no scaling is needed
        pwo = numpy.ones(order, b.dtype.char)
    if n_den >= n_num:
        outa = a[::-1] * pwo
        outb = resize(b, (n_den,))
        outb[n_num:] = 0.0
        outb[:n_num] = b[::-1] * pwo[:n_num]
    else:
        outb = b[::-1] * pwo
        outa = resize(a, (n_num,))
        outa[n_den:] = 0.0
        outa[:n_den] = a[::-1] * pwo[:n_den]
    return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
    """Return a band-pass filter with center frequency `wo` and bandwidth `bw`
    from a low-pass filter prototype with unity cutoff frequency
    (substitution s -> (s**2 + wo**2) / (bw*s), expanded via the binomial
    theorem using `comb`).

    NOTE(review): `comb` comes from `scipy.misc`, which was removed in
    modern SciPy (now `scipy.special.comb`).
    """
    a,b = map(atleast_1d,(a,b))
    D = len(a) - 1
    N = len(b) - 1
    artype = mintypecode((a,b))
    ma = max([N,D])
    # transformed numerator/denominator have degree raised by ma
    Np = N + ma
    Dp = D + ma
    bprime = numpy.zeros(Np+1,artype)
    aprime = numpy.zeros(Dp+1,artype)
    wosq = wo*wo
    # accumulate the binomial expansion coefficient-by-coefficient
    for j in range(Np+1):
        val = 0.0
        for i in range(0,N+1):
            for k in range(0,i+1):
                if ma-i+2*k == j:
                    val += comb(i,k)*b[N-i]*(wosq)**(i-k) / bw**i
        bprime[Np-j] = val
    for j in range(Dp+1):
        val = 0.0
        for i in range(0,D+1):
            for k in range(0,i+1):
                if ma-i+2*k == j:
                    val += comb(i,k)*a[D-i]*(wosq)**(i-k) / bw**i
        aprime[Dp-j] = val
    return normalize(bprime, aprime)
def lp2bs(b, a, wo=1, bw=1):
    """Return a band-stop filter with center frequency `wo` and bandwidth `bw`
    from a low-pass filter prototype with unity cutoff frequency
    (substitution s -> (bw*s) / (s**2 + wo**2), expanded via the binomial
    theorem using `comb`).

    NOTE(review): `comb` comes from `scipy.misc`, which was removed in
    modern SciPy (now `scipy.special.comb`).
    """
    a,b = map(atleast_1d,(a,b))
    D = len(a) - 1
    N = len(b) - 1
    artype = mintypecode((a,b))
    M = max([N,D])
    # band-stop transform doubles the polynomial degree
    Np = M + M
    Dp = M + M
    bprime = numpy.zeros(Np+1,artype)
    aprime = numpy.zeros(Dp+1,artype)
    wosq = wo*wo
    # accumulate the binomial expansion coefficient-by-coefficient
    for j in range(Np+1):
        val = 0.0
        for i in range(0,N+1):
            for k in range(0,M-i+1):
                if i+2*k == j:
                    val += comb(M-i,k)*b[N-i]*(wosq)**(M-i-k) * bw**i
        bprime[Np-j] = val
    for j in range(Dp+1):
        val = 0.0
        for i in range(0,D+1):
            for k in range(0,M-i+1):
                if i+2*k == j:
                    val += comb(M-i,k)*a[D-i]*(wosq)**(M-i-k) * bw**i
        aprime[Dp-j] = val
    return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
    """Return a digital filter from an analog filter using the bilinear transform.

    The bilinear transform substitutes ``(z-1) / (z+1``) for ``s``
    (scaled by 2*fs), expanded term-by-term via the binomial theorem.

    NOTE(review): `comb` comes from `scipy.misc`, which was removed in
    modern SciPy (now `scipy.special.comb`).
    """
    fs =float(fs)
    a,b = map(atleast_1d,(a,b))
    D = len(a) - 1
    N = len(b) - 1
    artype = float
    M = max([N,D])
    # digital filter has the same order as the (padded) analog one
    Np = M
    Dp = M
    bprime = numpy.zeros(Np+1,artype)
    aprime = numpy.zeros(Dp+1,artype)
    # triple loop expands (2*fs*(z-1))**i * (z+1)**(M-i) for each coefficient
    for j in range(Np+1):
        val = 0.0
        for i in range(N+1):
            for k in range(i+1):
                for l in range(M-i+1):
                    if k+l == j:
                        val += comb(i,k)*comb(M-i,l)*b[N-i]*pow(2*fs,i)*(-1)**k
        bprime[j] = real(val)
    for j in range(Dp+1):
        val = 0.0
        for i in range(D+1):
            for k in range(i+1):
                for l in range(M-i+1):
                    if k+l == j:
                        val += comb(i,k)*comb(M-i,l)*a[D-i]*pow(2*fs,i)*(-1)**k
        aprime[j] = real(val)
    return normalize(bprime, aprime)
def iirdesign(wp, ws, gpass, gstop, analog=0, ftype='ellip', output='ba'):
    """Complete IIR digital and analog filter design.

    Given passband and stopband frequencies and gains construct an analog or
    digital IIR filter of minimum order for a given basic type. Return the
    output in numerator, denominator ('ba') or pole-zero ('zpk') form.

    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies, normalized from 0 to 1 (1
        corresponds to pi radians / sample). For example:

            - Lowpass: wp = 0.2, ws = 0.3
            - Highpass: wp = 0.3, ws = 0.2
            - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
            - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]

    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : int, optional
        Non-zero to design an analog filter (in this case `wp` and `ws` are in
        radians / second).
    ftype : str, optional
        The type of IIR filter to design:

            - elliptic : 'ellip'
            - Butterworth : 'butter',
            - Chebyshev I : 'cheby1',
            - Chebyshev II: 'cheby2',
            - Bessel : 'bessel'

    output : ['ba', 'zpk'], optional
        Type of output: numerator/denominator ('ba') or pole-zero ('zpk').
        Default is 'ba'.

    Returns
    -------
    b, a :
        Numerator and denominator of the IIR filter. Only returned if
        ``output='ba'``.
    z, p, k : Zeros, poles, and gain of the IIR filter. Only returned if
        ``output='zpk'``.
    """
    try:
        # second entry of filter_dict is the order-selection function
        ordfunc = filter_dict[ftype][1]
    except KeyError:
        raise ValueError("Invalid IIR filter type: %s" % ftype)
    except IndexError:
        raise ValueError("%s does not have order selection use iirfilter function." % ftype)
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    # infer the band type from the number of edges and their ordering:
    # 1 edge -> low/highpass, 2 edges -> bandstop/bandpass
    band_type = 2*(len(wp)-1)
    band_type +=1
    if wp[0] >= ws[0]:
        band_type += 1
    btype = {1:'lowpass', 2:'highpass', 3:'bandstop', 4:'bandpass'}[band_type]
    N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
    return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype, ftype=ftype, output=output)
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=0, ftype='butter', output='ba'):
    """IIR digital and analog filter design given order and critical points.

    Design an Nth order lowpass digital or analog filter and return the filter
    coefficients in (B,A) (numerator, denominator) or (Z,P,K) form.

    Parameters
    ----------
    N : int
        The order of the filter.
    Wn : array_like
        A scalar or length-2 sequence giving the critical frequencies.
    rp : float, optional
        For Chebyshev and elliptic filters provides the maximum ripple
        in the passband.
    rs : float, optional
        For chebyshev and elliptic filters provides the minimum attenuation in
        the stop band.
    btype : str, optional
        The type of filter (lowpass, highpass, bandpass, bandstop).
        Default is bandpass.
    analog : int, optional
        Non-zero to return an analog filter, otherwise a digital filter is
        returned.
    ftype : str, optional
        The type of IIR filter to design:

            - elliptic : 'ellip'
            - Butterworth : 'butter',
            - Chebyshev I : 'cheby1',
            - Chebyshev II: 'cheby2',
            - Bessel : 'bessel'

    output : ['ba', 'zpk'], optional
        Type of output: numerator/denominator ('ba') or pole-zero ('zpk').
        Default is 'ba'.

    See Also
    --------
    butterord, cheb1ord, cheb2ord, ellipord
    """
    ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
    Wn = asarray(Wn)
    try:
        btype = band_dict[btype]
    except KeyError:
        raise ValueError("%s is an invalid bandtype for filter." % btype)

    try:
        typefunc = filter_dict[ftype][0]
    except KeyError:
        raise ValueError("%s is not a valid basic iir filter." % ftype)

    if output not in ['ba', 'zpk']:
        raise ValueError("%s is not a valid output form." % output)

    #pre-warp frequencies for digital filter design
    if not analog:
        fs = 2.0
        warped = 2*fs*tan(pi*Wn/fs)
    else:
        warped = Wn

    # convert to low-pass prototype
    if btype in ['lowpass', 'highpass']:
        wo = warped
    else:
        bw = warped[1] - warped[0]
        wo = sqrt(warped[0]*warped[1])

    # Get analog lowpass prototype
    if typefunc in [buttap, besselap]:
        z, p, k = typefunc(N)
    elif typefunc == cheb1ap:
        if rp is None:
            raise ValueError("passband ripple (rp) must be provided to design a Chebyshev I filter.")
        z, p, k = typefunc(N, rp)
    elif typefunc == cheb2ap:
        if rs is None:
            # BUG FIX: corrected typo "atteunatuion" in the error message
            raise ValueError("stopband attenuation (rs) must be provided to design an Chebyshev II filter.")
        z, p, k = typefunc(N, rs)
    else:  # Elliptic filters
        if rs is None or rp is None:
            raise ValueError("Both rp and rs must be provided to design an elliptic filter.")
        z, p, k = typefunc(N, rp, rs)

    b, a = zpk2tf(z,p,k)

    # transform to lowpass, bandpass, highpass, or bandstop
    if btype == 'lowpass':
        b, a = lp2lp(b,a,wo=wo)
    elif btype == 'highpass':
        b, a = lp2hp(b,a,wo=wo)
    elif btype == 'bandpass':
        b, a = lp2bp(b,a,wo=wo,bw=bw)
    else:  # 'bandstop'
        b, a = lp2bs(b,a,wo=wo,bw=bw)

    # Find discrete equivalent if necessary
    if not analog:
        b, a = bilinear(b, a, fs=fs)

    # Transform to proper out type (pole-zero, state-space, numer-denom)
    if output == 'zpk':
        return tf2zpk(b,a)
    else:
        return b,a
def butter(N, Wn, btype='low', analog=0, output='ba'):
    """Butterworth digital and analog filter design.

    Design an Nth order lowpass digital or analog Butterworth filter and return
    the filter coefficients in (B,A) or (Z,P,K) form.

    See also
    --------
    buttord.
    """
    # thin wrapper: all the work happens in iirfilter with ftype='butter'
    return iirfilter(N, Wn, btype=btype, analog=analog, output=output, ftype='butter')
def cheby1(N, rp, Wn, btype='low', analog=0, output='ba'):
    """Chebyshev type I digital and analog filter design.

    Design an Nth order lowpass digital or analog Chebyshev type I filter and
    return the filter coefficients in (B,A) or (Z,P,K) form.
    `rp` is the maximum passband ripple in dB.

    See also
    --------
    cheb1ord.
    """
    return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog, output=output, ftype='cheby1')
def cheby2(N, rs, Wn, btype='low', analog=0, output='ba'):
    """Chebyshev type II digital and analog filter design.

    Design an Nth order lowpass digital or analog Chebyshev type II filter and
    return the filter coefficients in (B,A) or (Z,P,K) form.
    `rs` is the minimum stopband attenuation in dB.
    (Original docstring said "type I" -- copy-paste error; this is type II,
    as the `ftype='cheby2'` below shows.)

    See also
    --------
    cheb2ord.
    """
    return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog, output=output, ftype='cheby2')
def ellip(N, rp, rs, Wn, btype='low', analog=0, output='ba'):
    """Elliptic (Cauer) digital and analog filter design.

    Design an Nth order lowpass digital or analog elliptic filter and return
    the filter coefficients in (B,A) or (Z,P,K) form.
    `rp` is the maximum passband ripple, `rs` the minimum stopband
    attenuation, both in dB.

    See also
    --------
    ellipord.
    """
    return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog, output=output, ftype='elliptic')
def bessel(N, Wn, btype='low', analog=0, output='ba'):
    """Bessel digital and analog filter design.

    Design an Nth order lowpass digital or analog Bessel filter and return the
    filter coefficients in (B,A) or (Z,P,K) form.
    """
    return iirfilter(N, Wn, btype=btype, analog=analog, output=output, ftype='bessel')

def maxflat():
    # placeholder -- maximally-flat design not implemented
    pass

def yulewalk():
    # placeholder -- Yule-Walker design not implemented
    pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
    """Band Stop Objective Function for order minimization.

    Returns the non-integer order for an analog band stop filter.

    Parameters
    ----------
    wp :
        Edge of passband `passb`.
    ind : int
        Index specifying which `passb` edge to vary (0 or 1).
    passb : array_like
        Two element sequence of fixed passband edges.
    stopb : array_like
        Two element sequence of fixed stopband edges.
    gstop : float
        Amount of attenuation in stopband in dB.
    gpass : float
        Amount of ripple in the passband in dB.
    type : ['butter', 'cheby', 'ellip']
        Type of filter.

    Returns
    -------
    n : scalar
        Filter order (possibly non-integer).
    """
    # try the candidate passband edge without mutating the caller's array
    trial_passb = passb.copy()
    trial_passb[ind] = wp
    # normalized analog transition frequency for the band-stop transform
    nat = (stopb * (trial_passb[0] - trial_passb[1])
           / (stopb ** 2 - trial_passb[0] * trial_passb[1]))
    nat = min(abs(nat))

    if type == 'butter':
        GSTOP = 10 ** (0.1 * abs(gstop))
        GPASS = 10 ** (0.1 * abs(gpass))
        n = log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))
    elif type == 'cheby':
        GSTOP = 10 ** (0.1 * abs(gstop))
        GPASS = 10 ** (0.1 * abs(gpass))
        n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
    elif type == 'ellip':
        GSTOP = 10 ** (0.1 * gstop)
        GPASS = 10 ** (0.1 * gpass)
        arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
        arg0 = 1.0 / nat
        # complete elliptic integrals of the modulus and its complement
        d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
        d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
        n = d0[0] * d1[1] / (d0[1] * d1[0])
    else:
        raise ValueError("Incorrect type: %s" % type)
    return n
def buttord(wp, ws, gpass, gstop, analog=0):
    """Butterworth filter order selection.

    Return the order of the lowest order digital Butterworth filter that loses
    no more than `gpass` dB in the passband and has at least `gstop` dB
    attenuation in the stopband.

    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies, normalized from 0 to 1 (1
        corresponds to pi radians / sample). For example:
            - Lowpass: wp = 0.2, ws = 0.3
            - Highpass: wp = 0.3, ws = 0.2
            - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
            - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : int, optional
        Non-zero to design an analog filter (in this case `wp` and `ws` are in
        radians / second).

    Returns
    -------
    ord : int
        The lowest order for a Butterworth filter which meets specs.
    wn : ndarray or float
        The Butterworth natural frequency (i.e. the "3dB frequency"). Should
        be used with `butter` to give filter results.
    """
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    # Band type code: 1=lowpass, 2=highpass, 3=bandstop, 4=bandpass.
    filter_type = 2*(len(wp)-1)
    filter_type += 1
    if wp[0] >= ws[0]:
        filter_type += 1

    # Pre-warp frequencies for the digital-filter case.
    if not analog:
        passb = tan(wp*pi/2.0)
        stopb = tan(ws*pi/2.0)
    else:
        passb = wp*1.0
        stopb = ws*1.0

    # Map the band edges to the selectivity `nat` of the lowpass prototype.
    if filter_type == 1:            # low
        nat = stopb / passb
    elif filter_type == 2:          # high
        nat = passb / stopb
    elif filter_type == 3:          # stop
        # Optimize the passband edge positions to minimize the required order.
        wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
                                 args=(0, passb, stopb, gpass, gstop, 'butter'),
                                 disp=0)
        passb[0] = wp0
        wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
                                 args=(1, passb, stopb, gpass, gstop, 'butter'),
                                 disp=0)
        passb[1] = wp1
        nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
    elif filter_type == 4:          # pass
        nat = (stopb**2 - passb[0]*passb[1]) / (stopb * (passb[0]-passb[1]))

    nat = min(abs(nat))

    GSTOP = 10**(0.1*abs(gstop))
    GPASS = 10**(0.1*abs(gpass))
    ord = int(ceil(log10((GSTOP-1.0)/(GPASS-1.0)) / (2*log10(nat))))

    # Find the butterworth natural frequency W0 (or the "3dB" frequency)
    # to give exactly gstop at nat.  W0 will be between 1 and nat.
    try:
        W0 = nat / ((10**(0.1*abs(gstop))-1)**(1.0/(2.0*ord)))
    except ZeroDivisionError:
        W0 = nat
        # Fixed: was a Python-2-only print statement with a garbled message.
        print("Warning, order is zero...check input parameters.")

    # Now convert this frequency back from the lowpass prototype
    # to the original analog filter.
    if filter_type == 1:            # low
        WN = W0*passb
    elif filter_type == 2:          # high
        WN = passb / W0
    elif filter_type == 3:          # stop
        WN = numpy.zeros(2, float)
        WN[0] = ((passb[1] - passb[0]) + sqrt((passb[1] - passb[0])**2 +
                                              4*W0**2 * passb[0] * passb[1])) / (2*W0)
        WN[1] = ((passb[1] - passb[0]) - sqrt((passb[1] - passb[0])**2 +
                                              4*W0**2 * passb[0] * passb[1])) / (2*W0)
        WN = numpy.sort(abs(WN))
    elif filter_type == 4:          # pass
        W0 = numpy.array([-W0, W0], float)
        WN = -W0 * (passb[1]-passb[0]) / 2.0 + sqrt(W0**2 / 4.0 *
                                                    (passb[1]-passb[0])**2 +
                                                    passb[0]*passb[1])
        WN = numpy.sort(abs(WN))
    else:
        raise ValueError("Bad type: %s" % filter_type)

    # Un-warp back to normalized digital frequency.
    if not analog:
        wn = (2.0/pi)*arctan(WN)
    else:
        wn = WN

    if len(wn) == 1:
        wn = wn[0]
    return ord, wn
def cheb1ord(wp, ws, gpass, gstop, analog=0):
    """Chebyshev type I filter order selection.

    Return the order of the lowest order digital Chebyshev Type I filter that
    loses no more than `gpass` dB in the passband and has at least `gstop` dB
    attenuation in the stopband.

    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies, normalized from 0 to 1 (1
        corresponds to pi radians / sample).  Scalars for lowpass/highpass,
        two element sequences for bandpass/bandstop.
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : int, optional
        Non-zero to design an analog filter (`wp` and `ws` in radians/second).

    Returns
    -------
    ord : int
        The lowest order for a Chebyshev type I filter that meets specs.
    wn : ndarray or float
        The Chebyshev natural frequency for use with `cheby1`.
    """
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    # Band type code: 1=lowpass, 2=highpass, 3=bandstop, 4=bandpass.
    band = 2 * (len(wp) - 1)
    band += 1 if wp[0] < ws[0] else 2

    # Pre-warp the band edges unless designing an analog filter.
    if analog:
        passb, stopb = wp * 1.0, ws * 1.0
    else:
        passb, stopb = tan(pi * wp / 2.), tan(pi * ws / 2.)

    # Map the band edges to the lowpass-prototype selectivity.
    if band == 1:      # lowpass
        nat = stopb / passb
    elif band == 2:    # highpass
        nat = passb / stopb
    elif band == 3:    # bandstop: optimize the passband edges first
        new0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
                                  args=(0, passb, stopb, gpass, gstop, 'cheby'),
                                  disp=0)
        passb[0] = new0
        new1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
                                  args=(1, passb, stopb, gpass, gstop, 'cheby'),
                                  disp=0)
        passb[1] = new1
        nat = (stopb * (passb[0] - passb[1])) / (stopb**2 - passb[0] * passb[1])
    else:              # bandpass
        nat = (stopb**2 - passb[0] * passb[1]) / (stopb * (passb[0] - passb[1]))

    nat = min(abs(nat))

    gstop_lin = 10 ** (0.1 * abs(gstop))
    gpass_lin = 10 ** (0.1 * abs(gpass))
    ord = int(ceil(arccosh(sqrt((gstop_lin - 1.0) / (gpass_lin - 1.0))) /
                   arccosh(nat)))

    # Natural frequencies are just the passband edges (un-warped when digital).
    wn = passb if analog else (2.0 / pi) * arctan(passb)
    if len(wn) == 1:
        wn = wn[0]
    return ord, wn
def cheb2ord(wp, ws, gpass, gstop, analog=0):
    """Chebyshev type II filter order selection.

    Return the order of the lowest order digital Chebyshev Type II filter
    that loses no more than gpass dB in the passband and has at least gstop dB
    attenuation in the stopband.

    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies, normalized from 0 to 1 (1
        corresponds to pi radians / sample). For example:
            - Lowpass: wp = 0.2, ws = 0.3
            - Highpass: wp = 0.3, ws = 0.2
            - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
            - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : int, optional
        Non-zero to design an analog filter (in this case `wp` and `ws` are in
        radians / second).

    Returns
    -------
    ord : int
        The lowest order for a Chebyshev type II filter that meets specs.
    wn : ndarray or float
        The Chebyshev natural frequency (the "3dB frequency") for use with
        `cheby2` to give filter results.
    """
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    # Band type code: 1=lowpass, 2=highpass, 3=bandstop, 4=bandpass.
    filter_type = 2*(len(wp)-1)
    if wp[0] < ws[0]:
        filter_type += 1
    else:
        filter_type += 2

    # Pre-warp frequencies for the digital-filter case.
    if not analog:
        passb = tan(pi*wp/2.0)
        stopb = tan(pi*ws/2.0)
    else:
        passb = wp*1.0
        stopb = ws*1.0

    # Map the band edges to the selectivity `nat` of the lowpass prototype.
    if filter_type == 1:           # low
        nat = stopb / passb
    elif filter_type == 2:          # high
        nat = passb / stopb
    elif filter_type == 3:     # stop
        # Optimize the passband edge positions to minimize the required order.
        wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
                                 args=(0,passb,stopb,gpass,gstop,'cheby'),
                                 disp=0)
        passb[0] = wp0
        wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
                                 args=(1,passb,stopb,gpass,gstop,'cheby'),
                                 disp=0)
        passb[1] = wp1
        nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
    elif filter_type == 4:          # pass
        nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))

    nat = min(abs(nat))

    GSTOP = 10**(0.1*abs(gstop))
    GPASS = 10**(0.1*abs(gpass))
    ord = int(ceil(arccosh(sqrt((GSTOP-1.0) / (GPASS-1.0))) / arccosh(nat)))

    # Find frequency where analog response is -gpass dB.
    # Then convert back from low-pass prototype to the original filter.
    new_freq = cosh(1.0/ord * arccosh(sqrt((GSTOP-1.0)/(GPASS-1.0))))
    new_freq = 1.0 / new_freq

    if filter_type == 1:
        nat = passb / new_freq
    elif filter_type == 2:
        nat = passb * new_freq
    elif filter_type == 3:
        nat = numpy.zeros(2,float)
        nat[0] = new_freq / 2.0 * (passb[0]-passb[1]) + \
                 sqrt(new_freq**2 * (passb[1]-passb[0])**2 / 4.0 + \
                      passb[1] * passb[0])
        nat[1] = passb[1] * passb[0] / nat[0]
    elif filter_type == 4:
        nat = numpy.zeros(2,float)
        nat[0] = 1.0/(2.0*new_freq) * (passb[0] - passb[1]) + \
                 sqrt((passb[1]-passb[0])**2 / (4.0*new_freq**2) + \
                      passb[1] * passb[0])
        nat[1] = passb[0] * passb[1] / nat[0]

    # Un-warp back to normalized digital frequency.
    if not analog:
        wn = (2.0/pi)*arctan(nat)
    else:
        wn = nat

    if len(wn) == 1:
        wn = wn[0]
    return ord, wn
def ellipord(wp, ws, gpass, gstop, analog=0):
    """Elliptic (Cauer) filter order selection.

    Return the order of the lowest order digital elliptic filter that loses no
    more than gpass dB in the passband and has at least gstop dB attenuation in
    the stopband.

    Parameters
    ----------
    wp, ws : float
        Passband and stopband edge frequencies, normalized from 0 to 1 (1
        corresponds to pi radians / sample). For example:
            - Lowpass: wp = 0.2, ws = 0.3
            - Highpass: wp = 0.3, ws = 0.2
            - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
            - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
    gpass : float
        The maximum loss in the passband (dB).
    gstop : float
        The minimum attenuation in the stopband (dB).
    analog : int, optional
        Non-zero to design an analog filter (in this case `wp` and `ws` are in
        radians / second).

    Returns
    -------
    ord : int
        The lowest order for an Elliptic (Cauer) filter that meets specs.
    wn : ndarray or float
        The natural frequency (the "3dB frequency") for use with
        `ellip` to give filter results.
    """
    wp = atleast_1d(wp)
    ws = atleast_1d(ws)
    # Band type code: 1=lowpass, 2=highpass, 3=bandstop, 4=bandpass.
    filter_type = 2*(len(wp)-1)
    filter_type += 1
    if wp[0] >= ws[0]:
        filter_type += 1

    # Pre-warp frequencies for the digital-filter case.
    if analog:
        passb = wp*1.0
        stopb = ws*1.0
    else:
        passb = tan(wp*pi/2.0)
        stopb = tan(ws*pi/2.0)

    # Map the band edges to the selectivity `nat` of the lowpass prototype.
    if filter_type == 1:           # low
        nat = stopb / passb
    elif filter_type == 2:          # high
        nat = passb / stopb
    elif filter_type == 3:     # stop
        # Optimize the passband edge positions to minimize the required order.
        wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0]-1e-12,
                                 args=(0,passb,stopb,gpass,gstop,'ellip'),
                                 disp=0)
        passb[0] = wp0
        wp1 = optimize.fminbound(band_stop_obj, stopb[1]+1e-12, passb[1],
                                 args=(1,passb,stopb,gpass,gstop,'ellip'),
                                 disp=0)
        passb[1] = wp1
        nat = (stopb * (passb[0]-passb[1])) / (stopb**2 - passb[0]*passb[1])
    elif filter_type == 4:          # pass
        nat = (stopb**2 - passb[0]*passb[1]) / (stopb* (passb[0]-passb[1]))

    nat = min(abs(nat))

    # Elliptic order estimate from ratios of complete elliptic integrals.
    GSTOP = 10**(0.1*gstop)
    GPASS = 10**(0.1*gpass)
    arg1 = sqrt( (GPASS-1.0) / (GSTOP-1.0) )
    arg0 = 1.0 / nat
    d0 = special.ellipk([arg0**2, 1-arg0**2])
    d1 = special.ellipk([arg1**2, 1-arg1**2])
    ord = int(ceil(d0[0]*d1[1] / (d0[1]*d1[0])))

    # Natural frequencies are just the passband edges (un-warped when digital).
    if not analog:
        wn = arctan(passb)*2.0/pi
    else:
        wn = passb

    if len(wn) == 1:
        wn = wn[0]
    return ord, wn
def buttap(N):
    """Return (z,p,k) zero, pole, gain for the analog prototype of an Nth
    order Butterworth filter.

    The prototype has no zeros and unit gain; its poles are spaced
    equiangularly on the left half of the unit circle.
    """
    indices = numpy.arange(1, N+1)
    # Rotate the right-half-plane points onto the left half-plane.
    poles = numpy.exp(1j*(2*indices-1)/(2.0*N)*pi)*1j
    return [], poles, 1
def cheb1ap(N, rp):
    """Return (z,p,k) zero, pole, gain for Nth order Chebyshev type I lowpass
    analog filter prototype with `rp` decibels of ripple in the passband.

    Parameters
    ----------
    N : int
        Filter order.
    rp : float
        Passband ripple in dB.

    Returns
    -------
    z, p, k : zeros (empty list), poles (complex ndarray) and gain (float)
        of the prototype transfer function.
    """
    # (A stray module-level `pass` statement after this function was removed.)
    z = []
    # Ripple factor from the dB spec.
    eps = numpy.sqrt(10**(0.1*rp)-1.0)
    n = numpy.arange(1,N+1)
    mu = 1.0/N * numpy.log((1.0+numpy.sqrt(1+eps*eps)) / eps)
    # Chebyshev poles lie on an ellipse with half-axes sinh(mu), cosh(mu).
    theta = pi/2.0 * (2*n-1.0)/N
    p = -numpy.sinh(mu)*numpy.sin(theta) + 1j*numpy.cosh(mu)*numpy.cos(theta)
    k = numpy.prod(-p,axis=0).real
    if N % 2 == 0:
        # Even-order type I responses have gain 1/sqrt(1+eps^2) at DC.
        k = k / sqrt((1+eps*eps))
    return z, p, k
def cheb2ap(N, rs):
    """Return (z,p,k) zero, pole, gain for Nth order Chebyshev type II lowpass
    analog filter prototype with `rs` decibels of ripple in the stopband.

    Parameters
    ----------
    N : int
        Filter order.
    rs : float
        Stopband attenuation in dB.

    Returns
    -------
    z, p, k : zeros, poles (complex ndarrays) and gain (float) of the
        prototype transfer function.
    """
    # Ripple factor for the inverse-Chebyshev (type II) response.
    de = 1.0/sqrt(10**(0.1*rs)-1)
    mu = arcsinh(1.0/de)/N
    # (Removed an unused local `m` that was dead code in both branches.)
    if N % 2:
        # Odd order: skip the middle index whose cosine is zero (that zero
        # is at infinity).
        n = numpy.concatenate((numpy.arange(1,N-1,2),numpy.arange(N+2,2*N,2)))
    else:
        n = numpy.arange(1,2*N,2)
    # Zeros sit on the imaginary axis at reciprocals of the Chebyshev nodes;
    # poles are reciprocals of scaled type-I poles.
    z = conjugate(1j / cos(n*pi/(2.0*N)))
    p = exp(1j*(pi*numpy.arange(1,2*N,2)/(2.0*N) + pi/2.0))
    p = sinh(mu) * p.real + 1j*cosh(mu)*p.imag
    p = 1.0 / p
    k = (numpy.prod(-p,axis=0)/numpy.prod(-z,axis=0)).real
    return z, p, k
# Smallest parameter magnitude treated as nonzero by the elliptic-filter
# helper routines below (roughly double-precision machine epsilon).
EPSILON = 2e-16
def vratio(u, ineps, mp):
    # Objective minimized by `ellipap`: distance between 1/eps and the
    # Jacobi elliptic ratio sn/cn evaluated at u with parameter mp.
    [s,c,d,phi] = special.ellipj(u,mp)
    ret = abs(ineps - s/c)
    return ret
def kratio(m, k_ratio):
    """Objective used by `ellipap`: distance between K(m)/K'(m) and the
    target ratio, with the elliptic parameter clipped into [0, 1]."""
    param = min(max(float(m), 0.0), 1.0)
    if EPSILON < abs(param) and abs(param) + EPSILON < 1:
        ints = special.ellipk([param, 1 - param])
        diff = ints[0] / ints[1] - k_ratio
    elif abs(param) > EPSILON:
        # Parameter pinned at 1: K'(1) diverges, so the ratio tends to 0.
        diff = -k_ratio
    else:
        # Parameter pinned at 0: return a huge penalty value.
        diff = 1e20
    return abs(diff)
def ellipap(N, rp, rs):
    """Return (z,p,k) zeros, poles, and gain of an Nth order normalized
    prototype elliptic analog lowpass filter with `rp` decibels of ripple in
    the passband and a stopband `rs` decibels down.

    References
    ----------
    Lutova, Tosic, and Evans, "Filter Design for Signal Processing", Chapters 5
    and 12.
    """
    if N == 1:
        # First order degenerates to a single real pole, no zeros.
        p = -sqrt(1.0/(10**(0.1*rp)-1.0))
        k = -p
        z = []
        return z, p, k

    # Passband and stopband ripple factors.
    eps = numpy.sqrt(10**(0.1*rp)-1)
    ck1 = eps / numpy.sqrt(10**(0.1*rs)-1)
    ck1p = numpy.sqrt(1-ck1*ck1)
    if ck1p == 1:
        raise ValueError("Cannot design a filter with given rp and rs specifications.")

    wp = 1
    val = special.ellipk([ck1*ck1,ck1p*ck1p])
    if abs(1-ck1p*ck1p) < EPSILON:
        krat = 0
    else:
        krat = N*val[0] / val[1]

    # Solve for the elliptic parameter m whose K(m)/K'(m) matches krat.
    m = optimize.fmin(kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
                      disp=0)
    if m < 0 or m > 1:
        m = optimize.fminbound(kratio, 0, 1, args=(krat,), maxfun=250,
                               maxiter=250, disp=0)

    capk = special.ellipk(m)
    ws = wp / sqrt(m)
    m1 = 1-m

    # Zeros: purely imaginary, from the Jacobi sn at odd multiples of K/N
    # (indices with sn == 0 are dropped — they map to zeros at infinity).
    j = numpy.arange(1-N%2,N,2)
    jj = len(j)

    [s,c,d,phi] = special.ellipj(j*capk/N,m*numpy.ones(jj))
    snew = numpy.compress(abs(s) > EPSILON, s,axis=-1)
    z = 1.0 / (sqrt(m)*snew)
    z = 1j*z
    z = numpy.concatenate((z,conjugate(z)))

    # Poles: find the imaginary-axis shift v0, then evaluate the pole formula.
    r = optimize.fmin(vratio, special.ellipk(m), args=(1./eps, ck1p*ck1p),
                      maxfun=250, maxiter=250, disp=0)
    v0 = capk * r / (N*val[0])

    [sv,cv,dv,phi] = special.ellipj(v0,1-m)
    p = -(c*d*sv*cv + 1j*s*dv) / (1-(d*sv)**2.0)

    if N % 2:
        # Odd order: keep the lone real pole once, conjugate only the rest.
        newp = numpy.compress(abs(p.imag) > EPSILON*numpy.sqrt(numpy.sum(p*numpy.conjugate(p),axis=0).real), p,axis=-1)
        p = numpy.concatenate((p,conjugate(newp)))
    else:
        p = numpy.concatenate((p,conjugate(p)))

    k = (numpy.prod(-p,axis=0) / numpy.prod(-z,axis=0)).real
    if N % 2 == 0:
        # Even order: scale so the passband ripples between 1 and 1/sqrt(1+eps^2).
        k = k / numpy.sqrt((1+eps*eps))

    return z, p, k
def besselap(N):
    """Return (z,p,k) zero, pole, gain for the analog prototype of an Nth
    order Bessel filter.

    The poles come from precomputed tables (orders 0 through 25) of the
    roots of the reverse Bessel polynomials; the prototype has no zeros
    and unit gain.

    Raises
    ------
    ValueError
        If `N` is not in the supported range [0, 25].
    """
    z = []
    k = 1
    if N == 0:
        p = []
    elif N == 1:
        p = [-1]
    elif N == 2:
        p = [-.8660254037844386467637229+.4999999999999999999999996*1j,
             -.8660254037844386467637229-.4999999999999999999999996*1j]
    elif N == 3:
        p = [-.9416000265332067855971980,
             -.7456403858480766441810907-.7113666249728352680992154*1j,
             -.7456403858480766441810907+.7113666249728352680992154*1j]
    elif N == 4:
        p = [-.6572111716718829545787781-.8301614350048733772399715*1j,
             -.6572111716718829545787788+.8301614350048733772399715*1j,
             -.9047587967882449459642637-.2709187330038746636700923*1j,
             -.9047587967882449459642624+.2709187330038746636700926*1j]
    elif N == 5:
        p = [-.9264420773877602247196260,
             -.8515536193688395541722677-.4427174639443327209850002*1j,
             -.8515536193688395541722677+.4427174639443327209850002*1j,
             -.5905759446119191779319432-.9072067564574549539291747*1j,
             -.5905759446119191779319432+.9072067564574549539291747*1j]
    elif N == 6:
        p = [-.9093906830472271808050953-.1856964396793046769246397*1j,
             -.9093906830472271808050953+.1856964396793046769246397*1j,
             -.7996541858328288520243325-.5621717346937317988594118*1j,
             -.7996541858328288520243325+.5621717346937317988594118*1j,
             -.5385526816693109683073792-.9616876881954277199245657*1j,
             -.5385526816693109683073792+.9616876881954277199245657*1j]
    elif N == 7:
        p = [-.9194871556490290014311619,
             -.8800029341523374639772340-.3216652762307739398381830*1j,
             -.8800029341523374639772340+.3216652762307739398381830*1j,
             -.7527355434093214462291616-.6504696305522550699212995*1j,
             -.7527355434093214462291616+.6504696305522550699212995*1j,
             -.4966917256672316755024763-1.002508508454420401230220*1j,
             -.4966917256672316755024763+1.002508508454420401230220*1j]
    elif N == 8:
        p = [-.9096831546652910216327629-.1412437976671422927888150*1j,
             -.9096831546652910216327629+.1412437976671422927888150*1j,
             -.8473250802359334320103023-.4259017538272934994996429*1j,
             -.8473250802359334320103023+.4259017538272934994996429*1j,
             -.7111381808485399250796172-.7186517314108401705762571*1j,
             -.7111381808485399250796172+.7186517314108401705762571*1j,
             -.4621740412532122027072175-1.034388681126901058116589*1j,
             -.4621740412532122027072175+1.034388681126901058116589*1j]
    elif N == 9:
        p = [-.9154957797499037686769223,
             -.8911217017079759323183848-.2526580934582164192308115*1j,
             -.8911217017079759323183848+.2526580934582164192308115*1j,
             -.8148021112269012975514135-.5085815689631499483745341*1j,
             -.8148021112269012975514135+.5085815689631499483745341*1j,
             -.6743622686854761980403401-.7730546212691183706919682*1j,
             -.6743622686854761980403401+.7730546212691183706919682*1j,
             -.4331415561553618854685942-1.060073670135929666774323*1j,
             -.4331415561553618854685942+1.060073670135929666774323*1j]
    elif N == 10:
        p = [-.9091347320900502436826431-.1139583137335511169927714*1j,
             -.9091347320900502436826431+.1139583137335511169927714*1j,
             -.8688459641284764527921864-.3430008233766309973110589*1j,
             -.8688459641284764527921864+.3430008233766309973110589*1j,
             -.7837694413101441082655890-.5759147538499947070009852*1j,
             -.7837694413101441082655890+.5759147538499947070009852*1j,
             -.6417513866988316136190854-.8175836167191017226233947*1j,
             -.6417513866988316136190854+.8175836167191017226233947*1j,
             -.4083220732868861566219785-1.081274842819124562037210*1j,
             -.4083220732868861566219785+1.081274842819124562037210*1j]
    elif N == 11:
        # NOTE: a trailing comma was missing upstream after the first
        # complex pole below, which silently merged two poles into one
        # subtraction expression and left N=11 with only 10 poles.
        p = [-.9129067244518981934637318,
             -.8963656705721166099815744-.2080480375071031919692341*1j,
             -.8963656705721166099815744+.2080480375071031919692341*1j,
             -.8453044014712962954184557-.4178696917801248292797448*1j,
             -.8453044014712962954184557+.4178696917801248292797448*1j,
             -.7546938934722303128102142-.6319150050721846494520941*1j,
             -.7546938934722303128102142+.6319150050721846494520941*1j,
             -.6126871554915194054182909-.8547813893314764631518509*1j,
             -.6126871554915194054182909+.8547813893314764631518509*1j,
             -.3868149510055090879155425-1.099117466763120928733632*1j,
             -.3868149510055090879155425+1.099117466763120928733632*1j]
    elif N == 12:
        p = [-.9084478234140682638817772-95506365213450398415258360.0e-27*1j,
             -.9084478234140682638817772+95506365213450398415258360.0e-27*1j,
             -.8802534342016826507901575-.2871779503524226723615457*1j,
             -.8802534342016826507901575+.2871779503524226723615457*1j,
             -.8217296939939077285792834-.4810212115100676440620548*1j,
             -.8217296939939077285792834+.4810212115100676440620548*1j,
             -.7276681615395159454547013-.6792961178764694160048987*1j,
             -.7276681615395159454547013+.6792961178764694160048987*1j,
             -.5866369321861477207528215-.8863772751320727026622149*1j,
             -.5866369321861477207528215+.8863772751320727026622149*1j,
             -.3679640085526312839425808-1.114373575641546257595657*1j,
             -.3679640085526312839425808+1.114373575641546257595657*1j]
    elif N == 13:
        p = [-.9110914665984182781070663,
             -.8991314665475196220910718-.1768342956161043620980863*1j,
             -.8991314665475196220910718+.1768342956161043620980863*1j,
             -.8625094198260548711573628-.3547413731172988997754038*1j,
             -.8625094198260548711573628+.3547413731172988997754038*1j,
             -.7987460692470972510394686-.5350752120696801938272504*1j,
             -.7987460692470972510394686+.5350752120696801938272504*1j,
             -.7026234675721275653944062-.7199611890171304131266374*1j,
             -.7026234675721275653944062+.7199611890171304131266374*1j,
             -.5631559842430199266325818-.9135900338325109684927731*1j,
             -.5631559842430199266325818+.9135900338325109684927731*1j,
             -.3512792323389821669401925-1.127591548317705678613239*1j,
             -.3512792323389821669401925+1.127591548317705678613239*1j]
    elif N == 14:
        p = [-.9077932138396487614720659-82196399419401501888968130.0e-27*1j,
             -.9077932138396487614720659+82196399419401501888968130.0e-27*1j,
             -.8869506674916445312089167-.2470079178765333183201435*1j,
             -.8869506674916445312089167+.2470079178765333183201435*1j,
             -.8441199160909851197897667-.4131653825102692595237260*1j,
             -.8441199160909851197897667+.4131653825102692595237260*1j,
             -.7766591387063623897344648-.5819170677377608590492434*1j,
             -.7766591387063623897344648+.5819170677377608590492434*1j,
             -.6794256425119233117869491-.7552857305042033418417492*1j,
             -.6794256425119233117869491+.7552857305042033418417492*1j,
             -.5418766775112297376541293-.9373043683516919569183099*1j,
             -.5418766775112297376541293+.9373043683516919569183099*1j,
             -.3363868224902037330610040-1.139172297839859991370924*1j,
             -.3363868224902037330610040+1.139172297839859991370924*1j]
    elif N == 15:
        p = [-.9097482363849064167228581,
             -.9006981694176978324932918-.1537681197278439351298882*1j,
             -.9006981694176978324932918+.1537681197278439351298882*1j,
             -.8731264620834984978337843-.3082352470564267657715883*1j,
             -.8731264620834984978337843+.3082352470564267657715883*1j,
             -.8256631452587146506294553-.4642348752734325631275134*1j,
             -.8256631452587146506294553+.4642348752734325631275134*1j,
             -.7556027168970728127850416-.6229396358758267198938604*1j,
             -.7556027168970728127850416+.6229396358758267198938604*1j,
             -.6579196593110998676999362-.7862895503722515897065645*1j,
             -.6579196593110998676999362+.7862895503722515897065645*1j,
             -.5224954069658330616875186-.9581787261092526478889345*1j,
             -.5224954069658330616875186+.9581787261092526478889345*1j,
             -.3229963059766444287113517-1.149416154583629539665297*1j,
             -.3229963059766444287113517+1.149416154583629539665297*1j]
    elif N == 16:
        p = [-.9072099595087001356491337-72142113041117326028823950.0e-27*1j,
             -.9072099595087001356491337+72142113041117326028823950.0e-27*1j,
             -.8911723070323647674780132-.2167089659900576449410059*1j,
             -.8911723070323647674780132+.2167089659900576449410059*1j,
             -.8584264231521330481755780-.3621697271802065647661080*1j,
             -.8584264231521330481755780+.3621697271802065647661080*1j,
             -.8074790293236003885306146-.5092933751171800179676218*1j,
             -.8074790293236003885306146+.5092933751171800179676218*1j,
             -.7356166304713115980927279-.6591950877860393745845254*1j,
             -.7356166304713115980927279+.6591950877860393745845254*1j,
             -.6379502514039066715773828-.8137453537108761895522580*1j,
             -.6379502514039066715773828+.8137453537108761895522580*1j,
             -.5047606444424766743309967-.9767137477799090692947061*1j,
             -.5047606444424766743309967+.9767137477799090692947061*1j,
             -.3108782755645387813283867-1.158552841199330479412225*1j,
             -.3108782755645387813283867+1.158552841199330479412225*1j]
    elif N == 17:
        p = [-.9087141161336397432860029,
             -.9016273850787285964692844-.1360267995173024591237303*1j,
             -.9016273850787285964692844+.1360267995173024591237303*1j,
             -.8801100704438627158492165-.2725347156478803885651973*1j,
             -.8801100704438627158492165+.2725347156478803885651973*1j,
             -.8433414495836129204455491-.4100759282910021624185986*1j,
             -.8433414495836129204455491+.4100759282910021624185986*1j,
             -.7897644147799708220288138-.5493724405281088674296232*1j,
             -.7897644147799708220288138+.5493724405281088674296232*1j,
             -.7166893842372349049842743-.6914936286393609433305754*1j,
             -.7166893842372349049842743+.6914936286393609433305754*1j,
             -.6193710717342144521602448-.8382497252826992979368621*1j,
             -.6193710717342144521602448+.8382497252826992979368621*1j,
             -.4884629337672704194973683-.9932971956316781632345466*1j,
             -.4884629337672704194973683+.9932971956316781632345466*1j,
             -.2998489459990082015466971-1.166761272925668786676672*1j,
             -.2998489459990082015466971+1.166761272925668786676672*1j]
    elif N == 18:
        p = [-.9067004324162775554189031-64279241063930693839360680.0e-27*1j,
             -.9067004324162775554189031+64279241063930693839360680.0e-27*1j,
             -.8939764278132455733032155-.1930374640894758606940586*1j,
             -.8939764278132455733032155+.1930374640894758606940586*1j,
             -.8681095503628830078317207-.3224204925163257604931634*1j,
             -.8681095503628830078317207+.3224204925163257604931634*1j,
             -.8281885016242836608829018-.4529385697815916950149364*1j,
             -.8281885016242836608829018+.4529385697815916950149364*1j,
             -.7726285030739558780127746-.5852778162086640620016316*1j,
             -.7726285030739558780127746+.5852778162086640620016316*1j,
             -.6987821445005273020051878-.7204696509726630531663123*1j,
             -.6987821445005273020051878+.7204696509726630531663123*1j,
             -.6020482668090644386627299-.8602708961893664447167418*1j,
             -.6020482668090644386627299+.8602708961893664447167418*1j,
             -.4734268069916151511140032-1.008234300314801077034158*1j,
             -.4734268069916151511140032+1.008234300314801077034158*1j,
             -.2897592029880489845789953-1.174183010600059128532230*1j,
             -.2897592029880489845789953+1.174183010600059128532230*1j]
    elif N == 19:
        p = [-.9078934217899404528985092,
             -.9021937639390660668922536-.1219568381872026517578164*1j,
             -.9021937639390660668922536+.1219568381872026517578164*1j,
             -.8849290585034385274001112-.2442590757549818229026280*1j,
             -.8849290585034385274001112+.2442590757549818229026280*1j,
             -.8555768765618421591093993-.3672925896399872304734923*1j,
             -.8555768765618421591093993+.3672925896399872304734923*1j,
             -.8131725551578197705476160-.4915365035562459055630005*1j,
             -.8131725551578197705476160+.4915365035562459055630005*1j,
             -.7561260971541629355231897-.6176483917970178919174173*1j,
             -.7561260971541629355231897+.6176483917970178919174173*1j,
             -.6818424412912442033411634-.7466272357947761283262338*1j,
             -.6818424412912442033411634+.7466272357947761283262338*1j,
             -.5858613321217832644813602-.8801817131014566284786759*1j,
             -.5858613321217832644813602+.8801817131014566284786759*1j,
             -.4595043449730988600785456-1.021768776912671221830298*1j,
             -.4595043449730988600785456+1.021768776912671221830298*1j,
             -.2804866851439370027628724-1.180931628453291873626003*1j,
             -.2804866851439370027628724+1.180931628453291873626003*1j]
    elif N == 20:
        p = [-.9062570115576771146523497-57961780277849516990208850.0e-27*1j,
             -.9062570115576771146523497+57961780277849516990208850.0e-27*1j,
             -.8959150941925768608568248-.1740317175918705058595844*1j,
             -.8959150941925768608568248+.1740317175918705058595844*1j,
             -.8749560316673332850673214-.2905559296567908031706902*1j,
             -.8749560316673332850673214+.2905559296567908031706902*1j,
             -.8427907479956670633544106-.4078917326291934082132821*1j,
             -.8427907479956670633544106+.4078917326291934082132821*1j,
             -.7984251191290606875799876-.5264942388817132427317659*1j,
             -.7984251191290606875799876+.5264942388817132427317659*1j,
             -.7402780309646768991232610-.6469975237605228320268752*1j,
             -.7402780309646768991232610+.6469975237605228320268752*1j,
             -.6658120544829934193890626-.7703721701100763015154510*1j,
             -.6658120544829934193890626+.7703721701100763015154510*1j,
             -.5707026806915714094398061-.8982829066468255593407161*1j,
             -.5707026806915714094398061+.8982829066468255593407161*1j,
             -.4465700698205149555701841-1.034097702560842962315411*1j,
             -.4465700698205149555701841+1.034097702560842962315411*1j,
             -.2719299580251652601727704-1.187099379810885886139638*1j,
             -.2719299580251652601727704+1.187099379810885886139638*1j]
    elif N == 21:
        p = [-.9072262653142957028884077,
             -.9025428073192696303995083-.1105252572789856480992275*1j,
             -.9025428073192696303995083+.1105252572789856480992275*1j,
             -.8883808106664449854431605-.2213069215084350419975358*1j,
             -.8883808106664449854431605+.2213069215084350419975358*1j,
             -.8643915813643204553970169-.3326258512522187083009453*1j,
             -.8643915813643204553970169+.3326258512522187083009453*1j,
             -.8299435470674444100273463-.4448177739407956609694059*1j,
             -.8299435470674444100273463+.4448177739407956609694059*1j,
             -.7840287980408341576100581-.5583186348022854707564856*1j,
             -.7840287980408341576100581+.5583186348022854707564856*1j,
             -.7250839687106612822281339-.6737426063024382240549898*1j,
             -.7250839687106612822281339+.6737426063024382240549898*1j,
             -.6506315378609463397807996-.7920349342629491368548074*1j,
             -.6506315378609463397807996+.7920349342629491368548074*1j,
             -.5564766488918562465935297-.9148198405846724121600860*1j,
             -.5564766488918562465935297+.9148198405846724121600860*1j,
             -.4345168906815271799687308-1.045382255856986531461592*1j,
             -.4345168906815271799687308+1.045382255856986531461592*1j,
             -.2640041595834031147954813-1.192762031948052470183960*1j,
             -.2640041595834031147954813+1.192762031948052470183960*1j]
    elif N == 22:
        p = [-.9058702269930872551848625-52774908289999045189007100.0e-27*1j,
             -.9058702269930872551848625+52774908289999045189007100.0e-27*1j,
             -.8972983138153530955952835-.1584351912289865608659759*1j,
             -.8972983138153530955952835+.1584351912289865608659759*1j,
             -.8799661455640176154025352-.2644363039201535049656450*1j,
             -.8799661455640176154025352+.2644363039201535049656450*1j,
             -.8534754036851687233084587-.3710389319482319823405321*1j,
             -.8534754036851687233084587+.3710389319482319823405321*1j,
             -.8171682088462720394344996-.4785619492202780899653575*1j,
             -.8171682088462720394344996+.4785619492202780899653575*1j,
             -.7700332930556816872932937-.5874255426351153211965601*1j,
             -.7700332930556816872932937+.5874255426351153211965601*1j,
             -.7105305456418785989070935-.6982266265924524000098548*1j,
             -.7105305456418785989070935+.6982266265924524000098548*1j,
             -.6362427683267827226840153-.8118875040246347267248508*1j,
             -.6362427683267827226840153+.8118875040246347267248508*1j,
             -.5430983056306302779658129-.9299947824439872998916657*1j,
             -.5430983056306302779658129+.9299947824439872998916657*1j,
             -.4232528745642628461715044-1.055755605227545931204656*1j,
             -.4232528745642628461715044+1.055755605227545931204656*1j,
             -.2566376987939318038016012-1.197982433555213008346532*1j,
             -.2566376987939318038016012+1.197982433555213008346532*1j]
    elif N == 23:
        p = [-.9066732476324988168207439,
             -.9027564979912504609412993-.1010534335314045013252480*1j,
             -.9027564979912504609412993+.1010534335314045013252480*1j,
             -.8909283242471251458653994-.2023024699381223418195228*1j,
             -.8909283242471251458653994+.2023024699381223418195228*1j,
             -.8709469395587416239596874-.3039581993950041588888925*1j,
             -.8709469395587416239596874+.3039581993950041588888925*1j,
             -.8423805948021127057054288-.4062657948237602726779246*1j,
             -.8423805948021127057054288+.4062657948237602726779246*1j,
             -.8045561642053176205623187-.5095305912227258268309528*1j,
             -.8045561642053176205623187+.5095305912227258268309528*1j,
             -.7564660146829880581478138-.6141594859476032127216463*1j,
             -.7564660146829880581478138+.6141594859476032127216463*1j,
             -.6965966033912705387505040-.7207341374753046970247055*1j,
             -.6965966033912705387505040+.7207341374753046970247055*1j,
             -.6225903228771341778273152-.8301558302812980678845563*1j,
             -.6225903228771341778273152+.8301558302812980678845563*1j,
             -.5304922463810191698502226-.9439760364018300083750242*1j,
             -.5304922463810191698502226+.9439760364018300083750242*1j,
             -.4126986617510148836149955-1.065328794475513585531053*1j,
             -.4126986617510148836149955+1.065328794475513585531053*1j,
             -.2497697202208956030229911-1.202813187870697831365338*1j,
             -.2497697202208956030229911+1.202813187870697831365338*1j]
    elif N == 24:
        p = [-.9055312363372773709269407-48440066540478700874836350.0e-27*1j,
             -.9055312363372773709269407+48440066540478700874836350.0e-27*1j,
             -.8983105104397872954053307-.1454056133873610120105857*1j,
             -.8983105104397872954053307+.1454056133873610120105857*1j,
             -.8837358034555706623131950-.2426335234401383076544239*1j,
             -.8837358034555706623131950+.2426335234401383076544239*1j,
             -.8615278304016353651120610-.3403202112618624773397257*1j,
             -.8615278304016353651120610+.3403202112618624773397257*1j,
             -.8312326466813240652679563-.4386985933597305434577492*1j,
             -.8312326466813240652679563+.4386985933597305434577492*1j,
             -.7921695462343492518845446-.5380628490968016700338001*1j,
             -.7921695462343492518845446+.5380628490968016700338001*1j,
             -.7433392285088529449175873-.6388084216222567930378296*1j,
             -.7433392285088529449175873+.6388084216222567930378296*1j,
             -.6832565803536521302816011-.7415032695091650806797753*1j,
             -.6832565803536521302816011+.7415032695091650806797753*1j,
             -.6096221567378335562589532-.8470292433077202380020454*1j,
             -.6096221567378335562589532+.8470292433077202380020454*1j,
             -.5185914574820317343536707-.9569048385259054576937721*1j,
             -.5185914574820317343536707+.9569048385259054576937721*1j,
             -.4027853855197518014786978-1.074195196518674765143729*1j,
             -.4027853855197518014786978+1.074195196518674765143729*1j,
             -.2433481337524869675825448-1.207298683731972524975429*1j,
             -.2433481337524869675825448+1.207298683731972524975429*1j]
    elif N == 25:
        p = [-.9062073871811708652496104,
             -.9028833390228020537142561-93077131185102967450643820.0e-27*1j,
             -.9028833390228020537142561+93077131185102967450643820.0e-27*1j,
             -.8928551459883548836774529-.1863068969804300712287138*1j,
             -.8928551459883548836774529+.1863068969804300712287138*1j,
             -.8759497989677857803656239-.2798521321771408719327250*1j,
             -.8759497989677857803656239+.2798521321771408719327250*1j,
             -.8518616886554019782346493-.3738977875907595009446142*1j,
             -.8518616886554019782346493+.3738977875907595009446142*1j,
             -.8201226043936880253962552-.4686668574656966589020580*1j,
             -.8201226043936880253962552+.4686668574656966589020580*1j,
             -.7800496278186497225905443-.5644441210349710332887354*1j,
             -.7800496278186497225905443+.5644441210349710332887354*1j,
             -.7306549271849967721596735-.6616149647357748681460822*1j,
             -.7306549271849967721596735+.6616149647357748681460822*1j,
             -.6704827128029559528610523-.7607348858167839877987008*1j,
             -.6704827128029559528610523+.7607348858167839877987008*1j,
             -.5972898661335557242320528-.8626676330388028512598538*1j,
             -.5972898661335557242320528+.8626676330388028512598538*1j,
             -.5073362861078468845461362-.9689006305344868494672405*1j,
             -.5073362861078468845461362+.9689006305344868494672405*1j,
             -.3934529878191079606023847-1.082433927173831581956863*1j,
             -.3934529878191079606023847+1.082433927173831581956863*1j,
             -.2373280669322028974199184-1.211476658382565356579418*1j,
             -.2373280669322028974199184+1.211476658382565356579418*1j]
    else:
        raise ValueError("Bessel Filter not supported for order %d" % N)
    return z, p, k
# Map each accepted filter-family name (including its aliases) to the pair
# [analog prototype function, order-selection function]; 'bessel' has no
# order-selection routine.
filter_dict = {'butter': [buttap,buttord],
               'butterworth' : [buttap,buttord],
               'cauer' : [ellipap,ellipord],
               'elliptic' : [ellipap,ellipord],
               'ellip' : [ellipap,ellipord],
               'bessel' : [besselap],
               'cheby1' : [cheb1ap, cheb1ord],
               'chebyshev1' : [cheb1ap, cheb1ord],
               'chebyshevi' : [cheb1ap, cheb1ord],
               'cheby2' : [cheb2ap, cheb2ord],
               'chebyshev2' : [cheb2ap, cheb2ord],
               'chebyshevii' : [cheb2ap, cheb2ord]
               }

# Normalize the many accepted band-type spellings onto the four canonical
# names ('lowpass', 'highpass', 'bandpass', 'bandstop').
band_dict = {'band':'bandpass',
             'bandpass':'bandpass',
             'pass' : 'bandpass',
             'bp':'bandpass',
             'bs':'bandstop',
             'bandstop':'bandstop',
             'bands' : 'bandstop',
             'stop' : 'bandstop',
             'l' : 'lowpass',
             'low': 'lowpass',
             'lowpass' : 'lowpass',
             'high' : 'highpass',
             'highpass' : 'highpass',
             'h' : 'highpass'
             }

# Always emit BadCoefficients warnings from filter design (never deduplicate).
warnings.simplefilter("always", BadCoefficients)
| gpl-3.0 |
glenioborges/ibis | ibis/util.py | 6 | 3963 | # Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import types
import ibis.compat as compat
def guid():
    """Return a random GUID as a 32-character lowercase hex string.

    Prefers the fast native ``ibis.comms.uuid4_hex`` implementation when
    available; otherwise falls back to the stdlib ``uuid`` module.
    """
    try:
        from ibis.comms import uuid4_hex
        return uuid4_hex()
    except ImportError:
        from uuid import uuid4
        generated = uuid4()
        # Python 2's UUID exposes get_hex(); Python 3 uses the .hex property.
        return generated.hex if compat.PY3 else generated.get_hex()
def bytes_to_uint8_array(val, width=70):
    """
    Formats a byte string for use as a uint8_t* literal in C/C++.

    Bytes are rendered as comma-separated decimal values wrapped in braces;
    a new output line is started whenever appending the next value would
    push the current line past `width` characters.
    """
    if len(val) == 0:
        return '{}'
    out_lines = []
    current = '{' + str(ord(val[0]))
    for ch in val[1:]:
        piece = str(ord(ch))
        if len(current) + len(piece) > width:
            # Wrap: terminate the full line with a comma, start a fresh one.
            out_lines.append(current + ',')
            current = piece
        else:
            current = '%s,%s' % (current, piece)
    out_lines.append(current)
    return '\n'.join(out_lines) + '}'
def unique_by_key(values, key):
    """Deduplicate `values` by ``key(x)``; the last item seen for each key
    wins. Returns the deduplicated values (a dict-values view/list)."""
    return compat.dict_values({key(item): item for item in values})
def indent(text, spaces):
    """Prefix every line of `text` with `spaces` space characters."""
    prefix = ' ' * spaces
    shifted = [prefix + line for line in text.split('\n')]
    return '\n'.join(shifted)
def any_of(values, t):
    """Return True if at least one element of `values` is an instance of `t`."""
    return any(isinstance(item, t) for item in values)
def all_of(values, t):
    """Return True if every element of `values` is an instance of `t`
    (vacuously True for an empty sequence)."""
    return all(isinstance(item, t) for item in values)
def promote_list(val):
    """Wrap `val` in a single-element list unless it already is a list."""
    return val if isinstance(val, list) else [val]
class IbisSet(object):
    """Set-like container for objects that are compared via their
    ``equals`` method rather than ``==``/``__hash__``.

    Membership is a linear scan, so this is only suitable for small
    collections.
    """

    def __init__(self, keys=None):
        self.keys = keys or []

    @classmethod
    def from_list(cls, keys):
        """Build an IbisSet directly from an existing list of keys."""
        return IbisSet(keys)

    def __contains__(self, obj):
        return any(obj.equals(existing) for existing in self.keys)

    def add(self, obj):
        """Append `obj` without checking for duplicates."""
        self.keys.append(obj)
class IbisMap(object):
    """Map keyed by objects compared via their ``equals`` method.

    Keys and values are held in two parallel lists; lookup is a linear
    scan, so this is only suitable for small collections.
    """

    def __init__(self):
        self.keys = []
        self.values = []

    def __contains__(self, obj):
        return any(obj.equals(existing) for existing in self.keys)

    def set(self, key, value):
        """Append a (key, value) pair; duplicates are not checked."""
        self.keys.append(key)
        self.values.append(value)

    def get(self, key):
        """Return the value for the first stored key that `key` equals.

        Raises KeyError when no stored key matches.
        """
        for candidate, value in zip(self.keys, self.values):
            if key.equals(candidate):
                return value
        raise KeyError(key)
def is_function(v):
    """Return True if `v` is a plain Python function (including lambdas);
    builtins and classes do not count."""
    plain_function_types = (types.FunctionType, types.LambdaType)
    return isinstance(v, plain_function_types)
def adjoin(space, *lists):
    """
    Glues together two sets of strings using the amount of space requested.
    The idea is to prettify.

    Brought over from pandas.
    """
    # Column widths: each column is padded to its longest entry plus
    # `space` separator characters; the last column gets no trailing gap.
    widths = [max(map(len, column)) + space for column in lists[:-1]]
    widths.append(max(map(len, lists[-1])))
    n_rows = max(map(len, lists))

    padded_columns = []
    for width, column in zip(widths, lists):
        padded = [entry.ljust(width) for entry in column]
        # Short columns are padded out with blank cells.
        padded.extend([' ' * width] * (n_rows - len(column)))
        padded_columns.append(padded)

    rows = [_join_unicode(cells) for cells in zip(*padded_columns)]
    return _join_unicode(rows, sep='\n')
def _join_unicode(lines, sep=''):
try:
return sep.join(lines)
except UnicodeDecodeError:
sep = compat.unicode_type(sep)
return sep.join([x.decode('utf-8') if isinstance(x, str) else x
for x in lines])
def deprecate(f, message):
    """Wrap `f` so that every call first prints `message`, then delegates
    all positional and keyword arguments to `f` and returns its result."""
    def wrapper(*args, **kwargs):
        print(message)
        return f(*args, **kwargs)
    return wrapper
| apache-2.0 |
rohanp/scikit-learn | sklearn/metrics/regression.py | 5 | 17399 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Michael Eickenberg <michael.eickenberg@gmail.com>
# Konstantin Shmelkov <konstantin.shmelkov@polytechnique.edu>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
from ..externals.six import string_types
import warnings
# Public API of this module. The previous spelling ``__ALL__`` was a typo
# with no effect on star-imports; ``__all__`` is the name Python honours.
__all__ = [
    "mean_absolute_error",
    "mean_squared_error",
    "median_absolute_error",
    "r2_score",
    "explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
    """Check that y_true and y_pred belong to the same regression task.

    Parameters
    ----------
    y_true : array-like,

    y_pred : array-like,

    multioutput : array-like or string in ['raw_values', uniform_average',
        'variance_weighted'] or None
        None is accepted due to backward compatibility of r2_score().

    Returns
    -------
    type_true : one of {'continuous', continuous-multioutput'}
        The type of the true target data, as output by
        'utils.multiclass.type_of_target'

    y_true : array-like of shape = (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape = (n_samples, n_outputs)
        Estimated target values.

    multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
        uniform_average', 'variance_weighted'] or None
        Custom output weights if ``multioutput`` is array-like or
        just the corresponding argument if ``multioutput`` is a
        correct keyword.
    """
    check_consistent_length(y_true, y_pred)
    y_true = check_array(y_true, ensure_2d=False)
    y_pred = check_array(y_pred, ensure_2d=False)

    # Promote 1d targets to single-output column vectors.
    if y_true.ndim == 1:
        y_true = y_true.reshape((-1, 1))
    if y_pred.ndim == 1:
        y_pred = y_pred.reshape((-1, 1))

    if y_true.shape[1] != y_pred.shape[1]:
        raise ValueError("y_true and y_pred have different number of output "
                         "({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))

    n_outputs = y_true.shape[1]
    allowed_keywords = (None, 'raw_values', 'uniform_average',
                        'variance_weighted')
    if multioutput not in allowed_keywords:
        # Anything else must be an array of per-output weights.
        multioutput = check_array(multioutput, ensure_2d=False)
        if n_outputs == 1:
            raise ValueError("Custom weights are useful only in "
                             "multi-output cases.")
        elif n_outputs != len(multioutput):
            raise ValueError(("There must be equally many custom weights "
                              "(%d) as outputs (%d).") %
                             (len(multioutput), n_outputs))
    y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'

    return y_type, y_true, y_pred, multioutput
def mean_absolute_error(y_true, y_pred,
                        sample_weight=None,
                        multioutput='uniform_average'):
    """Mean absolute error regression loss.

    Read more in the :ref:`User Guide <mean_absolute_error>`.

    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape = (n_samples), optional
        Sample weights.

    multioutput : string in ['raw_values', 'uniform_average']
        or array-like of shape (n_outputs)
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        If multioutput is 'raw_values', then mean absolute error is returned
        for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.

        MAE output is non-negative floating point. The best value is 0.0.

    Examples
    --------
    >>> from sklearn.metrics import mean_absolute_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_absolute_error(y_true, y_pred)
    0.5
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> mean_absolute_error(y_true, y_pred)
    0.75
    >>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    array([ 0.5,  1. ])
    >>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
    ... # doctest: +ELLIPSIS
    0.849...
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)
    # Weighted mean of |residuals|, computed per output column.
    output_errors = np.average(np.abs(y_pred - y_true), axis=0,
                               weights=sample_weight)
    if isinstance(multioutput, string_types):
        if multioutput == 'raw_values':
            return output_errors
        if multioutput == 'uniform_average':
            # np.average with weights=None yields the uniform mean.
            multioutput = None
    return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
                       sample_weight=None,
                       multioutput='uniform_average'):
    """Mean squared error regression loss.

    Read more in the :ref:`User Guide <mean_squared_error>`.

    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape = (n_samples), optional
        Sample weights.

    multioutput : string in ['raw_values', 'uniform_average']
        or array-like of shape (n_outputs)
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.

    Examples
    --------
    >>> from sklearn.metrics import mean_squared_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_squared_error(y_true, y_pred)
    0.375
    >>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
    >>> y_pred = [[0, 2],[-1, 2],[8, -5]]
    >>> mean_squared_error(y_true, y_pred)  # doctest: +ELLIPSIS
    0.708...
    >>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
    ... # doctest: +ELLIPSIS
    array([ 0.416...,  1.        ])
    >>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
    ... # doctest: +ELLIPSIS
    0.824...
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)
    # Weighted mean of squared residuals, computed per output column.
    squared_residuals = (y_true - y_pred) ** 2
    output_errors = np.average(squared_residuals, axis=0,
                               weights=sample_weight)
    if isinstance(multioutput, string_types):
        if multioutput == 'raw_values':
            return output_errors
        if multioutput == 'uniform_average':
            # np.average with weights=None yields the uniform mean.
            multioutput = None
    return np.average(output_errors, weights=multioutput)
def median_absolute_error(y_true, y_pred):
    """Median absolute error regression loss.

    Read more in the :ref:`User Guide <median_absolute_error>`.

    Parameters
    ----------
    y_true : array-like of shape = (n_samples)
        Ground truth (correct) target values.

    y_pred : array-like of shape = (n_samples)
        Estimated target values.

    Returns
    -------
    loss : float
        A positive floating point value (the best value is 0.0).

    Examples
    --------
    >>> from sklearn.metrics import median_absolute_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> median_absolute_error(y_true, y_pred)
    0.5
    """
    y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
                                                   'uniform_average')
    # Only single-output targets are supported by this metric.
    if y_type == 'continuous-multioutput':
        raise ValueError("Multioutput not supported in median_absolute_error")
    absolute_residuals = np.abs(y_pred - y_true)
    return np.median(absolute_residuals)
def explained_variance_score(y_true, y_pred,
                             sample_weight=None,
                             multioutput='uniform_average'):
    """Explained variance regression score function.

    Best possible score is 1.0, lower values are worse.

    Read more in the :ref:`User Guide <explained_variance_score>`.

    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape = (n_samples), optional
        Sample weights.

    multioutput : string in ['raw_values', 'uniform_average', \
        'variance_weighted'] or array-like of shape (n_outputs)
        Defines aggregating of multiple output scores.
        Array-like value defines weights used to average scores.

        'raw_values' :
            Returns a full set of scores in case of multioutput input.

        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.

        'variance_weighted' :
            Scores of all outputs are averaged, weighted by the variances
            of each individual output.

    Returns
    -------
    score : float or ndarray of floats
        The explained variance or ndarray if 'multioutput' is 'raw_values'.

    Notes
    -----
    This is not a symmetric function.

    Examples
    --------
    >>> from sklearn.metrics import explained_variance_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> explained_variance_score(y_true, y_pred)  # doctest: +ELLIPSIS
    0.957...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
    ... # doctest: +ELLIPSIS
    0.983...
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)

    # Variance of the residuals (numerator) vs variance of y_true
    # (denominator), both per output and sample-weighted.
    residuals = y_true - y_pred
    y_diff_avg = np.average(residuals, weights=sample_weight, axis=0)
    numerator = np.average((residuals - y_diff_avg) ** 2,
                           weights=sample_weight, axis=0)

    y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
    denominator = np.average((y_true - y_true_avg) ** 2,
                             weights=sample_weight, axis=0)

    nonzero_numerator = numerator != 0
    nonzero_denominator = denominator != 0
    valid_score = nonzero_numerator & nonzero_denominator

    output_scores = np.ones(y_true.shape[1])
    output_scores[valid_score] = 1 - (numerator[valid_score] /
                                      denominator[valid_score])
    # Constant y_true with imperfect predictions: pin the score to 0
    # instead of letting the ratio blow up.
    output_scores[nonzero_numerator & ~nonzero_denominator] = 0.

    if isinstance(multioutput, string_types):
        if multioutput == 'raw_values':
            # Return the per-output scores without aggregation.
            return output_scores
        if multioutput == 'uniform_average':
            # np.average with weights=None computes the uniform mean.
            avg_weights = None
        elif multioutput == 'variance_weighted':
            avg_weights = denominator
    else:
        avg_weights = multioutput

    return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred,
             sample_weight=None,
             multioutput=None):
    """R^2 (coefficient of determination) regression score function.

    Best possible score is 1.0 and it can be negative (because the
    model can be arbitrarily worse). A constant model that always
    predicts the expected value of y, disregarding the input features,
    would get a R^2 score of 0.0.

    Read more in the :ref:`User Guide <r2_score>`.

    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape = (n_samples), optional
        Sample weights.

    multioutput : string in ['raw_values', 'uniform_average',
        'variance_weighted'] or None or array-like of shape (n_outputs)
        Defines aggregating of multiple output scores.
        Array-like value defines weights used to average scores.
        Default value correponds to 'variance_weighted', this behaviour is
        deprecated since version 0.17 and will be changed to 'uniform_average'
        starting from 0.19.

        'raw_values' :
            Returns a full set of scores in case of multioutput input.

        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.

        'variance_weighted' :
            Scores of all outputs are averaged, weighted by the variances
            of each individual output.

    Returns
    -------
    z : float or ndarray of floats
        The R^2 score or ndarray of scores if 'multioutput' is
        'raw_values'.

    Notes
    -----
    This is not a symmetric function.

    Unlike most other scores, R^2 score may be negative (it need not actually
    be the square of a quantity R).

    References
    ----------
    .. [1] `Wikipedia entry on the Coefficient of determination
            <http://en.wikipedia.org/wiki/Coefficient_of_determination>`_

    Examples
    --------
    >>> from sklearn.metrics import r2_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> r2_score(y_true, y_pred)  # doctest: +ELLIPSIS
    0.948...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> r2_score(y_true, y_pred, multioutput='variance_weighted')  # doctest: +ELLIPSIS
    0.938...
    """
    y_type, y_true, y_pred, multioutput = _check_reg_targets(
        y_true, y_pred, multioutput)

    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
        # Broadcast the weights across the output columns.
        weight = sample_weight[:, np.newaxis]
    else:
        weight = 1.

    # Residual sum of squares (numerator) and total sum of squares
    # (denominator), per output column.
    numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
                                                      dtype=np.float64)
    denominator = (weight * (y_true - np.average(
        y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
                                                          dtype=np.float64)
    nonzero_denominator = denominator != 0
    nonzero_numerator = numerator != 0
    valid_score = nonzero_denominator & nonzero_numerator
    # Perfect score (1.0) by default; only well-defined outputs get 1 - RSS/TSS.
    output_scores = np.ones([y_true.shape[1]])
    output_scores[valid_score] = 1 - (numerator[valid_score] /
                                      denominator[valid_score])
    # arbitrary set to zero to avoid -inf scores, having a constant
    # y_true is not interesting for scoring a regression anyway
    output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
    if multioutput is None and y_true.shape[1] != 1:
        # Implicit legacy default: behave like 'variance_weighted' but warn.
        warnings.warn("Default 'multioutput' behavior now corresponds to "
                      "'variance_weighted' value which is deprecated since "
                      "0.17, it will be changed to 'uniform_average' "
                      "starting from 0.19.",
                      DeprecationWarning)
        multioutput = 'variance_weighted'
    if isinstance(multioutput, string_types):
        if multioutput == 'raw_values':
            # return scores individually
            return output_scores
        elif multioutput == 'uniform_average':
            # passing None as weights results is uniform mean
            avg_weights = None
        elif multioutput == 'variance_weighted':
            avg_weights = denominator
            # avoid fail on constant y or one-element arrays
            if not np.any(nonzero_denominator):
                if not np.any(nonzero_numerator):
                    return 1.0
                else:
                    return 0.0
    else:
        avg_weights = multioutput
    return np.average(output_scores, weights=avg_weights)
| bsd-3-clause |
duthchao/kaggle-galaxies | try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_dup3.py | 7 | 17439 | import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
from datetime import datetime, timedelta
# import matplotlib.pyplot as plt
# plt.ion()
# import utils
# --- Hyperparameters and realtime data-augmentation setup (Python 2) -------
BATCH_SIZE = 16
NUM_INPUT_FEATURES = 3

# Learning rate is dropped 10x at these chunk indices.
LEARNING_RATE_SCHEDULE = {
    0: 0.04,
    1800: 0.004,
    2300: 0.0004,
}
MOMENTUM = 0.9
WEIGHT_DECAY = 0.0
CHUNK_SIZE = 10000 # 30000 # this should be a multiple of the batch size, ideally.
NUM_CHUNKS = 2500 # 3000 # 1500 # 600 # 600 # 600 # 500
VALIDATE_EVERY = 20 # 12 # 6 # 6 # 6 # 5 # validate only every 5 chunks. MUST BE A DIVISOR OF NUM_CHUNKS!!!
# else computing the analysis data does not work correctly, since it assumes that the validation set is still loaded.
NUM_CHUNKS_NONORM = 1 # train without normalisation for this many chunks, to get the weights in the right 'zone'.
# this should be only a few, just 1 hopefully suffices.

GEN_BUFFER_SIZE = 1

# # need to load the full training data anyway to extract the validation set from it.
# # alternatively we could create separate validation set files.
# DATA_TRAIN_PATH = "data/images_train_color_cropped33_singletf.npy.gz"
# DATA2_TRAIN_PATH = "data/images_train_color_8x_singletf.npy.gz"
# DATA_VALIDONLY_PATH = "data/images_validonly_color_cropped33_singletf.npy.gz"
# DATA2_VALIDONLY_PATH = "data/images_validonly_color_8x_singletf.npy.gz"
# DATA_TEST_PATH = "data/images_test_color_cropped33_singletf.npy.gz"
# DATA2_TEST_PATH = "data/images_test_color_8x_singletf.npy.gz"

TARGET_PATH = "predictions/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_dup3.csv"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_extradense_dup3.pkl"
# FEATURES_PATTERN = "features/try_convnet_chunked_ra_b3sched.%s.npy"

print "Set up data loading"
# TODO: adapt this so it loads the validation data from JPEGs and does the processing realtime

# Two input branches: a 3x-downsampled 69x69 crop and the same crop with an
# extra 45-degree rotation applied.
input_sizes = [(69, 69), (69, 69)]

ds_transforms = [
    ra.build_ds_transform(3.0, target_size=input_sizes[0]),
    ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)
]

num_input_representations = len(ds_transforms)

# Random augmentation ranges sampled per training image.
augmentation_params = {
    'zoom_range': (1.0 / 1.3, 1.3),
    'rotation_range': (0, 360),
    'shear_range': (0, 0),
    'translation_range': (-4, 4),
    'do_flip': True,
}

augmented_data_gen = ra.realtime_augmented_data_gen(num_chunks=NUM_CHUNKS, chunk_size=CHUNK_SIZE,
                                                    augmentation_params=augmentation_params, ds_transforms=ds_transforms,
                                                    target_sizes=input_sizes)

# Random brightness jitter on top of the geometric augmentation, buffered in
# a background process so the GPU does not wait for preprocessing.
post_augmented_data_gen = ra.post_augment_brightness_gen(augmented_data_gen, std=0.5)

train_gen = load_data.buffered_gen_mp(post_augmented_data_gen, buffer_size=GEN_BUFFER_SIZE)

y_train = np.load("data/solutions_train.npy")
train_ids = load_data.train_ids
test_ids = load_data.test_ids

# split training data into training + a small validation set
num_train = len(train_ids)
num_test = len(test_ids)

num_valid = num_train // 10 # integer division
num_train -= num_valid

# Last 10% of the training set becomes the validation set.
# NOTE(review): assumes train_ids ordering is already randomised — confirm
# in load_data before relying on this split being unbiased.
y_valid = y_train[num_train:]
y_train = y_train[:num_train]

valid_ids = train_ids[num_train:]
train_ids = train_ids[:num_train]

train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train + num_valid)
test_indices = np.arange(num_test)
def create_train_gen():
    """
    this generates the training data in order, for postprocessing. Do not use this for actual training.
    """
    ordered_gen = ra.realtime_fixed_augmented_data_gen(
        train_indices, 'train', ds_transforms=ds_transforms,
        chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
    return load_data.buffered_gen_mp(ordered_gen, buffer_size=GEN_BUFFER_SIZE)
def create_valid_gen():
    """Generate the validation split in order (fixed transforms, buffered)."""
    ordered_gen = ra.realtime_fixed_augmented_data_gen(
        valid_indices, 'train', ds_transforms=ds_transforms,
        chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
    return load_data.buffered_gen_mp(ordered_gen, buffer_size=GEN_BUFFER_SIZE)
def create_test_gen():
    """Generate the test set in order (fixed transforms, buffered)."""
    ordered_gen = ra.realtime_fixed_augmented_data_gen(
        test_indices, 'test', ds_transforms=ds_transforms,
        chunk_size=CHUNK_SIZE, target_sizes=input_sizes)
    return load_data.buffered_gen_mp(ordered_gen, buffer_size=GEN_BUFFER_SIZE)
print "Preprocess validation data upfront"
start_time = time.time()
xs_valid = [[] for _ in xrange(num_input_representations)]
for data, length in create_valid_gen():
for x_valid_list, x_chunk in zip(xs_valid, data):
x_valid_list.append(x_chunk[:length])
xs_valid = [np.vstack(x_valid) for x_valid in xs_valid]
xs_valid = [x_valid.transpose(0, 3, 1, 2) for x_valid in xs_valid] # move the colour dimension up
print " took %.2f seconds" % (time.time() - start_time)
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4b = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
l4c = layers.DenseLayer(l4b, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4c, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
train_loss_nonorm = l6.error(normalisation=False)
train_loss = l6.error() # but compute and print this!
valid_loss = l6.error(dropout_active=False)
all_parameters = layers.all_parameters(l6)
all_bias_parameters = layers.all_bias_parameters(l6)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
y_shared = theano.shared(np.zeros((1,1), dtype=theano.config.floatX))
learning_rate = theano.shared(np.array(LEARNING_RATE_SCHEDULE[0], dtype=theano.config.floatX))
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l6.target_var: y_shared[idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
# updates = layers.gen_updates(train_loss, all_parameters, learning_rate=LEARNING_RATE, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates_nonorm = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss_nonorm, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
updates = layers.gen_updates_nesterov_momentum_no_bias_decay(train_loss, all_parameters, all_bias_parameters, learning_rate=learning_rate, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
train_nonorm = theano.function([idx], train_loss_nonorm, givens=givens, updates=updates_nonorm)
train_norm = theano.function([idx], train_loss, givens=givens, updates=updates)
compute_loss = theano.function([idx], valid_loss, givens=givens) # dropout_active=False
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens, on_unused_input='ignore') # not using the labels, so theano complains
compute_features = theano.function([idx], l4.output(dropout_active=False), givens=givens, on_unused_input='ignore')
print "Train model"
start_time = time.time()
prev_time = start_time
num_batches_valid = x_valid.shape[0] // BATCH_SIZE
losses_train = []
losses_valid = []
param_stds = []
for e in xrange(NUM_CHUNKS):
print "Chunk %d/%d" % (e + 1, NUM_CHUNKS)
chunk_data, chunk_length = train_gen.next()
y_chunk = chunk_data.pop() # last element is labels.
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
if e in LEARNING_RATE_SCHEDULE:
current_lr = LEARNING_RATE_SCHEDULE[e]
learning_rate.set_value(LEARNING_RATE_SCHEDULE[e])
print " setting learning rate to %.6f" % current_lr
# train without normalisation for the first # chunks.
if e >= NUM_CHUNKS_NONORM:
train = train_norm
else:
train = train_nonorm
print " load training data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
y_shared.set_value(y_chunk)
num_batches_chunk = x_chunk.shape[0] // BATCH_SIZE
# import pdb; pdb.set_trace()
print " batch SGD"
losses = []
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
loss = train(b)
losses.append(loss)
# print " loss: %.6f" % loss
mean_train_loss = np.sqrt(np.mean(losses))
print " mean training loss (RMSE):\t\t%.6f" % mean_train_loss
losses_train.append(mean_train_loss)
# store param stds during training
param_stds.append([p.std() for p in layers.get_param_values(l6)])
if ((e + 1) % VALIDATE_EVERY) == 0:
print
print "VALIDATING"
print " load validation data onto GPU"
for x_shared, x_valid in zip(xs_shared, xs_valid):
x_shared.set_value(x_valid)
y_shared.set_value(y_valid)
print " compute losses"
losses = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
loss = compute_loss(b)
losses.append(loss)
mean_valid_loss = np.sqrt(np.mean(losses))
print " mean validation loss (RMSE):\t\t%.6f" % mean_valid_loss
losses_valid.append(mean_valid_loss)
layers.dump_params(l6, e=e)
now = time.time()
time_since_start = now - start_time
time_since_prev = now - prev_time
prev_time = now
est_time_left = time_since_start * (float(NUM_CHUNKS - (e + 1)) / float(e + 1))
eta = datetime.now() + timedelta(seconds=est_time_left)
eta_str = eta.strftime("%c")
print " %s since start (%.2f s)" % (load_data.hms(time_since_start), time_since_prev)
print " estimated %s to go (ETA: %s)" % (load_data.hms(est_time_left), eta_str)
print
del chunk_data, xs_chunk, x_chunk, y_chunk, xs_valid, x_valid # memory cleanup
print "Compute predictions on validation set for analysis in batches"
predictions_list = []
for b in xrange(num_batches_valid):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_valid)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write validation set predictions to %s" % ANALYSIS_PATH
with open(ANALYSIS_PATH, 'w') as f:
pickle.dump({
'ids': valid_ids[:num_batches_valid * BATCH_SIZE], # note that we need to truncate the ids to a multiple of the batch size.
'predictions': all_predictions,
'targets': y_valid,
'mean_train_loss': mean_train_loss,
'mean_valid_loss': mean_valid_loss,
'time_since_start': time_since_start,
'losses_train': losses_train,
'losses_valid': losses_valid,
'param_values': layers.get_param_values(l6),
'param_stds': param_stds,
}, f, pickle.HIGHEST_PROTOCOL)
del predictions_list, all_predictions # memory cleanup
# print "Loading test data"
# x_test = load_data.load_gz(DATA_TEST_PATH)
# x2_test = load_data.load_gz(DATA2_TEST_PATH)
# test_ids = np.load("data/test_ids.npy")
# num_test = x_test.shape[0]
# x_test = x_test.transpose(0, 3, 1, 2) # move the colour dimension up.
# x2_test = x2_test.transpose(0, 3, 1, 2)
# create_test_gen = lambda: load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
print "Computing predictions on test data"
predictions_list = []
for e, (xs_chunk, chunk_length) in enumerate(create_test_gen()):
print "Chunk %d" % (e + 1)
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk] # move the colour dimension up.
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# make predictions for testset, don't forget to cute off the zeros at the end
for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_list.append(predictions)
all_predictions = np.vstack(predictions_list)
all_predictions = all_predictions[:num_test] # truncate back to the correct length
# postprocessing: clip all predictions to 0-1
all_predictions[all_predictions > 1] = 1.0
all_predictions[all_predictions < 0] = 0.0
print "Write predictions to %s" % TARGET_PATH
# test_ids = np.load("data/test_ids.npy")
with open(TARGET_PATH, 'wb') as csvfile:
writer = csv.writer(csvfile) # , delimiter=',', quoting=csv.QUOTE_MINIMAL)
# write header
writer.writerow(['GalaxyID', 'Class1.1', 'Class1.2', 'Class1.3', 'Class2.1', 'Class2.2', 'Class3.1', 'Class3.2', 'Class4.1', 'Class4.2', 'Class5.1', 'Class5.2', 'Class5.3', 'Class5.4', 'Class6.1', 'Class6.2', 'Class7.1', 'Class7.2', 'Class7.3', 'Class8.1', 'Class8.2', 'Class8.3', 'Class8.4', 'Class8.5', 'Class8.6', 'Class8.7', 'Class9.1', 'Class9.2', 'Class9.3', 'Class10.1', 'Class10.2', 'Class10.3', 'Class11.1', 'Class11.2', 'Class11.3', 'Class11.4', 'Class11.5', 'Class11.6'])
# write data
for k in xrange(test_ids.shape[0]):
row = [test_ids[k]] + all_predictions[k].tolist()
writer.writerow(row)
print "Gzipping..."
os.system("gzip -c %s > %s.gz" % (TARGET_PATH, TARGET_PATH))
del all_predictions, predictions_list, xs_chunk, x_chunk # memory cleanup
# # need to reload training data because it has been split and shuffled.
# # don't need to reload test data
# x_train = load_data.load_gz(DATA_TRAIN_PATH)
# x2_train = load_data.load_gz(DATA2_TRAIN_PATH)
# x_train = x_train.transpose(0, 3, 1, 2) # move the colour dimension up
# x2_train = x2_train.transpose(0, 3, 1, 2)
# train_gen_features = load_data.array_chunker_gen([x_train, x2_train], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# test_gen_features = load_data.array_chunker_gen([x_test, x2_test], chunk_size=CHUNK_SIZE, loop=False, truncate=False, shuffle=False)
# for name, gen, num in zip(['train', 'test'], [train_gen_features, test_gen_features], [x_train.shape[0], x_test.shape[0]]):
# print "Extracting feature representations for all galaxies: %s" % name
# features_list = []
# for e, (xs_chunk, chunk_length) in enumerate(gen):
# print "Chunk %d" % (e + 1)
# x_chunk, x2_chunk = xs_chunk
# x_shared.set_value(x_chunk)
# x2_shared.set_value(x2_chunk)
# num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE))) # need to round UP this time to account for all data
# # compute features for set, don't forget to cute off the zeros at the end
# for b in xrange(num_batches_chunk):
# if b % 1000 == 0:
# print " batch %d/%d" % (b + 1, num_batches_chunk)
# features = compute_features(b)
# features_list.append(features)
# all_features = np.vstack(features_list)
# all_features = all_features[:num] # truncate back to the correct length
# features_path = FEATURES_PATTERN % name
# print " write features to %s" % features_path
# np.save(features_path, all_features)
print "Done!"
| bsd-3-clause |
nitishkd/Customer-Prediction | graph.py | 1 | 1975 | import numpy as np;
import matplotlib.pyplot as plt
from sklearn import svm
# Load the raw training data; each line is expected to contain a tuple of
# integers written like "(a, b, c, d)".
dset_file = open("extradata.txt");
##print(dset_file.readline())
row = 0;
col = 0;
dset = []
# Parse each line: take the text between '(' and ')' and split it on commas.
for info in dset_file.readlines():
    col = 0;
    s = info.strip()
    sa = s.split("(")
    sb = sa[1].split(")")
    k = sb[0].split(",")
    lst = []
    for ss in k:
        val = int(ss)
        lst.append(val)
    dset.append(lst)
FD = np.array(dset)
# Columns 0-2 are features, column 3 is the class label.
X = FD[:, :3]
Y = FD[:, 3]
clf = svm.SVC()
clf.fit(X,Y)
#import pickle
#s = pickle.dumps(clf)
##clf2 = pickle.loads(s)
##print(clf2.predict([[22,3,4]]))
X = FD[:, 1:3] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
# NOTE(review): the comment above is inherited from the sklearn example;
# this slice actually takes columns 1 and 2, not the *first* two - confirm
# this is intentional.
y = Y
h = .02 # step size in the mesh
C = 1.0 # SVM regularization parameter
##svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
# FD[:, 2] is the same column as X[:, 1] after the reslice above.
y_min, y_max = FD[:, 2].min() - 1, FD[:, 2].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))
# title for the plots
titles = [##'SVC with linear kernel',
          ##'LinearSVC (linear kernel)',
          'SVC with RBF kernel'##
          ##'SVC with polynomial (degree 3) kernel'
          ]
#for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
for i, clf in enumerate((rbf_svc,)):
    plt.plot(2,2)
    plt.subplots_adjust(wspace=0.4, hspace=0.4)
    # Classify every point of the mesh to draw the decision regions.
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    plt.xlabel('EXP')
    plt.ylabel('FREQ')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.title(titles[i])
plt.show()
| mit |
pdamodaran/yellowbrick | tests/test_meta.py | 1 | 5443 | # tests.test_meta
# Meta testing for testing helper functions!
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Sat Apr 07 13:16:53 2018 -0400
#
# ID: test_meta.py [] benjamin@bengfort.com $
"""
Meta testing for testing helper functions!
"""
##########################################################################
## Imports
##########################################################################
import os
import pytest
import inspect
import matplotlib as mpl
from tests.rand import RandomVisualizer
from unittest.mock import MagicMock, patch
from tests.base import ACTUAL_IMAGES, BASELINE_IMAGES
from tests.base import VisualTestCase, ImageComparison
from yellowbrick.exceptions import ImageComparisonFailure
def assert_path_exists(*parts):
    """Assert that the filesystem path joined from ``parts`` exists."""
    # Keep this helper frame out of pytest tracebacks when the assert fires.
    __tracebackhide__ = True
    full_path = os.path.join(*parts)
    assert os.path.exists(full_path), "expected {} to exist".format(full_path)
def assert_path_not_exists(*parts):
    """Assert that the filesystem path joined from ``parts`` does NOT exist."""
    # Keep this helper frame out of pytest tracebacks when the assert fires.
    __tracebackhide__ = True
    full_path = os.path.join(*parts)
    assert not os.path.exists(full_path), "expected {} not to exist".format(full_path)
##########################################################################
## Test Cases
##########################################################################
class TestMetaImageComparison(VisualTestCase):
    """
    Meta Test: ImageComparison test cases
    """

    def test_image_comparison(self):
        """
        Test the image comparison initialization and properties
        """
        def inner_assertion_function(ax):
            # ImageComparison inspects the call stack to find the enclosing
            # test, so it must be built inside a helper function like this.
            stack = inspect.stack()
            return ImageComparison(stack, ax=ax)
        ax = MagicMock()
        compare = inner_assertion_function(ax)
        assert compare.ax is ax
        assert compare.test_func_name == "test_image_comparison"
        assert compare.test_module_path == "test_meta"
        # Must use os.path.join for Windows/POSIX compatibility
        assert compare.actual_image_path.endswith(os.path.join(
            "tests", "actual_images", "test_meta", "test_image_comparison.png"
        ))
        assert compare.baseline_image_path.endswith(os.path.join(
            "tests", "baseline_images", "test_meta", "test_image_comparison.png"
        ))

    @patch.object(ImageComparison, "cleanup")
    @patch.object(ImageComparison, "save")
    @patch.object(ImageComparison, "compare")
    def test_image_comparison_call(self, mock_cleanup, mock_save, mock_compare):
        """
        Test that image comparison cleans up, saves, and compares
        """
        # NOTE(review): @patch decorators inject mocks bottom-up, so the
        # first parameter actually receives the "compare" mock and the last
        # the "cleanup" mock - the names look swapped. The test still passes
        # because all three are asserted identically; confirm and rename.
        def inner_assertion_function():
            stack = inspect.stack()
            return ImageComparison(stack, ax=MagicMock())
        compare = inner_assertion_function()
        compare()
        mock_cleanup.assert_called_once()
        mock_save.assert_called_once()
        mock_compare.assert_called_once()

    def test_image_comparison_requires_ax(self):
        """
        Assert raises if an axes object is not supplied
        """
        with pytest.raises(ValueError, match="ax must be specified"):
            ImageComparison(inspect.stack())

    def test_image_comparison_not_in_assertion(self):
        """
        Assert raises when image comparison not in assertion helper
        """
        # Constructed directly inside the test method, so the stack walk
        # cannot find an assertion-helper frame.
        with pytest.raises(ValueError, match="not a test function"):
            stack = inspect.stack()
            ImageComparison(stack, ax=MagicMock())

    def test_missing_baseline_image(self):
        """
        Test that a missing basline image raises an exception
        """
        viz = RandomVisualizer(random_state=14).fit()
        viz.poof()
        # Assert the baseline image does not exist
        assert_path_not_exists(
            BASELINE_IMAGES, "test_meta", "test_missing_baseline_image.png"
        )
        with pytest.raises(ImageComparisonFailure, match="image does not exist"):
            self.assert_images_similar(viz)
        # Assert the actual image was created (to copy to baseline)
        assert_path_exists(
            ACTUAL_IMAGES, "test_meta", "test_missing_baseline_image.png"
        )

    def test_random_visualizer(self):
        """
        Test that a random visualization is correctly compared to a baseline
        """
        viz = RandomVisualizer(random_state=111).fit()
        viz.poof()
        # Image comparison only makes sense on the non-interactive backend.
        assert mpl.get_backend() == 'agg'
        compare = self.assert_images_similar(viz, tol=1.0)
        assert_path_exists(compare.actual_image_path)
        assert_path_exists(compare.baseline_image_path)

    def test_random_visualizer_not_close(self):
        """
        Test that not close visualizers raise an assertion error.
        """
        # Baseline image random_state=225
        viz = RandomVisualizer(random_state=224).fit()
        viz.poof()
        with pytest.raises(ImageComparisonFailure, match="images not close"):
            self.assert_images_similar(viz)
        # Assert there is a diff
        assert_path_exists(
            ACTUAL_IMAGES, "test_meta", "test_random_visualizer_not_close-failed-diff.png"
        )

    def test_random_visualizer_increased_tolerance(self):
        """
        Test that not close visualizers pass with increased tolerance
        """
        viz = RandomVisualizer(random_state=224).fit()
        viz.poof()
        self.assert_images_similar(viz, tol=30)
| apache-2.0 |
benschneider/sideprojects1 | Shotnoise-Calibration/SNfit1.py | 1 | 19844 | # -*- coding: utf-8 -*-
'''
@author: Ben Schneider
A script is used to readout mtx measurement data which also contains
a shotnoise responses.
Then fits them for G and Tn
'''
import numpy as np
from parsers import savemtx, loadmtx, make_header, read_header
# from scipy.optimize import curve_fit # , leastsq
from scipy.constants import Boltzmann as kB
from scipy.constants import h, e, c # , pi
from scipy.ndimage.filters import gaussian_filter1d
from lmfit import minimize, Parameters, report_fit # , Parameter
# from matplotlib.pyplot import plot, hold, figure, show, title, ion, close
# import matplotlib
# matplotlib.use('Qt4Agg') # macosx')
import matplotlib.pyplot as plt
# import Gnuplot as gp
'''
The Two classes SN_class used to store the fit results
------------------------------------------------------
and the variable carrier used to carry and deal with variables that are
passed over quite often
'''
class SN_class():
    """Plain container for shot-noise fit results.

    Every attribute starts out as an independent empty list; the fitting
    loop appends one entry per RF power, and several lists are later
    converted to numpy arrays by the caller.
    """

    def __init__(self):
        """Create every result field as a fresh empty list."""
        # G*/Tn*: gain and noise-temperature values per digitizer;
        # *del:   the corresponding fit uncertainties (stderr);
        # T*/T/Tdel: fitted sample temperatures and their uncertainties.
        for field_name in ('G1del', 'G2del', 'Tn1del', 'Tn2del',
                           'G1', 'G2', 'Tn1', 'Tn2',
                           'T1', 'T2', 'T', 'Tdel'):
            setattr(self, field_name, [])
class variable_carrier():
    ''' used to store and pass lots of variables and locations
    simply create with vc = my_variable_class()
    it currently has the following default settings in __init__:
    self.Z0 = 50.0
    self.Zopt = 50.0
    self.B = 1e5
    self.f1 = 4.1e9
    self.f2 = 4.8e9
    self.RTR = 1009.1 * 1e3  # Ib Resistance in Ohm
    self.RG = 1000.0  # Pre Amp gain factor
    self.filein1 = 'S1_949_G0mV_SN_PCovMat_cI1I1.mtx'
    self.filein2 = 'S1_949_G0mV_SN_PCovMat_cQ1Q1.mtx'
    self.filein3 = 'S1_949_G0mV_SN_PCovMat_cI2I2.mtx'
    self.filein4 = 'S1_949_G0mV_SN_PCovMat_cQ2Q2.mtx'
    self.filein5 = 'S1_949_G0mV_SN_PV'
    self.fifolder = 'sn_data//'
    Cross corr files
    self.filein6 = 'S1_949_G0mV_SN_PCovMat_cI1I2.mtx'
    self.filein7 = 'S1_949_G0mV_SN_PCovMat_cI1Q2.mtx'
    self.filein8 = 'S1_949_G0mV_SN_PCovMat_cQ1I2.mtx'
    self.filein9 = 'S1_949_G0mV_SN_PCovMat_cQ1Q2.mtx'
    self.fifolder = 'sn_data//'
    '''
    # NOTE(review): loadCcor() also reads self.filein10/filein11 (cI1Q1 and
    # cI2Q2), and norm_to_SI()/DoSNfits() require filein1..11, Texp and Ravg
    # to be assigned by the caller before use - none are set in __init__.

    def __init__(self):
        # Default measurement constants; file names must be filled in later.
        self.LP = 3  # Gaus-Filter i.e. Low-Pass Vm derivative
        self.Z0 = 50.0
        self.Zopt = 50.0
        self.B = 1e5
        self.f1 = 4.1e9
        self.f2 = 4.8e9
        self.RTR = 1009.1 * 1e3  # Ib Resistance in Ohm
        self.RG = 1000.0  # Pre Amp gain factor
        self.cvals = {}  # Cross corr dictionary with key value elements

    def load_and_go(self):
        '''
        simply executes the sub definitions
        loads data,
        normalizes to SI units,
        calculates differential resistances
        '''
        self.loaddata()
        self.loadCcor()
        self.norm_to_SI()

    def loaddata(self):
        '''
        Loads the data defined in self.filein1 ..
        This loads the shotnoise relevant data files
        '''
        # Auto-power spectra (I and Q for both digitizers) plus the measured
        # voltage trace; d1..d3/dz are the axis descriptors from the last load.
        self.I1I1, d3, d2, d1, dz = loadmtx(self.fifolder + self.filein1, True)
        self.Q1Q1, d3, d2, d1, dz = loadmtx(self.fifolder + self.filein2, True)
        self.I2I2, d3, d2, d1, dz = loadmtx(self.fifolder + self.filein3, True)
        self.Q2Q2, self.d3I, self.d2, self.d1, self.dz = loadmtx(self.fifolder + self.filein4, True)
        self.Vm, self.d3, dv2, dv1, dvz = loadmtx(self.fifolder + self.filein5, True)
        self.lags0 = find_nearest(self.d1.lin, 0.0)  # lags position
        self.Ib0 = find_nearest(self.d3.lin, 0.0)  # Zero current position

    def loadCcor(self):
        '''
        want to simply load the amplitude at max correlation position
        i.e. at lags = 0
        '''
        self.I1I2, d3, d2, d1, dz = loadmtx(self.fifolder + self.filein6, True)
        self.I1Q2, d3, d2, d1, dz = loadmtx(self.fifolder + self.filein7, True)
        self.Q1I2, d3, d2, d1, dz = loadmtx(self.fifolder + self.filein8, True)
        self.Q1Q2, d3, d2, d1, dz = loadmtx(self.fifolder + self.filein9, True)
        self.I1Q1, d3, d2, d1, dz = loadmtx(self.fifolder + self.filein10, True)
        self.I2Q2, d3, d2, d1, dz = loadmtx(self.fifolder + self.filein11, True)
        # Total power per digitizer at zero lag (I^2 + Q^2).
        self.cPD1 = (self.I1I1[self.lags0] + self.Q1Q1[self.lags0])
        self.cPD2 = (self.I2I2[self.lags0] + self.Q2Q2[self.lags0])

    def norm_to_SI(self):
        '''
        Take amplifier gains and resistances as defined by self.RTR and self.RG
        to scale voltage units to [Volt] and [Amps]
        '''
        self.d3.scale = 1.0 / (self.RTR)  # scale X-axis to Amps
        self.d3.update_lin()
        self.I = self.d3.lin
        self.Vm = self.Vm / self.RG  # scale Vm-data to Volts

    def calc_diff_resistance(self):
        '''
        calculates the differential resistance of all traces in the
        variable carrier
        '''
        self.d3step = self.d3.lin[1] - self.d3.lin[0]  # get step-size
        self.dIV = xderiv(self.Vm[0], self.d3step)
        # Optionally low-pass the |dV/dI| trace to suppress derivative noise.
        if self.LP > 0.0:
            self.dIVlp = gaussian_filter1d(abs(self.dIV), self.LP)  # Gausfilter
        else:
            self.dIVlp = abs(self.dIV)

    def make_cvals(self, cpt=5, snr=2):
        '''
        Using this function to obtain the amount of noise present
        in the background while ignoring the regions where the cross corr...
        are expected (5pt around the zero lags position).
        '''
        # Sarr/Narr/Varr: signal, noise, and SNR-thresholded signal for the
        # 8 correlation channels at each (power, current) pixel.
        self.Sarr = np.zeros([8, self.d2.pt, self.d3.pt])
        self.Narr = np.zeros([8, self.d2.pt, self.d3.pt])
        self.Varr = np.zeros([8, self.d2.pt, self.d3.pt])
        S = np.zeros(8)
        N = np.zeros(8)
        V = np.zeros(8)
        for x2 in range(self.d2.pt):
            for x3 in range(self.d3.pt):
                # Auto-correlations: always accepted as signal.
                S[0], N[0] = get_SNR(self.I1I1[:, x2, x3], cpt)
                S[1], N[1] = get_SNR(self.Q1Q1[:, x2, x3], cpt)
                S[2], N[2] = get_SNR(self.I2I2[:, x2, x3], cpt)
                S[3], N[3] = get_SNR(self.Q2Q2[:, x2, x3], cpt)
                V[:4] = S[:4]
                # Cross-correlations: only kept if above snr * background.
                S[4], N[4] = get_SNR(self.I1I2[:, x2, x3], cpt)
                S[5], N[5] = get_SNR(self.I1Q2[:, x2, x3], cpt)
                S[6], N[6] = get_SNR(self.Q1I2[:, x2, x3], cpt)
                S[7], N[7] = get_SNR(self.Q1Q2[:, x2, x3], cpt)
                V[4] = S[4] if abs(S[4]) > snr * N[4] else 0.0
                V[5] = S[5] if abs(S[5]) > snr * N[5] else 0.0
                V[6] = S[6] if abs(S[6]) > snr * N[6] else 0.0
                V[7] = S[7] if abs(S[7]) > snr * N[7] else 0.0
                self.Sarr[:, x2, x3] = S
                self.Narr[:, x2, x3] = N
                self.Varr[:, x2, x3] = V
def get_SNR(array, distance):
    """Return ``(signal, noise)`` for one correlation trace.

    The signal is the offset-corrected peak value found within ``distance``
    samples of the trace center; the noise is the background level estimated
    away from that peak.
    """
    peak_idx = find_absPeakPos(array, distance)
    background = getOffset(array, peak_idx, 4)
    return array[peak_idx] - background, calU(array, peak_idx, distance)
def calU(z1, lags0, cpt):
'''
This function removes from an array
cpt points around the lag0 position
and returns the square root variance of this new array.
Get background noise value of the cross correlation data.
'''
z2 = z1[:lags0 - cpt] * 1.0
z3 = z1[lags0 + cpt:] * 1.0
return abs(np.sqrt(np.var(np.concatenate([z2, z3]))))/2.0
def getOffset(z1, lags0, cpt):
'''
This function removes from an array
cpt points around the lag0 position
and returns the mean offset
'''
z2 = z1[:lags0 - cpt] * 1.0
z3 = z1[lags0 + cpt:] * 1.0
return abs(np.mean(np.concatenate([z2, z3])))
def find_switch(array, threshold=1e-9):
    """Find the first single-pixel glitch in a 1-D sequence.

    A glitch is detected as two point-to-point jumps larger than
    ``threshold`` on consecutive samples (a jump away from the trend
    immediately followed by another jump); the index of the first of the
    two jumps is returned.  Returns ``None`` when no glitch is found.
    Note: because the jump index starts at 0, a large step right at index 1
    is also reported (as 0) - this mirrors the historical behavior.
    """
    prev = array[0]
    last_jump = 0
    for pos, cur in enumerate(array):
        if abs(prev - cur) > threshold:
            if pos - 1 == last_jump:
                # two consecutive jumps -> glitch found
                return last_jump
            last_jump = pos
        prev = cur
def fix_switch_part1(array, pos):
    '''
    The fix only works for continuous functions.
    It tries to find a point such that the first and second
    derivative is smooth at the position.
    '''
    # NOTE(review): 'pos' is currently unused and only the second derivative
    # is returned - this looks like an unfinished implementation; the actual
    # smoothing/repair step at 'pos' is missing. Confirm before relying on it.
    darray = xderiv(array)  # first x-derivative
    ddarray = xderiv(darray)  # second x-derivative
    return ddarray
def find_absPeakPos(someArray, range=1):
    """Locate the index of the absolute-value peak near the array center.

    Searches ``range`` samples on either side of the midpoint of
    ``someArray`` and returns the index (into the full array) of the
    largest ``abs(value)``.  Assumes the array mean is ~0 so the peak
    dominates.

    Note: the parameter name shadows the builtin ``range``; it is kept for
    backward compatibility with existing keyword callers.
    """
    magnitudes = np.abs(someArray * 1.0)
    center = int(len(magnitudes) / 2)  # midpoint, rounded down
    window = magnitudes[center - range:center + range + 1]
    return np.argmax(window) + center - range
def xderiv(d2MAT, dx=1.0, axis=0):
    '''
    This derivative is inaccurate at the edges.
    Calculates a 3p derivative of a 1D, 2D matrix.
    This does not require you to shift the xaxis by one half pt.
    dx = distance between points
    '''
    # NOTE(review): for 2-D input, the axis==1 branch pads/differences along
    # rows (numpy axis 0) and the axis==0 branch along columns (numpy axis 1),
    # i.e. the naming appears swapped relative to numpy convention - confirm.
    # Also, any axis value other than 0 or 1 would raise NameError on 'dy'.
    if len(d2MAT.shape) > 1:
        if axis == 1:
            ''' Not tested yet could be faster than a matrix transpose'''
            # Zero-pad one row on each side, then average forward and
            # backward differences to get a centered 3-point derivative.
            a2 = np.zeros([d2MAT.shape[0] + 2, d2MAT.shape[1]])
            a2[1:-1, :] = d2MAT
            m1 = d2MAT - a2[:-2, :]
            m2 = a2[2:, :] - d2MAT
            dy = (m1 + m2) / 2.0
            # Edge values are copied from their neighbors (inaccurate there).
            dy[0, :] = dy[1, :]
            dy[-1, :] = dy[-2, :]
        elif axis == 0:
            # Same scheme, padding one column on each side.
            a2 = np.zeros([d2MAT.shape[0], d2MAT.shape[1] + 2])
            a2[:, 1:-1] = d2MAT
            m1 = d2MAT - a2[:, :-2]
            m2 = a2[:, 2:] - d2MAT
            dy = (m1 + m2) / 2.0
            dy[:, 0] = dy[:, 1]
            dy[:, -1] = dy[:, -2]
        return dy / dx
    else:
        # 1-D case: centered 3-point derivative with copied edge values.
        a2 = np.zeros([d2MAT.shape[0] + 2])
        a2[1:-1] = d2MAT
        m1 = d2MAT - a2[:-2]
        m2 = a2[2:] - d2MAT
        dy = (m1 + m2) / 2.0
        dy[0] = dy[1]
        dy[-1] = dy[-2]
        return dy / dx
def find_nearest(someArray, value):
    """Return the index of the element of ``someArray`` closest to ``value``.

    Usage: ``indexZero = find_nearest(myarray, 0.0)``
    Equivalent to ``abs(someArray - value).argmin()``.
    """
    return abs(someArray - value).argmin()
def fitfunc(G, Tn, T, vc):
    """Shot-noise power model used to fit gain ``G`` and noise temp ``Tn``.

    Parameters: G system gain, Tn amplifier noise temperature [K],
    T sample temperature [K], vc variable carrier supplying Z0, B, f,
    dRm (differential resistance trace) and I (bias current array).
    Returns the modeled detected power for each bias current.
    Amplifier and circulator impedance are both assumed to be Z0.
    """
    mismatch = (vc.Z0 / (vc.Z0 + vc.dRm)) ** 2.0  # impedance mismatch factor
    v2_zpf = h * vc.f * vc.Z0 / 2.0               # zero-point fluctuation term
    v2_shot = 2.0 * e * np.abs(vc.I) * vc.dRm * vc.dRm * mismatch
    # +1e-99 guards against division by zero where dRm (or T) vanishes
    v2_thermal = 4.0 * kB * T * vc.dRm * mismatch + 1e-99
    ratio_plus = (v2_shot + v2_zpf) / (v2_thermal)
    ratio_minus = (v2_shot - v2_zpf) / (v2_thermal)
    Svi = v2_thermal / (2.0 * vc.Z0) * (
        ratio_plus / np.tanh(ratio_plus) + ratio_minus / np.tanh(ratio_minus))
    return vc.B * G * (Svi + kB * Tn)
def fitfun2(params, vc, digi):
    '''
    req: params with G, Tn, T; and vc as variable carrier
    return: fitting value or array
    also set digi='D1' or digi='D2'
    This function loads the params values
    such that the values corresponding to the digitizer are used
    '''
    # Shared parameters for both digitizers.
    T = params['T'].value
    vc.Tz = params['Tz'].value
    # Per-digitizer gain, noise temperature, and measurement frequency.
    # NOTE(review): a digi value other than 'D1'/'D2' leaves G/Tn unbound
    # and raises NameError in fitfunc - callers must pass a valid digi.
    if digi == 'D1':
        G = params['G1'].value
        Tn = params['Tn1'].value
        vc.f = vc.f1
    if digi == 'D2':
        G = params['G2'].value
        Tn = params['Tn2'].value
        vc.f = vc.f2
    return fitfunc(G, Tn, T, vc)
def cropTrace(trace, vc):
    """Zero out the regions of ``trace`` that are excluded from fitting.

    ``vc.crop`` holds two index pairs:
      * ``vc.crop[0] = (a, b)`` - an inner window ``[a:b)`` set to zero
        (e.g. around the critical current);
      * ``vc.crop[1] = (lo, hi)`` - everything before ``lo`` and from ``hi``
        up to (but not including) the last sample is set to zero.

    Typical construction of the crop indices::

        crop_within = find_nearest(I, -0.9e-6), find_nearest(I, 1.1e-6)
        crop_outside = find_nearest(I, -19.5e-6), find_nearest(I, 19.5e-6)
        crop = [crop_within, crop_outside]

    Returns a new array; the input is not modified.
    Note: the slice ``[hi:-1]`` deliberately mirrors the historical code and
    leaves the very last sample untouched.
    """
    masked = trace * 1.0  # float copy, leave the caller's array alone
    inner_lo, inner_hi = vc.crop[0]
    outer_lo, outer_hi = vc.crop[1]
    masked[inner_lo:inner_hi] = 0.0
    masked[0:outer_lo] = 0.0
    masked[outer_hi:-1] = 0.0
    return np.array(masked)
def get_residuals(pidx, vc, params, digi='D1'):
    '''
    returns residuals, and difference between min(data) - min(fit)
    '''
    # Select the measured power trace and its model for the requested
    # digitizer; 'factor' converts power to photon number units.
    # NOTE(review): a digi value other than 'D1'/'D2' leaves factor/data/SNf
    # unbound and raises NameError.
    if digi == 'D1':
        factor = vc.f1*h*vc.B*params['G1'].value  # factor to photon #
        data = np.array(vc.cPD1[pidx]) * 1.0
        SNf = fitfun2(params, vc, digi='D1')
    if digi == 'D2':
        factor = vc.f2*h*vc.B*params['G2'].value
        data = np.array(vc.cPD2[pidx]) * 1.0
        SNf = fitfun2(params, vc, digi='D2')
    res = np.array(np.abs((data - SNf)/factor))
    # Zero the residuals in the excluded current ranges (critical current
    # region and the trace edges).
    res2 = cropTrace(res, vc)
    pmin = np.abs(data.min() - SNf.min())/factor  # adding additional weight to respect min values
    # Restore the residuals immediately around the superconducting branch
    # (zero-current position) so the fit respects that region.
    scpos = vc.Ib0
    p = np.abs(data[scpos-1:scpos+2] - SNf[scpos-1:scpos+2])/factor
    res2[scpos-1:scpos+2] = p
    # d0 = np.mean(np.sort(data)[:5])/factor
    # d1 = np.mean(np.sort(SNf)[:5])/factor
    # p = np.abs(d0-d1)
    # print res2
    return res2 + pmin
def ministuff(params, vc, pidx, digi='D1'):
    """Residual function handed to ``lmfit.minimize`` for one digitizer.

    Parameters
    ----------
    params : lmfit Parameters with G, Tn and T entries.
    vc : variable carrier supplying I (bias currents, Amps) and dRm
        (differential resistance trace) plus the crop indices used to mask
        the critical-current region and the inaccurate trace edges.
    pidx : index selecting which power trace to fit.
    digi : 'D1' or 'D2', the digitizer whose data/parameters are used.

    Returns the cropped residual array (measured minus fitted, in photon
    units) as produced by :func:`get_residuals`.
    """
    return get_residuals(pidx, vc, params, digi)
def bigstuff(params, vc, pidx):
    """Combined residual for fitting both digitizers simultaneously.

    Multiplies the (shifted) residuals of D1 and D2 so that only parameter
    sets which fit both shot-noise traces well yield a small result.
    """
    residual_d1 = get_residuals(pidx, vc, params, digi='D1')
    residual_d2 = get_residuals(pidx, vc, params, digi='D2')
    combined = abs(residual_d1 + 1) * abs(residual_d2 + 1)
    return combined - 1.0
def DoSNfits(vc, plotFit=False):
    '''
    Loading the data files I1I1, Q1Q1, I2I2, Q2Q2, Vm
    d1, d2, d3 are all the same since they all originate from the same type of
    measurement.
    before running it needs details of files and parameters to use.
    Those are made by creating a variables_class;
    example:
    vc = my_variables_class()
    which contains the following default settings
    vc.Z0 = 50.0
    vc.Zopt = 50.0
    vc.B = 1e5
    vc.f1 = 4.1e9
    vc.f2 = 4.8e9
    vc.RTR = 1009.1 * 1e3  # Ib Resistance in Ohms
    vc.RG = 1000.0  # Pre Amp gain factor
    additionally the filenames need to be defined in there:
    simply give the base filenames as:
    vc.filein1 = 'S1_949_G0mV_SN_PCovMat_cI1I1.mtx'
    vc.filein2 = 'S1_949_G0mV_SN_PCovMat_cQ1Q1.mtx'
    vc.filein3 = 'S1_949_G0mV_SN_PCovMat_cI2I2.mtx'
    vc.filein4 = 'S1_949_G0mV_SN_PCovMat_cQ2Q2.mtx'
    vc.filein5 = 'S1_949_G0mV_SN_PV.mtx'
    and of course the folder where to find these files
    vc.fifolder = 'sn_data//'
    Right now this def getSNfits does too many things for a single definition:
    - loads the defined mtx files into the vc class
    '''
    # NOTE(review): vc.Texp and vc.Ravg must be set by the caller; they are
    # not initialized in variable_carrier.__init__.
    if plotFit is True:
        plt.close('all')
        # plt.ion()
    SNr = SN_class()
    vc.load_and_go()
    vc.calc_diff_resistance()
    # create fitting parameters
    params = Parameters()
    params.add('Tn1', value=3.2, vary=True, min=0.0, max=15.0)
    params.add('G1', value=5.38e7, vary=True, min=1e3, max=1e17)
    params.add('Tn2', value=4.7, vary=True, min=0.0, max=15.0)
    params.add('G2', value=3.8e7, vary=True, min=1e3, max=1e17)
    params.add('T', value=vc.Texp, vary=False, min=0.001, max=0.06)
    params.add('Tz', value=0.0, vary=False, min=0.000, max=0.050)
    for pidx in range(vc.cPD1.shape[0]):
        '''
        scales Voltage_trace[selected power] to Volts
        obtains differential Resistance Rm
        fits selected data set
        records corresponding fit results into SN_r class values
        '''
        # vc.dRm = vc.dIVlp[pidx, ::-1]  # select dRm
        vc.dRm = vc.dIVlp[pidx]  # select dRm
        # Optionally override the measured dRm with a constant average value.
        if vc.Ravg > 0.0:
            vc.dRm = np.ones_like(vc.dRm)*vc.Ravg
        # correct diff/Resistance at SC branch:
        vc.dRm[vc.Ib0] = 0.0
        # Fit each digitizer separately first (T fixed), then refit both
        # together with the sample temperature T allowed to vary.
        result = minimize(ministuff, params, args=(vc, pidx, 'D1'))
        result = minimize(ministuff, result.params, args=(vc, pidx, 'D2'))
        # now fit all of them together:
        result.params['T'].vary = True
        result = minimize(bigstuff, result.params, args=(vc, pidx))
        print 'RF power', vc.d2.lin[pidx]
        print report_fit(result)
        # Record fitted values and their uncertainties for this power.
        SNr.G1del.append(result.params['G1'].stderr)
        SNr.Tn1del.append(result.params['Tn1'].stderr)
        SNr.G1.append(result.params['G1'].value)
        SNr.Tn1.append(result.params['Tn1'].value)
        SNr.G2del.append(result.params['G2'].stderr)
        SNr.Tn2del.append(result.params['Tn2'].stderr)
        SNr.G2.append(result.params['G2'].value)
        SNr.Tn2.append(result.params['Tn2'].value)
        SNr.T.append(result.params['T'].value)
        SNr.Tdel.append(result.params['T'].stderr)
        if plotFit is True:
            plotSNfit(result, vc, pidx, 'D1')
        if plotFit is True:
            plotSNfit(result, vc, pidx, 'D2')
    # lists to array
    SNr.G1 = np.array(SNr.G1)
    SNr.G2 = np.array(SNr.G2)
    SNr.Tn1 = np.array(SNr.Tn1)
    SNr.Tn2 = np.array(SNr.Tn2)
    SNr.G1del = np.array(SNr.G1del)
    SNr.G2del = np.array(SNr.G2del)
    SNr.Tn1del = np.array(SNr.Tn1del)
    SNr.Tn2del = np.array(SNr.Tn2del)
    # Photon numbers hemt input
    SNr.Pi1 = (kB * SNr.Tn1) / (h * vc.f1) + 0.5
    SNr.Pi1del = (kB * SNr.Tn1del) / (h * vc.f1)
    SNr.Pi2 = (kB * SNr.Tn2) / (h * vc.f2) + 0.5
    SNr.Pi2del = (kB * SNr.Tn2del) / (h * vc.f2)
    # Noise power at output at I = 0
    SNr.Pn1 = SNr.G1 * vc.B * SNr.Pi1 * (h * vc.f1)
    SNr.Pn1del = (SNr.Pn1 * np.sqrt((SNr.G1del / SNr.G1)**2 +
                                    (SNr.Tn1del / SNr.Tn1)**2))
    SNr.Pn2 = SNr.G2 * vc.B * SNr.Pi2 * (h * vc.f2)
    SNr.Pn2del = (SNr.Pn2 * np.sqrt((SNr.G2del / SNr.G2)**2 +
                                    (SNr.Tn2del / SNr.Tn2)**2))
    return SNr
def plotSNfit(result, vc, pidx, digi='D1'):
    ''' result : fitting results
    vc, variable carrier
    pidx power index
    digi = 'D1' or 'D2'
    '''
    # Select measured trace, model, and scaling for the requested digitizer;
    # 'Amp' is the amplifier noise floor that is subtracted before plotting
    # in photon units.
    if digi == 'D1':
        data = vc.cPD1[pidx] * 1.0
        SNfit = fitfun2(result.params, vc, 'D1')
        G = result.params['G1'].value
        Amp = G*vc.B*kB*result.params['Tn1'].value
        f = vc.f1
    if digi == 'D2':
        data = vc.cPD2[pidx] * 1.0
        SNfit = fitfun2(result.params, vc, 'D2')
        G = result.params['G2'].value
        Amp = G*vc.B*kB*result.params['Tn2'].value
        f = vc.f2
    plt.figure()
    title2 = (digi + ', RF-Drive: ' + str(vc.d2.lin[pidx]))
    # Plot measured data and fit, converted to photon number at the input.
    plt.plot(vc.I, (data-Amp)/(vc.B*G*h*f))
    # plt.plot(vc.I*1e6, 1e9*data)
    # NOTE(review): plt.hold was removed in matplotlib 3.0; this file
    # targets the old matplotlib/Python 2 stack.
    plt.hold(True)
    plt.plot(vc.I, (SNfit-Amp)/(vc.B*G*h*f))
    # plt.plot(vc.I*1e6, 1e9*SNfit)
    plt.title(title2)
    plt.hold(False)
| gpl-2.0 |
Erotemic/ibeis | ibeis/expt/test_result.py | 1 | 108427 | # -*- coding: utf-8 -*-
# TODO: find unused functions and kill them
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import copy
import operator
import utool as ut
import vtool_ibeis as vt
import numpy as np
import itertools as it
from functools import partial
from six import next
from six.moves import zip, range, map, reduce
from ibeis.expt import cfghelpers
from ibeis.expt import experiment_helpers
print, rrr, profile = ut.inject2(__name__)
def build_cmsinfo(cm_list, qreq_):
    """
    Helper function to report results over multiple queries (chip matches).
    Basically given a group of queries of the same name, we only care if one of
    them is correct. This emulates encounters.
    Runs queries of a specific configuration returns the best rank of each
    query.
    Args:
        cm_list (list): list of chip matches
        qreq_ (QueryRequest): request that computed the chip matches.
    Returns:
        dict: cmsinfo - info about multiple chip matches cm_list
    CommandLine:
        python -m ibeis get_query_result_info
        python -m ibeis get_query_result_info:0 --db lynx \
            -a :qsame_imageset=True,been_adjusted=True,excluderef=True -t :K=1
        python -m ibeis get_query_result_info:0 --db lynx \
            -a :qsame_imageset=True,been_adjusted=True,excluderef=True -t :K=1 --cmd
    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.expt.test_result import *  # NOQA
        >>> import ibeis
        >>> qreq_ = ibeis.main_helpers.testdata_qreq_(a=[':qindex=0:3,dindex=0:5'])
        >>> cm_list = qreq_.execute()
        >>> cmsinfo = build_cmsinfo(cm_list, qreq_)
        >>> print(ut.repr2(cmsinfo))
    Ignore:
        ibeis -e rank_cmc --db humpbacks -a :has_any=hasnotch,mingt=2 \
            -t :proot=BC_DTW --show --nocache-big
        ibeis -e rank_cmc --db humpbacks -a :is_known=True,mingt=2 \
            -t :pipeline_root=BC_DTW
        ibeis -e rank_cmc --db humpbacks -a :is_known=True \
            -t :pipeline_root=BC_DTW \
            --qaid=1,9,15,16,18 --daid-override=1,9,15,16,18,21,22 \
            --show --debug-depc
        --clear-all-depcache
    """
    ibs = qreq_.ibs
    qaids = qreq_.qaids
    daids = qreq_.daids
    # Get the groundtruth ranks and accuracy measures
    # qx2_cminfo = [cm.summarize(qreq_) for cm in cm_list]
    qx2_cminfo = [cm.extend_results(qreq_).summarize(qreq_) for cm in cm_list]
    # Stack the per-query summary dicts into one dict of 'qx2_*' lists.
    cmsinfo = ut.dict_stack(qx2_cminfo, 'qx2_')
    cmsinfo['qx2_gt_rank'] = ut.replace_nones(cmsinfo['qx2_gt_rank'] , -1)
    # Dead code kept for reference: per-query average precision / mAP.
    if False:
        qx2_gtaids = ibs.get_annot_groundtruth(qaids, daid_list=daids)
        qx2_avepercision = np.array(
            [cm.get_average_percision(ibs=ibs, gt_aids=gt_aids) for
             (cm, gt_aids) in zip(cm_list, qx2_gtaids)])
        cmsinfo['qx2_avepercision'] = qx2_avepercision
        # Compute mAP score  # TODO: use mAP score
        # (Actually map score doesn't make much sense if using name scoring
        #mAP = qx2_avepercision[~np.isnan(qx2_avepercision)].mean()  # NOQA
    qaids = qreq_.qaids
    #qaids2 = [cm.qaid for cm in cm_list]
    # qnids = qreq_.get_qreq_annot_nids(qaids)  # TODO: use new nid getter
    qnids = ibs.get_annot_nids(qaids)
    unique_dnids = np.unique(ibs.get_annot_nids(qreq_.daids))
    # Group the chip matches by query name (nid) to emulate encounters.
    unique_qnids, groupxs = ut.group_indices(qnids)
    cm_group_list = ut.apply_grouping(cm_list, groupxs)
    qnid2_aggnamescores = {}
    qnx2_nameres_info = []
    # Ranked list aggregation over groups of query annots
    nameres_info_list = []
    for qnid, cm_group in zip(unique_qnids, cm_group_list):
        # Per-cm mapping of database nid -> name score.
        nid2_name_score_group = [
            dict([(nid, cm.name_score_list[nidx])
                  for nid, nidx in cm.nid2_nidx.items()])
            for cm in cm_group
        ]
        # Align scores over all database names; missing names get -inf so
        # nanmax picks the best score any query in the group achieved.
        aligned_name_scores = np.array([
            ut.dict_take(nid_to_name_score, unique_dnids.tolist(), -np.inf)
            for nid_to_name_score in nid2_name_score_group
        ]).T
        name_score_list = np.nanmax(aligned_name_scores, axis=1)
        qnid2_aggnamescores[qnid] = name_score_list
        # sort
        sortx = name_score_list.argsort()[::-1]
        sorted_namescores = name_score_list[sortx]
        sorted_dnids = unique_dnids[sortx]
        ## infer agg name results
        success = sorted_dnids == qnid
        failure = np.logical_and(~success, sorted_dnids > 0)
        gt_name_rank = None if not np.any(success) else np.where(success)[0][0]
        gf_name_rank = None if not np.any(failure) else np.nonzero(failure)[0][0]
        # NOTE(review): if no success/failure entry exists, gt_name_rank /
        # gf_name_rank stay None and the indexing below raises - presumably
        # both cases always occur in practice; confirm.
        gt_nid = sorted_dnids[gt_name_rank]
        gf_nid = sorted_dnids[gf_name_rank]
        gt_name_score = sorted_namescores[gt_name_rank]
        gf_name_score = sorted_namescores[gf_name_rank]
        if gt_name_score <= 0:
            # ensure failure cases are loud give them the worst possible rank
            # instead of a random one.
            gt_name_rank = len(qreq_.dnids) + 1
        qnx2_nameres_info = {}
        qnx2_nameres_info['qnid'] = qnid
        qnx2_nameres_info['gt_nid'] = gt_nid
        qnx2_nameres_info['gf_nid'] = gf_nid
        qnx2_nameres_info['gt_name_rank'] = gt_name_rank
        qnx2_nameres_info['gf_name_rank'] = gf_name_rank
        qnx2_nameres_info['gt_name_score'] = gt_name_score
        qnx2_nameres_info['gf_name_score'] = gf_name_score
        nameres_info_list.append(qnx2_nameres_info)
    nameres_info = ut.dict_stack(nameres_info_list, 'qnx2_')
    cmsinfo.update(nameres_info)
    return cmsinfo
def combine_testres_list(ibs, testres_list):
    """
    combine test results over multiple annot configs
    The combination of pipeline and annotation config is indexed by cfgx.
    A cfgx corresponds to a unique query request
    CommandLine:
        python -m ibeis --tf combine_testres_list
        python -m ibeis --tf -draw_rank_cmc --db PZ_MTEST --show
        python -m ibeis --tf -draw_rank_cmc --db PZ_Master1 --show
        python -m ibeis --tf -draw_rank_cmc --db PZ_MTEST --show -a varysize -t default
        python -m ibeis --tf -draw_rank_cmc --db PZ_MTEST --show -a varysize -t default
    >>> # DISABLE_DOCTEST
    >>> from ibeis.expt.test_result import *  # NOQA
    >>> from ibeis.expt import harness
    >>> ibs, testres = harness.testdata_expts('PZ_MTEST', ['varysize'])
    """
    # NOTE(review): 'import copy' shadows the module-level import; redundant
    # but harmless.
    import copy
    from ibeis.expt import annotation_configs
    acfg_list = [tr.acfg for tr in testres_list]
    acfg_lbl_list = annotation_configs.get_varied_acfg_labels(acfg_list)
    flat_acfg_list = annotation_configs.flatten_acfg_list(acfg_list)
    # Split annot configs into the part shared by all and the varied parts.
    nonvaried_acfg, varied_acfg_list = ut.partition_varied_cfg_list(flat_acfg_list)
    def combine_lbls(lbl, acfg_lbl):
        # Join a pipeline label with an annot-config label, skipping empties.
        if len(lbl) == 0:
            return acfg_lbl
        if len(acfg_lbl) == 0:
            return lbl
        return lbl + '+' + acfg_lbl
    # TODO: depcirate cfg_dict list for pcfg_list (I think)
    # Concatenate the per-testres lists so cfgx indexes the combined result.
    agg_cfg_list = ut.flatten([tr.cfg_list for tr in testres_list])
    agg_cfgx2_qreq_ = ut.flatten([tr.cfgx2_qreq_ for tr in testres_list])
    agg_cfgdict_list = ut.flatten([tr.cfgdict_list for tr in testres_list])
    agg_cfgx2_cmsinfo = ut.flatten([tr.cfgx2_cmsinfo for tr in testres_list])
    agg_varied_acfg_list = ut.flatten([
        [acfg] * len(tr.cfg_list)
        for tr, acfg in zip(testres_list, varied_acfg_list)
    ])
    agg_cfgx2_lbls = ut.flatten(
        [[combine_lbls(lbl, acfg_lbl) for lbl in tr.cfgx2_lbl]
         for tr, acfg_lbl in zip(testres_list, acfg_lbl_list)])
    agg_cfgx2_acfg = ut.flatten(
        [[copy.deepcopy(acfg)] * len(tr.cfg_list) for
         tr, acfg in zip(testres_list, acfg_list)])
    big_testres = TestResult(agg_cfg_list, agg_cfgx2_lbls,
                             agg_cfgx2_cmsinfo, agg_cfgx2_qreq_)
    # Give the big test result an acfg that is common between everything
    big_testres.acfg = annotation_configs.unflatten_acfgdict(nonvaried_acfg)
    # TODO: cfgdict_list -> pcfg_list
    big_testres.cfgdict_list = agg_cfgdict_list  # TODO: depricate
    big_testres.common_acfg = annotation_configs.compress_aidcfg(big_testres.acfg)
    big_testres.common_cfgdict = reduce(ut.dict_intersection, big_testres.cfgdict_list)
    big_testres.varied_acfg_list = agg_varied_acfg_list
    big_testres.nonvaried_acfg = nonvaried_acfg
    # Per-cfgx pipeline dicts with the common keys stripped out.
    big_testres.varied_cfg_list = [
        ut.delete_dict_keys(cfgdict.copy(), list(big_testres.common_cfgdict.keys()))
        for cfgdict in big_testres.cfgdict_list]
    big_testres.acfg_list = acfg_list
    big_testres.cfgx2_acfg = agg_cfgx2_acfg
    big_testres.cfgx2_pcfg = agg_cfgdict_list
    assert len(agg_cfgdict_list) == len(agg_cfgx2_acfg)
    #big_testres.acfg
    testres = big_testres
    # big_testres = testres
    return testres
@six.add_metaclass(ut.ReloadingMetaclass)
class TestResult(ut.NiceRepr):
"""
CommandLine:
export SMK_PIPE="smk:nwords=[64000],sv=[False]"
ibeis TestResult --db PZ_MTEST -a ctrl -p $SMK_PIPE
ibeis TestResult --db Oxford -a oxford -p $SMK_PIPE
Example:
>>> # Script
>>> from ibeis.init import main_helpers
>>> import utool as ut
>>> ibs, testres = main_helpers.testdata_expts()
>>> testres.help()
>>> actions = testres.get_actions()
>>> testres.map_score()
>>> ut.qtensure()
>>> prompt = ut.InteractivePrompt(actions)
>>> prompt.loop()
"""
    def __init__(testres, cfg_list, cfgx2_lbl, cfgx2_cmsinfo, cfgx2_qreq_):
        """Bundle parallel per-config lists of pipeline cfgs, labels,
        chip-match summaries, and query requests.

        All four arguments are parallel lists indexed by cfgx; each cfgx
        corresponds to one executed query request.
        """
        assert len(cfg_list) == len(cfgx2_lbl), (
            'bad lengths1: %r != %r' % (len(cfg_list), len(cfgx2_lbl)))
        assert len(cfgx2_qreq_) == len(cfgx2_lbl), (
            'bad lengths2: %r != %r' % (len(cfgx2_qreq_), len(cfgx2_lbl)))
        assert len(cfgx2_cmsinfo) == len(cfgx2_lbl), (
            'bad lengths3: %r != %r' % (len(cfgx2_cmsinfo), len(cfgx2_lbl)))
        # TODO rename cfg_list to pcfg_list
        testres.cfg_list = cfg_list
        testres.cfgx2_lbl = cfgx2_lbl
        testres.cfgx2_cmsinfo = cfgx2_cmsinfo
        testres.cfgx2_qreq_ = cfgx2_qreq_
        # TODO: uncomment
        #testres.cfgx2_acfg
        #testres.cfgx2_qcfg
        #testres.acfg_list = None  #
        testres.lbl = None
        testres.testnameid = None
@classmethod
def from_cms(TestResult, cm_list, qreq_):
    """Alternate constructor: wrap one query request and its ChipMatch
    results as a single-config TestResult."""
    cmsinfo = build_cmsinfo(cm_list, qreq_)
    # one config whose pipeline dict is the request's qparams
    # (should actually be the specified dict)
    testres = TestResult([qreq_.qparams], ['unspecified'],
                         [cmsinfo], [qreq_])
    return testres
def __str__(testres):
    """String form is the reconstructed command-line flag string."""
    flagstr = testres.reconstruct_test_flags()
    return flagstr
def __nice__(testres):
    """Short summary used by NiceRepr: '<dbname> nCfg=... [nQ/nD info]'."""
    dbname = None if testres.ibs is None else testres.ibs.get_dbname()
    # hashkw = dict(_new=True, pathsafe=False)
    summary = 'nCfg=%s' % (testres.nConfig,)
    if testres.nConfig == 1:
        # single config: also show query/database sizes and pipeline hash
        qreq_ = testres.cfgx2_qreq_[0]
        summary += ' nQ=%s, nD=%s %s' % (
            len(qreq_.qaids), len(qreq_.daids), qreq_.get_pipe_hashid())
    return '%s %s' % (dbname, summary)
@property
def ibs(testres):
    """The controller shared by all requests; asserts they agree on dbdir."""
    controllers = []
    for qreq_ in testres.cfgx2_qreq_:
        try:
            controllers.append(qreq_.ibs)
        except AttributeError:
            # depcache-based requests expose the controller indirectly
            controllers.append(qreq_.depc.controller)
    first = controllers[0]
    for other in controllers:
        assert first.get_dbdir() == other.get_dbdir(), (
            'all requests must use the same database')
    return first
@property
def qaids(testres):
    """The shared query aids; only valid when all configs use the same ones."""
    assert testres.has_constant_qaids(), 'must have constant qaids to use this property'
    shared_qaids = testres.cfgx2_qaids[0]
    return shared_qaids
@property
def nConfig(testres):
    """Number of test configurations.

    FIXME: this is the number of requests, not the number of distinct
    pipeline configurations.
    """
    num_requests = len(testres.cfg_list)
    return num_requests
@property
def unique_pcfgs(testres):
    """Pipeline configs deduplicated by object identity."""
    first_idxs = ut.unique_indices(map(id, testres.cfgx2_pcfg))
    return ut.take(testres.cfgx2_pcfg, first_idxs)
@property
def nQuery(testres):
    """Number of query annotations (requires constant qaids)."""
    num_queries = len(testres.qaids)
    return num_queries
@property
def rank_mat(testres):
    """Convenience alias for get_rank_mat()."""
    mat = testres.get_rank_mat()
    return mat
@property
def cfgx2_daids(testres):
    """Database aids used by each request, indexed by cfgx."""
    return [request.daids for request in testres.cfgx2_qreq_]
@property
def cfgx2_qaids(testres):
    """Query aids used by each request, indexed by cfgx."""
    return [request.qaids for request in testres.cfgx2_qreq_]
def has_constant_daids(testres):
    """True when every config queries against the same database annots."""
    daids_per_cfg = testres.cfgx2_daids
    return ut.allsame(daids_per_cfg)
def has_constant_qaids(testres):
    """True when every config uses the same query annots."""
    qaids_per_cfg = testres.cfgx2_qaids
    return ut.allsame(qaids_per_cfg)
def has_constant_length_daids(testres):
    """True when all configs have the same NUMBER of database annots
    (the ids themselves may differ)."""
    daid_lens = [len(daids) for daids in testres.cfgx2_daids]
    return ut.allsame(daid_lens)
def has_constant_length_qaids(testres):
    """True when all configs have the same NUMBER of query annots
    (the ids themselves may differ)."""
    qaid_lens = [len(qaids) for qaids in testres.cfgx2_qaids]
    return ut.allsame(qaid_lens)
def get_infoprop_list(testres, key, qaids=None):
    """
    Returns one array per config for the requested cmsinfo property.

    Args:
        key (str): a cmsinfo column (e.g. 'qx2_gt_rank'), or the special
            key 'participant' which flags whether each qaid took part in
            the config.
        qaids (list): if given, each array is aligned to these qaids,
            with np.nan for qaids not present in that config.

    Returns:
        list: cfgx2_infoprop - one np.array per config
    """
    if key == 'participant':
        # Get if qaids are part of the config
        cfgx2_infoprop = [np.in1d(qaids, aids_) for aids_ in testres.cfgx2_qaids]
    else:
        _tmp1_cfgx2_infoprop = ut.get_list_column(testres.cfgx2_cmsinfo, key)
        # replace None entries with nan so the arrays stay numeric
        _tmp2_cfgx2_infoprop = list(map(
            np.array,
            ut.util_list.replace_nones(_tmp1_cfgx2_infoprop, np.nan)))
        if qaids is None:
            cfgx2_infoprop = _tmp2_cfgx2_infoprop
        else:
            # Use nan if the aid doesnt exist
            cfgx2_qaid2_qx = [dict(zip(aids_, range(len(aids_))))
                              for aids_ in testres.cfgx2_qaids]
            qxs_list = [ut.dict_take(qaid2_qx, qaids, None)
                        for qaid2_qx in cfgx2_qaid2_qx]
            cfgx2_infoprop = [
                [np.nan if x is None else props[x] for x in qxs]
                for props, qxs in zip(_tmp2_cfgx2_infoprop, qxs_list)]
        if key == 'qx2_gt_rank' or key.endswith('_rank'):
            # hack: failed queries are marked rank -1; remap them to the
            # worst possible rank so sorting/histograms behave
            wpr = testres.get_worst_possible_rank()
            cfgx2_infoprop = [np.array([wpr if rank == -1 else rank
                                        for rank in infoprop])
                              for infoprop in cfgx2_infoprop]
    return cfgx2_infoprop
def get_infoprop_mat(testres, key, qaids=None):
    """
    Stacks the per-config infoprop arrays into a single matrix.

    Returns:
        ndarray: shape (num_queries, num_configs); each column is one
            config's values for `key` (e.g. 'qx2_gt_raw_score').
    """
    per_cfg_props = testres.get_infoprop_list(key, qaids)
    # configs become columns after the transpose
    infoprop_mat = np.vstack(per_cfg_props).T
    return infoprop_mat
@ut.memoize
def get_rank_mat(testres, qaids=None):
    """Matrix of best groundtruth ranks, shape (num_queries, num_configs).
    Memoized because it is requested repeatedly by plotting code."""
    return testres.get_infoprop_mat(key='qx2_gt_rank', qaids=qaids)
def get_worst_possible_rank(testres):
    """One more than the largest database size over all requests; used as
    the sentinel rank for failed queries."""
    largest_dsize = max(len(qreq_.daids) for qreq_ in testres.cfgx2_qreq_)
    return largest_dsize + 1
def get_rank_histograms(testres, bins=None, key=None, join_acfgs=False):
    """
    Per-config histogram of groundtruth ranks.

    Args:
        bins: histogram bin edges; None uses the coarse default bins,
            'dense' uses one bin per possible rank.
        key (str): infoprop key to histogram (default 'qx2_gt_rank')
        join_acfgs (bool): if True, sum histograms within joined config
            groups (cross-validation folds).

    Returns:
        tuple: (cfgx2_hist, bins)

    Ignore:
        testres.get_infoprop_mat('qnx2_gt_name_rank')
        testres.get_infoprop_mat('qnx2_gf_name_rank')
        testres.get_infoprop_mat('qnx2_qnid')

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.test_result import *  # NOQA
        >>> from ibeis.init import main_helpers
        >>> ibs, testres = main_helpers.testdata_expts('testdb1', a=['default'])
        >>> bins = 'dense'
        >>> key = 'qnx2_gt_name_rank'
        >>> config_hists = testres.get_rank_histograms(bins, key=key)
    """
    if key is None:
        key = 'qx2_gt_rank'
        #key = 'qnx2_gt_name_rank'
    if bins is None:
        bins = testres.get_rank_histogram_bins()
    elif bins == 'dense':
        bins = np.arange(testres.get_worst_possible_rank() + 1)
    cfgx2_ranks = testres.get_infoprop_list(key=key)
    # Use numpy histogram repr
    cfgx2_hist = np.zeros((len(cfgx2_ranks), len(bins) - 1), dtype=np.int32)
    for cfgx, ranks in enumerate(cfgx2_ranks):
        freq = np.histogram(ranks, bins=bins)[0]
        cfgx2_hist[cfgx] = freq
    if join_acfgs:
        # Hack for turtles / general way of doing cross validation
        # however, we need to change the name
        groupxs = testres.get_cfgx_groupxs()
        cfgx2_hist = np.array([
            np.sum(group, axis=0)
            for group in ut.apply_grouping(cfgx2_hist, groupxs)
        ])
    return cfgx2_hist, bins
def get_rank_percentage_cumhist(testres, bins='dense', key=None,
                                join_acfgs=False):
    r"""
    Cumulative rank histogram expressed as a percentage of queries.

    Args:
        bins (unicode): (default = u'dense')
        key (None): (default = None)
        join_acfgs (bool): (default = False)

    Returns:
        tuple: (cfgx2_cumhist_percent, edges)

    CommandLine:
        python -m ibeis --tf TestResult.get_rank_percentage_cumhist
        python -m ibeis --tf TestResult.get_rank_percentage_cumhist \
            -t baseline -a unctrl ctrl
        python -m ibeis --tf TestResult.get_rank_percentage_cumhist \
            --db lynx \
            -a default:qsame_imageset=True,been_adjusted=True,excluderef=True \
            -t default:K=1 --show --cmd

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.test_result import *  # NOQA
        >>> from ibeis.init import main_helpers
        >>> ibs, testres = main_helpers.testdata_expts(
        >>>     'testdb1', a=['default:num_names=1,name_offset=[0,1]'])
        >>> bins = u'dense'
        >>> key = None
        >>> (config_cdfs, edges) = testres.get_rank_percentage_cumhist(bins)
        >>> result = ('(config_cdfs, edges) = %s' % (str((config_cdfs, edges)),))
        >>> print(result)
    """
    cfgx2_hist, edges = testres.get_rank_histograms(
        bins, key=key, join_acfgs=join_acfgs)
    cfgx2_cumhist = np.cumsum(cfgx2_hist, axis=1)
    # divide each row by its last cumulative count (= total #queries for
    # that config) to convert counts into percentages
    cfgx2_cumhist_percent = 100 * cfgx2_cumhist / cfgx2_cumhist.T[-1].T[:, None]
    return cfgx2_cumhist_percent, edges
def get_cfgx_groupxs(testres):
    r"""
    Returns the group indices of configurations specified to be joined.

    Configs sharing the same ``joinme`` annot-config id AND the same
    pipeline config land in the same group; everything else is a
    singleton group.

    Ignore:
        a = [
            'default:minqual=good,require_timestamp=True,view=left,crossval_enc=True,joinme=1',
            'default:minqual=good,require_timestamp=True,view=right,crossval_enc=True,joinme=1',
            'default:minqual=ok,require_timestamp=True,view=left,crossval_enc=True,joinme=2',
            'default:minqual=ok,require_timestamp=True,view=right,crossval_enc=True,joinme=2',
        ]
        >>> from ibeis.init import main_helpers
        >>> t = 'default:K=[1]'
        >>> ibs, testres = main_helpers.testdata_expts('WWF_Lynx_Copy', a=a, t=t)
        >>> testres.get_cfgx_groupxs()

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.expt.test_result import *  # NOQA
        >>> from ibeis.init import main_helpers
        >>> ibs, testres = main_helpers.testdata_expts(
        >>>     'PZ_MTEST',
        >>>     a=['default:qnum_names=1,qname_offset=[0,1],joinme=1,dpername=1',
        >>>        'default:qsize=1,dpername=[1,2]'],
        >>>     t=['default:K=[1,2]'])
        >>> groupxs = testres.get_cfgx_groupxs()
        >>> result = groupxs
        >>> print(result)
        [[6], [4], [0, 2], [7], [5], [1, 3]]
    """
    # Group-ids for annotations are determined by joinme labels
    # (used primarilly in cross-validation)
    acfg_joinid = [acfg['qcfg']['joinme'] for acfg in testres.cfgx2_acfg]
    # Anything that does not have a joinme groupid is standalone and must
    # be given a unique groupid
    gen_groupid = it.count(-1, step=-1)
    acfg_groupids = [next(gen_groupid) if grpid is None else grpid
                     for grpid in acfg_joinid]
    # Ensure that different pipeline configs are in different groups
    pcfg_groupids = ut.get_varied_cfg_lbls(testres.cfgx2_pcfg)
    cfg_groupids = list(zip(pcfg_groupids, acfg_groupids))
    groupxs = ut.group_indices(cfg_groupids)[1]
    return groupxs
def get_rank_histogram_bins(testres):
    """Coarse, easy-to-read histogram bin edges scaled to the database size."""
    wpr = testres.get_worst_possible_rank()
    if wpr > 50:
        return [0, 1, 5, 50, wpr, wpr + 1]
    if wpr > 5:
        return [0, 1, 5, wpr, wpr + 1]
    return [0, 1, 5]
def get_X_LIST(testres):
    """DEPRECATE or refactor: thresholds X for '#ranks < X' summaries,
    overridable on the command line."""
    return ut.get_argval('--rank-lt-list', type_=list, default=[1, 5])
def get_nLessX_dict(testres):
    """
    Build a (histogram) dictionary mapping X (as in #ranks < X) to a list
    of cfg scores.

    Returns:
        dict: maps int(X) -> ndarray of shape (nConfig,) counting, per
            config, the queries whose groundtruth rank is valid (>= 0)
            and strictly less than X.
    """
    X_LIST = testres.get_X_LIST()
    cfgx2_qx2_gt_rank = testres.get_infoprop_list('qx2_gt_rank')
    # NOTE: the old implementation pre-filled the dict with np.zeros
    # arrays that were unconditionally overwritten below; build directly.
    nLessX_dict = {}
    for X in X_LIST:
        # per config: count ranks in [0, X)
        cfgx2_nLessX = np.array([
            np.logical_and(0 <= qx2_gt_ranks, qx2_gt_ranks < X).sum(axis=0)
            for qx2_gt_ranks in cfgx2_qx2_gt_rank])
        nLessX_dict[int(X)] = cfgx2_nLessX
    return nLessX_dict
def get_all_varied_params(testres):
    r"""
    Returns the parameters that were varied between different
    configurations in this test.

    Returns:
        list: varied_params (annot-config keys first, then pipeline keys)

    CommandLine:
        python -m ibeis TestResult.get_all_varied_params

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.expt.test_result import *  # NOQA
        >>> import ibeis
        >>> testres = ibeis.testdata_expts(
        >>>     'PZ_MTEST', t='default:K=[1,2]')[1]
        >>> varied_params = sorted(testres.get_all_varied_params())
        >>> result = ('varied_params = %s' % (ut.repr2(varied_params),))
        >>> print(result)
        varied_params = ['K', '_cfgindex']
    """
    # only for big results
    pcfg_keys = set()
    for cfgdict in testres.varied_cfg_list:
        pcfg_keys.update(cfgdict.keys())
    acfg_keys = set()
    for acfg in testres.varied_acfg_list:
        acfg_keys.update(acfg.keys())
    return list(acfg_keys) + list(pcfg_keys)
def get_total_num_varied_params(testres):
    """Count of all parameters varied across configurations."""
    varied = testres.get_all_varied_params()
    return len(varied)
def get_param_basis(testres, key):
    """
    Returns what a param was varied between over all tests

    Args:
        key (str): parameter name (e.g. 'K', 'dcfg_sample_size'), or the
            special key 'len(daids)'.

    Returns:
        list: sorted unique values, or a single-element list when the
            param was constant across configs.
    """
    if key == 'len(daids)':
        basis = sorted(list(set([len(daids)
                                 for daids in testres.cfgx2_daids])))
    elif any([key in cfgdict for cfgdict in testres.varied_cfg_list]):
        basis = sorted(list(set([
            cfgdict[key]
            for cfgdict in testres.varied_cfg_list])))
    elif any([key in cfgdict for cfgdict in testres.varied_acfg_list]):
        basis = sorted(list(set([
            acfg[key]
            for acfg in testres.varied_acfg_list])))
    else:
        #assert False, 'param is not varied'
        # param was constant; look it up in the common/nonvaried configs
        if key in testres.common_cfgdict:
            basis = [testres.common_cfgdict[key]]
        elif key in testres.nonvaried_acfg:
            basis = [testres.nonvaried_acfg[key]]
        else:
            assert False, 'param=%r doesnt exist' % (key,)
    return basis
def get_param_val_from_cfgx(testres, cfgx, key):
    """
    Look up the value of `key` for config index `cfgx`, checking the
    varied dicts first, then the full per-config dicts.
    """
    if key == 'len(daids)':
        return len(testres.cfgx2_daids[cfgx])
    # --- HACK - the keys are different in varied dict for some reason ---
    elif any([key in cfgdict for cfgdict in testres.varied_cfg_list]):
        return testres.varied_cfg_list[cfgx][key]
    elif any([key in cfgdict for cfgdict in testres.varied_acfg_list]):
        return testres.varied_acfg_list[cfgx][key]
    # --- / Hack
    elif any([key in cfgdict for cfgdict in testres.cfgx2_pcfg]):
        return testres.cfgx2_pcfg[cfgx][key]
    elif any([key in cfgdict for cfgdict in testres.cfgx2_acfg]):
        return testres.cfgx2_acfg[cfgx][key]
    else:
        assert False, 'param=%r doesnt exist' % (key,)
def get_cfgx_with_param(testres, key, val):
    """
    Gets configs where the given parameter is held constant

    Returns:
        list: config indices whose value of `key` equals `val` (all
            indices when the param is not varied at all).
    """
    if key == 'len(daids)':
        cfgx_list = [cfgx for cfgx, daids in enumerate(testres.cfgx2_daids)
                     if len(daids) == val]
    elif any([key in cfgdict for cfgdict in testres.varied_cfg_list]):
        cfgx_list = [cfgx for cfgx, cfgdict in enumerate(testres.varied_cfg_list)
                     if cfgdict[key] == val]
    elif any([key in cfgdict for cfgdict in testres.varied_acfg_list]):
        cfgx_list = [cfgx for cfgx, acfg in enumerate(testres.varied_acfg_list)
                     if acfg[key] == val]
    else:
        # param not varied: every config matches (val is ignored here)
        if key in testres.common_cfgdict:
            cfgx_list = list(range(testres.nConfig))
        elif key in testres.nonvaried_acfg:
            cfgx_list = list(range(testres.nConfig))
        else:
            assert False, 'param=%r doesnt exist' % (key,)
        #assert False, 'param is not varied'
    return cfgx_list
def get_pipecfg_args(testres):
    """Reconstruct the -t (pipeline config) command line argument string."""
    if '_cfgstr' in testres.common_cfgdict:
        cfgstrs = [testres.common_cfgdict['_cfgstr']]
    else:
        cfgstrs = ut.unique_ordered(
            [cfg['_cfgstr'] for cfg in testres.varied_cfg_list])
    return ' '.join(cfgstrs)
def get_annotcfg_args(testres):
    """
    Reconstruct the -a (annot config) command line argument string.

    Falls through several historical key layouts for '_cfgstr'.

    CommandLine:
        # TODO: More robust fix
        # To reproduce the error
        ibeis -e rank_cmc --db humpbacks_fb -a default:mingt=2,qsize=10,dsize=100 default:qmingt=2,qsize=10,dsize=100 -t default:proot=BC_DTW,decision=max,crop_dim_size=500,crop_enabled=True,manual_extract=False,use_te_scorer=True,ignore_notch=True,te_score_weight=0.5 --show
    """
    if '_cfgstr' in testres.common_acfg['common']:
        annotcfg_args = [testres.common_acfg['common']['_cfgstr']]
    else:
        try:
            annotcfg_args = ut.unique_ordered([
                acfg['common']['_cfgstr']
                for acfg in testres.varied_acfg_list])
        except KeyError:
            # HACK FIX: some acfgs are flat (no 'common' subdict)
            try:
                annotcfg_args = ut.unique_ordered([
                    acfg['_cfgstr']
                    for acfg in testres.varied_acfg_list])
            except KeyError:
                # last resort: qcfg-prefixed key layout
                annotcfg_args = ut.unique_ordered([
                    acfg['qcfg__cfgstr']
                    for acfg in testres.varied_acfg_list])
    return ' '.join(annotcfg_args)
def reconstruct_test_flags(testres):
    """Rebuild the command line flags (-a / -t / --db) that would
    reproduce this test result."""
    parts = [
        '-a ' + testres.get_annotcfg_args(),
        '-t ' + testres.get_pipecfg_args(),
        '--db ' + testres.ibs.get_dbname(),
    ]
    return ' '.join(parts)
def get_full_cfgstr(testres, cfgx):
    """ both qannots and dannots included """
    qreq_ = testres.cfgx2_qreq_[cfgx]
    return qreq_.get_full_cfgstr()
@ut.memoize
def get_cfgstr(testres, cfgx):
    """ just dannots and config_str """
    qreq_ = testres.cfgx2_qreq_[cfgx]
    return qreq_.get_cfgstr()
def _shorten_lbls(testres, lbl):
    """
    hacky function: applies an ordered table of regex substitutions to
    compress config labels for figures and tables.
    """
    import re
    # (pattern, replacement) pairs applied in order; patterns with a
    # trailing ',?' also consume the separating comma when removed
    repl_list = [
        ('candidacy_', ''),
        ('viewpoint_compare', 'viewpoint'),
        #('custom', 'default'),
        #('fg_on', 'FG'),
        #('fg_on=True', 'FG'),
        #('fg_on=False,?', ''),
        ('fg_on=True', 'FG=True'),
        ('fg_on=False,?', 'FG=False'),
        ('lnbnn_on=True', 'LNBNN'),
        ('lnbnn_on=False,?', ''),
        ('normonly_on=True', 'normonly'),
        ('normonly_on=False,?', ''),
        ('bar_l2_on=True', 'dist'),
        ('bar_l2_on=False,?', ''),
        ('joinme=\d+,?', ''),
        ('dcrossval_enc', 'denc_per_name'),
        ('sv_on', 'SV'),
        ('rotation_invariance', 'RI'),
        ('affine_invariance', 'AI'),
        ('query_rotation_heuristic', 'QRH'),
        ('nNameShortlistSVER', 'nRR'),
        #
        #('sample_per_ref_name', 'per_ref_name'),
        ('sample_per_ref_name', 'per_gt_name'),
        ('require_timestamp=True', 'require_timestamp'),
        ('require_timestamp=False,?', ''),
        ('require_timestamp=None,?', ''),
        ('[_A-Za-z]*=None,?', ''),
        ('dpername=None,?', ''),
        #???
        #('sample_per_ref_name', 'per_gt_name'),
        #('per_name', 'per_gf_name'),  # Try to make labels clearer for paper
        #----
        #('prescore_method=\'?csum\'?,score_method=\'?csum\'?,?', 'amech'),
        #('prescore_method=\'?nsum\'?,score_method=\'?nsum\'?,?', 'fmech'),
        ('prescore_method=\'?csum\'?,score_method=\'?csum\'?,?', 'mech=annot'),
        ('prescore_method=\'?nsum\'?,score_method=\'?nsum\'?,?', 'mech=name'),
        ('force_const_size=[^,]+,?', ''),
        (r'[dq]?_true_size=\d+,?', ''),
        (r'[dq]?_orig_size=[^,]+,?', ''),
        # Hack
        ('[qd]?exclude_reference=' + ut.regex_or(['True', 'False', 'None']) + '\,?', ''),
        #('=True', '=On'),
        #('=False', '=Off'),
        ('=True', '=T'),
        ('=False', '=F'),
        (',$', ''),
    ]
    for ser, rep in repl_list:
        lbl = re.sub(ser, rep, lbl)
    return lbl
def get_short_cfglbls(testres, join_acfgs=False):
    """
    Labels for published tables

    cfg_lbls = ['baseline:nRR=200+default:', 'baseline:+default:']

    CommandLine:
        python -m ibeis --tf TestResult.get_short_cfglbls

    Example:
        >>> # SLOW_DOCTEST
        >>> from ibeis.expt.test_result import *  # NOQA
        >>> import ibeis
        >>> ibs, testres = ibeis.testdata_expts('PZ_MTEST', a=['ctrl:size=10'],
        >>>                                     t=['default:dim_size=[450,550]'])
        >>> cfg_lbls = testres.get_short_cfglbls()
        >>> result = ('cfg_lbls = %s' % (ut.repr2(cfg_lbls),))
        >>> print(result)
        cfg_lbls = [
            'default:dim_size=450+ctrl',
            'default:dim_size=550+ctrl',
        ]
    """
    from ibeis.expt import annotation_configs
    if False:
        # NOTE(review): dead branch kept for reference — computes varied
        # labels grouped by acfg/pcfg hash instead of shortening cfgx2_lbl
        acfg_names = [acfg['qcfg']['_cfgstr'] for acfg in testres.cfgx2_acfg]
        pcfg_names = [pcfg['_cfgstr'] for pcfg in testres.cfgx2_pcfg]
        # Only vary the label settings within the cfgname
        acfg_hashes = np.array(list(map(hash, acfg_names)))
        unique_hashes, a_groupxs = vt.group_indices(acfg_hashes)
        a_label_groups = []
        for groupx in a_groupxs:
            acfg_list = ut.take(testres.cfgx2_acfg, groupx)
            varied_lbls = annotation_configs.get_varied_acfg_labels(
                acfg_list, mainkey='_cfgstr')
            a_label_groups.append(varied_lbls)
        acfg_lbls = vt.invert_apply_grouping(a_label_groups, a_groupxs)
        pcfg_hashes = np.array(list(map(hash, pcfg_names)))
        unique_hashes, p_groupxs = vt.group_indices(pcfg_hashes)
        p_label_groups = []
        for groupx in p_groupxs:
            pcfg_list = ut.take(testres.cfgx2_pcfg, groupx)
            varied_lbls = ut.get_varied_cfg_lbls(pcfg_list, mainkey='_cfgstr')
            p_label_groups.append(varied_lbls)
        pcfg_lbls = vt.invert_apply_grouping(p_label_groups, p_groupxs)
        cfg_lbls = [albl + '+' + plbl for albl, plbl in zip(acfg_lbls, pcfg_lbls)]
    else:
        cfg_lbls_ = testres.cfgx2_lbl[:]
        cfg_lbls_ = [testres._shorten_lbls(lbl) for lbl in cfg_lbls_]
        # split configs up by param and annots
        pa_tups = [lbl.split('+') for lbl in cfg_lbls_]
        cfg_lbls = []
        for pa in pa_tups:
            new_parts = []
            for part in pa:
                # drop the settings suffix when a part has no settings
                _tup = part.split(ut.NAMEVARSEP)
                name, settings = _tup if len(_tup) > 1 else (_tup[0], '')
                new_parts.append(part if settings else name)
            if len(new_parts) == 2 and new_parts[1] == 'default':
                newlbl = new_parts[0]
            else:
                newlbl = '+'.join(new_parts)
            cfg_lbls.append(newlbl)
    if join_acfgs:
        # collapse labels of joined (cross-validation) groups into the
        # parts they all share
        groupxs = testres.get_cfgx_groupxs()
        group_lbls = []
        for group in ut.apply_grouping(cfg_lbls, groupxs):
            num_parts = 0
            part_dicts = []
            for lbl in group:
                parts = []
                for count, pa in enumerate(lbl.split('+')):
                    num_parts = max(num_parts, count + 1)
                    cfgdict = cfghelpers.parse_cfgstr_list2([pa], strict=False)[0][0]
                    parts.append(cfgdict)
                part_dicts.append(parts)
            group_lbl_parts = []
            for px in range(num_parts):
                cfgs = ut.take_column(part_dicts, px)
                nonvaried_cfg = ut.partition_varied_cfg_list(cfgs)[0]
                group_lbl_parts.append(ut.get_cfg_lbl(nonvaried_cfg))
                # print('nonvaried_lbl = %r' % (nonvaried_lbl,))
            group_lbl = '+'.join(group_lbl_parts)
            group_lbls.append(group_lbl)
        cfg_lbls = group_lbls
    return cfg_lbls
def get_varied_labels(testres, shorten=False, join_acfgs=False, sep=''):
    """
    Returns labels indicating only the parameters that have been varied between
    different annot/pipeline configurations.

    Helper for consistent figure titles

    CommandLine:
        python -m ibeis --tf TestResult.make_figtitle --prefix "Seperability " --db GIRM_Master1 -a timectrl -t Ell:K=2 --hargv=scores
        python -m ibeis --tf TestResult.make_figtitle
        python -m ibeis TestResult.get_varied_labels

    Example:
        >>> # SLOW_DOCTEST
        >>> from ibeis.expt.test_result import *  # NOQA
        >>> import ibeis
        >>> ibs, testres = ibeis.testdata_expts(
        >>>     'PZ_MTEST', t='default:K=[1,2]',
        >>>     #a=['timectrl:qsize=[1,2],dsize=[3,4]']
        >>>     a=[
        >>>         'default:qsize=[1,2],dsize=2,joinme=1,view=left',
        >>>         'default:qsize=2,dsize=3,joinme=1,view=primary',
        >>>         'default:qsize=[3,2],dsize=4,joinme=2,view=left',
        >>>         'default:qsize=4,dsize=5,joinme=2,view=primary',
        >>>     ]
        >>> )
        >>> varied_lbls = testres.get_varied_labels(shorten=False, join_acfgs=True)
        >>> result = ('varied_lbls = %s' % (ut.repr2(varied_lbls, strvals=True, nl=2),))
        >>> print(result)

        varied_lbls = [u'K=1+qsize=1', u'K=2+qsize=1', u'K=1+qsize=2', u'K=2+qsize=2']
    """
    from ibeis.expt import annotation_configs
    varied_acfgs = annotation_configs.get_varied_acfg_labels(
        testres.cfgx2_acfg, checkname=True)
    # print('varied_acfgs = %s' % (ut.repr2(varied_acfgs, nl=2),))
    # print('testres.cfgx2_acfg = %s' % (ut.repr3(testres.cfgx2_acfg),))
    varied_pcfgs = ut.get_varied_cfg_lbls(testres.cfgx2_pcfg, checkname=True)
    #varied_acfgs = ut.get_varied_cfg_lbls(testres.cfgx2_acfg, checkname=True)
    name_sep = ':'
    cfg_sep = '+'
    if join_acfgs:
        # Hack for the grouped config problem: summarize each joinme /
        # cross-validation group into a single representative acfg label
        new_varied_acfgs = []
        groupxs = testres.get_cfgx_groupxs()
        grouped_acfgs = ut.apply_grouping(varied_acfgs, groupxs)
        grouped_pcfgs = ut.apply_grouping(varied_pcfgs, groupxs)
        for group in grouped_acfgs:
            group = [p if name_sep in p else name_sep + p for p in group]
            # Re-parse given back into dictionary form
            cfgdicts_ = cfghelpers.parse_cfgstr_list2(group, strict=False)
            # I forget why these are stored in a 2d-list
            cfgdicts = ut.take_column(cfgdicts_, 0)
            new_acfgs = ut.partition_varied_cfg_list(cfgdicts)
            # Hack, just taking the first one that has agreement between
            # joinme / crossvalidation runs
            new_acfg = new_acfgs[0]
            if True:
                # look at internal variance within xval runs and fold it
                # into summary keys (e.g. Σ-qsize, µ-dsize, views)
                internal_cfgs = new_acfgs[1]
                import pandas as pd
                intern_variations = pd.DataFrame.from_dict(internal_cfgs).to_dict(orient='list')
                op_prefixes = {
                    'sum': (np.sum, 'Σ-', ''),
                    'mean': (np.mean, 'µ-', ''),
                    'set': (lambda x: '&'.join(set(map(six.text_type, x))), '', 's'),
                }
                known_modes = {
                    'dsize': 'mean',
                    'qsize': 'sum',
                    'view': 'set',
                }
                for key in intern_variations.keys():
                    if key.startswith('_'):
                        continue
                    mode = known_modes.get(key, None)
                    vals = intern_variations[key]
                    if mode is None:
                        mode = 'set'
                    if key == 'crossval_idx':
                        new_acfg['folds'] = len(intern_variations['crossval_idx'])
                    else:
                        op, pref, suff = op_prefixes[mode]
                        c = op(vals)
                        if isinstance(c, six.string_types):
                            new_acfg[pref + key + suff] = c
                        else:
                            new_acfg[pref + key + suff] = ut.repr2(c, precision=2)
            new_varied_acfgs.append(new_acfg)
        # Do one more dup check to remove the duplicate summaries
        common_new_acfg = ut.partition_varied_cfg_list(new_varied_acfgs)[0]
        for key in common_new_acfg.keys():
            if not key.startswith('_'):
                for new_acfg in new_varied_acfgs:
                    del new_acfg[key]
        varied_pcfgs = ut.take_column(grouped_pcfgs, 0)
        varied_acfgs = [ut.get_cfg_lbl(new_acfg_, with_name=False, sep=sep)
                        for new_acfg_ in new_varied_acfgs]

    def combo_lbls(lbla, lblp):
        # join the two halves, skipping empty / bare ':' labels
        parts = []
        if lbla != name_sep and lbla:
            parts.append(lbla)
        if lblp != name_sep and lblp:
            parts.append(lblp)
        return (sep + cfg_sep).join(parts)
    # NOTE(review): the zip unpacking is deliberately crossed — `lbla`
    # receives the PIPELINE label and `lblp` the ANNOT label, so pipeline
    # params print first (matches doctest: 'K=1+qsize=1')
    varied_lbls = [combo_lbls(lbla, lblp) for lblp, lbla in zip(varied_acfgs, varied_pcfgs)]
    if shorten:
        varied_lbls = [testres._shorten_lbls(lbl) for lbl in varied_lbls]
    return varied_lbls
def get_sorted_config_labels(testres):
    """Short config labels prefixed with their rank-1 percentage,
    ordered best-first."""
    cfgx2_cumhist_percent, edges = testres.get_rank_percentage_cumhist(
        bins='dense', key='qx2_gt_rank')
    # first column of the cumulative histogram = % of queries at rank 1
    rank1_percent = cfgx2_cumhist_percent.T[0]
    label_list = ['%6.2f%% - %s' % (percent, lbl)
                  for percent, lbl in zip(rank1_percent,
                                          testres.get_short_cfglbls())]
    best_first = rank1_percent.argsort()[::-1]
    return ut.take(label_list, best_first)
def make_figtitle(testres, plotname='', filt_cfg=None):
    """
    Helper for consistent figure titles

    CommandLine:
        python -m ibeis --tf TestResult.make_figtitle --prefix "Seperability " --db GIRM_Master1 -a timectrl -t Ell:K=2 --hargv=scores
        python -m ibeis --tf TestResult.make_figtitle

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.expt.test_result import *  # NOQA
        >>> import ibeis
        >>> ibs, testres = ibeis.testdata_expts('PZ_MTEST')
        >>> plotname = ''
        >>> figtitle = testres.make_figtitle(plotname)
        >>> result = ('figtitle = %r' % (figtitle,))
        >>> print(result)
    """
    prefix = ut.get_argval('--prefix', type_=str, default='')
    if prefix != '':
        prefix = prefix.rstrip() + ' '
    figtitle = prefix + plotname
    # (renamed from the misleading `hasprefix`: this is True when there
    # is NO user-supplied prefix)
    no_prefix = prefix == ''
    if no_prefix:
        figtitle += '\n'
    figtitle += ' ' + testres.get_title_aug(friendly=True, with_cfg=no_prefix)
    if filt_cfg is not None:
        filt_cfgstr = ut.get_cfg_lbl(filt_cfg)
        if filt_cfgstr.strip() != ':':
            figtitle += ' ' + filt_cfgstr
    return figtitle
def get_title_aug(testres, with_size=True, with_db=True, with_cfg=True,
                  friendly=False):
    r"""
    Builds a title suffix describing db, configs, and dataset sizes.

    Args:
        with_size (bool): append #qaids/#daids info (default = True)
        with_db (bool): prepend the database name (default = True)
        with_cfg (bool): include annot/pipeline config strings (default = True)
        friendly (bool): replace db codenames with friendly aliases

    Returns:
        str: title_aug

    CommandLine:
        python -m ibeis --tf TestResult.get_title_aug --db PZ_Master1 -a timequalctrl::timectrl

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.test_result import *  # NOQA
        >>> import ibeis
        >>> ibs, testres = ibeis.testdata_expts('PZ_MTEST')
        >>> with_size = True
        >>> title_aug = testres.get_title_aug(with_size)
        >>> res = u'title_aug = %s' % (title_aug,)
        >>> print(res)
    """
    ibs = testres.ibs
    title_aug = ''
    if with_db:
        title_aug += 'db=' + (ibs.get_dbname())
    if with_cfg:
        try:
            if '_cfgname' in testres.common_acfg['common']:
                # NOTE(review): tests for _cfgname but prefers _cfgstr,
                # falling back to the name — looks intentional; confirm
                try:
                    annot_cfgname = testres.common_acfg['common']['_cfgstr']
                except KeyError:
                    annot_cfgname = testres.common_acfg['common']['_cfgname']
            else:
                cfgname_list = [cfg['dcfg__cfgname']
                                for cfg in testres.varied_acfg_list]
                cfgname_list = ut.unique_ordered(cfgname_list)
                annot_cfgname = '[' + ','.join(cfgname_list) + ']'
            try:
                pipeline_cfgname = testres.common_cfgdict['_cfgstr']
            except KeyError:
                #pipeline_cfgname = testres.common_cfgdict['_cfgname']
                cfgstr_list = [cfg['_cfgstr'] for cfg in testres.varied_cfg_list]
                uniuqe_cfgstrs = ut.unique_ordered(cfgstr_list)  # sic: typo'd local name
                pipeline_cfgname = '[' + ','.join(uniuqe_cfgstrs) + ']'
            annot_cfgname = testres._shorten_lbls(annot_cfgname)
            pipeline_cfgname = testres._shorten_lbls(pipeline_cfgname)
            # hack turn these off if too long
            if len(annot_cfgname) < 64:
                title_aug += ' a=' + annot_cfgname
            if len(pipeline_cfgname) < 64:
                title_aug += ' t=' + pipeline_cfgname
        except Exception as ex:
            print(ut.repr2(testres.common_acfg))
            print(ut.repr2(testres.common_cfgdict))
            ut.printex(ex)
            raise
    if with_size:
        if ut.get_argflag('--hack_size_nl'):
            title_aug += '\n'
        if testres.has_constant_qaids():
            title_aug += ' #qaids=%r' % (len(testres.qaids),)
        elif testres.has_constant_length_qaids():
            # same count, different ids: mark with a trailing *
            title_aug += ' #qaids=%r*' % (len(testres.cfgx2_qaids[0]),)
        if testres.has_constant_daids():
            daids = testres.cfgx2_daids[0]
            title_aug += ' #daids=%r' % (len(testres.cfgx2_daids[0]),)
            if testres.has_constant_qaids():
                # also report annots-per-name statistics of the database
                all_daid_per_name_stats = ut.get_stats(
                    ibs.get_num_annots_per_name(daids)[0], use_nan=True)
                if all_daid_per_name_stats['std'] == 0:
                    title_aug += ' dper_name=%s' % (
                        ut.scalar_str(all_daid_per_name_stats['mean'],
                                      max_precision=2),)
                else:
                    title_aug += ' dper_name=%s±%s' % (
                        ut.scalar_str(all_daid_per_name_stats['mean'], precision=2),
                        ut.scalar_str(all_daid_per_name_stats['std'], precision=2),)
        elif testres.has_constant_length_daids():
            daids = testres.cfgx2_daids[0]
            title_aug += ' #daids=%r*' % (len(testres.cfgx2_daids[0]),)
    if friendly:
        # Hackiness for friendliness
        #title_aug = title_aug.replace('db=PZ_Master1', 'Plains Zebras')
        #title_aug = title_aug.replace('db=NNP_MasterGIRM_core', 'Masai Giraffes')
        #title_aug = title_aug.replace('db=GZ_ALL', 'Grevy\'s Zebras')
        title_aug = ut.multi_replace(
            title_aug,
            list(ibs.const.DBNAME_ALIAS.keys()),
            list(ibs.const.DBNAME_ALIAS.values()))
    return title_aug
def get_fname_aug(testres, **kwargs):
    """Filesystem-safe version of the title augmentation string."""
    import re
    title_aug = testres.get_title_aug(**kwargs)
    # characters allowed to survive in a filename
    allowed_chars = '-a-zA-Z0-9_.() ' + '=,'
    safe = title_aug.replace(' ', '_')  # spaces suck
    safe = re.sub('[^' + allowed_chars + ']+', '', safe)
    return safe.strip('_')
def print_pcfg_info(testres):
    """
    Prints verbose information about each pipeline configuration.

    Delegates to experiment_helpers.print_pipe_configs; the commented
    code below is an older inline implementation kept for reference.

    >>> from ibeis.expt.test_result import *  # NOQA
    """
    # TODO: Rectify with other printers
    # for pcfgx, (pipecfg, lbl) in enumerate(zip(pipecfg_list, pipecfg_lbls)):
    #     print('+--- %d / %d ===' % (pcfgx, (len(pipecfg_list))))
    #     ut.colorprint(lbl, 'white')
    #     print(pipecfg.get_cfgstr())
    #     print('L___')
    #     for qreq_ in testres.cfgx2_qreq_:
    #         print(qreq_.get_full_cfgstr())
    # cfgdict_list = [qreq_.qparams for qreq_ in testres.cfgx2_qreq_]
    experiment_helpers.print_pipe_configs(testres.cfgx2_pcfg, testres.cfgx2_qreq_)
def print_acfg_info(testres, **kwargs):
    """
    Prints verbose information about the annotations used in each test
    configuration

    CommandLine:
        python -m ibeis --tf TestResult.print_acfg_info

    Kwargs:
        see ibs.get_annot_stats_dict
        hashid, per_name, per_qual, per_vp, per_name_vpedge, per_image,
        min_name_hourdist

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.test_result import *  # NOQA
        >>> import ibeis
        >>> ibs, testres = ibeis.testdata_expts('PZ_MTEST',
        >>>                                     a=['ctrl::unctrl_comp'],
        >>>                                     t=['candk:K=[1,2]'])
        >>> ibs = None
        >>> result = testres.print_acfg_info()
        >>> print(result)
    """
    from ibeis.expt import annotation_configs
    ibs = testres.ibs
    # Deduplicate annot configs by their varied labels before printing
    acfg_labels = annotation_configs.get_varied_acfg_labels(testres.cfgx2_acfg)
    unique_flags = ut.flag_unique_items(acfg_labels)
    unique_qreqs = ut.compress(testres.cfgx2_qreq_, unique_flags)
    unique_acfgs = ut.compress(testres.cfgx2_acfg, unique_flags)
    expanded_aids_list = [(qreq_.qaids, qreq_.daids)
                          for qreq_ in unique_qreqs]
    annotation_configs.print_acfg_list(
        unique_acfgs, expanded_aids_list, ibs, **kwargs)
def print_unique_annot_config_stats(testres, ibs=None):
    r"""
    Prints annotation statistics for each unique database-annot set.

    Args:
        ibs (IBEISController):  ibeis controller object(default = None)

    CommandLine:
        python -m ibeis TestResult.print_unique_annot_config_stats

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.test_result import *  # NOQA
        >>> import ibeis
        >>> testres = ibeis.testdata_expts('PZ_MTEST', a=['ctrl::unctrl_comp'])
        >>> ibs = None
        >>> result = testres.print_unique_annot_config_stats(ibs)
        >>> print(result)
    """
    if ibs is None:
        ibs = testres.ibs
    # deduplicate the per-config daids via their visual-uuid hash
    cfx2_dannot_hashid = [ibs.get_annot_hashid_visual_uuid(daids)
                          for daids in testres.cfgx2_daids]
    unique_daids = ut.compress(testres.cfgx2_daids,
                               ut.flag_unique_items(cfx2_dannot_hashid))
    with ut.Indenter('[acfgstats]'):
        print('+====')
        print('Printing %d unique annotconfig stats' % (len(unique_daids)))
        common_acfg = testres.common_acfg
        common_acfg['common'] = ut.dict_filter_nones(common_acfg['common'])
        print('testres.common_acfg = ' + ut.repr2(common_acfg))
        print('param_basis(len(daids)) = %r' % (
            testres.get_param_basis('len(daids)'),))
        for count, daids in enumerate(unique_daids):
            print('+---')
            print('acfgx = %r/%r' % (count, len(unique_daids)))
            if testres.has_constant_qaids():
                ibs.print_annotconfig_stats(testres.qaids, daids)
            else:
                ibs.print_annot_stats(daids, prefix='d')
            print('L___')
def report(testres):
    # Convenience alias: print the full results table for this test run.
    testres.print_results()
def print_results(testres, **kwargs):
    r"""
    Prints the full experiment results table via ``experiment_printres``.

    Args:
        **kwargs: forwarded to ``experiment_printres.print_results``

    CommandLine:
        python -m ibeis --tf TestResult.print_results

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.test_result import * # NOQA
        >>> from ibeis.expt import harness
        >>> ibs, testres = harness.testdata_expts('PZ_MTEST')
        >>> result = testres.print_results()
        >>> print(result)
    """
    from ibeis.expt import experiment_printres
    ibs = testres.ibs
    experiment_printres.print_results(ibs, testres, **kwargs)
def get_common_qaids(testres):
    """Return the query aids shared by every config in this test run."""
    if testres.has_constant_qaids():
        # Every config already queried the same aids.
        return testres.qaids
    # Otherwise restrict to the intersection of all per-config query sets.
    return reduce(np.intersect1d, testres.cfgx2_qaids)
def get_all_qaids(testres):
    """Return every query aid used by any config, deduplicated, as an array."""
    flat_qaids = ut.flatten(testres.cfgx2_qaids)
    return np.array(ut.unique(flat_qaids))
def get_test_qaids(testres):
    # Transition function: currently reports ALL query aids, not just the
    # ones common to every config (older behavior kept commented below).
    return testres.get_all_qaids()
    # return testres.get_common_qaids()
    # all_qaids = ut.unique(ut.flatten(testres.cfgx2_qaids))
    # return all_qaids
def get_all_tags(testres):
    r"""
    Returns the combined groundfalse + groundtruth annotmatch tags for each
    test case.

    CommandLine:
        python -m ibeis --tf TestResult.get_all_tags --db PZ_Master1 --show --filt :
        python -m ibeis --tf TestResult.get_all_tags --db PZ_Master1 --show --filt :min_gf_timedelta=24h
        python -m ibeis --tf TestResult.get_all_tags --db PZ_Master1 --show --filt :min_gf_timedelta=24h,max_gt_rank=5

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.test_result import * # NOQA
        >>> from ibeis.init import main_helpers
        >>> ibs, testres = main_helpers.testdata_expts('PZ_Master1', a=['timectrl'])
        >>> filt_cfg = main_helpers.testdata_filtcfg()
        >>> case_pos_list = testres.case_sample2(filt_cfg)
        >>> all_tags = testres.get_all_tags()
        >>> selected_tags = ut.take(all_tags, case_pos_list.T[0])
        >>> flat_tags = list(map(str, ut.flatten(ut.flatten(selected_tags))))
        >>> print(ut.repr2(ut.dict_hist(flat_tags), key_order_metric='val'))
        >>> ut.quit_if_noshow()
        >>> import plottool_ibeis as pt
        >>> pt.word_histogram2(flat_tags, fnum=1, pnum=(1, 2, 1))
        >>> pt.wordcloud(' '.join(flat_tags), fnum=1, pnum=(1, 2, 2))
        >>> pt.set_figtitle(ut.get_cfg_lbl(filt_cfg))
        >>> ut.show_if_requested()
    """
    gt_tags = testres.get_gt_tags()
    gf_tags = testres.get_gf_tags()
    # Interleave the gf and gt tags for each corresponding case
    all_tags = [ut.list_zipflatten(*item) for item in zip(gf_tags, gt_tags)]
    return all_tags
def get_gf_tags(testres):
    r"""
    Returns the annotmatch case tags attached to each groundfalse match.

    Returns:
        list: case_pos_list

    CommandLine:
        python -m ibeis --tf TestResult.get_gf_tags --db PZ_Master1 --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.test_result import * # NOQA
        >>> from ibeis.init import main_helpers
        >>> ibs, testres = main_helpers.testdata_expts('PZ_Master1', a=['timectrl'])
        >>> filt_cfg = main_helpers.testdata_filtcfg()
        >>> case_pos_list = testres.case_sample2(filt_cfg)
        >>> gf_tags = testres.get_gf_tags()
    """
    ibs = testres.ibs
    truth2_prop, prop2_mat = testres.get_truth2_prop()
    # Annotmatch rowids for the top groundfalse match of each case
    gf_annotmatch_rowids = truth2_prop['gf']['annotmatch_rowid']
    gf_tags = ibs.unflat_map(ibs.get_annotmatch_case_tags, gf_annotmatch_rowids)
    return gf_tags
def get_gt_tags(testres):
    """Return the annotmatch case tags attached to each groundtruth match."""
    ibs = testres.ibs
    truth2_prop = testres.get_truth2_prop()[0]
    rowids = truth2_prop['gt']['annotmatch_rowid']
    return ibs.unflat_map(ibs.get_annotmatch_case_tags, rowids)
def get_gt_annot_tags(testres):
    """Return the per-annot case tags of each groundtruth annotation."""
    ibs = testres.ibs
    truth2_prop = testres.get_truth2_prop()[0]
    gt_aids = truth2_prop['gt']['aid']
    return ibs.unflat_map(ibs.get_annot_case_tags, gt_aids)
def get_query_annot_tags(testres):
    """Return annot case tags for each query, repeated once per config column."""
    # FIXME: will break with new config structure
    ibs = testres.ibs
    truth2_prop, prop2_mat = testres.get_truth2_prop()
    num_cfgs = len(testres.cfgx2_qaids)
    unflat_qids = np.tile(testres.qaids[:, None], (num_cfgs))
    return ibs.unflat_map(ibs.get_annot_case_tags, unflat_qids)
def get_gtquery_annot_tags(testres):
    """Return the merged query-annot + groundtruth-annot tags per case."""
    gt_tags = testres.get_gt_annot_tags()
    query_tags = testres.get_query_annot_tags()
    both_tags = [
        [ut.flatten(pair) for pair in zip(*item)]
        for item in zip(query_tags, gt_tags)
    ]
    return both_tags
def case_sample2(testres, filt_cfg, qaids=None, return_mask=False, verbose=None):
    r"""
    Filters individual test result cases based on how they performed, what
    tags they had, and various other things.

    Args:
        filt_cfg (dict or str or list): filter rules, given directly as a
            dict or as a cfgstr that is parsed into one.
        qaids (list): query aids to consider (default: all test qaids)
        return_mask (bool): if True, return the 2d validity mask instead of
            index pairs.
        verbose (bool): verbosity flag (default: ut.NOT_QUIET)

    Returns:
        list: case_pos_list (list of (qx, cfgx)) or isvalid mask

    CommandLine:
        python -m ibeis TestResult.case_sample2
        python -m ibeis TestResult.case_sample2:0
        python -m ibeis TestResult.case_sample2:1 --db GZ_ALL --filt :min_tags=1
        python -m ibeis TestResult.case_sample2:1 --db PZ_Master1 --filt :min_gf_tags=1
        python -m ibeis TestResult.case_sample2:2 --db PZ_Master1

    Example0:
        >>> # ENABLE_DOCTEST
        >>> # The same results is achievable with different filter config settings
        >>> from ibeis.expt.test_result import * # NOQA
        >>> from ibeis.init import main_helpers
        >>> verbose = True
        >>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'])
        >>> filt_cfg1 = {'fail': True}
        >>> case_pos_list1 = testres.case_sample2(filt_cfg1)
        >>> filt_cfg2 = {'min_gtrank': 1}
        >>> case_pos_list2 = testres.case_sample2(filt_cfg2)
        >>> filt_cfg3 = {'min_gtrank': 0}
        >>> case_pos_list3 = testres.case_sample2(filt_cfg3)
        >>> filt_cfg4 = {}
        >>> case_pos_list4 = testres.case_sample2(filt_cfg4)
        >>> assert np.all(case_pos_list1 == case_pos_list2), 'should be equiv configs'
        >>> assert np.any(case_pos_list2 != case_pos_list3), 'should be diff configs'
        >>> assert np.all(case_pos_list3 == case_pos_list4), 'should be equiv configs'
        >>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'], t=['default:sv_on=[True,False]'])
        >>> filt_cfg5 = filt_cfg1.copy()
        >>> mask5 = testres.case_sample2(filt_cfg5, return_mask=True)
        >>> case_pos_list5 = testres.case_sample2(filt_cfg5, return_mask=False)
        >>> assert len(mask5.shape) == 2
        >>> assert not np.all(mask5.T[0] == mask5.T[1])
        >>> filt_cfg6 = {'fail': True, 'allcfg': True}
        >>> mask6 = testres.case_sample2(filt_cfg6, return_mask=True)
        >>> assert np.all(mask6.T[0] == mask6.T[1])
        >>> print(mask5)
        >>> print(case_pos_list5)
        >>> filt_cfg = filt_cfg7 = {'disagree': True}
        >>> case_pos_list7 = testres.case_sample2(filt_cfg7, verbose=verbose)
        >>> print(case_pos_list7)

    Example1:
        >>> # SCRIPT
        >>> from ibeis.expt.test_result import * # NOQA
        >>> from ibeis.init import main_helpers
        >>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'])
        >>> filt_cfg = main_helpers.testdata_filtcfg()
        >>> case_pos_list = testres.case_sample2(filt_cfg)
        >>> result = ('case_pos_list = %s' % (str(case_pos_list),))
        >>> print(result)
        >>> # Extra stuff
        >>> all_tags = testres.get_all_tags()
        >>> selcted_tags = ut.take(all_tags, case_pos_list.T[0])
        >>> print('selcted_tags = %r' % (selcted_tags,))

    Example1:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.test_result import * # NOQA
        >>> from ibeis.init import main_helpers
        >>> ibs, testres = main_helpers.testdata_expts('PZ_MTEST', a=['ctrl'], t=['default:K=[1,2,3]'])
        >>> ut.exec_funckw(testres.case_sample2, globals())
        >>> filt_cfg = {'fail': True, 'min_gtrank': 1, 'max_gtrank': None, 'min_gf_timedelta': '24h'}
        >>> ibs, testres = main_helpers.testdata_expts('humpbacks_fb', a=['default:has_any=hasnotch,mingt=2,qindex=0:300,dindex=0:300'], t=['default:proot=BC_DTW,decision=max,crop_dim_size=500,crop_enabled=True,manual_extract=False,use_te_scorer=True,ignore_notch=True,te_net=annot_simple', 'default:proot=vsmany'], qaid_override=[12])
        >>> filt_cfg = ':disagree=True,index=0:8,min_gtscore=.00001,require_all_cfg=True'
        >>> #filt_cfg = cfghelpers.parse_argv_cfg('--filt')[0]
        >>> case_pos_list = testres.case_sample2(filt_cfg, verbose=True)
        >>> result = ('case_pos_list = %s' % (str(case_pos_list),))
        >>> print(result)
        >>> # Extra stuff
        >>> all_tags = testres.get_all_tags()
        >>> selcted_tags = ut.take(all_tags, case_pos_list.T[0])
        >>> print('selcted_tags = %r' % (selcted_tags,))

        print('qaid = %r' % (qaid,))
        print('qx = %r' % (qx,))
        print('cfgxs = %r' % (cfgxs,))
        # print testres info about this item
        take_cfgs = ut.partial(ut.take, index_list=cfgxs)
        take_qx = ut.partial(ut.take, index_list=qx)
        truth_cfgs = ut.hmap_vals(take_qx, truth2_prop)
        truth_item = ut.hmap_vals(take_cfgs, truth_cfgs, max_depth=1)
        prop_cfgs = ut.hmap_vals(take_qx, prop2_mat)
        prop_item = ut.hmap_vals(take_cfgs, prop_cfgs, max_depth=0)
        print('truth2_prop[item] = ' + ut.repr3(truth_item, nl=2))
        print('prop2_mat[item] = ' + ut.repr3(prop_item, nl=1))
    """
    from ibeis.expt import cfghelpers
    if verbose is None:
        verbose = ut.NOT_QUIET
    if verbose:
        print('[testres] case_sample2')
    # Normalize filt_cfg (str / list-of-str / dict / None) into a single dict
    if isinstance(filt_cfg, six.string_types):
        filt_cfg = [filt_cfg]
    if isinstance(filt_cfg, list):
        _combos = cfghelpers.parse_cfgstr_list2(filt_cfg, strict=False)
        filt_cfg = ut.flatten(_combos)[0]
    if isinstance(filt_cfg, six.string_types):
        _combos = cfghelpers.parse_cfgstr_list2([filt_cfg], strict=False)
        filt_cfg = ut.flatten(_combos)[0]
    if filt_cfg is None:
        filt_cfg = {}
    qaids = testres.get_test_qaids() if qaids is None else qaids
    truth2_prop, prop2_mat = testres.get_truth2_prop(qaids)
    ibs = testres.ibs
    # Initialize isvalid flags to all true
    # np.ones(prop2_mat['is_success'].shape, dtype=np.bool)
    participates = prop2_mat['participates']
    is_valid = participates.copy()

    def unflat_tag_filterflags(tags_list, **kwargs):
        # Flatten the nested tag lists, filter, then restore the nesting
        from ibeis import tag_funcs
        flat_tags, cumsum = ut.invertible_flatten2(tags_list)
        flat_flags = tag_funcs.filterflags_general_tags(flat_tags, **kwargs)
        flags = np.array(ut.unflatten2(flat_flags, cumsum))
        return flags
    UTFF = unflat_tag_filterflags

    def cols_disagree(mat, val):
        """
        Flags rows whose boolean columns do not all agree.

        is_success = prop2_mat['is_success']
        """
        nCols = mat.shape[1]
        sums = mat.sum(axis=1)
        # Find out which rows have different values
        disagree_flags1d = np.logical_and(sums > 0, sums < nCols)
        disagree_flags2d = np.tile(disagree_flags1d[:, None], (1, nCols))
        if not val:
            # User asked for rows that agree
            flags = np.logical_not(disagree_flags2d)
        else:
            flags = disagree_flags2d
        return flags

    def cfg_scoresep(mat, val, op):
        """
        Compares scores between different configs

        op = operator.ge
        is_success = prop2_mat['is_success']
        """
        #import scipy.spatial.distance as spdist
        nCols = mat.shape[1]
        pdistx = vt.pdist_indicies(nCols)
        pdist_list = np.array([vt.safe_pdist(row) for row in mat])
        flags_list = op(pdist_list, val)
        colx_list = [np.unique(ut.flatten(ut.compress(pdistx, flags))) for flags in flags_list]
        offsets = np.arange(0, nCols * len(mat), step=nCols)
        idx_list = ut.flatten([colx + offset for colx, offset in zip(colx_list, offsets)])
        mask = vt.index_to_boolmask(idx_list, maxval=offsets[-1] + nCols)
        flags = mask.reshape(mat.shape)
        return flags

    # List of rules that can filter results
    rule_list = [
        ('disagree', lambda val: cols_disagree(prop2_mat['is_failure'], val)),
        ('min_gt_cfg_scoresep', lambda val: cfg_scoresep(truth2_prop['gt']['score'], val, operator.ge)),
        ('fail', prop2_mat['is_failure']),
        ('success', prop2_mat['is_success']),
        ('min_gtrank', partial(operator.ge, truth2_prop['gt']['rank'])),
        ('max_gtrank', partial(operator.le, truth2_prop['gt']['rank'])),
        ('max_gtscore', partial(operator.le, truth2_prop['gt']['score'])),
        ('min_gtscore', partial(operator.ge, truth2_prop['gt']['score'])),
        ('min_gf_timedelta', partial(operator.ge, truth2_prop['gf']['timedelta'])),
        ('max_gf_timedelta', partial(operator.le, truth2_prop['gf']['timedelta'])),
        # Tag filtering
        # FIXME: will break with new config structure
        ('min_tags', lambda val: UTFF(testres.get_all_tags(), min_num=val)),
        ('max_tags', lambda val: UTFF(testres.get_all_tags(), max_num=val)),
        ('min_gf_tags', lambda val: UTFF(testres.get_gf_tags(), min_num=val)),
        ('max_gf_tags', lambda val: UTFF(testres.get_gf_tags(), max_num=val)),
        ('min_gt_tags', lambda val: UTFF(testres.get_gt_tags(), min_num=val)),
        ('max_gt_tags', lambda val: UTFF(testres.get_gt_tags(), max_num=val)),
        ('min_query_annot_tags', lambda val: UTFF(testres.get_query_annot_tags(), min_num=val)),
        ('min_gt_annot_tags', lambda val: UTFF(testres.get_gt_annot_tags(), min_num=val)),
        ('min_gtq_tags', lambda val: UTFF(testres.get_gtquery_annot_tags(), min_num=val)),
        ('max_gtq_tags', lambda val: UTFF(testres.get_gtquery_annot_tags(), max_num=val)),
        ('without_gf_tag', lambda val: UTFF(testres.get_gf_tags(), has_none=val)),
        ('without_gt_tag', lambda val: UTFF(testres.get_gt_tags(), has_none=val)),
        ('with_gf_tag', lambda val: UTFF(testres.get_gf_tags(), has_any=val)),
        ('with_gt_tag', lambda val: UTFF(testres.get_gt_tags(), has_any=val)),
        ('with_tag', lambda val: UTFF(testres.get_all_tags(), has_any=val)),
        ('without_tag', lambda val: UTFF(testres.get_all_tags(), has_none=val)),
    ]
    rule_dict = ut.odict(rule_list)
    # Short aliases for the timedelta rules
    rule_list.append(('max_gf_td', rule_dict['max_gf_timedelta']))
    rule_list.append(('min_gf_td', rule_dict['min_gf_timedelta']))
    filt_cfg_ = copy.deepcopy(filt_cfg)
    # hack to convert to seconds
    for tdkey in filt_cfg_.keys():
        #timedelta_keys = ['min_gf_timedelta', 'max_gf_timedelta']
        #for tdkey in timedelta_keys:
        if tdkey.endswith('_timedelta'):
            filt_cfg_[tdkey] = ut.ensure_timedelta(filt_cfg_[tdkey])

    class VerbFilterInfo(object):
        """ Helper to report how many cases each filter rule invalidates. """
        def __init__(self):
            self.prev_num_valid = None

        def print_pre(self, is_valid, filt_cfg_):
            num_valid = is_valid.sum()
            print('[testres] Sampling from is_valid.size=%r with filt=%r' %
                  (is_valid.size, ut.get_cfg_lbl(filt_cfg_)))
            print(' * is_valid.shape = %r' % (is_valid.shape,))
            print(' * num_valid = %r' % (num_valid,))
            self.prev_num_valid = num_valid

        def print_post(self, is_valid, flags, msg):
            if flags is not None:
                num_passed = flags.sum()
            num_valid = is_valid.sum()
            num_invalidated = self.prev_num_valid - num_valid
            print(msg)
            if num_invalidated == 0:
                if flags is not None:
                    print(' * num_passed = %r' % (num_passed,))
                print(' * num_invalided = %r' % (num_invalidated,))
            else:
                print(' * prev_num_valid = %r' % (self.prev_num_valid,))
                print(' * num_valid = %r' % (num_valid,))
                #print(' * is_valid.shape = %r' % (is_valid.shape,))
            self.prev_num_valid = num_valid

    verbinfo = VerbFilterInfo()
    if verbose:
        verbinfo.print_pre(is_valid, filt_cfg_)
    # Pop irrelevant info
    ut.delete_keys(filt_cfg_, ['_cfgstr', '_cfgindex', '_cfgname', '_cfgtype'])
    # Pop other non-rule config options
    valid_rules = []

    def poprule(rulename, default):
        # register other rule names for debuging
        valid_rules.append(rulename)
        return filt_cfg_.pop(rulename, default)

    allcfg = poprule('allcfg', None)
    orderby = poprule('orderby', None)
    reverse = poprule('reverse', None)
    sortasc = poprule('sortasc', None)
    sortdsc = poprule('sortdsc', poprule('sortdesc', None))
    max_pername = poprule('max_pername', None)
    require_all_cfg = poprule('require_all_cfg', None)
    index = poprule('index', None)
    # Pop all chosen rules
    rule_value_list = [poprule(key, None) for key, rule in rule_list]
    # Assert that only valid configurations were given
    if len(filt_cfg_) > 0:
        print('ERROR')
        print('filtcfg valid rules are = %s' % (ut.repr2(valid_rules, nl=1),))
        for key in filt_cfg_.keys():
            print('did you mean %r instead of %r?' % (ut.closet_words(key, valid_rules)[0], key))
        raise NotImplementedError('Unhandled filt_cfg.keys() = %r' % (filt_cfg_.keys()))
    # Remove test cases that do not satisfy chosen rules
    chosen_rule_idxs = ut.where([val is not None for val in rule_value_list])
    chosen_rules = ut.take(rule_list, chosen_rule_idxs)
    chosen_vals = ut.take(rule_value_list, chosen_rule_idxs)
    for (key, rule), val in zip(chosen_rules, chosen_vals):
        if isinstance(rule, np.ndarray):
            # When a rule is an ndarray it must have boolean values
            flags = rule == val
        else:
            flags = rule(val)
        # HACK: flags are forced to be false for non-participating cases
        flags = np.logical_and(flags, participates)
        # conjunctive normal form of satisfiability
        is_valid = np.logical_and(is_valid, flags)
        if verbose:
            verbinfo.print_post(is_valid, flags, 'SampleRule: %s = %r' % (key, val))
    # HACK:
    # If one config for a row passes the filter then all configs should pass
    if allcfg:
        is_valid = np.logical_or(np.logical_or.reduce(is_valid.T)[:, None], is_valid)
        is_valid = np.logical_and(is_valid, participates)

    qx_list, cfgx_list = np.nonzero(is_valid)

    # Determine a good ordering of the test cases
    if sortdsc is not None:
        assert orderby is None, 'use orderby or sortasc'
        assert reverse is None, 'reverse does not work with sortdsc'
        orderby = sortdsc
        reverse = True
    elif sortasc is not None:
        assert reverse is None, 'reverse does not work with sortasc'
        assert orderby is None, 'use orderby or sortasc'
        orderby = sortasc
        reverse = False
    else:
        reverse = False
    if orderby is not None:
        #if orderby == 'gtscore':
        #    order_values = truth2_prop['gt']['score']
        #elif orderby == 'gfscore':
        #    order_values = truth2_prop['gf']['score']
        #else:
        import re
        order_values = None
        # orderby keys look like 'gt_<prop>' or 'gf_<prop>'
        for prefix_pattern in ['^gt_?', '^gf_?']:
            prefix_match = re.match(prefix_pattern, orderby)
            if prefix_match is not None:
                truth = prefix_pattern[1:3]
                propname = orderby[prefix_match.end():]
                if verbose:
                    print('Ordering by truth=%s propname=%s' % (truth, propname))
                order_values = truth2_prop[truth][propname]
                break
        if order_values is None:
            # BUGFIX: message previously misspelled 'orderby' as 'orerby'
            raise NotImplementedError('Unknown orderby=%r' % (orderby,))
    else:
        order_values = np.arange(is_valid.size).reshape(is_valid.shape)
    # Convert mask into indicies
    flat_order = order_values[is_valid]
    # Flat sorting indeices in a matrix
    if verbose:
        # BUGFIX: the inner condition previously re-tested `verbose`, which
        # made the ascending branch unreachable; it must test `reverse`.
        if reverse:
            print('Reversing ordering (descending)')
        else:
            print('Normal ordering (ascending)')
    if reverse:
        sortx = flat_order.argsort()[::-1]
    else:
        sortx = flat_order.argsort()
    qx_list = qx_list.take(sortx, axis=0)
    cfgx_list = cfgx_list.take(sortx, axis=0)

    # Return at most ``max_pername`` annotation examples per name
    if max_pername is not None:
        if verbose:
            print('Returning at most %d cases per name ' % (max_pername,))
        # FIXME: multiple configs
        _qaid_list = np.take(qaids, qx_list)
        _qnid_list = ibs.get_annot_nids(_qaid_list)
        _valid_idxs = []
        seen_ = ut.ddict(lambda: 0)
        for idx, _qnid in enumerate(_qnid_list):
            if seen_[_qnid] < max_pername:
                seen_[_qnid] += 1
                _valid_idxs.append(idx)
        _qx_list = qx_list[_valid_idxs]
        _cfgx_list = cfgx_list[_valid_idxs]
        _valid_index = np.vstack((_qx_list, _cfgx_list)).T
        is_valid = vt.index_to_boolmask(_valid_index, is_valid.shape, isflat=False)
        qx_list = _qx_list
        cfgx_list = _cfgx_list

    if require_all_cfg:
        if verbose:
            prev_num_valid = is_valid.sum()
            print('Enforcing that all configs must pass filters')
            print(' * prev_num_valid = %r' % (prev_num_valid,))
        qx2_valid_cfgs = ut.group_items(cfgx_list, qx_list)
        hasall_cfg = [len(qx2_valid_cfgs[qx]) == testres.nConfig for qx in qx_list]
        _qx_list = qx_list.compress(hasall_cfg)
        _cfgx_list = cfgx_list.compress(hasall_cfg)
        _valid_index = np.vstack((_qx_list, _cfgx_list)).T
        is_valid = vt.index_to_boolmask(_valid_index, is_valid.shape, isflat=False)
        qx_list = _qx_list
        cfgx_list = _cfgx_list
        if verbose:
            verbinfo.print_post(is_valid, None,
                                'Enforcing that all configs must pass filters')

    if index is not None:
        if isinstance(index, six.string_types):
            index = ut.smart_cast(index, slice)
        _qx_list = ut.take(qx_list, index)
        _cfgx_list = ut.take(cfgx_list, index)
        _valid_index = np.vstack((_qx_list, _cfgx_list)).T
        is_valid = vt.index_to_boolmask(_valid_index, is_valid.shape, isflat=False)
        qx_list = _qx_list
        cfgx_list = _cfgx_list
        if verbose:
            verbinfo.print_post(
                is_valid, None,
                'Taking index=%r sample from len(qx_list) = %r' % (
                    index, len(qx_list),))

    if not return_mask:
        case_pos_list = np.vstack((qx_list, cfgx_list)).T
        case_identifier = case_pos_list
    else:
        if verbose:
            print('Converting cases indicies to a 2d-mask')
        case_identifier = is_valid
    if verbose:
        print('Finished case filtering')
        print('Final case stats:')
        qx_hist = ut.dict_hist(qx_list)
        print('config per query stats: %r' % (ut.get_stats_str(qx_hist.values()),))
        print('query per config stats: %r' % (ut.get_stats_str(ut.dict_hist(cfgx_list).values()),))
    return case_identifier
def get_truth2_prop(testres, qaids=None, join_acfg=False):
    r"""
    Builds per-case property matrices for the groundtruth (gt) and
    groundfalse (gf) matches of every query under every config.

    Args:
        qaids (list): query aids to use (default: all test qaids)
        join_acfg (bool): if True, merge per-config columns by annot-config
            group (columns from the same group collapse into one)

    Returns:
        tuple: (truth2_prop, prop2_mat)
            ``truth2_prop`` maps 'gt'/'gf' to dicts of (nQaids x nConfig)
            matrices (aid, rank, score, timedelta, annotmatch_rowid);
            ``prop2_mat`` holds the boolean matrices is_success, is_failure
            and participates.

    CommandLine:
        python -m ibeis.expt.test_result --exec-get_truth2_prop --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> from ibeis.expt.test_result import * # NOQA
        >>> import ibeis
        >>> ibs, testres = ibeis.testdata_expts('PZ_MTEST', a=['ctrl'])
        >>> (truth2_prop, prop2_mat) = testres.get_truth2_prop()
        >>> result = '(truth2_prop, prop2_mat) = %s' % str((truth2_prop, prop2_mat))
        >>> print(result)
        >>> ut.quit_if_noshow()
        >>> import plottool_ibeis as pt
        >>> ut.show_if_requested()
    """
    ibs = testres.ibs
    test_qaids = testres.get_test_qaids() if qaids is None else qaids
    #test_qaids = ut.random_sample(test_qaids, 20)
    truth2_prop = ut.ddict(ut.odict)
    # TODO: have this function take in a case_pos_list as input instead
    participates = testres.get_infoprop_mat('participant', test_qaids)
    truth2_prop['gt']['aid'] = testres.get_infoprop_mat('qx2_gt_aid', test_qaids)
    truth2_prop['gf']['aid'] = testres.get_infoprop_mat('qx2_gf_aid', test_qaids)
    truth2_prop['gt']['rank'] = testres.get_infoprop_mat('qx2_gt_rank', test_qaids)
    truth2_prop['gf']['rank'] = testres.get_infoprop_mat('qx2_gf_rank', test_qaids)
    truth2_prop['gt']['score'] = testres.get_infoprop_mat(
        'qx2_gt_raw_score', test_qaids)
    truth2_prop['gf']['score'] = testres.get_infoprop_mat(
        'qx2_gf_raw_score', test_qaids)
    # Missing scores become 0 so score comparisons are well defined
    truth2_prop['gt']['score'] = np.nan_to_num(truth2_prop['gt']['score'])
    truth2_prop['gf']['score'] = np.nan_to_num(truth2_prop['gf']['score'])
    # Cast nans to ints (that are participants)
    # if False:
    for truth in ['gt', 'gf']:
        rank_mat = truth2_prop[truth]['rank']
        # A nan rank for a participating case means the match was never
        # found; substitute the worst possible rank so comparisons work.
        flags = np.logical_and(np.isnan(rank_mat), participates)
        rank_mat[flags] = testres.get_worst_possible_rank()
        # truth2_prop[truth]['rank'] = rank_mat.astype(np.int)
    # Success means groundtruth was returned at rank 0
    is_success = truth2_prop['gt']['rank'] == 0
    is_failure = np.logical_not(is_success)
    # THIS IS NOT THE CASE IF THERE ARE UNKNOWN INDIVIDUALS IN THE DATABASE
    assert np.all(is_success == (truth2_prop['gt']['rank'] == 0))
    # WEIRD THINGS HAPPEN WHEN UNKNOWNS ARE HERE
    #hardness_degree_rank[is_success]
    # These probably just completely failure spatial verification
    #is_weird = hardness_degree_rank == 0
    # Get timedelta and annotmatch rowid
    for truth in ['gt', 'gf']:
        aid_mat = truth2_prop[truth]['aid']
        timedelta_mat = np.vstack([
            ibs.get_annot_pair_timedelta(test_qaids, aids)
            for aids in aid_mat.T
        ]).T
        annotmatch_rowid_mat = np.vstack([
            ibs.get_annotmatch_rowid_from_undirected_superkey(test_qaids, aids)
            for aids in aid_mat.T
        ]).T
        truth2_prop[truth]['annotmatch_rowid'] = annotmatch_rowid_mat
        truth2_prop[truth]['timedelta'] = timedelta_mat
    prop2_mat = {}
    prop2_mat['is_success'] = is_success
    prop2_mat['is_failure'] = is_failure
    prop2_mat['participates'] = participates
    groupxs = testres.get_cfgx_groupxs()

    def group_prop(val, grouped_flags, groupxs):
        # Merge per-config columns into per-group columns, taking each
        # case's value from whichever column it participated in.
        nRows = len(val)
        # Allocate space for new val
        new_shape = (nRows, len(groupxs))
        if val.dtype == object or val.dtype.type == object:
            new_val = np.full(new_shape, None, dtype=val.dtype)
        elif ut.is_float(val):
            new_val = np.full(new_shape, np.nan, dtype=val.dtype)
        else:
            new_val = np.zeros(new_shape, dtype=val.dtype)
        # Populate new val
        grouped_vals = vt.apply_grouping(val.T, groupxs)
        _iter = enumerate(zip(grouped_flags, grouped_vals))
        for new_col, (flags, group) in _iter:
            rows, cols = np.where(flags.T)
            new_val[rows, new_col] = group.T[(rows, cols)]
        return new_val
    if join_acfg:
        # Each case must participate in the same number of groups
        assert ut.allsame(participates.sum(axis=1))
        grouped_flags = vt.apply_grouping(participates.T, groupxs)
        #new_prop2_mat = {key: group_prop(val)
        #                 for key, val in prop2_mat.items()}
        #new_truth2_prop = {
        #    truth: {key: group_prop(val)
        #            for key, val in props.items()}
        #    for truth, props in truth2_prop.items()}
        new_prop2_mat = {}
        for key, val in prop2_mat.items():
            new_prop2_mat[key] = group_prop(val, grouped_flags, groupxs)
        new_truth2_prop = {}
        for truth, props in truth2_prop.items():
            new_props = {}
            for key, val in props.items():
                new_props[key] = group_prop(val, grouped_flags, groupxs)
            new_truth2_prop[truth] = new_props
        prop2_mat_ = new_prop2_mat
        truth2_prop_ = new_truth2_prop
    else:
        prop2_mat_ = prop2_mat
        truth2_prop_ = truth2_prop
    return truth2_prop_, prop2_mat_
def interact_individual_result(testres, qaid, cfgx=0):
    """Interactively show match analysis for ``qaid`` under the given config(s)."""
    ibs = testres.ibs
    cfgx_list = ut.ensure_iterable(cfgx)
    qreq_list = ut.take(testres.cfgx2_qreq_, cfgx_list)
    # Execute the query under each requested config up front
    cm_list = [qreq_.execute(qaids=[qaid]) for qreq_ in qreq_list]
    shortlbls = testres.get_short_cfglbls()
    # Display options for the analysis view
    show_kwargs = {
        'N': 3,
        'ori': True,
        'ell_alpha': .9,
        'show_query': False,
        'viz_name_score': True,
        'show_timedelta': True,
        'show_gf': True,
        'with_figtitle': False,
    }
    for cfgx, cm, qreq_ in zip(cfgx_list, cm_list, qreq_list):
        cm.ishow_analysis(
            ibs, figtitle=shortlbls[cfgx], fnum=cfgx, annot_mode=1,
            qreq_=qreq_, **show_kwargs)
def draw_score_diff_disti(testres):
    r"""
    Plots groundtruth vs groundfalse name-score distributions as a function
    of database size.

    CommandLine:
        python -m ibeis --tf TestResult.draw_score_diff_disti --show -a varynannots_td -t best --db PZ_Master1
        python -m ibeis --tf TestResult.draw_score_diff_disti --show -a varynannots_td -t best --db GZ_Master1
        python -m ibeis --tf TestResult.draw_score_diff_disti --show -a varynannots_td1h -t best --db GIRM_Master1
        python -m ibeis --tf TestResult.draw_score_diff_disti --show -a varynannots_td:qmin_pername=3,dpername=2 -t best --db PZ_Master1

        python -m ibeis --tf get_annotcfg_list -a varynannots_td -t best --db PZ_Master1
        13502
        python -m ibeis --tf draw_match_cases --db PZ_Master1 -a varynannots_td:dsample_size=.01 -t best --show --qaid 13502
        python -m ibeis --tf draw_match_cases --db PZ_Master1 -a varynannots_td -t best --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.test_result import * # NOQA
        >>> import ibeis
        >>> ibs, testres = ibeis.testdata_expts('PZ_Master1', a=['varynannots_td'], t=['best'])
        >>> result = testres.draw_score_diff_disti()
        >>> print(result)
        >>> ut.show_if_requested()
    """
    import plottool_ibeis as pt
    import vtool_ibeis as vt
    # dont look at filtered cases
    ibs = testres.ibs
    qaids = testres.get_test_qaids()
    qaids = ibs.get_annot_tag_filterflags(qaids, {'has_none': 'timedeltaerror'})
    gt_rawscore = testres.get_infoprop_mat('qx2_gt_raw_score', qaids=qaids)
    gf_rawscore = testres.get_infoprop_mat('qx2_gf_raw_score', qaids=qaids)
    # Keep only finite scores per config column
    gt_valid_flags_list = np.isfinite(gt_rawscore).T
    gf_valid_flags_list = np.isfinite(gf_rawscore).T
    cfgx2_gt_scores = vt.zipcompress(gt_rawscore.T, gt_valid_flags_list)
    cfgx2_gf_scores = vt.zipcompress(gf_rawscore.T, gf_valid_flags_list)
    # partition by rank
    gt_rank = testres.get_infoprop_mat('qx2_gt_rank', qaids=qaids)
    gf_ranks = testres.get_infoprop_mat('qx2_gf_rank', qaids=qaids)
    cfgx2_gt_ranks = vt.zipcompress(gt_rank.T, gt_valid_flags_list)
    cfgx2_rank0_gt_scores = vt.zipcompress(cfgx2_gt_scores, [ranks == 0 for ranks in cfgx2_gt_ranks])
    cfgx2_rankX_gt_scores = vt.zipcompress(cfgx2_gt_scores, [ranks > 0 for ranks in cfgx2_gt_ranks])
    cfgx2_gf_ranks = vt.zipcompress(gf_ranks.T, gf_valid_flags_list)
    cfgx2_rank0_gf_scores = vt.zipcompress(cfgx2_gf_scores, [ranks == 0 for ranks in cfgx2_gf_ranks])
    #valid_gtranks = gt_rank[isvalid]
    #valid_qaids = qaids[isvalid]
    # Hack remove timedelta error
    #valid_qaids = valid_qaids[flags]
    #valid_gt_rawscore = valid_gt_rawscore[flags]
    #valid_gtranks = valid_gtranks[flags]
    xdata = list(map(len, testres.cfgx2_daids))

    USE_MEDIAN = True  # not ut.get_argflag('--use-mean')
    #USE_LOG = True
    USE_LOG = False
    if USE_MEDIAN:
        ave = np.median
        dev = vt.median_abs_dev
    else:
        ave = np.mean
        dev = np.std

    def make_interval_args(arr_list, ave=ave, dev=dev, **kwargs):
        # Build (center, spread) series for one labeled score group
        #if not USE_MEDIAN:
        #    # maybe approximate median by removing the most extreme values
        #    arr_list = [np.array(sorted(arr))[5:-5] for arr in arr_list]
        import utool as ut
        if USE_LOG:
            arr_list = list(map(lambda x: np.log(x + 1), arr_list))
        sizes_ = list(map(len, arr_list))
        ydata_ = list(map(ave, arr_list))
        spread_ = list(map(dev, arr_list))
        #ut.get_stats(arr_list, axis=0)
        label = kwargs.get('label', '')
        label += ' ' + ut.get_funcname(ave)
        kwargs['label'] = label
        print(label + 'score stats : ' +
              ut.repr2(ut.get_jagged_stats(arr_list, use_median=True), nl=1, precision=1))
        return ydata_, spread_, kwargs, sizes_

    args_list1 = [
        make_interval_args(cfgx2_gt_scores, label='GT', color=pt.TRUE_BLUE),
        make_interval_args(cfgx2_gf_scores, label='GF', color=pt.FALSE_RED),
    ]

    args_list2 = [
        make_interval_args(cfgx2_rank0_gt_scores, label='GT-rank = 0', color=pt.LIGHT_GREEN),
        make_interval_args(cfgx2_rankX_gt_scores, label='GT-rank > 0', color=pt.YELLOW),
        make_interval_args(cfgx2_rank0_gf_scores, label='GF-rank = 0', color=pt.PINK),
        #make_interval_args(cfgx2_rank2_gt_scores, label='gtrank < 2'),
    ]

    plotargs_list = [args_list1, args_list2]
    #plotargs_list = [args_list1]
    # Compute common y-limits across both subplots
    ymax = -np.inf
    ymin = np.inf
    for args_list in plotargs_list:
        ydata_list = np.array(ut.get_list_column(args_list, 0))
        spread = np.array(ut.get_list_column(args_list, 1))
        ymax = max(ymax, np.array(ydata_list + spread).max())
        # BUGFIX: previously `min(ymax, ...)`, which discarded any ymin
        # accumulated from earlier iterations
        ymin = min(ymin, np.array(ydata_list - spread).min())

    ylabel = 'log name score' if USE_LOG else 'name score'

    statickw = dict(
        #title='scores vs dbsize',
        xlabel='database size (number of annotations)',
        ylabel=ylabel,
        #xscale='log', ymin=0, ymax=10,
        linewidth=2, spread_alpha=.5, lightbg=True, marker='o',
        #xmax='data',
        ymax=ymax, ymin=ymin, xmax='data', xmin='data',
    )

    fnum = pt.ensure_fnum(None)
    pnum_ = pt.make_pnum_nextgen(len(plotargs_list), 1)

    for args_list in plotargs_list:
        ydata_list = ut.get_list_column(args_list, 0)
        spread_list = ut.get_list_column(args_list, 1)
        kwargs_list = ut.get_list_column(args_list, 2)
        sizes_list = ut.get_list_column(args_list, 3)
        print('sizes_list = %s' % (ut.repr2(sizes_list, nl=1),))
        # Pack kwargs list for multi_plot
        plotkw = ut.dict_stack2(kwargs_list, '_list')
        plotkw2 = ut.merge_dicts(statickw, plotkw)
        pt.multi_plot(xdata, ydata_list, spread_list=spread_list,
                      fnum=fnum, pnum=pnum_(), **plotkw2)
    #pt.adjust_subplots(hspace=.3)
    figtitle = 'Score vs DBSize: %s' % (testres.get_title_aug())
    pt.set_figtitle(figtitle)
def draw_rank_cmc(testres):
    """
    Wrapper around ``experiment_drawing.draw_rank_cmc`` for this test result.
    """
    from ibeis.expt import experiment_drawing
    experiment_drawing.draw_rank_cmc(testres.ibs, testres)
def draw_match_cases(testres, **kwargs):
    """
    Wrapper around ``experiment_drawing.draw_match_cases``; kwargs forwarded.
    """
    from ibeis.expt import experiment_drawing
    experiment_drawing.draw_match_cases(testres.ibs, testres, **kwargs)
def draw_failure_cases(testres, **kwargs):
    """
    Draws match cases for the first few failing queries.

    >>> from ibeis.other.dbinfo import * # NOQA
    >>> import ibeis
    >>> ibs, testres = ibeis.testdata_expts(defaultdb='PZ_MTEST', a='timectrl:qsize=2', t='invar:AI=[False],RI=False', use_cache=False)
    """
    from ibeis.expt import experiment_drawing
    #kwargs = kwargs.copy()
    orig_filter = ':'
    # NOTE(review): kwargs['f'] is set here but kwargs is never forwarded to
    # draw_match_cases below -- looks vestigial; confirm before removing.
    kwargs['f'] = orig_filter + 'fail'
    # Hard-coded sample: first 5 failure cases
    case_pos_list = testres.case_sample2(':fail=True,index=0:5')
    experiment_drawing.draw_match_cases(testres.ibs, testres, case_pos_list=case_pos_list, annot_modes=[1], interact=True)
def find_score_thresh_cutoff(testres):
    """
    Learns a score threshold separating groundtruth from groundfalse raw
    scores using a monotonized score normalizer.

    FIXME
        DUPLICATE CODE
        rectify with experiment_drawing

    Returns:
        score_thresh: learned decision threshold
    """
    #import plottool_ibeis as pt
    import vtool_ibeis as vt
    if ut.VERBOSE:
        print('[dev] FIX DUPLICATE CODE find_thresh_cutoff')
    #from ibeis.expt import cfghelpers
    assert len(testres.cfgx2_qreq_) == 1, 'can only specify one config here'
    cfgx = 0
    #qreq_ = testres.cfgx2_qreq_[cfgx]
    test_qaids = testres.get_test_qaids()
    # Raw name scores for the single config
    gt_rawscore = testres.get_infoprop_mat('qx2_gt_raw_score', qaids=test_qaids).T[cfgx]
    gf_rawscore = testres.get_infoprop_mat('qx2_gf_raw_score', qaids=test_qaids).T[cfgx]
    # FIXME: may need to specify which cfg is used in the future
    #isvalid = testres.case_sample2(filt_cfg, return_mask=True).T[cfgx]
    # Groundtruth scores are treated as true positives, groundfalse as
    # true negatives
    tp_nscores = gt_rawscore
    tn_nscores = gf_rawscore
    tn_qaids = tp_qaids = test_qaids
    #encoder = vt.ScoreNormalizer(target_tpr=.7)
    #print(qreq_.get_cfgstr())
    part_attrs = {1: {'qaid': tp_qaids},
                  0: {'qaid': tn_qaids}}
    fpr = None
    tpr = .85
    encoder = vt.ScoreNormalizer(adjust=8, fpr=fpr, tpr=tpr, monotonize=True)
    #tp_scores = tp_nscores
    #tn_scores = tn_nscores
    name_scores, labels, attrs = encoder._to_xy(tp_nscores, tn_nscores, part_attrs)
    encoder.fit(name_scores, labels, attrs)
    # Threshold at the learned operating point
    score_thresh = encoder.learn_threshold2()
    # Find intersection point
    # TODO: add to score normalizer.
    # Improve robustness
    #pt.figure()
    #pt.plot(xdata, curve)
    #pt.plot(x_submax, y_submax, 'o')
    return score_thresh
def print_percent_identification_success(testres):
    """Print names identified (at rank 1) / names queried.

    Combines results over multiple queries of a particular name using
    the max score per name.

    OLD, MAYBE DEPRECATE

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis.expt.test_result import *  # NOQA
    """
    ibs = testres.ibs
    qaids = testres.get_test_qaids()
    # Group query annots by name id
    unique_nids, groupxs = ut.group_indices(ibs.get_annot_nids(qaids))
    qx2_gt_raw_score = testres.get_infoprop_mat('qx2_gt_raw_score', qaids=qaids)
    qx2_gf_raw_score = testres.get_infoprop_mat('qx2_gf_raw_score', qaids=qaids)
    # Reduce per-query scores to a per-name score via nanmax
    nx2_gt_raw_score = np.array([
        np.nanmax(scores, axis=0)
        for scores in vt.apply_grouping(qx2_gt_raw_score, groupxs)])
    nx2_gf_raw_score = np.array([
        np.nanmax(scores, axis=0)
        for scores in vt.apply_grouping(qx2_gf_raw_score, groupxs)])
    # A name is "identified" when its best true score beats its best false score
    cfgx2_success = (nx2_gt_raw_score > nx2_gf_raw_score).T
    print('Identification success (names identified / names queried)')
    for cfgx, success in enumerate(cfgx2_success):
        pipelbl = testres.cfgx2_lbl[cfgx]
        # 100.0 forces true division (avoids Python-2 integer truncation)
        percent = 100.0 * success.sum() / len(success)
        print('%2d) success = %r/%r = %.2f%% -- %s' % (
            cfgx, success.sum(), len(success), percent, pipelbl))
def print_config_overlap(testres, with_plot=True):
    """Report (and optionally plot) how success cases overlap between configs."""
    truth2_prop, prop2_mat = testres.get_truth2_prop()
    qx2_gt_ranks = truth2_prop['gt']['rank']
    qx2_success = (qx2_gt_ranks == 0)
    cfgx2_num_correct = np.nansum(qx2_success, axis=0)
    best_cfgx = cfgx2_num_correct.argmax()
    print('Config Overlap')
    # Pairwise intersection / union of successful query indexes
    isect_mat = np.zeros((testres.nConfig, testres.nConfig), dtype=np.int32)
    union_mat = np.zeros((testres.nConfig, testres.nConfig), dtype=np.int32)
    for cfgx1 in range(testres.nConfig):
        for cfgx2 in range(testres.nConfig):
            if cfgx1 == cfgx2:
                # Diagonal: a config trivially overlaps itself
                success_qx1 = np.where(qx2_success.T[cfgx1])[0]
                isect_mat[cfgx1][cfgx2] = len(success_qx1)
                union_mat[cfgx1][cfgx2] = len(success_qx1)
                continue
            success_qx1 = np.where(qx2_success.T[cfgx1])[0]
            success_qx2 = np.where(qx2_success.T[cfgx2])[0]
            union_ = np.union1d(success_qx1, success_qx2)
            isect_ = np.intersect1d(success_qx1, success_qx2)
            isect_mat[cfgx1][cfgx2] = len(isect_)
            union_mat[cfgx1][cfgx2] = len(union_)
    n_success_list = np.array([qx2_success.T[cfgx1].sum()
                               for cfgx1 in range(testres.nConfig)])
    improves_mat = n_success_list[:, None] - isect_mat
    disjoint_mat = union_mat - isect_mat
    print('n_success_list = %r' % (n_success_list,))
    print('union_mat =\n%s' % (union_mat,))
    print('isect_mat =\n%s' % (isect_mat,))
    print('cfgx1 and cfgx2 have <x> not in common')
    print('disjoint_mat =\n%s' % (disjoint_mat,))
    print('cfgx1 helps cfgx2 by <x>')
    print('improves_mat =\n%s' % (improves_mat,))
    print('improves_mat.sum(axis=1) = \n%s' % (improves_mat.sum(axis=1),))
    bestx_by_improves = improves_mat.sum(axis=1).argmax()
    print('bestx_by_improves = %r' % (bestx_by_improves,))
    # Numbered version: compare every config against the best one
    print('best_cfgx = %r' % (best_cfgx,))
    for cfgx in range(testres.nConfig):
        if cfgx == best_cfgx:
            continue
        pipelbl = testres.cfgx2_lbl[cfgx]
        qx2_anysuccess = np.logical_or(qx2_success.T[cfgx], qx2_success.T[best_cfgx])
        # Queries this config got right that the best config missed
        qx2_othersuccess = np.logical_and(qx2_anysuccess, np.logical_not(qx2_success.T[best_cfgx]))
        print('cfgx %d) has %d success cases that that the best config does not have -- %s' % (cfgx, qx2_othersuccess.sum(), pipelbl))
    if with_plot:
        if False:
            # Scratch: chip size stats, never enabled
            ave_dlen = [np.sqrt(np.array(testres.ibs.get_annot_chip_dlensqrd(  # NOQA
                testres.qaids, config2_=qreq_.query_config2_))).mean()
                for qreq_ in testres.cfgx2_qreq_]
            ave_width_inimg = [np.array(testres.ibs.get_annot_bboxes(  # NOQA
                testres.qaids, config2_=qreq_.query_config2_))[:, 2 + 0].mean()
                for qreq_ in testres.cfgx2_qreq_]
            ave_width = [np.array(testres.ibs.get_annot_chip_sizes(  # NOQA
                testres.qaids, config2_=qreq_.query_config2_))[:, 0].mean()
                for qreq_ in testres.cfgx2_qreq_]
        import plottool_ibeis as pt

        def label_ticks():
            # Label both axes with (truncated) varied-parameter labels
            import plottool_ibeis as pt
            ax = pt.gca()
            labels = testres.get_varied_labels()
            ax.set_xticks(list(range(len(labels))))
            ax.set_xticklabels([lbl[0:100] for lbl in labels])
            [lbl.set_rotation(-25) for lbl in ax.get_xticklabels()]
            [lbl.set_horizontalalignment('left') for lbl in ax.get_xticklabels()]
            ax.set_yticks(list(range(len(labels))))
            ax.set_yticklabels([lbl[0:100] for lbl in labels])
            [lbl.set_horizontalalignment('right') for lbl in ax.get_yticklabels()]
            [lbl.set_verticalalignment('center') for lbl in ax.get_yticklabels()]

        pt.figure(fnum=pt.next_fnum())
        pt.plt.imshow(union_mat, interpolation='none', cmap='hot')
        pt.plt.colorbar()
        pt.set_title('union mat: cfg<x> and cfg<y> have <z> success cases in in total')
        label_ticks()
        label_ticks()
        pt.figure(fnum=pt.next_fnum())
        pt.plt.imshow(isect_mat, interpolation='none', cmap='hot')
        pt.plt.colorbar()
        pt.set_title('isect mat: cfg<x> and cfg<y> have <z> success cases in common')
        label_ticks()
        pt.figure(fnum=pt.next_fnum())
        pt.plt.imshow(disjoint_mat, interpolation='none', cmap='hot')
        pt.plt.colorbar()
        pt.set_title('disjoint mat (union - isect): cfg<x> and cfg<y> have <z> success cases not in common')
        pt.figure(fnum=pt.next_fnum())
        pt.plt.imshow(improves_mat, interpolation='none', cmap='hot')
        pt.plt.colorbar()
        pt.set_title('improves mat (diag.T - isect): cfg<x> got <z> qaids that cfg <y> missed')
        label_ticks()
def map_score(testres):
    """Compute mean average precision (mAP) for each config.

    For each query compute a precision recall curve, then the average
    precision; the mAP is the mean of the per-name average precisions.

    Returns:
        list: one mAP value per config in ``testres.cfgx2_qreq_``.

    Script:
        >>> import ibeis
        >>> ibs, testres = ibeis.testdata_expts('Oxford', a='oxford', p='smk:nWords=[64000],nAssign=[1],SV=[False],can_match_sameimg=True')
    """
    import sklearn.metrics
    qaids = testres.get_test_qaids()
    ibs = testres.ibs
    PLOT = False  # flip to True for interactive precision/recall plots
    cfgx2_cms = []
    for qreq_ in testres.cfgx2_qreq_:
        cm_list = qreq_.execute(qaids)
        cm_list = [cm.extend_results(qreq_) for cm in cm_list]
        for cm in cm_list:
            cm.score_annot_csum(qreq_)
        cfgx2_cms.append(cm_list)
    map_list = []
    unique_names, groupxs = ut.group_indices(ibs.annots(qaids).names)
    for cm_list, qreq_ in zip(cfgx2_cms, testres.cfgx2_qreq_):
        if PLOT:
            import plottool_ibeis as pt
            pt.qt4ensure()
            fnum = pt.ensure_fnum(None)
            pt.figure(fnum=fnum)
        avep_list = []
        for cm in cm_list:
            # Ignore junk images
            flags = np.array(ibs.annots(cm.daid_list).quality_texts) != 'junk'
            assert np.all(flags)
            daid_list = cm.daid_list
            dnid_list = cm.dnid_list
            # NOTE: np.int was removed in NumPy 1.24; plain int is equivalent
            y_true = (cm.qnid == dnid_list).compress(flags).astype(int)
            y_score = cm.annot_score_list.compress(flags)
            y_score[~np.isfinite(y_score)] = 0
            y_score = np.nan_to_num(y_score)
            # Rank results by descending score before scoring the curve
            sortx = np.argsort(y_score)[::-1]
            daid_list = daid_list.take(sortx)
            dnid_list = dnid_list.take(sortx)
            y_true = y_true.take(sortx)
            y_score = y_score.take(sortx)
            precision, recall, thresholds = sklearn.metrics.precision_recall_curve(y_true, y_score)
            if PLOT:
                pt.plot2(recall, precision, marker='', linestyle='-', x_label='recall', y_label='precision')
            avep = sklearn.metrics.average_precision_score(y_true, y_score)
            avep_list.append(avep)
        # Average per-query APs within each name, then average over names
        name_to_ave = [np.mean(a) for a in ut.apply_grouping(avep_list, groupxs)]
        name_to_ave_ = dict(zip(unique_names, name_to_ave))
        print('name_to_ave_ = %s' % (ut.align(ut.repr3(name_to_ave_, precision=3), ':')))
        mean_ave_precision = np.mean(name_to_ave)
        print('mean_ave_precision = %r' % (mean_ave_precision,))
        map_list.append(mean_ave_precision)
    return map_list
def embed_testres(testres):
    """Drop into an interactive shell with ``testres`` in scope.

    CommandLine:
        python -m ibeis TestResults.embed_testres

    Example:
        >>> # SCRIPT
        >>> from ibeis.expt.test_result import *  # NOQA
        >>> from ibeis.init import main_helpers
        >>> ibs, testres = main_helpers.testdata_expts(defaultdb='PZ_MTEST')
        >>> embed_testres(testres)
    """
    ut.embed()
def get_options(testres):
    """Return the standard-interface functions exposed by ``testres``."""
    return [
        testres.print_results,
        testres.draw_rank_cmc,
        testres.draw_match_cases,
        testres.embed_testres,
    ]
def get_actions(testres):
    """Map each action callable to its (command aliases, help text) pair."""
    actions = ut.odict([
        (testres.print_results, (['print', 'p'], '')),
        (testres.draw_rank_cmc, (['cmc'], '')),
        (testres.draw_match_cases, (['case'], '')),
        (testres.embed_testres, (['embed', 'ipy'], '')),
    ])
    return actions
def help(testres):
    """Print the functions that accept the standard interface and the
    command lines that invoke them."""
    prefix = 'ibeis'
    suffix = testres.reconstruct_test_flags()
    funcname_list = [ut.get_funcname(func) for func in testres.get_options()]
    cmdstr_list = [' '.join([prefix, funcname, suffix])
                   for funcname in funcname_list]
    ut.cprint('Available Functions:', 'blue')
    print(', '.join(funcname_list))
    ut.cprint('Available Commandline:', 'blue')
    print('\n'.join(cmdstr_list))
if __name__ == '__main__':
    """
    CommandLine:
        python -m ibeis.expt.test_result
        python -m ibeis.expt.test_result --allexamples
        python -m ibeis.expt.test_result --allexamples --noface --nosrc
    """
    import multiprocessing
    multiprocessing.freeze_support()  # required for frozen win32 executables
    import utool as ut  # NOQA
    ut.doctest_funcs()
| apache-2.0 |
michigraber/scikit-learn | benchmarks/bench_plot_approximate_neighbors.py | 85 | 6377 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, accuracy of LSHForest queries are measured for various
hyper-parameters and index sizes.
Second, speed up of LSHForest queries compared to brute force
method in exact nearest neighbors is measures for the
aforementioned settings. In general, speed up is increasing as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
    """Create index and query data."""
    print('Generating random blob-ish data')
    X, _ = make_blobs(n_samples=n_samples + n_queries,
                      n_features=n_features, centers=100,
                      shuffle=True, random_state=random_state)
    # The trailing n_queries rows become held-out query vectors; because
    # shuffle=True, index and query points are drawn from the same
    # distribution (a mixture of 100 gaussians).
    return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
    """Measure average per-query time for exact (brute-force) neighbor queries.

    Returns:
        tuple: (neighbors array, average seconds per query)
    """
    print('Building NearestNeighbors for %d samples in %d dimensions' %
          (X.shape[0], X.shape[1]))
    nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
    # (removed a dead `average_time = 0` that was immediately overwritten)
    t0 = time()
    neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
                                return_distance=False)
    average_time = (time() - t0) / n_queries
    return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
                  average_time_exact, **lshf_params):
    """Calculates accuracy and the speed up of LSHForest."""
    print('Building LSHForest for %d samples in %d dimensions' %
          (X.shape[0], X.shape[1]))
    lshf = LSHForest(**lshf_params)
    t0 = time()
    lshf.fit(X)
    lshf_build_time = time() - t0
    print('Done in %0.3fs' % lshf_build_time)
    # Time the approximate queries
    t0 = time()
    approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
                                       return_distance=False)
    average_time_approx = (time() - t0) / n_queries
    # Accuracy = mean fraction of exact neighbors recovered per query
    accuracy = 0
    for i in range(len(queries)):
        accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
    accuracy /= n_queries
    speed_up = average_time_exact / average_time_approx
    print('Average time for lshf neighbor queries: %0.3fs' %
          average_time_approx)
    print('Average time for exact neighbor queries: %0.3fs' %
          average_time_exact)
    print('Average Accuracy : %0.2f' % accuracy)
    print('Speed up: %0.1fx' % speed_up)
    return speed_up, accuracy
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    # Index sizes to benchmark against
    n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
    n_features = int(1e2)
    n_queries = 100
    n_neighbors = 10

    X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
                                 random_state=0)

    params_list = [{'n_estimators': 3, 'n_candidates': 50},
                   {'n_estimators': 5, 'n_candidates': 70},
                   {'n_estimators': 10, 'n_candidates': 100}]

    accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
    speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)

    for i, sample_size in enumerate(n_samples):
        print('==========================================================')
        print('Sample size: %i' % sample_size)
        print('------------------------')
        exact_neighbors, average_time_exact = calc_exact_neighbors(
            X_index[:sample_size], X_query, n_queries, n_neighbors)
        for j, params in enumerate(params_list):
            print('LSHF parameters: n_estimators = %i, n_candidates = %i' %
                  (params['n_estimators'], params['n_candidates']))
            speed_ups[i, j], accuracies[i, j] = calc_accuracy(
                X_index[:sample_size], X_query, n_queries, n_neighbors,
                exact_neighbors, average_time_exact, random_state=0, **params)
            print('')
        print('==========================================================')

    # Legend proxies for the three LSHForest parameter settings
    colors = ['c', 'm', 'y']
    p1 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[0])
    p2 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[1])
    p3 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[2])
    labels = ['n_estimators=' + str(params_list[0]['n_estimators']) +
              ', n_candidates=' + str(params_list[0]['n_candidates']),
              'n_estimators=' + str(params_list[1]['n_estimators']) +
              ', n_candidates=' + str(params_list[1]['n_candidates']),
              'n_estimators=' + str(params_list[2]['n_estimators']) +
              ', n_candidates=' + str(params_list[2]['n_candidates'])]

    # Precision vs. index size
    plt.figure()
    plt.legend((p1, p2, p3), (labels[0], labels[1], labels[2]),
               loc='upper left')
    for i in range(len(params_list)):
        plt.scatter(n_samples, accuracies[:, i], c=colors[i])
        plt.plot(n_samples, accuracies[:, i], c=colors[i])
    plt.ylim([0, 1.3])
    plt.xlim(np.min(n_samples), np.max(n_samples))
    plt.semilogx()
    plt.ylabel("Precision@10")
    plt.xlabel("Index size")
    plt.grid(which='both')
    plt.title("Precision of first 10 neighbors with index size")

    # Speed up vs. index size
    plt.figure()
    plt.legend((p1, p2, p3), (labels[0], labels[1], labels[2]),
               loc='upper left')
    for i in range(len(params_list)):
        plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
        plt.plot(n_samples, speed_ups[:, i], c=colors[i])
    plt.ylim(0, np.max(speed_ups))
    plt.xlim(np.min(n_samples), np.max(n_samples))
    plt.semilogx()
    plt.ylabel("Speed up")
    plt.xlabel("Index size")
    plt.grid(which='both')
    plt.title("Relationship between Speed up and index size")

    plt.show()
| bsd-3-clause |
meduz/scikit-learn | sklearn/svm/tests/test_sparse.py | 63 | 13366 | import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import (assert_raises, assert_true, assert_false,
assert_warns, assert_raise_message,
ignore_warnings)
# test sample 1
# Small linearly-separable 2D dataset with a sparse (LIL) mirror.
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
# T: held-out test points; true_result: their expected labels.
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
# Three-class 3D dataset with a sparse (DOK) mirror.
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
# Iris dataset, shuffled with a fixed seed for reproducibility.
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
# Convert features to CSR so the sparse code paths are exercised.
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
    """Fit the same model on dense and sparse data and assert equivalence."""
    dense_svm.fit(X_train.toarray(), y_train)
    X_test_dense = X_test.toarray() if sparse.isspmatrix(X_test) else X_test
    sparse_svm.fit(X_train, y_train)
    # The sparse estimator must keep sparse internal representations
    assert_true(sparse.issparse(sparse_svm.support_vectors_))
    assert_true(sparse.issparse(sparse_svm.dual_coef_))
    assert_array_almost_equal(dense_svm.support_vectors_,
                              sparse_svm.support_vectors_.toarray())
    assert_array_almost_equal(dense_svm.dual_coef_,
                              sparse_svm.dual_coef_.toarray())
    if dense_svm.kernel == "linear":
        assert_true(sparse.issparse(sparse_svm.coef_))
        assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
    assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
    # Predictions and decision values must agree regardless of input format
    assert_array_almost_equal(dense_svm.predict(X_test_dense),
                              sparse_svm.predict(X_test))
    assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
                              sparse_svm.decision_function(X_test))
    assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
                              sparse_svm.decision_function(X_test_dense))
    if isinstance(dense_svm, svm.OneClassSVM):
        msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
    else:
        assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
                                  sparse_svm.predict_proba(X_test), 4)
        msg = "cannot use sparse input in 'SVC' trained on dense data"
    # A dense-trained model must refuse sparse input with a clear error
    if sparse.isspmatrix(X_test):
        assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
    """Check that sparse SVC gives the same result as SVC"""
    # many class dataset:
    X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
    X_blobs = sparse.csr_matrix(X_blobs)
    datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
                [X_blobs[:80], y_blobs[:80], X_blobs[80:]],
                [iris.data, iris.target, iris.data]]
    kernels = ["linear", "poly", "rbf", "sigmoid"]
    # Exercise every (dataset, kernel) combination
    for dataset in datasets:
        for kernel in kernels:
            clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
                          decision_function_shape='ovo')
            sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
                             decision_function_shape='ovo')
            check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
    # The result with sorted and unsorted CSR indices must be identical.
    # A subset of digits is used because iris, blobs and
    # make_classification did not reproduce the problem.
    digits = load_digits()
    X, y = digits.data[:50], digits.target[:50]
    X_test = sparse.csr_matrix(digits.data[50:100])
    X_sparse = sparse.csr_matrix(X)
    coef_dense = svm.SVC(kernel='linear', probability=True,
                         random_state=0).fit(X, y).coef_
    sparse_svc = svm.SVC(kernel='linear', probability=True,
                         random_state=0).fit(X_sparse, y)
    coef_sorted = sparse_svc.coef_
    # dense and sparse SVM must give the same coefficients
    assert_array_almost_equal(coef_dense, coef_sorted.toarray())
    # Fancy indexing scrambles the CSR index ordering
    X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
    X_test_unsorted = X_test[np.arange(X_test.shape[0])]
    assert_false(X_sparse_unsorted.has_sorted_indices)
    assert_false(X_test_unsorted.has_sorted_indices)
    unsorted_svc = svm.SVC(kernel='linear', probability=True,
                           random_state=0).fit(X_sparse_unsorted, y)
    coef_unsorted = unsorted_svc.coef_
    # unsorted indices must give the same result
    assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
    assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
                              sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
    # A callable linear kernel must match the builtin 'linear' kernel
    def kfunc(x, y):
        return safe_sparse_dot(x, y.T)
    clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
    clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
    assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
    # Sparse SVC must agree with dense SVC on the iris dataset
    for k in ('linear', 'poly', 'rbf'):
        sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
        clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
        assert_array_almost_equal(clf.support_vectors_,
                                  sp_clf.support_vectors_.toarray())
        assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
        assert_array_almost_equal(
            clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
        if k == 'linear':
            # coef_ only exists for the linear kernel
            assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
    # Sanity check: the python-side decision_function must match the
    # value computed directly from coef_ and intercept_ (libsvm).
    # multi class:
    svc = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo')
    clf = svc.fit(iris.data, iris.target)
    dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
    assert_array_almost_equal(dec, clf.decision_function(iris.data))
    # binary:
    clf.fit(X, Y)
    dec = np.dot(X, clf.coef_.T) + clf.intercept_
    prediction = clf.predict(X)
    assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
    assert_array_almost_equal(
        prediction,
        clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
    expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
    assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
    # Deficient input must raise proper exceptions
    # impossible value of C
    assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
    # impossible value of nu
    clf = svm.NuSVC(nu=0.0)
    assert_raises(ValueError, clf.fit, X_sp, Y)
    # wrong dimensions for labels
    Y2 = Y[:-1]
    assert_raises(ValueError, clf.fit, X_sp, Y2)
    # a valid fit afterwards still works
    clf = svm.SVC()
    clf.fit(X_sp, Y)
    assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
    # Similar to test_SVC: sparse and dense LinearSVC must agree
    clf = svm.LinearSVC(random_state=0).fit(X, Y)
    sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
    assert_true(sp_clf.fit_intercept)
    assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
    assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
    assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
    # Refit on the second sample and re-check
    clf.fit(X2, Y2)
    sp_clf.fit(X2_sp, Y2)
    assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
    assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
    # Sparse LinearSVC must agree with dense LinearSVC on iris
    sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
    clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
    assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
    assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
    assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
    assert_array_almost_equal(
        clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
    # check decision_function
    pred = np.argmax(sp_clf.decision_function(iris.data), 1)
    assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
    # sparsifying the coefficients on both models must not change results
    clf.sparsify()
    assert_array_equal(pred, clf.predict(iris.data))
    sp_clf.sparsify()
    assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
    # Class weights must be honoured on sparse input
    X_, y_ = make_classification(n_samples=200, n_features=100,
                                 weights=[0.833, 0.167], random_state=0)
    X_ = sparse.csr_matrix(X_)
    for clf in (linear_model.LogisticRegression(),
                svm.LinearSVC(random_state=0),
                svm.SVC()):
        clf.set_params(class_weight={0: 5})
        clf.fit(X_[:180], y_[:180])
        y_pred = clf.predict(X_[180:])
        assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
    # Per-sample weights must be able to flip an individual prediction
    clf = svm.SVC()
    clf.fit(X_sp, Y)
    assert_array_equal(clf.predict([X[2]]), [1.])
    sample_weight = [.1] * 3 + [10] * 3
    clf.fit(X_sp, Y, sample_weight=sample_weight)
    assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
    # Sparse liblinear must honour intercept_scaling just like dense
    test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
    """Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
    # many class dataset:
    X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
    X_blobs = sparse.csr_matrix(X_blobs)
    # y is None everywhere: OneClassSVM is unsupervised
    datasets = [[X_sp, None, T], [X2_sp, None, T2],
                [X_blobs[:80], None, X_blobs[80:]],
                [iris.data, None, iris.data]]
    kernels = ["linear", "poly", "rbf", "sigmoid"]
    for dataset in datasets:
        for kernel in kernels:
            clf = svm.OneClassSVM(kernel=kernel, random_state=0)
            sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
            check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
    # Subset of the 20newsgroups dataset. Catches bugs where input is not
    # correctly converted to sparse format or weights are mis-initialized.
    data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
    indices = np.array([6, 5, 35, 31])
    indptr = np.array(
        [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
         1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
         2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
         2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
    X = sparse.csr_matrix((data, indices, indptr))
    y = np.array(
        [1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
         0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
         0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
         3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
         0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
         3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
         1., 3.])
    clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
    sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
    assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
    assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
    # A cloned SVC with a callable kernel must still work on sparse input
    # (i.e. "dense_fit" is exercised under the hood without error).
    a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
                random_state=0)
    b = base.clone(a)
    b.fit(X_sp, Y)
    pred = b.predict(X_sp)
    b.predict_proba(X_sp)
    # Compare against a dense SVC with the equivalent dense kernel
    dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
                        probability=True, random_state=0)
    pred_dense = dense_svm.fit(X, Y).predict(X)
    assert_array_equal(pred_dense, pred)
    # b.decision_function(X_sp)  # XXX : should be supported
def test_timeout():
    # max_iter=1 must trigger a ConvergenceWarning rather than hang
    sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
                 random_state=0, max_iter=1)
    assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
    # Two identically-seeded fits must yield identical probabilities
    a = svm.SVC(probability=True, max_iter=1, random_state=0)
    with ignore_warnings(category=ConvergenceWarning):
        proba_1 = a.fit(X, Y).predict_proba(X)
    a = svm.SVC(probability=True, max_iter=1, random_state=0)
    with ignore_warnings(category=ConvergenceWarning):
        proba_2 = a.fit(X, Y).predict_proba(X)
    assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
eclee25/flu-SDI-exploratory-age | scripts/create_fluseverity_figs_v4/functions_v4.py | 1 | 67932 | #!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 10/19/14
## Purpose: script of functions for data cleaning and processing to draw flu severity figures; supports figures in create_fluseverity_figs
## v2: swap child:adult OR to adult:child OR
## v3: continue swap, adjust incidence for age-specific ILI HC seeking behavior, ratio of week-specific any diagnosis visits in S9/S#
## v4: change to relative risk, coverage adjustment: ratio of season-specific any diagnosis visits in S9/S#, care-seeking adjustment: age-specific ILI HC seeking behavior
###Command Line: would not be called from command line directly
##############################################
##############################################
# header
from collections import defaultdict
from datetime import date, datetime
from itertools import product
import numpy as np
import matplotlib.cm as cm
import bisect
import csv
ID = '09'
filename_dummy0 = '/home/elee/Downloads/test/OR_time_%s.png' %(ID)
filename_dummy1 = '/home/elee/Downloads/test/zOR_time_%s.png' %(ID)
filename_dummy2 = '/home/elee/Downloads/test/zOR_benchR_%s.png' %(ID)
filename_dummy3 = '/home/elee/Downloads/test/zOR_benchE_%s.png' %(ID)
##############################################
# global parameters - methods
## SDI data ##
gp_normweeks = 7 # number of weeks in baseline normalization period
gp_fluweeks = 34 # number of weeks in flu season (weeks 40-20)
gp_retro_duration = 2 # duration of retrospective period in weeks
gp_begin_retro_week = 3 # number of weeks before the peak incidence week that the retrospective period should begin (that season only)
gp_early_duration = 2 # duration of the early warning period in weeks
gp_begin_early_week = 2 # number of weeks after the week with Thanksgiving that the early warning period should begin (that season only)
gp_plotting_seasons = range(2,10) # season numbers for which data will be plotted (eg. Season 2 = 2001-02)
gp_plotting_regions = range(1, 11) # region numbers
gp_mild =[3, 6, 7, 9] # seasons 3, 6, 7, 9
gp_mod = [2, 5] # seasons 2, 5
gp_sev = [4, 8] # seasons 4, 8, 10 (pandemic)
## ILINet data ##
gp_ILINet_plotting_seasons = range(-2, 10) + range(11,15) # remove 2009-10 data
## pandemic analyses only ##
gp_pandemic_plotting_seasons = range(9,11) # 2008-09 and 2009-10 data only
gp_pandemicbaseline = ['between pandemic waves', 'last season baseline', 'after pandemic']
## create dict_ages ##
age_keys = ['C', 'A', 'O']
children = ['5-9 YEARS', '10-14 YEARS', '15-19 YEARS']
adults = ['20-29 YEARS', '30-39 YEARS', '40-49 YEARS', '50-59 YEARS']
other = ['<2 YEARS', '2-4 YEARS', '60-69 YEARS', '70-79 YEARS', '80 YEARS']
dict_ages = defaultdict(list)
# dict_ages[agegroup code] = [agegroup bin 1, age group bin 2,... in text]
dict_ages = dict(zip(age_keys, [children, adults, other]))
## ILI care-seeking behavior ##
# national level, weighted averages based on sample size in Biggerstaff2012 and Biggerstaff2014
# children = 5-17, adults = 18-64, other = <5 & >64. See health_seeking_behavior_edited_101014.ods
dict_careseek_nat = {'C':0.5146, 'A':0.4095, 'O':0.6262, 'T':0.4501}
# Census region level, Biggerstaff2012
# children = 0-17, adults >= 18
dict_careseek_census = {('NE', 'A'):0.44, ('MW', 'A'):0.39, ('SO', 'A'):0.42, ('WE', 'A'):0.33, ('NE', 'C'):0.58, ('MW', 'C'):0.48, ('SO', 'C'):0.66, ('WE', 'C'):0.50}
##############################################
# global parameters - plotting
## generic label formatting ##
gp_sigma_r = r'Retrospective Severity, $\bar \sigma_r$'
gp_sigma_w = r'Early Warning Severity, $\bar \sigma_w$'
gp_sigmat = r'Adult-Child Disease Burden, $\sigma(t)$'
gp_benchmark = r'Benchmark, $\beta$'
gp_attackrate = r'Seasonal Attack Rate (per 100,000)'
## SDI data ##
gp_seasonlabels = ['01-02', '02-03', '03-04', '04-05', '05-06', '06-07', '07-08', '08-09']
gp_colors_1_10 = ['grey', 'black', 'red', 'orange', 'gold', 'green', 'blue', 'cyan', 'darkviolet', 'hotpink']
# gp_colors = ['black', 'red', 'orange', 'gold', 'green', 'blue', 'cyan', 'darkviolet']
gp_colors = ["#e41a1c", "#228b22", "#377eb8", "#ff7f00", "#984ea3", "#ffff33", "#a65628", "#f781bf"]
gp_retro_early_colors = ['black', '#7cfc00']
gp_regions = ['Boston (R1)', 'New York (R2)', 'Philadelphia (R3)', 'Atlanta (R4)', 'Chicago (R5)', 'Dallas (R6)', 'Kansas City (R7)', 'Denver (R8)', 'San Francisco (R9)', 'Seattle (R10)']
gp_weeklabels = range(40,54) # week number labels for plots vs. time
gp_weeklabels.extend(range(1,40))
gp_severitylabels = ['Mild', 'Moderate', 'Severe']
gp_severitycolors = ['b', 'y', 'r']
gp_line_style = ['-', ':']
gp_barwidth = 0.35
gp_agelabels = ['Child', 'Adult', 'Other Ages']
gp_agecolors = ['orange', 'grey', 'green']
gp_mild_severe_colors = ['blue', 'red']
gp_plot_titles = ['Mild Season', 'Severe Season']
gp_marker = 'None'
gp_linewidth = 3
## ILINet data ##
gp_ILINet_seasonlabels = ['97-98', '98-99', '99-00', '00-01', '01-02', '02-03', '03-04', '04-05', '05-06', '06-07', '07-08', '08-09', '10-11', '11-12', '12-13', '13-14']
gp_ILINet_colors = cm.rainbow(np.linspace(0, 1, len(gp_ILINet_seasonlabels)))
gp_txt_retro_coords = [(-5.5, 19), (8, -18)] # mild, severe
gp_txt_early_coords = [(-5.5, 7), (4.5, -7)] # mild, severe
##############################################
## files ##
thanksin = open('/home/elee/Dropbox/My_Bansal_Lab/Clean_Data_for_Import/ThanksgivingWeekData_cl.csv', 'r')
thanksin.readline() # remove header
csv_Thanksgiving = csv.reader(thanksin, delimiter=',')
##############################################
## call parameters ##
# set these parameters every time a plot is run
pseasons = gp_plotting_seasons
##############################################
def anydiag_baseline_comparison(csvreadfile):
    ''' Number of any diagnosis visits across all
    ages, service places, and zip3s for fall baseline (weeks 40-46) and summer baseline (weeks 33-39 in previous 'season').
    dict_anydiag[season] = (# anydiag fall BL, # anydiag summer BL)
    '''
    main(anydiag_baseline_comparison)
    # build (date, weeknum) -> season and (date, weeknum) -> visit count lookups
    dict_wk, dict_dummyany = {}, {}
    for row in csvreadfile:
        seasonnum, wknum = int(row[0]), int(row[3])
        visits = float(row[4])
        datestr = row[1]
        wkdate = date(int(datestr[:4]), int(datestr[5:7]), int(datestr[8:]))
        dict_wk[(wkdate, wknum)] = seasonnum
        dict_dummyany[(wkdate, wknum)] = visits
    dict_anydiag = {}
    for seasonnum in pseasons:
        # fall baseline: weeks 40-46 of the current season
        fall_keys = [k for k in dict_wk if dict_wk[k] == seasonnum and k[1] > 39]
        # summer baseline: weeks 33-39 of the prior season
        summer_keys = [k for k in dict_wk if dict_wk[k] == seasonnum - 1 and k[1] < 40]
        # total any-diagnosis visits over each baseline window
        fall_total = sum([dict_dummyany[k] for k in fall_keys])
        summer_total = sum([dict_dummyany[k] for k in summer_keys])
        dict_anydiag[seasonnum] = (fall_total, summer_total)
    return dict_anydiag
##############################################
def benchmark_factors_import(csvreadfile):
    ''' Import CDC_Source/Import_Data/cdc_severity_data_cleaned.csv, which includes the raw data used to create the benchmark index that pairs with the SDI severity index.
    dict_benchfactors[season] = (percent positive isolates, proportion of total mortality due to P&I, number of pediatric deaths, child hospitalization rate, adult hospitalization rate)
    '''
    main(benchmark_factors_import)
    dict_benchfactors = {}
    for row in csvreadfile:
        # substitute NaN for missing ('NA') entries before numeric conversion
        cleaned = [float('nan') if entry == 'NA' else entry for entry in row]
        seasonnum = int(cleaned[0])
        # columns: 1 = % positive, 2 = P&I mortality proportion, 3 = pediatric deaths,
        # 5 = child hospitalization rate, 6 = adult hospitalization rate
        dict_benchfactors[seasonnum] = (float(cleaned[1]), float(cleaned[2]), float(cleaned[3]), float(cleaned[5]), float(cleaned[6]))
    return dict_benchfactors
##############################################
def benchmark_import (csv_cdcseverity, index_col):
    ''' Import CDC_Source/Import_Data/cdc_severity_index.csv data, which includes z-normalized contributors to CDC severity index. These data include: percent of positive flu lab tests, proportion of mortality due to P&I, pediatric deaths, 5-17 years hospitalization rate, and 18-49 years hospitalization rate. Outpatient ILI is included in the index in the 7th column. The index in the 8th column does not include outpatient ILI. All data sources are not available for every season. Flu season includes weeks 40 to 17 (instead of the standard weeks 40 to 20). Return dictionary with season to benchmark index value.
    dict_benchmark[seasonnum] = CDC benchmark index value
    '''
    main(benchmark_import)
    # map season number directly to the index value in the requested column
    dict_benchmark = {}
    for row in csv_cdcseverity:
        dict_benchmark[int(row[0])] = float(row[index_col])
    return dict_benchmark
##############################################
def cdc_import_CFR_CHR (csv_allcdc):
    ''' Import CDC_Source/Import_Data/all_cdc_source_data.csv, which includes weekly CDC source data from multiple surveillance systems from October 1997 to December 2013. Export season-level case fatality proxy and flu hospitalization rate using P&I deaths, lab-confirmed hospitalization rates per 100,000, and ILI cases as numerators, numerators, and denominators, respectively. Note: These are not the same as case-hospitalization or case-fatality rates.
    Returns: dict_CHR, dict_CFR, dict_deaths, dict_ILI
    dict_CHR[seasonnum] = cumulative lab-confirmed case-hospitalization rate over the period from week 40 to week 17 during flu season
    dict_CFR[seasonnum] = (P&I share of all deaths) / (ILI share of all patient visits) over flu weeks
    dict_deaths[seasonnum] = (P&I deaths, all-cause deaths) over flu weeks
    dict_ILI[seasonnum] = (ILI cases, all patients) over flu weeks
    '''
    main(cdc_import_CFR_CHR)
    dict_deaths_ILI_counts, dict_CHR = {}, {}
    for row in csv_allcdc:
        year, week, season = str(row[1][2:]), str(row[2]), str(row[12])
        dummyvals = [row[19], row[13], row[27], row[28]] # PI_deaths, allcoz_deaths, ILI, allpatients
        # convert 'NA' strings to NaN before float conversion
        PI_deaths, allcoz_deaths, ILI, allpatients = [float('nan') if val == 'NA' else float(val) for val in dummyvals]
        CHR = str(row[26])
        # skip rows with no week number
        if week == 'NA':
            continue
        # reassign year and week as integers after skipping NAs
        year, week = int(year), int(week)
        if int(season) in pseasons:
            # dict_deaths_ILI_counts[(seasonnum, weeknum)] = (P&I deaths, all deaths, ILI cases, total patients)
            dict_deaths_ILI_counts[(int(season), week)] = (PI_deaths, allcoz_deaths, ILI, allpatients)
        # grab cumulative hospitalization rate at week 17 in each plotting season
        if year in pseasons and week == 17:
            # for seasons prior to 2003-04, CHR should be float('nan')
            if CHR == 'NA':
                CHR = float('nan')
            # dict_CHR[seasonnum] = cumulative lab-confirmed case-hospitalization rate per 100,000 over weeks 40-17
            dict_CHR[year] = float(CHR)
    # subset weeks that contribute to each season's P&I mortality and ILI proportion rates (flu weeks 40 to 20)
    # BUG FIX: previous condition (k[1] > 20 and k[1] < 40) selected the summer weeks 21-39, the
    # exact complement of the documented flu-season window.
    dict_deaths_ILI_counts_fluwks = dict([(k, dict_deaths_ILI_counts[k]) for k in dict_deaths_ILI_counts if k[1] >= 40 or k[1] <= 20])
    # sum PI_deaths, allcoz_deaths, ILI, allpatients for each season
    dict_deaths, dict_ILI, dict_CFR = {}, {}, {}
    for s in pseasons:
        # dict_deaths[seasonnum] = (P&I deaths from wks 40 to 20, all cause deaths from wks 40 to 20)
        dict_deaths[s] = (sum([float(dict_deaths_ILI_counts_fluwks[k][0]) for k in dict_deaths_ILI_counts_fluwks if k[0] == s]), sum([float(dict_deaths_ILI_counts_fluwks[k][1]) for k in dict_deaths_ILI_counts_fluwks if k[0] == s]))
        # dict_ILI[seasonnum] = (ILI cases from wks 40 to 20, all patients from wks 40 to 20)
        # BUG FIX: the all-patients element previously re-summed tuple index 2 (ILI cases)
        # instead of index 3 (total patients), making both elements identical.
        dict_ILI[s] = (sum([float(dict_deaths_ILI_counts_fluwks[k][2]) for k in dict_deaths_ILI_counts_fluwks if k[0] == s]), sum([float(dict_deaths_ILI_counts_fluwks[k][3]) for k in dict_deaths_ILI_counts_fluwks if k[0] == s]))
        # dict_CFR[seasonnum] = P&I deaths of all flu season deaths in 122 cities / outpatient ILI cases of all flu season patient visits in ILINet
        dict_CFR[s] = (dict_deaths[s][0]/dict_deaths[s][1])/(dict_ILI[s][0]/dict_ILI[s][1])
    return dict_CHR, dict_CFR, dict_deaths, dict_ILI
# ##############################################
# def classif_zOR_index_state(dict_wk, dict_incid53ls, dict_incid53ls_state, retro_level_string, csv_Thanksgiving):
# ''' Find the retrospective and early warning period start weeks by season. The retrospective period may be designated in two manners -- relative to the national peak incidence week or regional peak incidence week in the flu season. The early warning period is designated relative to the week of Thanksgiving. This function returns a dictionary that can be used by classif_zOR_region_processing to set the classification periods for each region to that which was defined at the national level.
# The week_plotting_dicts (national and state) function should be run before this function. The Thanksgiving_import function is nested within this function. Return dictionary for season to (index of first retro period week, index of first early warning period week).
# dict_classifindex[seasonnum] = (index of first retro period week, index of first early warning period week)
# '''
# main(classif_zOR_index_state)
# # for nation-level peak-based retrospective classification
# # dict_wk[week] = seasonnum, dict_incid53ls[seasonnum] = [ILI wk 40, ILI wk 41,...], dict_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], dict_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
# # for region-level peak-based retrospective classification
# # (don't want to call dict_wk twice), dict_incid53ls_state[(seasonnum, state)] = [ILI wk 40, ILI wk 41,...], dict_OR53ls_state[(seasonnum, state)] = [OR wk 40, OR wk 41, ...], dict_zOR53ls_state[(seasonnum, state)] = [zOR wk 40, zOR wk 41, ...]
# dict_classifindex = {}
# # import Thanksgiving data
# dict_Thanksgiving = Thanksgiving_import(csv_Thanksgiving)
# # states in state-level analysis
# state_keys = list(set([k[1] for k in dict_incid53ls_state]))
# for s, state in product(pseasons, state_keys):
# weekdummy = sorted([key for key in dict_wk if dict_wk[key] == s])
# ILINet_week_OR_processing
# # nation-lvl peak-based retrospective classification
# if retro_level_string == 'nation':
# peak_index = peak_flu_week_index(dict_incid53ls[s]) # 7/31/14 - max among flu weeks
# begin_retro = peak_index - gp_begin_retro_week
# # state-lvl peak-based retrospective classif
# elif retro_level_string == 'state':
# peak_index = peak_flu_week_index(dict_incid53ls_state[(s, state)]) # 7/31/14 - max among flu weeks
# begin_retro = peak_index - gp_begin_retro_week
# else:
# print 'retro_level_string error'
# break
# # Thanksgiving-based early warning classification
# Thx_index = weekdummy.index(dict_Thanksgiving[s])
# begin_early = Thx_index + gp_begin_early_week
# # dict_classifindex[(seasonnum, state)] = (index of first retro period week, index of first early warning period week)
# dict_classifindex[(s, state)] = (begin_retro, begin_early)
# return dict_classifindex
##############################################
def classif_zRR_processing(dict_wk, dict_totIncidAdj53ls, dict_zRR53ls, csv_Thanksgiving):
    ''' Calculate retrospective and early warning zRR classification values for each season: the mean zRR over the retrospective and early warning periods, respectively. The retrospective period is designated relative to the peak incidence week in the flu season; the early warning period is designated relative to the week of Thanksgiving. Period placement is controlled by the gp_* global parameters (normweeks, retro/early duration, begin_retro_week, begin_early_week). The identify_retro_early_weeks function is nested within this function. Returns one dict:
    dict_classifzRR[seasonnum] = (mean retrospective zRR, mean early warning zRR)
    '''
    main(classif_zRR_processing)
    # dict_indices[(season, period ('r' or 'e'))] = (begin index, end index)
    dict_indices = identify_retro_early_weeks(dict_wk, dict_totIncidAdj53ls)
    dict_classifzRR = {}
    for s in pseasons:
        # mean zRR over each classification window ('r' = retrospective, 'e' = early warning)
        period_means = []
        for period in ('r', 'e'):
            start, stop = dict_indices[(s, period)]
            period_means.append(np.mean([dict_zRR53ls[s][i] for i in xrange(start, stop)]))
        dict_classifzRR[s] = (period_means[0], period_means[1])
    return dict_classifzRR
##############################################
def classif_zRR_processing_spatial(dict_wk, dict_spatialTotIncidAdj53ls, dict_spatialZRR53ls, csv_Thanksgiving, spatial_keys):
    ''' Calculate retrospective and early warning zOR classification values for each season and spatial (state/region) combination. Spatial retrospective classifications are tied to the peak adjusted incidence week of the state/region in a given season. Spatial early warning classifications are tied to the week of Thanksgiving in a given season, and are thus the same as the early warning periods at the national level. The identify_retro_early_weeks function is nested within this function. Returns one dict:
    dict_classifzRR_spatial[(season, spatial)] = (mean retrospective zOR, mean early warning zOR)
    '''
    main(classif_zRR_processing_spatial)
    dict_classifzRR_spatial = {}
    for spatial in spatial_keys:
        # incidence lists for this spatial unit only, re-keyed by season so that
        # identify_retro_early_weeks can be reused unchanged
        incid_by_season = dict((k[0], dict_spatialTotIncidAdj53ls[k]) for k in dict_spatialTotIncidAdj53ls if k[1] == spatial)
        # dict_indices[(season, period ('r' or 'e'))] = (begin index, end index)
        dict_indices = identify_retro_early_weeks(dict_wk, incid_by_season)
        for s in pseasons:
            r0, r1 = dict_indices[(s, 'r')]
            e0, e1 = dict_indices[(s, 'e')]
            # mean zOR over the retrospective and early warning windows
            retro_mn = np.mean([dict_spatialZRR53ls[(s, spatial)][i] for i in xrange(r0, r1)])
            early_mn = np.mean([dict_spatialZRR53ls[(s, spatial)][i] for i in xrange(e0, e1)])
            dict_classifzRR_spatial[(s, spatial)] = (retro_mn, early_mn)
    return dict_classifzRR_spatial
##############################################
def contributions_CAO_to_attack(dict_wk, dict_incid):
    ''' Import dict_wk and dict_incid. Sum values in dict_incid for children, adults, and other age groups to get an attack rate for each season. The sum of the child, adult, and other attack rates is the total attack rate. Calculate the percentage contribution of each age group to the total attack rate for each season, in preparation to plot data in a stacked 100% bar chart. The flu season is defined as weeks 40 to 20.
    dict_wk[week] = seasonnum
    dict_incid[week] = (child ILI cases per 100,000, adult incid per 100,000, other age group ILI cases per 100,000)
    dict_perc_totAR[seasonnum] = (% contribution of child AR, % of adult AR, % of other ages AR)
    dict_tot_attack[seasonnum] = total attack rate for weeks 40 to 20 per 100,000
    '''
    main(contributions_CAO_to_attack)
    dict_perc_totAR, dict_tot_attack = {}, {}
    for s in pseasons:
        # weeks of this season in chronological order
        season_weeks = [wk for wk in sorted(dict_wk) if dict_wk[wk] == s]
        # per-age attack rates per 100,000 over flu weeks only (first 33 entries: wks 40-20)
        attackC = sum([dict_incid[wk][0] for wk in season_weeks][:33])
        attackA = sum([dict_incid[wk][1] for wk in season_weeks][:33])
        attackO = sum([dict_incid[wk][2] for wk in season_weeks][:33])
        # total attack rate per 100,000 by season for export
        total = float(attackC + attackA + attackO)
        dict_tot_attack[s] = total
        # percentage contribution of each age group to the seasonal total
        dict_perc_totAR[s] = (attackC/total*100, attackA/total*100, attackO/total*100)
    return dict_perc_totAR, dict_tot_attack
##############################################
def cum_incid_at_classif(dict_wk, dict_incid53ls, dict_Thanksgiving, snum):
''' For a given season, calculate the cumulate incidence percentage for the weeks in the retrospective and early warning periods.
'''
# main(cum_incid_at_classif)
weekdummy = sorted([key for key in dict_wk if dict_wk[key] == snum])
# total season incidence
tot_incid = float(sum(dict_incid53ls[snum][:gp_fluweeks]))
# peak-based retrospective classification
peak_index = peak_flu_week_index(dict_incid53ls[snum])
print 'pk ix', peak_index
begin_retro = peak_index - gp_begin_retro_week
# list of week indices in retrospective period
retro_indices = xrange(begin_retro, begin_retro+gp_retro_duration)
cum_incid_retro = [sum(dict_incid53ls[snum][:i+1]) for i in retro_indices] # cumulative incidence up to and including index week
cum_perc_incid_retro = [incid/tot_incid*100 for incid in cum_incid_retro]
# Thanksgiving-based early warning classification
Thx_index = weekdummy.index(dict_Thanksgiving[snum])
begin_early = Thx_index + gp_begin_early_week
# list of week indices in early warning period
early_indices = xrange(begin_early, begin_early+gp_early_duration)
cum_incid_early = [sum(dict_incid53ls[snum][:i+1]) for i in early_indices] # cumulative incidence up to and including index week
cum_perc_incid_early = [incid/tot_incid*100 for incid in cum_incid_early]
return cum_perc_incid_retro, cum_perc_incid_early
##############################################
def epidemic_duration(incid53ls, min_cum_perc, max_cum_perc):
    ''' Return the number of weeks in an epidemic, given a list of the incidence curve for a complete season and the definition of epidemic duration. Epidemic duration is defined by the cumulative percentage of incidence during the flu epidemic.
    '''
    # cumulative percentage of seasonal incidence, week by week, over flu weeks only
    flu_incid = incid53ls[:gp_fluweeks]
    cum_perc = list(np.cumsum(flu_incid)/float(sum(flu_incid))*100)
    # first week past the lower threshold and first week past the upper threshold
    first_wk = bisect.bisect(cum_perc, min_cum_perc)
    last_wk = bisect.bisect(cum_perc, max_cum_perc)
    return last_wk - first_wk + 1
##############################################
def identify_retro_early_weeks(dict_wk, dict_incid53ls):
    ''' Identify weeks in the early warning and retrospective periods for each season using indices in list for incidence.
    The retrospective window starts gp_begin_retro_week weeks before the season's peak incidence week and runs for gp_retro_duration weeks; the early warning window starts gp_begin_early_week weeks after Thanksgiving and runs for gp_early_duration weeks.
    Returns one dict: dict_indices[(snum, classif period)] = (first index, last index for index slicing)
    '''
    main(identify_retro_early_weeks)
    # import Thanksgiving data
    # NOTE(review): reads the module-level csv_Thanksgiving reader, not a parameter —
    # callers that accept a csv_Thanksgiving argument shadow the same global; confirm intended.
    dict_Thanksgiving = Thanksgiving_import(csv_Thanksgiving)
    dict_indices = {}
    for s in pseasons:
        # weeks of season s in chronological order; list position == incidence-list index
        weekdummy = sorted([wk for wk in dict_wk if dict_wk[wk] == s])
        # identify retrospective week indices (anchored to peak incidence week)
        peak_index = peak_flu_week_index(dict_incid53ls[s])
        begin_retro = peak_index - gp_begin_retro_week
        end_retro = begin_retro + gp_retro_duration
        # identify early warning week indices (anchored to Thanksgiving week)
        Thx_index = weekdummy.index(dict_Thanksgiving[s])
        begin_early = Thx_index + gp_begin_early_week
        end_early = begin_early + gp_early_duration
        # create dictionary with early warning and retrospective indices by season
        # (end indices are exclusive, suitable for slicing/xrange)
        dict_indices[(s, 'r')] = (begin_retro, end_retro)
        dict_indices[(s, 'e')] = (begin_early, end_early)
    return dict_indices
# ##############################################
# fixdef identify_retro_early_weeks_spatial(dict_wk, dict_incid53ls, dict_incid53ls_reg, retro_level_string, csv_Thanksgiving):
# ''' Find the retrospective and early warning period start weeks by season. The retrospective period is designated relative to the spatial peak incidence week in the flu season. The early warning period is designated relative to the week of Thanksgiving. This function returns a dictionary that can be used by classif_zOR_region_processing to set the classification periods for each region to that which was defined at the national level.
# The week plotting dicts (national and regional) must be run before this function. The Thanksgiving_import function is nested within this function. Return dictionary for season to (index of first retro period week, index of first early warning period week).
# dict_classifindex[seasonnum] = (index of first retro period week, index of first early warning period week)
# '''
# main(classif_zOR_index)
# # for nation-level peak-based retrospective classification
# # dict_wk[week] = seasonnum, dict_incid53ls[seasonnum] = [ILI wk 40, ILI wk 41,...], dict_OR53ls[seasonnum] = [OR wk 40, OR wk 41, ...], dict_zOR53ls[seasonnum] = [zOR wk 40, zOR wk 41, ...]
# # for region-level peak-based retrospective classification
# # (don't want to call dict_wk twice), dict_incid53ls_reg[(seasonnum, region)] = [ILI wk 40, ILI wk 41,...], dict_OR53ls_reg[(seasonnum, region)] = [OR wk 40, OR wk 41, ...], dict_zOR53ls_reg[(seasonnum, region)] = [zOR wk 40, zOR wk 41, ...]
# dict_classifindex = {}
# # import Thanksgiving data
# dict_Thanksgiving = Thanksgiving_import(csv_Thanksgiving)
# for s, r in product(pseasons, gp_plotting_regions):
# weekdummy = sorted([key for key in dict_wk if dict_wk[key] == s])
# # nation-lvl peak-based retrospective classification
# if retro_level_string == 'nation':
# peak_index = peak_flu_week_index(dict_incid53ls[s]) # 7/31/14 - max among flu weeks
# begin_retro = peak_index - gp_begin_retro_week
# # region-lvl peak-based retrospective classif
# elif retro_level_string == 'region':
# peak_index = peak_flu_week_index(dict_incid53ls_reg[(s, r)]) # 7/31/14 - max among flu weeks
# begin_retro = peak_index - gp_begin_retro_week
# else:
# print 'retro_level_string error'
# break
# # Thanksgiving-based early warning classification
# Thx_index = weekdummy.index(dict_Thanksgiving[s])
# begin_early = Thx_index + gp_begin_early_week
# # dict_classifindex[(seasonnum, region)] = (index of first retro period week, index of first early warning period week)
# dict_classifindex[(s, r)] = (begin_retro, begin_early)
# return dict_classifindex
##############################################
def ILI_AR(csv_SDI):
    ''' Import data of the format: season, week, year, week number, ILI cases, any diagnosis cases, total population size (SQL_export/F1.csv, SQL_export/Supp_acuteILI_wk.csv). Return dictionary dict_facilitytypeAR[season] = ILI cases/total population * 100,000.
    '''
    # week -> season and week -> (ILI cases, population) lookups
    dict_wk, dict_ILI_dummy = {}, {}
    for row in csv_SDI:
        seasonnum = int(row[0])
        datestr = row[1]
        wkdate = date(int(datestr[:4]), int(datestr[5:7]), int(datestr[8:]))
        dict_wk[wkdate] = seasonnum
        # population (row[6]) is constant for every entry within the same calendar year
        dict_ILI_dummy[wkdate] = (float(row[4]), int(row[6]))
    # attack rate per 100,000 over the flu season (gp_fluweeks long) for each season present
    dict_facilitytypeAR = {}
    for s in set(dict_wk.values()):
        fluweeks = sorted([wk for wk in dict_wk if dict_wk[wk] == s])[:gp_fluweeks]
        ILItot = sum([dict_ILI_dummy[wk][0] for wk in fluweeks])
        # denominator: population in the second calendar year of the flu season
        dict_facilitytypeAR[s] = ILItot/dict_ILI_dummy[fluweeks[-1]][1] * 100000
    return dict_facilitytypeAR
# ##############################################
# def ILINet_week_OR_processing(csv_incidence, csv_population):
# ''' Import CDC_Source/Import_Data/all_cdc_source_data.csv, which includes unique id, year, week, age group, and ILI incid. Import Census/Import_Data/totalpop_age_Census_98-14.csv, which includes season, age group code, and US population. Return dictionary with week to season number, week to ILI cases per 100,000 in total US population, and dictionary with week to OR. OR attack rates for children and adults will be calculated based on popstat variable of the population in the second calendar year of the flu season (eg. 2001-02 season is based on 2002 population). In ILINet, children are 5-24 years and adults are 25-64 years. In totalpop_age.csv, children are 5-19 years and adults are 20-59 years.
# dict_wk[week] = seasonnum
# dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season
# dict_OR[week] = OR
# '''
# main(ILINet_week_OR_processing)
# ## import ILI data ##
# # dict_ILI_week[(week, agegroup code)] = ILI cases; dict_wk[week] = seasonnum, S1 = 2000-01
# dict_ILI_week, dict_wk = {}, {}
# for row in csv_incidence:
# row_cl = [float('nan') if val == 'NA' else val for val in row]
# week = str(row_cl[0])+'0' # additional 0 represents Sunday
# wktime = datetime.strptime(week, '%Y%U%w') # format = 4-dig year, 2-dig week beginning on Monday (8/10/14), digit representing day of week; data are from one week later than week number listed on plots (for both ILINet and SDI data).
# wk = datetime.date(wktime) # remove the time from the datetime format
# dict_ILI_week[(wk, 'C')] = float(row_cl[33])
# dict_ILI_week[(wk, 'A')] = float(row_cl[34])
# dict_ILI_week[(wk, 'O')] = float(row_cl[27])-float(row_cl[33])-float(row_cl[34])
# dict_wk[wk] = int(row_cl[12])
# ## import population data ##
# dict_pop = {}
# for row in csv_population:
# season = int(row[0])
# agecode = row[1]
# # dict_pop[(season, agegroup code)] = population size of agegroup
# dict_pop[(season, agecode)] = int(row[2])
# # generate incidence per 100,000 in US population and OR at the weekly level
# dict_incid, dict_OR = {}, {}
# for wk in dict_wk:
# s = dict_wk[wk]
# # dict_incid[week] = ILI incidence per 100,000 in US pop in second calendar year of flu season
# tot_incid = sum([dict_ILI_week[(wk, age)] for age in age_keys])/sum([dict_pop[(s, age)] for age in age_keys]) * 100000
# dict_incid[wk] = tot_incid
# # dict_OR[week] = OR
# child_attack = dict_ILI_week[(wk, 'C')]/dict_pop[(s, 'C')]
# adult_attack = dict_ILI_week[(wk, 'A')]/dict_pop[(s, 'A')]
# OR = (child_attack/(1-child_attack))/(adult_attack/(1-adult_attack))
# dict_OR[wk] = 1/float(OR) # 10/14/14 swap age
# return dict_wk, dict_incid, dict_OR
##############################################
def normalize_attackCA(dict_wk, dict_ageIncidAdjust53ls):
    ''' Import dict_wk and dict_ageIncidAdjust53ls. Sum values for children and adults to get an attack rate for each season (flu season = weeks 40 to 20). Normalize the child and adult attack rates as the percentage deviation from the all-season average attack rate for that age group. week_incidCA_processing should be run first. Returns one dict:
    dict_attackCA_norm[seasonnum] = (% dev from baseline child attack rate, % dev from baseline adult attack rate)
    '''
    main(normalize_attackCA)
    # seasonal (child, adult) attack rates per 100,000, summed over flu weeks only
    dict_attackCA = {}
    for s in pseasons:
        dict_attackCA[s] = (sum(dict_ageIncidAdjust53ls[(s, 'C')][:gp_fluweeks]), sum(dict_ageIncidAdjust53ls[(s, 'A')][:gp_fluweeks]))
    # all-season average attack rate for each age group (normalization baseline)
    avgC = float(np.mean([v[0] for v in dict_attackCA.values()]))
    avgA = float(np.mean([v[1] for v in dict_attackCA.values()]))
    # percent deviation of each season's attack rate from the baseline average
    dict_attackCA_norm = {}
    for s in pseasons:
        childAR, adultAR = dict_attackCA[s]
        dict_attackCA_norm[s] = ((childAR/avgC - 1)*100, (adultAR/avgA - 1)*100)
    return dict_attackCA_norm
##############################################
def normalize_incidCA(dict_wk, dict_ageIncidAdjust53ls):
    ''' Import dict_wk and dict_ageIncidAdjust53ls and normalize incidence values in dict_ageIncidAdjust53ls by the maximum child incidence value during a flu season. week_incidCA_processing should be run first.
    dict_ageIncidAdjustNorm53ls[(seasonnum, age)] = [norm incid wk40, norm incid wk41, ...]
    '''
    main(normalize_incidCA)
    dict_ageIncidAdjustNorm53ls = defaultdict(list)
    for s in pseasons:
        # season's peak child incidence during flu weeks (wks 40 to 20) is the scale factor
        peakC = max(dict_ageIncidAdjust53ls[(s, 'C')][:gp_fluweeks])
        # scale both the child and adult full-year curves by the child peak
        for age in ('C', 'A'):
            dict_ageIncidAdjustNorm53ls[(s, age)] = [incid/peakC for incid in dict_ageIncidAdjust53ls[(s, age)]]
    return dict_ageIncidAdjustNorm53ls
##############################################
def peak_flu_week_index(incid53ls):
    ''' Return the index of the peak incidence week within the flu season (first gp_fluweeks entries) when passed a list of weekly incidence for the entire year (weeks 40 to 39).
    '''
    main(peak_flu_week_index)
    # restrict the max to flu weeks, then locate its position in the full-year list
    flu_peak = max(incid53ls[:gp_fluweeks])
    return incid53ls.index(flu_peak)
##############################################
def proportion_ILI_anydiag(csv_SDI):
    ''' Import data of the format: season, week, year, week number, ILI cases, any diagnosis cases, total population size (SQL_export/F1.csv, SQL_export/Supp_acuteILI_wk.csv). Return dictionary dict_ILI_anydiag[season] = ILI cases/any diagnosis cases.
    '''
    # week -> season and week -> (ILI cases, any-diagnosis cases) lookups
    dict_wk, dict_prop_dummy = {}, {}
    for row in csv_SDI:
        seasonnum = int(row[0])
        datestr = row[1]
        wkdate = date(int(datestr[:4]), int(datestr[5:7]), int(datestr[8:]))
        dict_wk[wkdate] = seasonnum
        dict_prop_dummy[wkdate] = (float(row[4]), int(row[5]))
    # season-level ILI proportion of all visits, flu weeks only
    dict_ILI_anydiag = {}
    for s in set(dict_wk.values()):
        fluweeks = sorted([wk for wk in dict_wk if dict_wk[wk] == s])[:gp_fluweeks]
        ILItot = sum([dict_prop_dummy[wk][0] for wk in fluweeks])
        alltot = sum([dict_prop_dummy[wk][1] for wk in fluweeks])
        dict_ILI_anydiag[s] = ILItot/alltot
    return dict_ILI_anydiag
##############################################
def readNationalClassifFile(national_file):
    ''' Import national classification file (season, mn_retro, mn_early) into dict.
    '''
    main(readNationalClassifFile)
    # dict_national_classif[season] = (mean retrospective zOR, mean early warning zOR)
    dict_national_classif = {}
    for record in national_file:
        dict_national_classif[int(record[0])] = (float(record[1]), float(record[2]))
    return dict_national_classif
##############################################
def readStateClassifFile(state_file):
    ''' Import state classification file (season, state, mn_retro, mn_early) into dict.
    '''
    main(readStateClassifFile)
    # dict_state_classif[(season, state)] = (mean retrospective zOR, mean early warning zOR)
    dict_state_classif = {}
    for record in state_file:
        key = (int(record[0]), str(record[1]))
        dict_state_classif[key] = (float(record[2]), float(record[3]))
    return dict_state_classif
##############################################
def region_state_dictionary():
    ''' Create dictionary with HHS region number and list of states in continental US in that region. Drawing state-level choropleths in ggplot2 requires a dataset with state names and the value for the choropleth.
    dict_region_state[region number] = [state1 in region, state2 in region, ...]
    '''
    main(region_state_dictionary)
    # static HHS region -> continental-US state assignments (lowercase, ggplot2-ready)
    region_assignments = {
        1: ["connecticut", "maine", "massachusetts", "new hampshire", "rhode island", "vermont"],
        2: ["new york", "new jersey"],
        3: ["delaware", "district of columbia", "maryland", "pennsylvania", "virginia", "west virginia"],
        4: ["alabama", "florida", "georgia", "kentucky", "mississippi", "north carolina", "south carolina", "tennessee"],
        5: ["illinois", "indiana", "michigan", "minnesota", "ohio", "wisconsin"],
        6: ["arkansas", "louisiana", "new mexico", "oklahoma", "texas"],
        7: ["iowa", "kansas", "missouri", "nebraska"],
        8: ["colorado", "montana", "north dakota", "south dakota", "utah", "wyoming"],
        9: ["arizona", "california", "nevada"],
        10: ["idaho", "oregon", "washington"],
    }
    # keep the defaultdict(list) return type the callers expect
    dict_region_state = defaultdict(list)
    dict_region_state.update(region_assignments)
    return dict_region_state
##############################################
def season_H3perc_CDC(csvreadfile):
	''' Import SQL_EXPORT/subtype5.csv data, which includes information on prominent subtype, subtypes of isolates that were identified, and isolates that match with the vaccine strains. Return a dictionary with season and proportion of H3 isolates of all isolates collected that season. The original source of isolate information is the CDC Flu Season Summaries, CDC surveillance system (not the WHO/NREVSS system).
	dict_H3[seasonnum] = proportion of H3 isolates of all isolates collected that season
	'''
	main(season_H3perc_CDC)
	dict_dummy = {}
	for row in csvreadfile:
		# column 5 = H3 isolate count, column 7 = total isolate count
		# (removed unused H1i/Bi conversions of columns 4 and 6)
		H3i, TOTi = float(row[5]), float(row[7])
		season = int(row[0]) # season number
		dict_dummy[season] = H3i/TOTi
	# include only seasons in module-level pseasons in returned dictionary
	# dict_H3[seasonnum] = proportion H3 isolates of all isolates collected that season
	dict_H3 = dict((s, dict_dummy[s]) for s in pseasons)
	return dict_H3
##############################################
def season_H3perc_NREVSS(csvreadfile):
	''' Import My_Work/Clean_Data_for_Import/NREVSS_Isolates_Season_improved.csv data, which includes information on year, number of samples positive for flu, A samples, B samples, subtyped A samples, A/H1 samples, A/H3 samples, B samples, A/2009H1N1 samples, total speciments tested. Return a dictionary with season and proportion of H3 isolates of all subtyped flu isolates collected that season. The original source of isolate information is the CDC Flu Season Summaries, WHO NREVSS surveillance system (not the CDC system).
	dict_H3[seasonnum] = proportion of H3 isolates of all isolates collected that season
	'''
	main(season_H3perc_NREVSS)
	prop_by_season = {}
	for row in csvreadfile:
		# subtyped isolate counts for the season
		A_H1, A_H3, A_09, B, H3N2v = int(row[2]), int(row[4]), int(row[5]), int(row[6]), int(row[7])
		subtyped_total = A_H1 + A_H3 + A_09 + B + H3N2v
		# float cast keeps the division below a true division under Python 2
		H3_count = float(row[4])
		season_num = int(row[0]) - 2000 # season number
		prop_by_season[season_num] = H3_count/subtyped_total
	# restrict to the study seasons (module-level pseasons)
	return dict((s, prop_by_season[s]) for s in pseasons)
##############################################
def season_vaxmatch(csvreadfile):
	''' Import SQL_EXPORT/subtype5.csv data, which includes information on prominent subtype, subtypes of isolates that were identified, and isolates that match with the vaccine strains. Return a dictionary with season and proportion of isolates that match the trivalent vaccine of total isolates subtyped. The original source of isolate information is the CDC Flu Season Summaries, CDC surveillance system.
	dict_vaxmatch[seasonnum] = proportion of isolates matching the vaccine strain out of the total number of isolates subtyped.
	'''
	main(season_vaxmatch)
	dict_vaxmatch = {}
	for row in csvreadfile:
		# column 7 = total isolates, column 11 = isolates matching the vaccine strain
		total_isolates = float(row[7])
		matched_isolates = float(row[11])
		dict_vaxmatch[int(row[0])] = matched_isolates/total_isolates
	return dict_vaxmatch
##############################################
def Thanksgiving_H3perc_NREVSS(csvreadfile):
	''' Import My_Bansal_Lab/Clean_Data_for_Import/NREVSS_Isolates_Thanksgiving.csv data, which includes information on seasons (eg. 2004 is 2003-04 season), total specimens tested, A/H1 samples, A/unable to subtype, A/H3 samples, A/2009H1N1 samples, B samples, H3N2v samples. Return a dictionary with season and proportion of H3 isolates of all subtyped flu isolates collected that season. The original source of isolate information is the CDC Flu Season Summaries, WHO NREVSS surveillance system (not the CDC system).
	dict_H3[seasonnum] = proportion of H3 isolates of all isolates collected that season
	'''
	main(Thanksgiving_H3perc_NREVSS)
	prop_by_season = {}
	for row in csvreadfile:
		# a_H3 is read as float so the proportion below is a true division (Python 2)
		a_H1, a_09, B, H3N2v = int(row[2]), int(row[5]), int(row[6]), int(row[7])
		a_H3 = float(row[4])
		subtyped_total = a_H1 + a_H3 + a_09 + B + H3N2v
		season_num = int(row[0]) - 2000 # season number
		prop_by_season[season_num] = a_H3/subtyped_total
	# restrict to the study seasons (module-level pseasons)
	return dict((s, prop_by_season[s]) for s in pseasons)
##############################################
def Thanksgiving_import(csv_Thanksgiving):
	''' Import Thanksgiving data from My_Bansal_Lab/ThanksgivingWeekData_cl.csv. Columns in dataset are year, week, total number of specimens, A/H1 samples, A/unable to subtype samples, A/H3 samples, A/2009H1N1 samples, A/no subtype information samples, B samples, A/H3N2v samples, percent of samples positive for flu, HHS region number, unique ID, season (the second calendar year of the flu season), date of the Sunday immediately preceding Thanksgiving. Return a dictionary with season to Sunday date of Thanksgiving week. These dates are used to determine which weeks fall under the early warning classification period.
	dict_Thanksgiving[seasonnum] = date of the Sunday immediately preceding Thanksgiving
	'''
	main(Thanksgiving_import)
	dict_Thanksgiving = {}
	for row in csv_Thanksgiving:
		# column 14 stores the Sunday date as MM/DD/YYYY
		datestring = row[14]
		mon, day, yr = int(datestring[:2]), int(datestring[3:5]), int(datestring[6:])
		# 6/18/14, so season 1998 is converted to -2
		season_num = int(row[13]) - 2000
		dict_Thanksgiving[season_num] = date(yr, mon, day)
	return dict_Thanksgiving
##############################################
def week_anydiag_processing(csv_anydiag):
	''' Import SQL_export/anydiag_allweeks_outpatient.csv and calculate number of visits/population per 100,000. dict_any[week] = visits per 100,000 in US pop in calendar of data week, dict_any53ls
	'''
	main(week_anydiag_processing)
	dict_wk, dict_any = {}, {}
	dict_any53ls = defaultdict(list)
	for row in csv_anydiag:
		# week string is formatted YYYY-MM-DD
		datestr = row[1]
		wk = date(int(datestr[:4]), int(datestr[5:7]), int(datestr[8:]))
		dict_wk[wk] = int(row[0])
		# visits per 100,000 population in the calendar week
		dict_any[wk] = float(row[2])/int(row[3])*100000
	# plotting version of dict_any: one 53-length series per season
	for s in pseasons:
		season_series = [dict_any[wk] for wk in sorted(dict_wk) if dict_wk[wk] == s]
		if len(season_series) == 52:
			# 52-week seasons get a synthetic value interpolated between indexes 12 and 13
			season_series.insert(13, (season_series[12]+season_series[13])/2.)
		dict_any53ls[s] = season_series
	return dict_wk, dict_any, dict_any53ls
##############################################
def week_incidCA_processing(csv_incidence, csv_population):
	''' Import SQL_export/OR_allweeks_outpatient.csv data (or other OR_allweeks...csv data), which includes season number, week, age group, and ILI incid. Import SQL_export/totalpop_age.csv data, which includes calendar year, age group, and US population. Includes coverage and ILI care-seeking adjustment. Return two dicts:
	dict_wk[week] = seasonnum
	dict_ageIncidAdjust53ls[(season, age)] = [adj incid per 100000 wk 40, ... wk 39]
	'''
	main(week_incidCA_processing)
	## import ILI data ##
	# dict_ILI_week[(week, agegroup code)] = ILI cases; dict_wk[week] = seasonnum
	dict_ILI_week, dict_wk = {}, {}
	for row in csv_incidence:
		# week string is formatted YYYY-MM-DD
		week = row[1]
		wk = date(int(week[:4]), int(week[5:7]), int(week[8:]))
		dict_ILI_week[(wk, str(row[2]))] = float(row[3])
		dict_wk[wk] = int(row[0])
	# fill dict_ILI_week with 0 if ILI cases for a certain age group are missing
	# (age_keys is a module-level global listing the agegroup codes)
	print 'missing ILI wks and ages'
	for wk, age in product(dict_wk, age_keys):
		if (wk, age) not in dict_ILI_week:
			print (wk, age)
			dict_ILI_week[(wk, age)] = float(0)
	## import population data ##
	dict_pop_age = {}
	for row in csv_population:
		calendar_year = str(row[0])
		# season number = last two digits of the calendar year
		season = int(calendar_year[2:])
		age = row[1]
		# dict_pop_age[(seasonnum, age in text)] = population
		dict_pop_age[(season, age)] = int(row[2])
	# dict_pop[(season, agegroup code)] = population size of agegroup
	seasons = list(set([k[0] for k in dict_pop_age]))
	age_texts = list(set([k[1] for k in dict_pop_age])) # age bins
	dict_pop = {}
	# dict_ages (module-level) maps an agegroup code to the age-bin labels it covers
	for s, ak in product(seasons, age_keys):
		dict_pop[(s, ak)] = float(sum([dict_pop_age[(s, at)] for at in age_texts if at in dict_ages[ak]]))
	# adjust ILI cases by increasing coverage over time and constant age-specific ILI seeking behavior (age-specific and total ILI cases)
	# NOTE(review): hard-coded absolute path -- breaks outside the original machine; confirm before reuse
	fname = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/anydiag_allweeks_outpatient_age.csv'
	# dict_ageILIAdjust53ls[(season, age)] = [ILI * (visits in flu season 9)/(visits in flu season #)/(ILI care-seeking behavior) wk 40, ...wk 39]
	dict_ageILIAdjust53ls, _, _ = coverageCareseek_adjustment(fname, dict_ILI_week)
	# calculate adjusted incidence rate by age group and season
	# dict_ageIncidAdjust53ls[(season, age)] = [adj incid per 100000 wk 40, ... wk 39]
	dict_ageIncidAdjust53ls = defaultdict(list)
	for s, age in product(pseasons, age_keys):
		dict_ageIncidAdjust53ls[(s, age)] = [adjILI/dict_pop[(s, age)]*100000 for adjILI in dict_ageILIAdjust53ls[(s, age)]]
	return dict_wk, dict_ageIncidAdjust53ls
##############################################
def coverageCareseek_adjustment(filename_anydiagAge, dict_ILI_week):
	''' Import any diagnosis visits by week and age group. Adjust age-specific ILI and total ILI by changing coverage by season (visits in season 9/visits in season #) and ILI-seeking behavior. Function argument is the filename string. Return three dicts: dict_ageILIAdjust53ls[(season, age)] = [ILI * (visits in flu season 9)/(visits in flu season #)/(ILI care-seeking behavior) wk 40, ...wk 39]; dict_totILI53ls[season] = [ILI wk 40,... ILI wk 39]; dict_totILIAdjust53ls = [adj ILI wk 40, ... adj ILI wk 39]
	'''
	main(coverageCareseek_adjustment)
	dict_anyvisit_week, dict_wk = {},{}
	# resource-leak fix: the file handle was previously opened but never closed
	with open(filename_anydiagAge, 'r') as anydiagin:
		anydiagin.readline() # rm header
		anydiag = csv.reader(anydiagin, delimiter=',')
		for row in anydiag:
			season = int(row[0])
			# week string is formatted YYYY-MM-DD
			week = row[1]
			wk = date(int(week[:4]), int(week[5:7]), int(week[8:]))
			agecode = str(row[2])
			visits = int(row[3])
			dict_anyvisit_week[(wk, agecode)] = float(visits)
			dict_wk[wk] = (season, wk.isocalendar()[1]+1) # isocalendar wk number+1 because isocalendar goes from Monday to Sunday
	# estimate number of ILI cases that would have been captured in the dataset had the coverage been at 08-09 flu season levels for all years
	dict_anyvisit_season, dict_ili_season, dict_totILI53ls, dict_totILIAdjust53ls = {}, defaultdict(list), defaultdict(list), defaultdict(list)
	for s in pseasons:
		dummyweeks = sorted([wk for wk in dict_wk if dict_wk[wk][0] == s])
		for age in age_keys:
			Visits = [dict_anyvisit_week[(wk, age)] for wk in dummyweeks]
			ILI = [dict_ILI_week[(wk, age)] for wk in dummyweeks]
			# 52-week seasons get a synthetic value interpolated between indexes 12 and 13 so all seasons are 53 weeks long
			if len(dummyweeks) == 52:
				Visits.insert(13, (Visits[12]+Visits[13])/2.)
				ILI.insert(13, (ILI[12]+ILI[13])/2.)
			dict_anyvisit_season[(s, age)] = sum(Visits[:gp_fluweeks]) # total any diagnosis visits during flu season
			dict_ili_season[(s, age)] = ILI
		# list of lists for ili counts for all age groups
		all_lists = [dict_ili_season[(s, age)] for age in age_keys]
		# raw ili time series by season
		dict_totILI53ls[s] = [sum(ili) for ili in zip(*all_lists)]
	# create total incidence dict with coverage and ILI care-seeking adjustments
	for s in pseasons:
		visit9_Tpop = sum([dict_anyvisit_season[(9, age)] for age in age_keys])
		visitS_Tpop = sum([dict_anyvisit_season[(s, age)] for age in age_keys])
		# adjustment for total incidence dict (dict_careseek_nat is a module-level global)
		Tadjustment = visit9_Tpop/visitS_Tpop/dict_careseek_nat['T']
		dict_totILIAdjust53ls[s] = [ili*Tadjustment for ili in dict_totILI53ls[s]]
	# create age-specific incidence dict with coverage and ILI care-seeking behavior adjustments
	dict_ageILIAdjust53ls = defaultdict(list)
	for key in dict_ili_season:
		s, age = key
		careseek = dict_careseek_nat[age] # defined at top
		iliDummy = dict_ili_season[key]
		visit9 = dict_anyvisit_season[(9, age)]
		visitS = dict_anyvisit_season[key]
		# adjust ILI by coverage level in 08-09 flu season and care seeking behavior in that age group
		adjustment = visit9/visitS/careseek
		dict_ageILIAdjust53ls[key] = [ili*adjustment for ili in iliDummy]
	return dict_ageILIAdjust53ls, dict_totILI53ls, dict_totILIAdjust53ls
##############################################
def week_OR_processing(csv_incidence, csv_population):
	''' Import SQL_export/OR_allweeks_outpatient.csv data (or other OR_allweeks...csv data), which includes season number, week, age group, and ILI incid. Import SQL_export/totalpop_age.csv data, which includes calendar year, age group, and US population. Function 'coverageCareseek_adjustment' is nested. Return five dicts:
	dict_wk[wk] = seasonnum
	dict_totIncid53ls[s] = [incid rate per 100000 wk40,... incid rate per 100000 wk 39] (unadjusted ILI incidence)
	dict_totIncidAdj53ls[s] = [adjusted incid rate per 100000 wk 40, ...adj incid wk 39] (total population adjusted for coverage and ILI care-seeking behavior)
	dict_RR53ls[s] = [RR wk 40,... RR wk 39] (children and adults adjusted for SDI data coverage and ILI care-seeking behavior)
	dict_zRR53ls[s] = [zRR wk 40,... zRR wk 39] (children and adults adjusted for SDI data coverage and ILI care-seeking behavior)
	'''
	main(week_OR_processing)
	## import ILI data ##
	# dict_ILI_week[(week, agegroup code)] = ILI cases; dict_wk[week] = seasonnum
	dict_ILI_week, dict_wk = {}, {}
	for row in csv_incidence:
		# week string is formatted YYYY-MM-DD
		week = row[1]
		wk = date(int(week[:4]), int(week[5:7]), int(week[8:]))
		dict_ILI_week[(wk, str(row[2]))] = float(row[3])
		dict_wk[wk] = int(row[0])
	## import population data ##
	dict_pop_age = {}
	for row in csv_population:
		calendar_year = str(row[0])
		# season number = last two digits of the calendar year
		season = int(calendar_year[2:])
		age = row[1]
		# dict_pop_age[(seasonnum, age in text)] = population
		dict_pop_age[(season, age)] = int(row[2])
	# dict_pop[(season, agegroup code)] = population size of agegroup
	seasons = list(set([k[0] for k in dict_pop_age]))
	age_texts = list(set([k[1] for k in dict_pop_age])) # age bins
	dict_pop = {}
	# dict_ages (module-level) maps an agegroup code to the age-bin labels it covers
	for s, ak in product(seasons, age_keys):
		dict_pop[(s, ak)] = float(sum([dict_pop_age[(s, at)] for at in age_texts if at in dict_ages[ak]]))
	# adjust ILI cases by increasing coverage over time and constant age-specific ILI seeking behavior (age-specific and total ILI cases)
	# NOTE(review): hard-coded absolute path -- breaks outside the original machine; confirm before reuse
	fname = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/anydiag_allweeks_outpatient_age.csv'
	# dict_ageILIAdjust53ls[(season, age)] = [ILI * (visits in flu season 9)/(visits in flu season #)/(ILI care-seeking behavior) wk 40, ...wk 39]; dict_totILI53ls[season] = [ILI wk 40,... ILI wk 39]; dict_totILIAdjust53ls = [adj ILI wk 40, ... adj ILI wk 39]
	dict_ageILIadj_season, dict_totILI53ls, dict_totILIadj53ls = coverageCareseek_adjustment(fname, dict_ILI_week)
	dict_totIncid53ls, dict_totIncidAdj53ls, dict_RR53ls, dict_zRR53ls = defaultdict(list), defaultdict(list), defaultdict(list), defaultdict(list)
	# generate adjusted incidence per 100,000 in US population, OR, and zOR at the weekly level
	for s in pseasons:
		# total population in the season
		tot_pop = sum([dict_pop[(s, ak)] for ak in age_keys])
		# totIncid53ls dict
		dict_totIncid53ls[s] = [ili/tot_pop*100000 for ili in dict_totILI53ls[s]]
		# totIncidAdj53ls
		dict_totIncidAdj53ls[s] = [iliAdj/tot_pop*100000 for iliAdj in dict_totILIadj53ls[s]]
		# RR53ls dict: attack rates for children ('C') and adults ('A')
		child_attack = [adjILI/dict_pop[(s, 'C')] for adjILI in dict_ageILIadj_season[(s, 'C')]]
		adult_attack = [adjILI/dict_pop[(s, 'A')] for adjILI in dict_ageILIadj_season[(s, 'A')]]
		# 10/19/14: RR should not be evaluated if child or adult incidence is zero
		# 10/16/14 change OR to relative risk
		RR = [a/c if c and a else float('nan') for c, a in zip(child_attack, adult_attack)]
		dict_RR53ls[s] = RR
		# zRR53ls dict: z-normalize against the mean/sd of the first gp_normweeks weeks of the season
		normalization_period = dict_RR53ls[s][:gp_normweeks]
		season_mean = np.mean(normalization_period)
		season_sd = np.std(normalization_period)
		dict_zRR53ls[s] = [(val-season_mean)/season_sd for val in dict_RR53ls[s]]
	return dict_wk, dict_totIncid53ls, dict_totIncidAdj53ls, dict_RR53ls, dict_zRR53ls
##############################################
def week_import_zip3(csv_incidence_region, csv_population_region):
	''' Import R_export/OR_zip3_week_outpatient_cl.csv data, which includes season number, week, zip3, age group, and ILI incid. Import R_export/popstat_zip3_season_cl.csv data, which includes calendar year, uqsza, popstat, season, age group, state, lat, long, and HHS region. Returns 4 dicts:
	dict_wk[week] = seasonnum
	dict_weekZip3_ili[(wk, zip3, age)] = ili
	dict_seasZip3_pop[(season, zip3, age)] = pop in 2nd calendar year of flu season
	dict_zip3_region[zip3] = (state, hhs)
	'''
	main(week_import_zip3)
	## import ILI data ##
	dict_weekZip3_ili, dict_wk = {}, {}
	for row in csv_incidence_region:
		# week string is formatted YYYY-MM-DD
		datestr = row[1]
		wk = date(int(datestr[:4]), int(datestr[5:7]), int(datestr[8:]))
		dict_wk[wk] = int(row[0])
		dict_weekZip3_ili[(wk, str(row[2]), str(row[3]))] = int(row[4])
	## import population data ##
	dict_seasZip3_pop, dict_zip3_region = {}, {}
	for row in csv_population_region:
		zip3 = str(row[0])
		# population in the second calendar year of the flu season
		dict_seasZip3_pop[(int(row[3]), zip3, str(row[4]))] = int(row[2])
		# zip3 -> (state, HHS region number)
		dict_zip3_region[zip3] = (str(row[5]), int(row[8]))
	return dict_wk, dict_weekZip3_ili, dict_seasZip3_pop, dict_zip3_region
##############################################
def week_ILI_processing_spatial(dict_wk, dict_weekZip3_ili, dict_seasZip3_pop, dict_zip3_region, spatial_level):
	''' Aggregate ILI data from zip3 to state or region level by season and age group. Run 'week_import_zip3' first. Returns two dicts:
	dict_seasSpatialAge_iliLS[(season, spatial, agegroup)] = [ILI cases wk 40, ... wk 39]
	dict_seasSpatial_pop[(season, spatial, agegroup)] = population in 2nd year of flu season
	'''
	main(week_ILI_processing_spatial)
	# branching for state vs. region level analysis
	if spatial_level == 'state':
		spatial_keys = list(set([dict_zip3_region[k][0] for k in dict_zip3_region]))
		# index for state info in dict_zip3_region
		code = 0
	elif spatial_level == 'region':
		spatial_keys = list(set([dict_zip3_region[k][1] for k in dict_zip3_region]))
		# index for region info in dict_zip3_region
		code = 1
	else:
		# robustness fix: previously an unknown level fell through and raised NameError on 'code' below
		raise ValueError("spatial_level must be 'state' or 'region', got %r" % (spatial_level,))
	# aggregate ili to spatial level
	dict_seasSpatialAge_iliLS = defaultdict(list)
	for spatial, season in product(spatial_keys, pseasons):
		dummyzip3 = [zip3 for zip3 in dict_zip3_region if dict_zip3_region[zip3][code] == spatial]
		dummyweeks = sorted([wk for wk in dict_wk if dict_wk[wk] == season])
		# by age group: sum ILI over all zip3s in the spatial unit, week by week
		for age in age_keys:
			dummyallILI_spatial = [[dict_weekZip3_ili.get((wk, zip3, age), 0) for wk in dummyweeks] for zip3 in dummyzip3]
			dict_seasSpatialAge_iliLS[(season, spatial, age)] = [sum(ili) for ili in zip(*dummyallILI_spatial)]
	# aggregate population to spatial level
	dict_seasSpatial_pop = {}
	for s, spatial, ak in product(pseasons, spatial_keys, age_keys):
		dict_seasSpatial_pop[(s, spatial, ak)] = float(sum([dict_seasZip3_pop[(s, z, ak)] for z in dict_zip3_region if dict_zip3_region[z][code] == spatial]))
	return dict_seasSpatialAge_iliLS, dict_seasSpatial_pop
##############################################
def covCareseek_adjustment_spatial(dict_seasSpatialAge_iliLS, dict_zip3_region, spatial_level):
	''' Perform SDI data coverage and ILI care-seeking adjustments at the spatial level (state or region). Analogous to coverageCareseek_adjustment (nation-level) function. Nested in week_RR_processing_spatial function. Returns four items:
	dict_spatialAgeILIadj53ls[(s, spatial, age)] = [adj ILI wk 40, ... wk 39]
	dict_spatialTotILI53ls[(s, spatial)] = [tot pop ILI wk 40, ... wk 39]
	dict_spatialTotAdjILI53ls[(s, spatial)] = [adj tot pop ILI wk 40, ... wk 39]
	spatial_keys = sorted list of states or regions, according to spatial_level argument
	'''
	main(covCareseek_adjustment_spatial)
	# import any diagnosis visit data at zip3 level
	# NOTE(review): placeholder filename -- open('') raises IOError; fill in the real path before running
	visitFilename = ''
	dict_wk, dict_weekZip3Age_visit = {}, {}
	# resource-leak fix: use a context manager so the handle is always closed
	with open(visitFilename, 'r') as anydiagin:
		anydiagin.readline() # rm header
		anydiag = csv.reader(anydiagin, delimiter=',')
		for row in anydiag:
			season = int(row[0])
			week = row[1]
			wk = date(int(week[:4]), int(week[5:7]), int(week[8:]))
			zip3 = str(row[2])
			agecode = str(row[3])
			visits = float(row[4])
			dict_weekZip3Age_visit[(wk, zip3, agecode)] = visits
			dict_wk[wk] = season
	# branching for state vs. region level analysis
	if spatial_level == 'state':
		spatial_keys = sorted(list(set([dict_zip3_region[k][0] for k in dict_zip3_region])))
		code = 0 # index for state info in dict_zip3_region
	elif spatial_level == 'region':
		spatial_keys = sorted(list(set([dict_zip3_region[k][1] for k in dict_zip3_region])))
		code = 1 # index for region info in dict_zip3_region
	else:
		raise ValueError("spatial_level must be 'state' or 'region', got %r" % (spatial_level,))
	# aggregate any diagnosis data to spatial level of interest, add week 53 to any diagnosis and ILI data, create dict for total incidence by spatial level
	# bug fix: dict_spatialTotAdjILI53ls was assigned below but never initialized (NameError)
	dict_spatialAge_visits, dict_spatialAgeILI53ls, dict_spatialTotILI53ls, dict_spatialTotAdjILI53ls = {}, defaultdict(list), defaultdict(list), defaultdict(list)
	for spatial, s in product(spatial_keys, pseasons):
		dummyzip3 = [zip3 for zip3 in dict_zip3_region if dict_zip3_region[zip3][code] == spatial]
		dummyweeks = sorted([wk for wk in dict_wk if dict_wk[wk] == s])
		# by age group
		for age in age_keys:
			# DATA INTERPOLATION: 52-week seasons get a synthetic value between indexes 12 and 13
			dummyallvisits_spatial = [[dict_weekZip3Age_visit.get((wk, zip3, age), 0) for wk in dummyweeks] for zip3 in dummyzip3]
			spatialVisits = [sum(visit) for visit in zip(*dummyallvisits_spatial)]
			spatialILI = dict_seasSpatialAge_iliLS[(s, spatial, age)]
			if len(spatialVisits) == 52:
				spatialVisits.insert(13, (spatialVisits[12]+spatialVisits[13])/2.)
				spatialILI.insert(13, (spatialILI[12]+spatialILI[13])/2.)
			# total visits during flu season
			dict_spatialAge_visits[(s, spatial, age)] = sum(spatialVisits[:gp_fluweeks])
			# weekly ILI cases
			dict_spatialAgeILI53ls[(s, spatial, age)] = spatialILI
		# create raw total incidence dict
		all_lists = [dict_spatialAgeILI53ls[(s, spatial, age)] for age in age_keys]
		dict_spatialTotILI53ls[(s, spatial)] = [sum(ili) for ili in zip(*all_lists)]
	# import dict_careseek_spatial[spatial key] = weighted average of % ILI seeking
	dict_careseek_spatial = create_dict_careseek_spatial(spatial_level)
	# create total incidence dict adjusted for coverage and ILI care seeking # 10/30: convert careseek from age-specific to total pop
	for key in dict_spatialTotILI53ls:
		s, spatial = key
		# bug fix: this comprehension was missing its closing bracket (SyntaxError)
		visit9_Tpop = sum([dict_spatialAge_visits[(9, spatial, age)] for age in age_keys])
		visitS_Tpop = sum([dict_spatialAge_visits[(s, spatial, age)] for age in age_keys])
		# NOTE(review): create_dict_careseek_spatial keys by (spatial, age), so a bare
		# dict_careseek_spatial[spatial] lookup would KeyError -- confirm the intended total-pop key
		Tadjustment = visit9_Tpop/visitS_Tpop/dict_careseek_spatial[spatial]
		dict_spatialTotAdjILI53ls[key] = [ili*Tadjustment for ili in dict_spatialTotILI53ls[key]]
	# create age-specific incidence dict with coverage and ILI care-seeking behavior adjustments
	dict_spatialAgeILIadj53ls = defaultdict(list)
	for key in dict_spatialAgeILI53ls:
		s, spatial, age = key
		careseek = dict_careseek_spatial[(spatial, age)]
		visit9 = dict_spatialAge_visits[(9, spatial, age)]
		visitS = dict_spatialAge_visits[key]
		iliDummy = dict_spatialAgeILI53ls[key]
		# adjust ili by coverage level in 08-09 flu season and ILI care seeking behavior in that age group
		adjustment = visit9/visitS/careseek
		dict_spatialAgeILIadj53ls[key] = [ili * adjustment for ili in iliDummy]
	return dict_spatialAgeILIadj53ls, dict_spatialTotILI53ls, dict_spatialTotAdjILI53ls, spatial_keys
##############################################
def create_dict_careseek_spatial(spatial_level):
	''' 10/30/14 Use Census region estimates of ILI care seeking from Biggerstaff2012 for all states in the region.
	dict_careseek_spatial[(spatial, age)] = proportion of ILI patients seeking care
	'''
	main(create_dict_careseek_spatial)
	# import data
	# NOTE(review): hard-coded absolute path -- breaks outside the original machine; confirm before reuse
	filename = '/home/elee/Dropbox/Elizabeth_Bansal_Lab/CDC_Source/from_Matt/clean/ili_health_care_seek_bystate_summary.csv'
	# dict_careseek_spatial[(spatial, age)] = proportion ILI care-seeking
	dict_careseek_spatial = {} # global dict: dict_careseek_census
	if spatial_level == 'state':
		dict_state_census = {}
		# resource-leak fix: file was opened without being closed (and even when unused);
		# open only in the branch that reads it, inside a context manager
		with open(filename, 'r') as filein:
			filein.readline() # rm header
			csvfile = csv.reader(filein, delimiter=',')
			for row in csvfile:
				stateAbbr, census = str(row[1]), str(row[7])
				dict_state_census[stateAbbr] = census
		# assign ILI care-seeking proportion of state-age combination to same as census-age
		for spatial, age in product(dict_state_census, age_keys):
			# identify census region of state
			censusRegion = dict_state_census[spatial]
			dict_careseek_spatial[(spatial, age)] = dict_careseek_census[(censusRegion, age)]
	# elif spatial_level == 'region':
	# filename = ''
	return dict_careseek_spatial
##############################################
def week_RR_processing_spatial(dict_wk, dict_seasSpatialAge_iliLS, dict_seasSpatial_pop, dict_zip3_region, spatial_level):
	''' Run function week_ILI_processing_state or region first. Function 'covCareseek_adjustment_spatial' is nested. Returns four dicts:
	dict_spatialTotIncid53ls = [(s, spatial)] = [tot incid per 100,000 wk 40, ... wk 39]
	dict_spatialTotIncidAdj53ls = [(s, spatial)] = [adj tot incid per 100,000 wk 40, ... wk 39]
	dict_spatialRR53ls[(s, spatial)] = [RR wk 40, ... wk 39] based on adj ILI
	dict_spatialZRR53ls[(s, spatial)] = [zRR wk 40, ... wk 39] based on adj ILI
	'''
	main(week_RR_processing_spatial)
	# spatial keys == state_keys or region_keys
	dict_spatialAgeILIadj53ls, dict_spatialTotILI53ls, dict_spatialTotAdjILI53ls, spatial_keys = covCareseek_adjustment_spatial(dict_seasSpatialAge_iliLS, dict_zip3_region, spatial_level)
	dict_spatialTotIncid53ls, dict_spatialTotIncidAdj53ls, dict_spatialRR53ls, dict_spatialZRR53ls = defaultdict(list), defaultdict(list), defaultdict(list), defaultdict(list)
	# generate total and adjusted total incidence per 100,000, relative risk in adults:children, and z-normalized relative risk at the specified "spatial_level" (state or region)
	for s, spatial in product(pseasons, spatial_keys):
		# total pop of spatial_level in the season
		# bug fix: was dict_seasState_pop, an undefined name -- use the dict_seasSpatial_pop parameter
		spatial_pop = sum([dict_seasSpatial_pop[(s, spatial, age)] for age in age_keys])
		# spatialTotIncid53ls dict
		dict_spatialTotIncid53ls[(s, spatial)] = [ili/spatial_pop*100000 for ili in dict_spatialTotILI53ls[(s, spatial)]]
		# spatialTotIncidAdj53ls dict
		# bug fix: loop variable was 'ili' while the expression used undefined 'iliAdj'
		dict_spatialTotIncidAdj53ls[(s, spatial)] = [iliAdj/spatial_pop*100000 for iliAdj in dict_spatialTotAdjILI53ls[(s, spatial)]]
		# spatialRR53ls dict: attack rates for children ('C') and adults ('A')
		child_attack = [adjILI/dict_seasSpatial_pop[(s, spatial, 'C')] for adjILI in dict_spatialAgeILIadj53ls[(s, spatial, 'C')]]
		adult_attack = [adjILI/dict_seasSpatial_pop[(s, spatial, 'A')] for adjILI in dict_spatialAgeILIadj53ls[(s, spatial, 'A')]]
		# RR is undefined (NaN) when either the child or adult attack rate is zero
		RR = [a/c if c and a else float('nan') for c, a in zip(child_attack, adult_attack)]
		dict_spatialRR53ls[(s, spatial)] = RR
		# spatialZRR53ls dict: z-normalize against the first gp_normweeks weeks of the season
		normalization_period = dict_spatialRR53ls[(s, spatial)][:gp_normweeks]
		season_mean = np.mean(normalization_period)
		season_sd = np.std(normalization_period)
		# bug fix: iterated the dict's keys instead of the season's RR list
		dict_spatialZRR53ls[(s, spatial)] = [(val-season_mean)/season_sd for val in dict_spatialRR53ls[(s, spatial)]]
	return dict_spatialTotIncid53ls, dict_spatialTotIncidAdj53ls, dict_spatialRR53ls, dict_spatialZRR53ls
##############################################
def week_zOR_processing_pandemic(dict_wk, dict_OR, baseline_text):
	''' Calculate zOR by week with choice of different baselines: 'between pandemic waves', 'last season baseline', or 'after pandemic'. The function week_OR_processing function must be run before this function. Return dictionaries of week to season number, week to OR, and week to zOR. SDI source files for csv_incidence and csv_population are 'SQL_export/OR_allweeks_outpatient.csv' and 'SQL_export/totalpop_age.csv' respectively. ILINet source files for csv_incidence and csv_population are 'CDC_Source/Import_Data/all_cdc_source_data.csv' and 'Census/Import_Data/totalpop_age_Census_98-14.csv' respectively.
	dict_wk[week] = seasonnum
	dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season
	dict_OR[week] = OR
	dict_zOR[week] = zOR with pandemic baseline
	'''
	main(week_zOR_processing_pandemic)
	# dict_wk[week] = seasonnum; dict_incid[week] = ILI cases per 10,000 in US population in second calendar year of flu season, dict_OR[week] = OR
	dict_zOR = {}
	for s in pseasons:
		weekdummy = sorted([key for key in dict_wk if dict_wk[key] == s])
		# weeks from prior season
		weekdummypre = sorted([key for key in dict_wk if dict_wk[key] == s-1])
		# choose the baseline window, then z-normalize once (dedupes three copies of the same mean/sd logic)
		if baseline_text == 'between pandemic waves':
			# week indexes of 2008-09 season that occurred between pandemic waves (weeks 26-32, indexes 39:45)
			baseline_weeks = weekdummypre[39:46]
		elif baseline_text == 'last season baseline':
			baseline_weeks = weekdummypre[:gp_normweeks]
		elif baseline_text == 'after pandemic':
			baseline_weeks = weekdummy[8:]
		else:
			# robustness fix: previously an unknown baseline fell through and raised NameError below
			raise ValueError("unknown baseline_text: %r" % (baseline_text,))
		baseline_ORs = [dict_OR[wk] for wk in baseline_weeks]
		season_mean = np.mean(baseline_ORs)
		season_sd = np.std(baseline_ORs)
		for wk in weekdummy:
			dict_zOR[wk] = (dict_OR[wk]-season_mean)/season_sd
	return dict_zOR
##############################################
##############################################
# footer
# Debug breadcrumb: every processing function above calls main(<itself>) on
# entry so the console shows which function is currently executing.
def main(function):
	print 'Running', __name__, function.__name__
if __name__ == '__main__':
print 'Executed from the command line'
main() | mit |
slowvak/MachineLearningForMedicalImages | code/Module3.py | 1 | 11790 |
# coding: utf-8
# # Supervised Classification: SVM
#
# ## Import Libraries
# In[13]:
get_ipython().magic('matplotlib inline')
import warnings
warnings.filterwarnings('ignore')
import os
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
import pandas as pd
from matplotlib.colors import ListedColormap
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.model_selection import ShuffleSplit
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
import sklearn.metrics as metrics
from sklearn import tree
from IPython.display import Image
from sklearn.externals.six import StringIO
import pydotplus
from matplotlib.colors import Normalize
from sklearn.learning_curve import learning_curve
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
    """Colormap normalizer that pins ``midpoint`` to the center of the colormap.

    Linearly maps vmin -> 0, midpoint -> 0.5, vmax -> 1 so that a colormap is
    centered on the value of interest (see the module comment above).
    """
    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        # Remember the data value that should land at the colormap center.
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)
    def __call__(self, value, clip=None):
        # Piecewise-linear interpolation of `value` onto [0, 1].
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, x, y))
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    """
    Generate a simple plot of the test and training learning curve.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.

    title : string
        Title for the chart.

    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.

    ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum yvalues plotted.

    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
          - None, to use the default 3-fold cross-validation,
          - integer, to specify the number of folds.
          - An object to be used as a cross-validation generator.
          - An iterable yielding train/test splits.

        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` used. If the estimator is not a classifier
        or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.

        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validators that can be used here.

    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    # learning_curve re-fits the estimator for every (training size, CV fold)
    # combination and returns per-fold scores.
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    # Aggregate across the CV folds (axis=1) for each training-set size.
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()

    # Shaded bands show +/- one standard deviation around each mean curve.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")

    plt.legend(loc="best")
    return plt
# ## Read the dataset
#
# In this case the training dataset is just a csv file. In case of larger dataset more advanced file fromats like hdf5 are used.
#
# Pandas is used to load the files.

# In[14]:

Data=pd.read_csv ('DataExample.csv')

# if you need to print or have access to the data as numpy array you can execute the following commands
# print (Data)
# print(Data.as_matrix(columns=['NAWMpost']))

# ## Creating training sets
#
# Each class of tissue in our pandas framework has a pre assigned label (Module 1).
#
# This labels were:
# - ClassTissuePost
# - ClassTissuePre
# - ClassTissueFlair
# - ClassTumorPost
# - ClassTumorPre
# - ClassTumorFlair
# - ClassEdemaPost
# - ClassEdemaPre
# - ClassEdemaFlair
#
# For demontration purposes we will create a feature vector that contains the intesities for the tumor and white matter area from the T1w pre and post contrast images.
#

# In[15]:

# NOTE(review): the csv columns presumably have unequal lengths, so pandas
# pads the shorter ones with NaN; the ~np.isnan filters below drop that
# padding -- confirm against DataExample.csv.
ClassBrainTissuepost=(Data['ClassTissuePost'].values)
ClassBrainTissuepost= (np.asarray(ClassBrainTissuepost))
ClassBrainTissuepost=ClassBrainTissuepost[~np.isnan(ClassBrainTissuepost)]
ClassBrainTissuepre=(Data[['ClassTissuePre']].values)
ClassBrainTissuepre= (np.asarray(ClassBrainTissuepre))
ClassBrainTissuepre=ClassBrainTissuepre[~np.isnan(ClassBrainTissuepre)]
ClassTUMORpost=(Data[['ClassTumorPost']].values)
ClassTUMORpost= (np.asarray(ClassTUMORpost))
ClassTUMORpost=ClassTUMORpost[~np.isnan(ClassTUMORpost)]
ClassTUMORpre=(Data[['ClassTumorPre']].values)
ClassTUMORpre= (np.asarray(ClassTUMORpre))
ClassTUMORpre=ClassTUMORpre[~np.isnan(ClassTUMORpre)]

X_1 = np.stack((ClassBrainTissuepost,ClassBrainTissuepre)) # we only take the first two features.
X_2 = np.stack((ClassTUMORpost,ClassTUMORpre))
# Rows = samples, columns = (post-contrast, pre-contrast) intensity;
# normal-tissue samples come first, tumor samples after.
X=np.concatenate((X_1.transpose(), X_2.transpose()),axis=0)
# Label vector: 0 = normal brain tissue, 1 = tumor.
y =np.zeros((np.shape(X))[0])
y[np.shape(X_1)[1]:]=1
# Standardize features to zero mean / unit variance.
X= preprocessing.scale(X)

# In[ ]:

# **X** is the feature vector
#
# **y** are the labels

# ## Split Training/Validation

# In[16]:

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# ## Create the classifier
#
# For the following example we will consider a SVM classifier.
#
# The classifier is provided by the Scikit-Learn library

# In[17]:

h = .02  # step size in the mesh

# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0  # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.1, C=10).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)

# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))

# title for the plots
titles = ['SVC with linear kernel',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel']

for i, clf in enumerate((svc, rbf_svc, poly_svc)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, m_max]x[y_min, y_max].
    plt.subplot(2, 2, i + 1)
    plt.subplots_adjust(wspace=0.4, hspace=0.4)

    # Predict the class of every mesh point to colour the decision regions.
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    plt.xlabel('Intensity post contrast')
    plt.ylabel('Intensity pre contrast')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.title(titles[i])

plt.show()
# understanding margins
# Contrast a very soft margin (C=0.001) with a very hard one (C=1000).
for C in [0.001,1000]:
    fig = plt.subplot()
    clf = svm.SVC(C,kernel='linear')
    clf.fit(X, y)

    # create a mesh to plot in
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx = np.linspace(x_min,x_max)
    # print (xx)
    xx=np.asarray(xx)

    # get the separating hyperplane
    # The boundary satisfies w0*x + w1*y + b = 0, i.e. y = -(w0/w1)*x - b/w1.
    w = clf.coef_[0]
    # print(w)
    a = -w[0] / w[1]
    # print (a)
    yy = a * xx - (clf.intercept_[0]) / w[1]
    # print(yy)

    # plot the parallels to the separating hyperplane that pass through the
    # support vectors
    b = clf.support_vectors_[0]
    yy_down = a * xx + (b[1] - a * b[0])
    b = clf.support_vectors_[-1]
    yy_up = a * xx + (b[1] - a * b[0])

    # plot the line, the points, and the nearest vectors to the plane
    plt.plot(xx, yy, 'k-')
    plt.plot(xx, yy_down, 'k--')
    plt.plot(xx, yy_up, 'k--')
    plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
                s=80, facecolors='none')
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    plt.axis('tight')
    plt.show()
# ## Run some basic analytics
#
#
# Calculate some basic metrics.

# In[18]:

# Large C -> weak regularization (hard margin); tiny C -> strong regularization.
print ('C=100')
model=svm.SVC(C=100,kernel='linear')
model.fit(X_train, y_train)

# make predictions
expected = y_test
predicted = model.predict(X_test)

# summarize the fit of the model
print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))

print (20*'---')
print ('C=0.0001')
model=svm.SVC(C=0.0001,kernel='linear')
model.fit(X_train, y_train)

# make predictions
expected = y_test
predicted = model.predict(X_test)

# summarize the fit of the model
print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))
# ## Correct way
# ### Fine tune hyperparameters

# In[19]:

gamma_val =[0.01, .2,.3,.4,.9]
classifier = svm.SVC(kernel='rbf', C=10).fit(X, y)
# Wrap the SVC in a 5-fold cross-validated grid search over gamma.
classifier = GridSearchCV(estimator=classifier, cv=5, param_grid=dict(gamma=gamma_val))
classifier.fit(X_train, y_train)

# ### Debug algorithm with learning curve
#
# X_train is randomly split into a training and a test set 3 times (n_iter=3). Each point on the training-score curve is the average of 3 scores where the model was trained and evaluated on the first i training examples. Each point on the cross-validation score curve is the average of 3 scores where the model was trained on the first i training examples and evaluated on all examples of the test set.
#
#

# In[20]:

# Re-fit a fresh SVC with the best gamma found above and inspect its
# learning curve for over-/under-fitting.
title = 'Learning Curves (SVM, gamma=%.6f)' %classifier.best_estimator_.gamma
estimator = svm.SVC(kernel='rbf', C=10, gamma=classifier.best_estimator_.gamma)
plot_learning_curve(estimator, title, X_train, y_train, cv=4)
plt.show()

# In[21]:

### Final evaluation on the test set

# In[ ]:

classifier.score(X_test, y_test)
# ## Heatmap
#
# ** This will take some time...**

# In[ ]:

# Exhaustive grid search over (C, gamma) for an RBF SVC.
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
grid_clf = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid_clf.fit(X, y)

print("The best parameters are %s with a score of %0.2f"
      % (grid_clf.best_params_, grid_clf.best_score_))

# In[ ]:

# Reshape the mean CV scores into a (C, gamma) grid for display.
scores = grid_clf.cv_results_['mean_test_score'].reshape(len(C_range),
                                                         len(gamma_range))

# BUG FIX: plt.figure(figsize=(8, 6)) was previously called twice in a row,
# which created an extra, blank figure that plt.show() would also display.
# Only one figure is needed for the heatmap.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.jet,
           norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()

# In[ ]:

# In[ ]:
| mit |
rhattersley/iris | docs/iris/example_code/General/projections_and_annotations.py | 6 | 5396 | """
Plotting in different projections
=================================
This example shows how to overlay data and graphics in different projections,
demonstrating various features of Iris, Cartopy and matplotlib.
We wish to overlay two datasets, defined on different rotated-pole grids.
To display both together, we make a pseudocoloured plot of the first, overlaid
with contour lines from the second.
We also add some lines and text annotations drawn in various projections.
We plot these over a specified region, in two different map projections.
"""
import cartopy.crs as ccrs
import iris
import iris.plot as iplt
import numpy as np
import matplotlib.pyplot as plt
# Define a Cartopy 'ordinary' lat-lon coordinate reference system.
crs_latlon = ccrs.PlateCarree()
def make_plot(projection_name, projection_crs):
    """Draw the two overlaid datasets, plus annotations, in one projection.

    Parameters
    ----------
    projection_name : str
        Human-readable projection name, used only in the plot title.
    projection_crs : cartopy.crs.Projection
        The display projection for the plot axes.
    """
    # Create a matplotlib Figure.
    plt.figure()

    # Add a matplotlib Axes, specifying the required display projection.
    # NOTE: specifying 'projection' (a "cartopy.crs.Projection") makes the
    # resulting Axes a "cartopy.mpl.geoaxes.GeoAxes", which supports plotting
    # in different coordinate systems.
    ax = plt.axes(projection=projection_crs)

    # Set display limits to include a set region of latitude * longitude.
    # (Note: Cartopy-specific).
    ax.set_extent((-80.0, 20.0, 10.0, 80.0), crs=crs_latlon)

    # Add coastlines and meridians/parallels (Cartopy-specific).
    ax.coastlines(linewidth=0.75, color='navy')
    ax.gridlines(crs=crs_latlon, linestyle='-')

    # Plot the first dataset as a pseudocolour filled plot.
    maindata_filepath = iris.sample_data_path('rotated_pole.nc')
    main_data = iris.load_cube(maindata_filepath)
    # NOTE: iplt.pcolormesh calls "pyplot.pcolormesh", passing in a coordinate
    # system with the 'transform' keyword: This enables the Axes (a cartopy
    # GeoAxes) to reproject the plot into the display projection.
    iplt.pcolormesh(main_data, cmap='RdBu_r')

    # Overplot the other dataset (which has a different grid), as contours.
    overlay_filepath = iris.sample_data_path('space_weather.nc')
    overlay_data = iris.load_cube(overlay_filepath, 'total electron content')
    # NOTE: as above, "iris.plot.contour" calls "pyplot.contour" with a
    # 'transform' keyword, enabling Cartopy reprojection.
    iplt.contour(overlay_data, 20,
                 linewidths=2.0, colors='darkgreen', linestyles='-')

    # Draw a margin line, some way in from the border of the 'main' data...
    # First calculate rectangle corners, 7% in from each corner of the data.
    x_coord, y_coord = main_data.coord(axis='x'), main_data.coord(axis='y')
    x_start, x_end = np.min(x_coord.points), np.max(x_coord.points)
    y_start, y_end = np.min(y_coord.points), np.max(y_coord.points)
    margin = 0.07
    margin_fractions = np.array([margin, 1.0 - margin])
    x_lower, x_upper = x_start + (x_end - x_start) * margin_fractions
    y_lower, y_upper = y_start + (y_end - y_start) * margin_fractions
    # Corner sequence traces the rectangle and closes it (5 points).
    box_x_points = x_lower + (x_upper - x_lower) * np.array([0, 1, 1, 0, 0])
    box_y_points = y_lower + (y_upper - y_lower) * np.array([0, 0, 1, 1, 0])

    # Get the Iris coordinate sytem of the X coordinate (Y should be the same).
    cs_data1 = x_coord.coord_system
    # Construct an equivalent Cartopy coordinate reference system ("crs").
    crs_data1 = cs_data1.as_cartopy_crs()
    # Draw the rectangle in this crs, with matplotlib "pyplot.plot".
    # NOTE: the 'transform' keyword specifies a non-display coordinate system
    # for the plot points (as used by the "iris.plot" functions).
    plt.plot(box_x_points, box_y_points, transform=crs_data1,
             linewidth=2.0, color='white', linestyle='--')

    # Mark some particular places with a small circle and a name label...
    # Define some test points with latitude and longitude coordinates.
    city_data = [('London', 51.5072, 0.1275),
                 ('Halifax, NS', 44.67, -63.61),
                 ('Reykjavik', 64.1333, -21.9333)]

    # Place a single marker point and a text annotation at each place.
    for name, lat, lon in city_data:
        plt.plot(lon, lat, marker='o', markersize=7.0, markeredgewidth=2.5,
                 markerfacecolor='black', markeredgecolor='white',
                 transform=crs_latlon)
        # NOTE: the "plt.annotate call" does not have a "transform=" keyword,
        # so for this one we transform the coordinates with a Cartopy call.
        at_x, at_y = ax.projection.transform_point(lon, lat,
                                                   src_crs=crs_latlon)
        plt.annotate(
            name, xy=(at_x, at_y), xytext=(30, 20), textcoords='offset points',
            color='black', backgroundcolor='white', size='large',
            arrowprops=dict(arrowstyle='->', color='white', linewidth=2.5))

    # Add a title, and display.
    plt.title('A pseudocolour plot on the {} projection,\n'
              'with overlaid contours.'.format(projection_name))
    iplt.show()
def main():
    """Render the demo plot in two different display projections."""
    # Enable a future option, to ensure that the netcdf load works the same way
    # as in future Iris versions.
    iris.FUTURE.netcdf_promote = True

    # Demonstrate with two different display projections.
    make_plot('Equidistant Cylindrical', ccrs.PlateCarree())
    make_plot('North Polar Stereographic', ccrs.NorthPolarStereo())
if __name__ == '__main__':
main()
| lgpl-3.0 |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
    """Feature selector that removes all low-variance features.

    Only the features (X) are examined, never the targets (y), so this
    selector is also usable for unsupervised learning.

    Read more in the :ref:`User Guide <variance_threshold>`.

    Parameters
    ----------
    threshold : float, optional
        Features whose training-set variance is not strictly greater than
        this value are removed.  The default (0.0) keeps every feature with
        non-zero variance, i.e. drops features that are constant across all
        samples.

    Attributes
    ----------
    variances_ : array, shape (n_features,)
        Variances of individual features.

    Examples
    --------
    The following dataset has integer features, two of which are the same
    in every sample. These are removed with the default setting for threshold::

        >>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
        >>> selector = VarianceThreshold()
        >>> selector.fit_transform(X)
        array([[2, 0],
               [1, 4],
               [1, 1]])
    """

    def __init__(self, threshold=0.):
        self.threshold = threshold

    def fit(self, X, y=None):
        """Learn empirical variances from X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Sample vectors from which to compute variances.

        y : any
            Ignored. This parameter exists only for compatibility with
            sklearn.pipeline.Pipeline.

        Returns
        -------
        self
        """
        X = check_array(X, ('csr', 'csc'), dtype=np.float64)

        if hasattr(X, "toarray"):
            # Sparse input: compute per-column variances without densifying.
            _, variances = mean_variance_axis(X, axis=0)
        else:
            variances = np.var(X, axis=0)
        self.variances_ = variances

        # Refuse to fit when the mask below would discard every feature.
        if not np.any(variances > self.threshold):
            msg = "No feature in X meets the variance threshold {0:.5f}"
            if X.shape[0] == 1:
                msg += " (X contains only one sample)"
            raise ValueError(msg.format(self.threshold))

        return self

    def _get_support_mask(self):
        # Selector protocol: boolean mask of the features to keep.
        check_is_fitted(self, 'variances_')
        mask = self.variances_ > self.threshold
        return mask
| mit |
gengliangwang/spark | python/pyspark/pandas/data_type_ops/categorical_ops.py | 1 | 1062 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.pandas.data_type_ops.base import DataTypeOps
class CategoricalOps(DataTypeOps):
    """
    The class for binary operations of pandas-on-Spark objects with categorical types.
    """

    @property
    def pretty_name(self) -> str:
        # Human-readable type name; presumably surfaced in user-facing
        # messages by DataTypeOps consumers -- verify against callers.
        return 'categoricals'
| apache-2.0 |
stylianos-kampakis/scikit-learn | benchmarks/bench_mnist.py | 76 | 6136 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogenous.
Example of output :
[..]
Classification performance:
===========================
Classifier train-time test-time error-rat
------------------------------------------------------------
Nystroem-SVM 105.07s 0.91s 0.0227
ExtraTrees 48.20s 1.22s 0.0288
RandomForest 47.17s 1.21s 0.0304
SampledRBF-SVM 140.45s 0.84s 0.0486
CART 22.84s 0.16s 0.1214
dummy 0.01s 0.02s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
from sklearn.linear_model import LogisticRegression
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
    """Load the data, then cache and memmap the train/test split"""
    ######################################################################
    ## Load dataset
    print("Loading dataset...")
    data = fetch_mldata('MNIST original')
    X = check_array(data['data'], dtype=dtype, order=order)
    y = data["target"]

    # Normalize features
    # Raw pixel values are scaled into [0, 1].
    X = X / 255

    ## Create train-test split (as [Joachims, 2006])
    # Fixed split: first 60000 samples train, remainder test.
    print("Creating train-test split...")
    n_train = 60000
    X_train = X[:n_train]
    y_train = y[:n_train]
    X_test = X[n_train:]
    y_test = y[n_train:]

    return X_train, X_test, y_train, y_test
# Candidate models, keyed by the names accepted by the --classifiers CLI flag.
ESTIMATORS = {
    "dummy": DummyClassifier(),
    'CART': DecisionTreeClassifier(),
    'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
    'RandomForest': RandomForestClassifier(n_estimators=100),
    'Nystroem-SVM':
    make_pipeline(Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
    'SampledRBF-SVM':
    make_pipeline(RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)),
    'LinearRegression-SAG': LogisticRegression(solver='sag', tol=1e-1, C=1e4)
}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--classifiers', nargs="+",
                        choices=ESTIMATORS, type=str,
                        default=['ExtraTrees', 'Nystroem-SVM'],
                        help="list of classifiers to benchmark.")
    parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
                        help="Number of concurrently running workers for "
                             "models that support parallelism.")
    parser.add_argument('--order', nargs="?", default="C", type=str,
                        choices=["F", "C"],
                        help="Allow to choose between fortran and C ordered "
                             "data")
    parser.add_argument('--random-seed', nargs="?", default=0, type=int,
                        help="Common seed used by random number generator.")
    args = vars(parser.parse_args())

    print(__doc__)

    X_train, X_test, y_train, y_test = load_data(order=args["order"])

    print("")
    print("Dataset statistics:")
    print("===================")
    print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
    print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
    print("%s %s" % ("data type:".ljust(25), X_train.dtype))
    print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
                                 X_train.shape[0], int(X_train.nbytes / 1e6)))
    print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
                                 X_test.shape[0], int(X_test.nbytes / 1e6)))

    print()
    print("Training Classifiers")
    print("====================")
    error, train_time, test_time = {}, {}, {}
    for name in sorted(args["classifiers"]):
        print("Training %s ... " % name, end="")
        estimator = ESTIMATORS[name]
        estimator_params = estimator.get_params()

        # Seed every *random_state parameter the estimator exposes so that
        # runs are reproducible.
        estimator.set_params(**{p: args["random_seed"]
                                for p in estimator_params
                                if p.endswith("random_state")})

        if "n_jobs" in estimator_params:
            estimator.set_params(n_jobs=args["n_jobs"])

        time_start = time()
        estimator.fit(X_train, y_train)
        train_time[name] = time() - time_start

        time_start = time()
        y_pred = estimator.predict(X_test)
        test_time[name] = time() - time_start

        error[name] = zero_one_loss(y_test, y_pred)

        print("done")

    print()
    print("Classification performance:")
    print("===========================")
    print("{0: <24} {1: >10} {2: >11} {3: >12}"
          "".format("Classifier ", "train-time", "test-time", "error-rate"))
    print("-" * 60)
    # Report classifiers sorted by error rate (best first).
    for name in sorted(args["classifiers"], key=error.get):
        print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
              "".format(name, train_time[name], test_time[name], error[name]))
    print()
| bsd-3-clause |
stitchfix/pyxley | tests/app/components/plotly.py | 1 | 1349 |
from pyxley.charts.plotly import PlotlyAPI
from pyxley.filters import SelectButton
from pyxley import UILayout
import pandas as pd
from flask import jsonify, request
def make_plotly_ui():
    """Build a Pyxley UI with a data-column select button and a Plotly line chart.

    Returns the assembled UILayout; registering it with a Flask app is left
    to the caller.
    """
    # NOTE(review): path is relative to the working directory -- fragile.
    filename = "../examples/metricsgraphics/project/fitbit_data.csv"
    df = pd.read_csv(filename)

    # Make a UI
    ui = UILayout("PyxleyChart")

    # Read in the data and stack it, so that we can filter on columns
    _stack = df.set_index("Date").stack().reset_index()
    _stack = _stack.rename(columns={"level_1": "Data", 0: "value"})

    # Make a Button
    cols = [c for c in df.columns if c != "Date"]
    btn = SelectButton("Data", cols, "Data", "Steps")

    # add the button to the UI
    ui.add_filter(btn)

    init_params = {"Data": "Steps"}

    def get_data():
        # Merge request query parameters over the defaults in init_params.
        args = {}
        for c in init_params:
            if request.args.get(c):
                args[c] = request.args[c]
            else:
                args[c] = init_params[c]
        return jsonify(
            PlotlyAPI.line_plot(
                PlotlyAPI.apply_filters(_stack, args),
                [("Date", "value")],
                "lines+markers",
                {}
            ))

    _plot = PlotlyAPI(
        "plotly_chart",
        "/api/plotly_line_plot/",
        get_data,
        init_params=init_params
    )
    ui.add_chart(_plot)
    return ui
| mit |
ephes/scikit-learn | sklearn/metrics/setup.py | 299 | 1024 | import os
import os.path
import numpy
from numpy.distutils.misc_util import Configuration
from sklearn._build_utils import get_blas_info
def configuration(parent_package="", top_path=None):
    """numpy.distutils configuration for the sklearn.metrics subpackage.

    Declares the C extension ``pairwise_fast`` built against the CBLAS
    headers/libraries reported by get_blas_info().
    """
    config = Configuration("metrics", parent_package, top_path)

    cblas_libs, blas_info = get_blas_info()

    if os.name == 'posix':
        # Link libm explicitly on POSIX systems.
        cblas_libs.append('m')

    config.add_extension("pairwise_fast",
                         sources=["pairwise_fast.c"],
                         include_dirs=[os.path.join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         libraries=cblas_libs,
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []),
                         **blas_info)

    return config

if __name__ == "__main__":
    # Allow building this subpackage standalone.
    from numpy.distutils.core import setup
    setup(**configuration().todict())
| bsd-3-clause |
dieterich-lab/riboseq-utils | riboutils/extract_metagene_profiles.py | 1 | 10027 | #! /usr/bin/env python3
import argparse
import collections
import numpy as np
import pandas as pd
import sys
import bio_utils.bam_utils as bam_utils
import bio_utils.bed_utils as bed_utils
import misc.utils as utils
import misc.pandas_utils as pandas_utils
import logging
import misc.logging_utils as logging_utils
logger = logging.getLogger(__name__)
default_num_cpus = 1
default_lengths = []
default_start_upstream = 300
default_start_downstream = 300
default_end_upstream = 300
default_end_downstream = 300
def get_interval_df(start, end, seqname, strand):
    """Build a BED-like data frame of genomic intervals.

    start, end, seqname may be scalars or aligned pandas Series; strand is
    broadcast across all rows.  The 'id' and 'score' columns are filled with
    placeholder values ('.' and 0).
    """
    bed = pd.DataFrame()
    bed['start'] = start
    bed['end'] = end
    bed['seqname'] = seqname
    bed['strand'] = strand
    bed['id'] = "."
    bed['score'] = 0
    return bed
def get_length_strand_profiles(matches, profile_length):
    """Accumulate per-(read length, strand) position profiles.

    Each match contributes a single count at its relative_offset; matches
    are keyed by (length, strand) taken from position_info fields 6 and 5.
    Returns a defaultdict mapping (length, strand) -> np.ndarray of counts
    with profile_length entries.
    """
    def empty_profile():
        return np.zeros(profile_length, int)

    profiles = collections.defaultdict(empty_profile)

    for m in matches:
        info = m.position_info
        offset = int(m.relative_offset)
        key = (int(info[6]), info[5])  # (read length, strand)
        profiles[key][offset] += 1

    return profiles
def get_metagene_profile_df(length, type_label, length_strand_profiles, upstream, downstream):
    """Combine the two strand profiles for one read length into a data frame.

    The '-' strand profile is reversed before adding so both strands share
    the same upstream->downstream orientation.  Returns None when the
    combined profile contains no reads; otherwise a data frame with
    'position' (from -upstream to +downstream), 'count', 'type', 'length'.
    """
    minus_profile = length_strand_profiles[(length, '-')]
    plus_profile = length_strand_profiles[(length, '+')]
    combined = plus_profile + minus_profile[::-1]

    if sum(combined) == 0:
        return None

    positions = range(-upstream, downstream + 1)

    profile_df = pd.DataFrame()
    profile_df['position'] = positions
    profile_df['count'] = combined
    profile_df['type'] = type_label
    profile_df['length'] = length
    return profile_df
def main():
    """Extract metagene profiles around canonical start/stop codons.

    Reads 5' alignment ends from the BAM file, intersects them with windows
    around annotated translation initiation/termination sites, and writes
    per-read-length profiles to a csv.gz file.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description="This script extracts the metagene profile from reads in a BAM "
        "file, possibly filtering by length. It attempts to vectorize as many of the "
        "counting operations as possible.")

    parser.add_argument('bam', help="The bam file")
    parser.add_argument('orfs', help="The annotated transcripts (bed) file")
    parser.add_argument('out', help="The (output) csv.gz counts file")

    parser.add_argument('-p', '--num-cpus', help="The number of processors to use",
        type=int, default=default_num_cpus)

    parser.add_argument('--is-sam', help="If this flag is present, the alignment file will "
        "be parsed as SAM rather than BAM", action='store_true')

    parser.add_argument('--lengths', help="If specified, then metagene profiles will be "
        "created for reads of each length. Otherwise, profiles will be created for each "
        "read length present in the bam file.", type=int, nargs='*', default=default_lengths)

    parser.add_argument('--start-upstream', type=int, default=default_start_upstream,
        help="The number of bases upstream of the translation initiation site to begin "
        "constructing the metagene profile.")

    parser.add_argument('--start-downstream', type=int, default=default_start_downstream,
        help="The number of bases downstream of the translation initiation site to end "
        "the metagene profile.")

    parser.add_argument('--end-upstream', type=int, default=default_end_upstream,
        help="The number of bases upstream of the translation termination site to begin "
        "constructing the metagene profile.")

    parser.add_argument('--end-downstream', type=int, default=default_end_downstream,
        help="The number of bases downstream of the translation termination site to end "
        "the metagene profile.")

    logging_utils.add_logging_options(parser)
    args = parser.parse_args()
    logging_utils.update_logging(args)

    msg = "[extract-metagene-profiles]: {}".format(' '.join(sys.argv))
    logger.info(msg)

    # first, get the 5' ends of the reads
    alignment_df = bam_utils.get_five_prime_ends(args.bam, progress_bar=True,
        count=True, logger=logger)

    msg = "Reading annotations"
    logger.info(msg)
    annotations_df = bed_utils.read_bed(args.orfs)

    msg = "Constructing canonical translation initiation ORF data frames"
    logger.info(msg)

    # thick_start == -1 marks records without an annotated CDS.
    m_has_canonical = annotations_df['thick_start'] > -1
    m_forward = annotations_df['strand'] == '+'

    m_canonical_forward = m_has_canonical & m_forward
    m_canonical_reverse = m_has_canonical & ~m_forward

    # forward translation initiation
    start = annotations_df.loc[m_canonical_forward, 'thick_start'] - args.start_upstream
    end = annotations_df.loc[m_canonical_forward, 'thick_start'] + args.start_downstream
    seqname = annotations_df.loc[m_canonical_forward, 'seqname']
    strand = '+'
    intervals_forward_initiation_bed = get_interval_df(start, end, seqname, strand)

    # reverse translation initation
    # On the reverse strand the initiation site is thick_end, and the
    # upstream/downstream offsets are mirrored.
    start = annotations_df.loc[m_canonical_reverse, 'thick_end'] - args.start_downstream
    end = annotations_df.loc[m_canonical_reverse, 'thick_end'] + args.start_upstream
    seqname = annotations_df.loc[m_canonical_reverse, 'seqname']
    strand = '-'
    intervals_reverse_initiation_bed = get_interval_df(start, end, seqname, strand)

    # all translation initiation regions
    intervals_initiation_bed = pd.concat([intervals_forward_initiation_bed, intervals_reverse_initiation_bed])
    # make sure we do not double count isoforms with the same starts
    intervals_initiation_bed = intervals_initiation_bed.drop_duplicates()

    msg = "Constructing canonical translation termination ORF data frames"
    logger.info(msg)

    # forward translation termination
    start = annotations_df.loc[m_canonical_forward, 'thick_end'] - args.end_upstream
    end = annotations_df.loc[m_canonical_forward, 'thick_end'] + args.end_downstream
    seqname = annotations_df.loc[m_canonical_forward, 'seqname']
    strand = '+'
    intervals_forward_termination_bed = get_interval_df(start, end, seqname, strand)

    # reverse translation termination
    start = annotations_df.loc[m_canonical_reverse, 'thick_start'] - args.end_downstream
    end = annotations_df.loc[m_canonical_reverse, 'thick_start'] + args.end_upstream
    seqname = annotations_df.loc[m_canonical_reverse, 'seqname']
    strand = '-'
    intervals_reverse_termination_bed = get_interval_df(start, end, seqname, strand)

    # all translation termination regions
    intervals_termination_bed = pd.concat([intervals_forward_termination_bed, intervals_reverse_termination_bed])
    # make sure we do not double count isoforms with the same starts
    intervals_termination_bed = intervals_termination_bed.drop_duplicates()

    msg = "Finding translation initiation site matches"
    logger.info(msg)
    initiation_matches = bed_utils.get_all_position_intersections(alignment_df, intervals_initiation_bed)
    # Window width: upstream + site itself + downstream.
    profile_length = args.start_upstream + args.start_downstream + 1
    initiation_length_strand_profiles = get_length_strand_profiles(initiation_matches, profile_length)

    initiation_keys_str = ','.join(str(k) for k in initiation_length_strand_profiles.keys())
    msg = "Initiation keys: {}".format(initiation_keys_str)
    logger.debug(msg)

    msg = "Finding translation termination site matches"
    logger.info(msg)
    termination_matches = bed_utils.get_all_position_intersections(alignment_df, intervals_termination_bed)
    profile_length = args.end_upstream + args.end_downstream + 1
    termination_length_strand_profiles = get_length_strand_profiles(termination_matches, profile_length)

    termination_keys_str = ','.join(str(k) for k in termination_length_strand_profiles.keys())
    msg = "Termination keys: {}".format(termination_keys_str)
    logger.debug(msg)

    msg = "Extracting metagene profiles"
    logger.info(msg)

    # Default to every read length observed in the alignments when the
    # --lengths option was not given.
    if len(args.lengths) == 0:
        args.lengths = list(alignment_df['length'].unique())
        args.lengths = np.sort(args.lengths)
        args.lengths = [int(l) for l in args.lengths]

    length_str = ','.join(str(l) for l in args.lengths)
    msg = "Profiles will be created for lengths: {}".format(length_str)
    logger.info(msg)

    all_metagene_profile_dfs = []
    for length in args.lengths:
        # first, the profile for this length around initiation sites
        initiation_profile_df = get_metagene_profile_df(length,
            'start',
            initiation_length_strand_profiles,
            args.start_upstream,
            args.start_downstream)
        all_metagene_profile_dfs.append(initiation_profile_df)

        # and around termination sites
        termination_profile_df = get_metagene_profile_df(length,
            'end',
            termination_length_strand_profiles,
            args.end_upstream,
            args.end_downstream)
        all_metagene_profile_dfs.append(termination_profile_df)

    # filter out all of the profiles which did not have any reads
    all_metagene_profile_dfs = [df for df in all_metagene_profile_dfs if df is not None]

    # join them together in one large data frame
    all_metagene_profile_dfs = pd.concat(all_metagene_profile_dfs)

    msg = "Writing metagene profiles to disk"
    logger.info(msg)
    pandas_utils.write_df(all_metagene_profile_dfs, args.out, index=False)

if __name__ == '__main__':
    main()
| mit |
vmAggies/omniture-master | tests/testReports.py | 1 | 7658 | #!/usr/bin/python
import unittest
import omniture
import os
from datetime import date
import pandas
import datetime
import requests_mock
# API credentials are read from the environment so they never live in the
# repository; both variables must be set for the tests below to run.
creds = {}
creds['username'] = os.environ['OMNITURE_USERNAME']
creds['secret'] = os.environ['OMNITURE_SECRET']
# Report suite exercised by all of the integration tests below.
test_report_suite = 'omniture.api-gateway'
class ReportTest(unittest.TestCase):
    """Integration tests for running Omniture reports.

    ``setUp`` authenticates against the live API using the
    OMNITURE_USERNAME / OMNITURE_SECRET environment variables, so most of
    these tests require network access. The classification tests instead
    mock the HTTP layer with ``requests_mock`` and canned JSON fixtures.
    """

    def setUp(self):
        # A fresh, authenticated client for every test.
        self.analytics = omniture.authenticate(creds['username'], creds['secret'])

    def tearDown(self):
        # Drop the client so no state leaks between tests.
        self.analytics = None

    def test_basic_report(self):
        """Make sure a basic report can be run."""
        response = self.analytics.suites[test_report_suite].report.run()
        self.assertIsInstance(response.data, list, "Something went wrong with the report")
        # Timing info
        self.assertIsInstance(response.timing['queue'], float, "waitSeconds info is missing")
        self.assertIsInstance(response.timing['execution'], float, "Execution info is missing")
        # Raw report
        self.assertIsInstance(response.report, dict, "The raw report hasn't been populated")
        # Check metrics
        self.assertIsInstance(response.metrics, list, "The metrics weren't populated")
        self.assertEqual(response.metrics[0].id, "pageviews", "Wrong Metric")
        # Check elements
        self.assertIsInstance(response.elements, list, "The elements is the wrong type")
        self.assertEqual(response.elements[0].id, "datetime", "There are elements when there shouldn't be")
        # Check the time range: the default report period is today.
        checkdate = date.today().strftime("%a. %e %h. %Y")
        self.assertEqual(response.period, checkdate)
        # Check segments
        self.assertIsNone(response.segments)
        # Check data
        self.assertIsInstance(response.data, list, "Data isn't getting populated right")
        self.assertIsInstance(response.data[0], dict, "The data isn't getting into the dict")
        self.assertIsInstance(response.data[0]['datetime'], datetime.datetime, "The date isn't getting populated in the data")
        self.assertIsInstance(response.data[0]['pageviews'], int, "The pageviews aren't getting populated in the data")

    def test_ranked_report(self):
        """Make sure the ranked report is being processed."""
        ranked = self.analytics.suites[test_report_suite].report.element("page").metric("pageviews").metric("visits")
        queue = []
        queue.append(ranked)
        response = omniture.sync(queue)
        for report in response:
            # Check data
            self.assertIsInstance(report.data, list, "Data isn't getting populated right")
            self.assertIsInstance(report.data[0], dict, "The data isn't getting into the dict")
            self.assertIsInstance(report.data[0]['page'], str, "The page isn't getting populated in the data")
            self.assertIsInstance(report.data[0]['pageviews'], int, "The pageviews aren't getting populated in the data")
            self.assertIsInstance(report.data[0]['visits'], int, "The visits aren't getting populated in the data")

    def test_trended_report(self):
        """Make sure the trended reports are being processed correctly."""
        trended = self.analytics.suites[test_report_suite].report.element("page").metric("pageviews").granularity('hour').run()
        self.assertIsInstance(trended.data, list, "Treneded Reports don't work")
        self.assertIsInstance(trended.data[0], dict, "The data isn't getting into the dict")
        self.assertIsInstance(trended.data[0]['datetime'], datetime.datetime, "The date isn't getting propulated correctly")
        self.assertIsInstance(trended.data[0]['page'], str, "The page isn't getting populated in the data")
        self.assertIsInstance(trended.data[0]['pageviews'], int, "The pageviews aren't getting populated in the data")

    def test_dataframe(self):
        """Make sure the pandas data frame object can be generated."""
        trended = self.analytics.suites[test_report_suite].report.element("page").metric("pageviews").granularity('hour').run()
        self.assertIsInstance(trended.dataframe, pandas.DataFrame, "Data Frame Object doesn't work")

    def test_segments_id(self):
        """Make sure segments can be added."""
        suite = self.analytics.suites[test_report_suite]
        report = suite.report.filter(suite.segments[0]).run()
        self.assertEqual(report.segments[0], suite.segments[0], "The segments don't match")

    @unittest.skip("skip inline segments because checked in Query")
    def test_inline_segment(self):
        """Make sure inline segments work."""
        # Pretty poor check, but it needs to work with any report suite.
        report = self.analytics.suites[0].report.element('page').metric('pageviews').metric('visits').filter(element='browser', selected=["::unspecified::"]).run()
        self.assertIsInstance(report.data, list, "inline segments don't work")

    @requests_mock.mock()
    def test_multiple_classifications(self, m):
        """Make sure the report can parse multiple classifications correctly,
        since they share the same element ID."""
        # Load the canned API responses from disk.
        path = os.path.dirname(__file__)
        with open(path + '/mock_objects/multi_classifications.json') as data_file:
            json_response = data_file.read()
        with open(path + '/mock_objects/Report.Queue.json') as queue_file:
            ReportQueue = queue_file.read()
        # Route every API endpoint to the canned responses.
        m.post('https://api.omniture.com/admin/1.4/rest/?method=Company.GetReportSuites', text=json_response)
        m.post('https://api.omniture.com/admin/1.4/rest/?method=Report.Get', text=json_response)
        m.post('https://api.omniture.com/admin/1.4/rest/?method=Report.Queue', text=ReportQueue)
        # BUG FIX: the original chain ended with a trailing line-continuation
        # backslash after the last .element() call, which continued the
        # statement into the next one and raised a SyntaxError.
        report = self.analytics.suites[0].report\
            .element('evar2', classification="Classification 1", disable_validation=True)\
            .element('evar2', classification="Classification 2", disable_validation=True)
        report = report.run()
        self.assertTrue('evar2 | Classification 1' in report.data[0], "The Value of report.data[0] was:{}".format(report.data[0]))
        self.assertTrue('evar2 | Classification 2' in report.data[0], "The Value of report.data[0] was:{}".format(report.data[0]))

    @requests_mock.mock()
    def test_mixed_classifications(self, m):
        """Make sure the report can parse reports with classifications and
        regular dimensions correctly, since they share the same element ID."""
        # Load the canned API responses from disk.
        path = os.path.dirname(__file__)
        with open(path + '/mock_objects/mixed_classifications.json') as data_file:
            json_response = data_file.read()
        with open(path + '/mock_objects/Report.Queue.json') as queue_file:
            ReportQueue = queue_file.read()
        # Route every API endpoint to the canned responses.
        m.post('https://api.omniture.com/admin/1.4/rest/?method=Company.GetReportSuites', text=json_response)
        m.post('https://api.omniture.com/admin/1.4/rest/?method=Report.Get', text=json_response)
        m.post('https://api.omniture.com/admin/1.4/rest/?method=Report.Queue', text=ReportQueue)
        # BUG FIX: same trailing-backslash SyntaxError as in
        # test_multiple_classifications; the final continuation is removed.
        report = self.analytics.suites[0].report\
            .element('evar3', classification="Classification 1", disable_validation=True)\
            .element('evar5', disable_validation=True)
        report = report.run()
        self.assertTrue('evar3 | Classification 1' in report.data[0], "The Value of report.data[0] was:{}".format(report.data[0]))
        self.assertTrue('evar5' in report.data[0], "The Value of report.data[0] was:{}".format(report.data[0]))
if __name__ == '__main__':
    # Run the whole test suite when this file is executed as a script.
    unittest.main()
| mit |
CrazyGuo/vincent | examples/bar_chart_examples.py | 11 | 2026 | # -*- coding: utf-8 -*-
"""
Vincent Bar Chart Example
"""
#Build a Bar Chart from scratch
from vincent import *
import pandas as pd
farm_1 = {'apples': 10, 'berries': 32, 'squash': 21, 'melons': 13, 'corn': 18}
farm_2 = {'apples': 15, 'berries': 43, 'squash': 17, 'melons': 10, 'corn': 22}
farm_3 = {'apples': 6, 'berries': 24, 'squash': 22, 'melons': 16, 'corn': 30}
farm_4 = {'apples': 12, 'berries': 30, 'squash': 15, 'melons': 9, 'corn': 15}
data = [farm_1, farm_2, farm_3, farm_4]
index = ['Farm 1', 'Farm 2', 'Farm 3', 'Farm 4']
df = pd.DataFrame(data, index=index)
vis = Visualization(width=500, height=300)
vis.scales['x'] = Scale(name='x', type='ordinal', range='width',
domain=DataRef(data='table', field="data.idx"))
vis.scales['y'] = Scale(name='y', range='height', nice=True,
domain=DataRef(data='table', field="data.val"))
vis.axes.extend([Axis(type='x', scale='x'),
Axis(type='y', scale='y')])
#Marks
enter_props = PropertySet(x=ValueRef(scale='x', field="data.idx"),
y=ValueRef(scale='y', field="data.val"),
width=ValueRef(scale='x', band=True, offset=-1),
y2=ValueRef(scale='y', value=0))
update_props = PropertySet(fill=ValueRef(value='steelblue'))
mark = Mark(type='rect', from_=MarkRef(data='table'),
properties=MarkProperties(enter=enter_props,
update=update_props))
vis.marks.append(mark)
data = Data.from_pandas(df['apples'])
#Using a Vincent KeyedList here
vis.data['table'] = data
vis.axis_titles(x='Farms', y='Data')
vis.to_json('vega.json')
#Convenience methods
vis = Bar(df['apples'])
#Fruit
trans = df.T
vis = Bar(trans['Farm 1'])
#From dict
vis = Bar(farm_1)
#From dict of iterables
vis = Bar({'x': ['apples', 'berries', 'squash', 'melons', 'corn'],
'y': [10, 32, 21, 13, 18]}, iter_idx='x')
#Finally, a boring bar chart from a list
vis = Bar([10, 20, 30, 15, 35, 10, 20])
| mit |
djfan/why_yellow_taxi | Sjoin/Sjoin_Pyspark_1.py | 1 | 3068 | import pyproj
import csv
import shapely.geometry as geom
import fiona
import fiona.crs
import shapely
import rtree
import geopandas as gpd
import numpy as np
import operator
import pandas as pd
def countLine(partID, records):
    """Spark mapPartitions worker: count taxi trips whose pickup and dropoff
    each fall inside a (different) subway-entrance buffer, keyed by the set
    of subway lines the two entrances have in common.

    Parameters
    ----------
    partID : int
        Partition index; partition 0 skips the CSV header row.
    records : iterator of str
        Raw taxi-trip CSV lines for this partition.

    Returns
    -------
    list of (tuple_of_lines, count) pairs for this partition, suitable for a
    subsequent flatMap/reduceByKey.
    """
    # Imports are repeated inside the function so the closure is
    # self-contained when Spark ships it to executor nodes.
    # (The original also read '../why_yellow_taxi/Data/df_shuffle.pkl' into a
    # local `taxi` DataFrame that was never used -- a costly per-partition
    # read; that dead code has been removed, along with unused imports.)
    import pyproj
    import csv
    import shapely.geometry as geom
    import fiona
    import fiona.crs
    import rtree
    import geopandas as gpd

    # Buffered subway-entrance polygons (100 ft), reprojected to the NY
    # State Plane (EPSG:2263, feet) to match the projected taxi points.
    shapefile = '../why_yellow_taxi/Buffer/entr_buffer_100_feet_epsg4269_nad83/entr_buffer_100_feet_epsg4269_nad83.shp'
    entr_buf = gpd.read_file(shapefile)
    entr_buf = entr_buf.to_crs(fiona.crs.from_epsg(2263))

    # Collapse the Route_1..Route_11 columns into one list of line names per
    # entrance; numeric route labels are normalized (e.g. 1.0 -> '1').
    routes = ['Route_' + str(n) for n in range(1, 12)]
    entr2line = []
    for i in range(len(entr_buf)):
        lines = []
        for line in list(entr_buf.loc[:, routes].ix[i].dropna().values):
            try:
                line = str(int(line))
            except ValueError:
                pass
            lines.append(line)
        entr2line.append(lines)
    entr_buf['entr2line'] = entr2line

    # Spatial index over the buffer bounding boxes for fast point lookup.
    index = rtree.Rtree()
    for idx, geometry in enumerate(entr_buf.geometry):
        index.insert(idx, geometry.bounds)

    entr_lines = {}
    proj = pyproj.Proj(init='epsg:2263', preserve_units=True)
    if partID == 0:
        next(records)  # skip the CSV header, present only in partition 0
    reader = csv.reader(records)
    for row in reader:
        # BUG FIX: the original condition was `float(row[9]!=0)`, which
        # compares the *string* to 0 (always true) and then floats the
        # boolean, so zero dropoff longitudes were never filtered out.
        if float(row[5]) != 0 and float(row[9]) != 0:
            p = geom.Point(proj(float(row[5]), float(row[6])))   # pickup
            d = geom.Point(proj(float(row[9]), float(row[10])))  # dropoff
            p_potential = index.intersection((p.x, p.y, p.x, p.y))
            d_potential = index.intersection((d.x, d.y, d.x, d.y))
            # The first containing buffer wins (not necessarily the closest).
            p_match = None
            d_match = None
            p_lines = set()
            d_lines = set()
            for p_idx in p_potential:
                if entr_buf.geometry[p_idx].contains(p):
                    p_match = p_idx
                    p_lines = set(entr_buf.entr2line[p_idx])
                    break
            for d_idx in d_potential:
                if entr_buf.geometry[d_idx].contains(d):
                    d_match = d_idx
                    d_lines = set(entr_buf.entr2line[d_idx])
                    break
            # BUG FIX: the original tested `if (p_match and d_match)`, which
            # silently discarded valid matches at rtree index 0 (falsy).
            if p_match is not None and d_match is not None and p_match != d_match:
                dirct_lines = tuple(p_lines.intersection(d_lines))
                if dirct_lines:
                    entr_lines[dirct_lines] = entr_lines.get(dirct_lines, 0) + 1
    return entr_lines.items()
def mapper(record):
    """Fan a ((line1, line2, ...), count) pair out into (line, count) pairs."""
    keys, count = record
    for k in keys:
        yield (k, count)
if __name__ == '__main__':
    # Driver entry point: assumes a SparkContext `sc` already exists in the
    # environment (e.g. when run inside a pyspark shell) -- it is not
    # created in this file.
    taxi_csv = './df_shuffle.csv'
    rdd = sc.textFile(taxi_csv)
    # Count shared-line pickup/dropoff pairs within each partition, flatten
    # the per-partition dictionaries, then merge the counts across
    # partitions by key.
    counts = rdd.mapPartitionsWithIndex(countLine).flatMap(mapper).reduceByKey(lambda x,y: x+y).collect()
print counts | mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.